uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
bool exit; /* No further actions should be processed. */
+ bool use_recirc; /* Should generate recirc? */
+ struct xlate_recirc recirc; /* Information used for generating
+                             * recirculation actions. */
+
/* OpenFlow 1.1+ action set.
*
* 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
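For orientation, the recirculation metadata that now lives directly in the translation context is consumed by the bond output path below. A rough sketch of its assumed shape (field names taken from the 'xr->' uses further down; the types and the real definition are not part of this patch):

    struct xlate_recirc {
        uint32_t recirc_id;    /* Datapath recirculation id. */
        uint8_t  hash_alg;     /* OVS_HASH_ALG_* to apply before recirc. */
        uint32_t hash_basis;   /* Basis for the datapath hash action. */
    };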
static bool
stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
{
+ /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
- return eth_addr_equals(flow->dl_dst, eth_addr_stp);
+ return is_stp(flow);
}
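For context, is_stp() is assumed to be roughly the following helper; it folds the dl_type check mentioned in the comment above into the old dl_dst comparison (a sketch, not a verbatim copy of lib/packets.h):

    static inline bool
    is_stp(const struct flow *flow)
    {
        /* STP BPDUs are LLC frames (no Ethertype) addressed to the STP
         * multicast address. */
        return (flow->dl_type == htons(FLOW_DL_TYPE_NONE)
                && eth_addr_equals(flow->dl_dst, eth_addr_stp));
    }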
static void
return xport->xbundle;
}
- /* Special-case OFPP_NONE, which a controller may use as the ingress
- * port for traffic that it is sourcing. */
- if (in_port == OFPP_NONE) {
+ /* Special-case OFPP_NONE (OF1.0) and OFPP_CONTROLLER (OF1.1+),
+ * which a controller may use as the ingress port for traffic that
+ * it is sourcing. */
+ if (in_port == OFPP_CONTROLLER || in_port == OFPP_NONE) {
return &ofpp_none_bundle;
}
/* Partially configured bundle with no slaves. Drop the packet. */
return;
} else if (!out_xbundle->bond) {
- ctx->xout->use_recirc = false;
+ ctx->use_recirc = false;
xport = CONTAINER_OF(list_front(&out_xbundle->xports), struct xport,
bundle_node);
} else {
struct ofport_dpif *ofport;
- struct xlate_recirc *xr = &ctx->xout->recirc;
+ struct xlate_recirc *xr = &ctx->recirc;
struct flow_wildcards *wc = &ctx->xout->wc;
if (ctx->xbridge->enable_recirc) {
- ctx->xout->use_recirc = bond_may_recirc(
+ ctx->use_recirc = bond_may_recirc(
out_xbundle->bond, &xr->recirc_id, &xr->hash_basis);
- if (ctx->xout->use_recirc) {
+ if (ctx->use_recirc) {
/* Only TCP mode uses recirculation. */
xr->hash_alg = OVS_HASH_ALG_L4;
bond_update_post_recirc_rules(out_xbundle->bond, false);
- /* If ctx->xout->use_recirc is set, the main thread will handle stats
+ /* If ctx->use_recirc is set, the main thread will handle stats
* accounting for this bond. */
- if (!ctx->xout->use_recirc) {
+ if (!ctx->use_recirc) {
if (ctx->xin->resubmit_stats) {
bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
ctx->xin->resubmit_stats->n_bytes);
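When ctx->use_recirc has been set above, the bond member is not chosen during this translation; instead the output path emits a datapath hash action followed by a recirculation action (see the truncated hunk further below). Roughly, assuming the 'xr' pointer and odp_actions buffer from the surrounding code:

    struct ovs_action_hash *act_hash;

    /* Hash the packet with the bond's basis... */
    act_hash = nl_msg_put_unspec_uninit(&ctx->xout->odp_actions,
                                        OVS_ACTION_ATTR_HASH,
                                        sizeof *act_hash);
    act_hash->hash_alg = xr->hash_alg;      /* OVS_HASH_ALG_L4 for bonds. */
    act_hash->hash_basis = xr->hash_basis;

    /* ...then recirculate so that the rules installed by
     * bond_update_post_recirc_rules() can pick the member port. */
    nl_msg_put_u32(&ctx->xout->odp_actions,
                   OVS_ACTION_ATTR_RECIRC, xr->recirc_id);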
bfd_process_packet(xport->bfd, flow, packet);
/* If POLL received, immediately sends FINAL back. */
if (bfd_should_send_packet(xport->bfd)) {
- if (xport->peer) {
- ofproto_dpif_monitor_port_send_soon(xport->ofport);
- } else {
- ofproto_dpif_monitor_port_send_soon_safe(xport->ofport);
- }
+ ofproto_dpif_monitor_port_send_soon(xport->ofport);
}
}
return SLOW_BFD;
xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
return;
} else if (check_stp) {
- if (eth_addr_equals(ctx->base_flow.dl_dst, eth_addr_stp)) {
+ if (is_stp(&ctx->base_flow)) {
if (!xport_stp_listen_state(xport)) {
xlate_report(ctx, "STP not in listening state, "
"skipping bpdu output");
&ctx->xout->odp_actions,
&ctx->xout->wc);
- if (ctx->xout->use_recirc) {
+ if (ctx->use_recirc) {
struct ovs_action_hash *act_hash;
- struct xlate_recirc *xr = &ctx->xout->recirc;
+ struct xlate_recirc *xr = &ctx->recirc;
/* Hash action. */
act_hash = nl_msg_put_unspec_uninit(&ctx->xout->odp_actions,
xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule)
{
struct rule_dpif *old_rule = ctx->rule;
- struct rule_actions *actions;
+ const struct rule_actions *actions;
if (ctx->xin->resubmit_stats) {
rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
}
- if (ctx->xin->xcache) {
- struct xc_entry *entry;
-
- entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
- entry->u.rule = rule;
- rule_dpif_ref(rule);
- }
ctx->resubmits++;
ctx->recurse++;
!skip_wildcards
? &ctx->xout->wc : NULL,
honor_table_miss,
- &ctx->table_id, &rule);
+ &ctx->table_id, &rule,
+ ctx->xin->xcache != NULL);
ctx->xin->flow.in_port.ofp_port = old_in_port;
if (ctx->xin->resubmit_hook) {
}
choose_miss_rule(config, ctx->xbridge->miss_rule,
- ctx->xbridge->no_packet_in_rule, &rule);
+ ctx->xbridge->no_packet_in_rule, &rule,
+ ctx->xin->xcache != NULL);
match:
if (rule) {
+ /* Fill in the cache entry here instead of in xlate_recursively()
+ * to make the reference counting more explicit. We take a
+ * reference in the lookups above if we are going to cache the
+ * rule. */
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
+ entry->u.rule = rule;
+ }
xlate_recursively(ctx, rule);
- rule_dpif_unref(rule);
}
ctx->table_id = old_table_id;
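The reference taken by the lookup (only when 'ctx->xin->xcache != NULL') is now owned by the XC_RULE cache entry instead of being dropped here, so the matching release presumably happens when the cache is torn down, along the lines of the XC_LEARN/XC_FIN_TIMEOUT cases quoted at the end of this patch:

    case XC_RULE:
        /* Drop the reference taken at lookup time on behalf of this
         * cache entry. */
        rule_dpif_unref(entry->u.rule);
        break;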
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
entry->u.learn.ofproto = ctx->xin->ofproto;
+ /* Look up the learned rule, taking a reference on it. The reference
+ * is released when this cache entry is deleted. */
rule_dpif_lookup(ctx->xbridge->ofproto, &ctx->xin->flow, NULL,
- &entry->u.learn.rule);
+ &entry->u.learn.rule, true);
}
}
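The new trailing boolean passed to the lookups in this patch presumably tells the lookup whether to take a reference on the returned rule. A sketch of the assumed prototype; the return value (the table in which the rule was found) is ignored here but assigned to ctx.table_id in xlate_actions below:

    uint8_t rule_dpif_lookup(struct ofproto_dpif *ofproto,
                             const struct flow *flow,
                             struct flow_wildcards *wc,
                             struct rule_dpif **rule,
                             bool take_ref);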
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
+ /* XC_RULE already holds a reference on the rule, so none is taken
+ * here. */
entry->u.fin.rule = ctx->rule;
entry->u.fin.idle = oft->fin_idle_timeout;
entry->u.fin.hard = oft->fin_hard_timeout;
- rule_dpif_ref(ctx->rule);
}
}
}
static bool
may_receive(const struct xport *xport, struct xlate_ctx *ctx)
{
- if (xport->config & (eth_addr_equals(ctx->xin->flow.dl_dst, eth_addr_stp)
+ if (xport->config & (is_stp(&ctx->xin->flow)
? OFPUTIL_PC_NO_RECV_STP
: OFPUTIL_PC_NO_RECV)) {
return false;
struct flow *flow = &xin->flow;
struct rule_dpif *rule = NULL;
- struct rule_actions *actions = NULL;
+ const struct rule_actions *actions = NULL;
enum slow_path_reason special;
const struct ofpact *ofpacts;
struct xport *in_port;
ctx.xbridge = xbridge_lookup(xin->ofproto);
if (!ctx.xbridge) {
- goto out;
+ return;
}
ctx.rule = xin->rule;
ctx.orig_skb_priority = flow->skb_priority;
ctx.table_id = 0;
ctx.exit = false;
+ ctx.use_recirc = false;
if (!xin->ofpacts && !ctx.rule) {
ctx.table_id = rule_dpif_lookup(ctx.xbridge->ofproto, flow,
!xin->skip_wildcards ? wc : NULL,
- &rule);
+ &rule, ctx.xin->xcache != NULL);
if (ctx.xin->resubmit_stats) {
rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
}
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
- rule_dpif_ref(rule);
entry->u.rule = rule;
}
ctx.rule = rule;
}
xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);
- xout->use_recirc = false;
if (xin->ofpacts) {
ofpacts = xin->ofpacts;
break;
case OFPC_FRAG_DROP:
- goto out;
+ return;
case OFPC_FRAG_REASM:
OVS_NOT_REACHED();
wc->masks.tp_src &= htons(UINT8_MAX);
wc->masks.tp_dst &= htons(UINT8_MAX);
}
-
-out:
- rule_dpif_unref(rule);
}
/* Sends 'packet' out 'ofport'.
struct rule_dpif *rule = entry->u.learn.rule;
/* Reset the modified time for a rule that is equivalent to
- * the currently cached rule. If the rule is not the exact
- * rule wehave cached, update the reference that we have. */
- entry->u.learn.rule = ofproto_dpif_refresh_rule(rule);
+ * the currently cached rule. If the rule is not the exact
+ * rule we have cached, update the reference that we have. */
+ entry->u.learn.rule = ofproto_dpif_refresh_rule(rule);
}
break;
case XC_NORMAL:
mbridge_unref(entry->u.mirror.mbridge);
break;
case XC_LEARN:
+ /* Release the reference taken when 'u.learn.rule' was looked up. */
rule_dpif_unref(entry->u.learn.rule);
break;
case XC_NORMAL:
free(entry->u.normal.flow);
break;
case XC_FIN_TIMEOUT:
- rule_dpif_unref(entry->u.fin.rule);
+ /* 'u.fin.rule' is always also held as an XC_RULE, which has
+ * already released its reference above. */
break;
default:
OVS_NOT_REACHED();