ofproto: Inline actions in struct rule_actions.
[sliver-openvswitch.git] / ofproto / ofproto-dpif-xlate.c
index 248382f..e2ac9ad 100644
@@ -770,8 +770,9 @@ xport_stp_listen_state(const struct xport *xport)
 static bool
 stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
 {
+    /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
     memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
-    return eth_addr_equals(flow->dl_dst, eth_addr_stp);
+    return is_stp(flow);
 }
 
 static void
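
For context, is_stp() (from lib/packets.h) classifies STP BPDUs by both the reserved multicast destination and the lack of an Ethertype, which is why the new comment mentions dl_type. A sketch of its definition (check the tree for the authoritative one):

    static inline bool
    is_stp(const struct flow *flow)
    {
        /* STP BPDUs are LLC frames (no Ethertype, so dl_type is
         * FLOW_DL_TYPE_NONE) sent to the reserved bridge group address
         * 01:80:c2:00:00:00 (eth_addr_stp). */
        return (flow->dl_type == htons(FLOW_DL_TYPE_NONE)
                && eth_addr_equals(flow->dl_dst, eth_addr_stp));
    }
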
@@ -1820,7 +1821,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
         xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
         return;
     } else if (check_stp) {
-        if (eth_addr_equals(ctx->base_flow.dl_dst, eth_addr_stp)) {
+        if (is_stp(&ctx->base_flow)) {
             if (!xport_stp_listen_state(xport)) {
                 xlate_report(ctx, "STP not in listening state, "
                              "skipping bpdu output");
@@ -1991,18 +1992,11 @@ static void
 xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule)
 {
     struct rule_dpif *old_rule = ctx->rule;
-    struct rule_actions *actions;
+    const struct rule_actions *actions;
 
     if (ctx->xin->resubmit_stats) {
         rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
     }
-    if (ctx->xin->xcache) {
-        struct xc_entry *entry;
-
-        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
-        entry->u.rule = rule;
-        rule_dpif_ref(rule);
-    }
 
     ctx->resubmits++;
     ctx->recurse++;
@@ -2057,7 +2051,8 @@ xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
                                               !skip_wildcards
                                               ? &ctx->xout->wc : NULL,
                                               honor_table_miss,
-                                              &ctx->table_id, &rule);
+                                              &ctx->table_id, &rule,
+                                              ctx->xin->xcache != NULL);
         ctx->xin->flow.in_port.ofp_port = old_in_port;
 
         if (ctx->xin->resubmit_hook) {
@@ -2090,12 +2085,22 @@ xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
         }
 
         choose_miss_rule(config, ctx->xbridge->miss_rule,
-                         ctx->xbridge->no_packet_in_rule, &rule);
+                         ctx->xbridge->no_packet_in_rule, &rule,
+                         ctx->xin->xcache != NULL);
 
 match:
         if (rule) {
+            /* Fill in the cache entry here instead of in xlate_recursively()
+             * to make the reference counting more explicit.  We took a
+             * reference in the lookups above if we are going to cache the
+             * rule. */
+            if (ctx->xin->xcache) {
+                struct xc_entry *entry;
+
+                entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
+                entry->u.rule = rule;
+            }
             xlate_recursively(ctx, rule);
-            rule_dpif_unref(rule);
         }
 
         ctx->table_id = old_table_id;
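
The reference-counting contract after this hunk: rule_dpif_lookup_from_table() and choose_miss_rule() take a reference on the returned rule only when the new final argument is true, i.e. only when the translation is building an xlate cache. That reference is handed straight to the XC_RULE entry and released when the cache is cleared, which is why the explicit rule_dpif_unref() after xlate_recursively() is gone. A hypothetical illustration of what the new boolean asks the lookup helpers to do (the helper name is invented; the real logic lives in ofproto/ofproto-dpif.c):

    /* Illustration only: take a reference on the looked-up rule iff the
     * caller intends to stash it in an xlate cache entry.  'take_ref' is
     * the argument name assumed from this patch's call sites. */
    static void
    maybe_take_rule_ref(struct rule_dpif *rule, bool take_ref)
    {
        if (rule && take_ref) {
            rule_dpif_ref(rule);
        }
    }
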
@@ -2653,8 +2658,10 @@ xlate_learn_action(struct xlate_ctx *ctx,
 
         entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
         entry->u.learn.ofproto = ctx->xin->ofproto;
+        /* Look up the learned rule, taking a reference on it.  The reference
+         * is released when this cache entry is deleted. */
         rule_dpif_lookup(ctx->xbridge->ofproto, &ctx->xin->flow, NULL,
-                         &entry->u.learn.rule);
+                         &entry->u.learn.rule, true);
     }
 }
 
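
rule_dpif_lookup() grows the same trailing boolean; here it is hard-coded to true because the looked-up rule is always stored in the XC_LEARN entry, whose reference xlate_cache_clear() releases later. A sketch of the assumed prototype, reconstructed from the call sites in this file (parameter names and const-ness are guesses; see ofproto/ofproto-dpif.h for the real declaration):

    /* Returns the table ID of the matching rule and stores the rule itself
     * in '*rule'; when 'take_ref' is true, a reference is taken on '*rule'
     * that the caller must eventually release. */
    uint8_t rule_dpif_lookup(struct ofproto_dpif *ofproto, struct flow *flow,
                             struct flow_wildcards *wc,
                             struct rule_dpif **rule, bool take_ref);
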
@@ -2678,10 +2685,11 @@ xlate_fin_timeout(struct xlate_ctx *ctx,
             struct xc_entry *entry;
 
             entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
+            /* XC_RULE already holds a reference on the rule, so none is
+             * taken here. */
             entry->u.fin.rule = ctx->rule;
             entry->u.fin.idle = oft->fin_idle_timeout;
             entry->u.fin.hard = oft->fin_hard_timeout;
-            rule_dpif_ref(ctx->rule);
         }
     }
 }
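
The ownership split among cache entry types, as this patch leaves it: XC_RULE and XC_LEARN own a reference on their rule, while XC_FIN_TIMEOUT only borrows the rule already held by the XC_RULE entry. A partial, assumed sketch of the cache entry (field names follow their use in this file; the real definition has more union members):

    enum xc_type { XC_RULE, XC_LEARN, XC_FIN_TIMEOUT /* , ... */ };

    struct xc_entry {
        enum xc_type type;
        union {
            struct rule_dpif *rule;         /* XC_RULE: owns a reference. */
            struct {
                struct ofproto_dpif *ofproto;
                struct rule_dpif *rule;     /* XC_LEARN: owns a reference. */
            } learn;
            struct {
                struct rule_dpif *rule;     /* XC_FIN_TIMEOUT: borrowed. */
                uint16_t idle;
                uint16_t hard;
            } fin;
        } u;
    };
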
@@ -2717,7 +2725,7 @@ xlate_sample_action(struct xlate_ctx *ctx,
 static bool
 may_receive(const struct xport *xport, struct xlate_ctx *ctx)
 {
-    if (xport->config & (eth_addr_equals(ctx->xin->flow.dl_dst, eth_addr_stp)
+    if (xport->config & (is_stp(&ctx->xin->flow)
                          ? OFPUTIL_PC_NO_RECV_STP
                          : OFPUTIL_PC_NO_RECV)) {
         return false;
@@ -3183,7 +3191,7 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
     struct flow *flow = &xin->flow;
     struct rule_dpif *rule = NULL;
 
-    struct rule_actions *actions = NULL;
+    const struct rule_actions *actions = NULL;
     enum slow_path_reason special;
     const struct ofpact *ofpacts;
     struct xport *in_port;
@@ -3230,7 +3238,7 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
 
     ctx.xbridge = xbridge_lookup(xin->ofproto);
     if (!ctx.xbridge) {
-        goto out;
+        return;
     }
 
     ctx.rule = xin->rule;
@@ -3263,7 +3271,7 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
     if (!xin->ofpacts && !ctx.rule) {
         ctx.table_id = rule_dpif_lookup(ctx.xbridge->ofproto, flow,
                                         !xin->skip_wildcards ? wc : NULL,
-                                        &rule);
+                                        &rule, ctx.xin->xcache != NULL);
         if (ctx.xin->resubmit_stats) {
             rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
         }
@@ -3271,7 +3279,6 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
             struct xc_entry *entry;
 
             entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
-            rule_dpif_ref(rule);
             entry->u.rule = rule;
         }
         ctx.rule = rule;
@@ -3309,7 +3316,7 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
             break;
 
         case OFPC_FRAG_DROP:
-            goto out;
+            return;
 
         case OFPC_FRAG_REASM:
             OVS_NOT_REACHED();
@@ -3456,9 +3463,6 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
         wc->masks.tp_src &= htons(UINT8_MAX);
         wc->masks.tp_dst &= htons(UINT8_MAX);
     }
-
-out:
-    rule_dpif_unref(rule);
 }
 
 /* Sends 'packet' out 'ofport'.
@@ -3580,9 +3584,9 @@ xlate_push_stats(struct xlate_cache *xcache, bool may_learn,
                 struct rule_dpif *rule = entry->u.learn.rule;
 
                 /* Reset the modified time for a rule that is equivalent to
-                 * the currently cached rule. If the rule is not the exact
-                 * rule wehave cached, update the reference that we have. */
-                    entry->u.learn.rule = ofproto_dpif_refresh_rule(rule);
+                 * the currently cached rule.  If the rule is not the exact
+                 * rule we have cached, update the reference that we have. */
+                entry->u.learn.rule = ofproto_dpif_refresh_rule(rule);
             }
             break;
         case XC_NORMAL:
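
ofproto_dpif_refresh_rule() is what keeps the cached learned rule in step with the flow table. Its assumed contract, inferred from the comment above and from the way its return value replaces entry->u.learn.rule (the real implementation is in ofproto/ofproto-dpif.c):

    /* Assumed contract, for illustration: if 'rule' is still the rule
     * installed for its match, reset its modified time and return it
     * unchanged; otherwise release the caller's reference on 'rule' and
     * return a referenced pointer to the rule that replaced it, or NULL if
     * there is none. */
    struct rule_dpif *ofproto_dpif_refresh_rule(struct rule_dpif *rule);
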
@@ -3651,13 +3655,15 @@ xlate_cache_clear(struct xlate_cache *xcache)
             mbridge_unref(entry->u.mirror.mbridge);
             break;
         case XC_LEARN:
+            /* 'u.learn.rule' holds a reference on the learned rule. */
             rule_dpif_unref(entry->u.learn.rule);
             break;
         case XC_NORMAL:
             free(entry->u.normal.flow);
             break;
         case XC_FIN_TIMEOUT:
-            rule_dpif_unref(entry->u.fin.rule);
+            /* 'u.fin.rule' is always also held as an XC_RULE, which has
+             * already released its reference above. */
             break;
         default:
             OVS_NOT_REACHED();
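
The XC_FIN_TIMEOUT comment refers to the XC_RULE case earlier in this switch, outside the hunk; presumably that is where the lookup-time reference is finally dropped, roughly:

    case XC_RULE:
        /* Release the reference taken by the lookup when the entry was
         * added.  XC_FIN_TIMEOUT borrows this same rule, so it has nothing
         * of its own to release. */
        rule_dpif_unref(entry->u.rule);
        break;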