X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=ofproto%2Fofproto-dpif.c;h=8b65becfc28d2af13c57ee45830c5fbfe9ef806f;hb=299016266ed13376a7d671f66d4e0181b41098e3;hp=42138b9913d30cf74d643fdfdcf64258765fb9e0;hpb=015e08bcc1e211e3326d21b8c4178b0152252d29;p=sliver-openvswitch.git diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c index 42138b991..8b65becfc 100644 --- a/ofproto/ofproto-dpif.c +++ b/ofproto/ofproto-dpif.c @@ -22,6 +22,7 @@ #include "autopath.h" #include "bond.h" +#include "bundle.h" #include "byte-order.h" #include "connmgr.h" #include "coverage.h" @@ -59,7 +60,7 @@ COVERAGE_DEFINE(facet_invalidated); COVERAGE_DEFINE(facet_revalidate); COVERAGE_DEFINE(facet_unexpected); -/* Maximum depth of flow table recursion (due to NXAST_RESUBMIT actions) in a +/* Maximum depth of flow table recursion (due to resubmit actions) in a * flow translation. */ #define MAX_RESUBMIT_RECURSION 16 @@ -95,8 +96,8 @@ static struct rule_dpif *rule_dpif_cast(const struct rule *rule) return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL; } -static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *ofproto, - const struct flow *flow); +static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *, + const struct flow *, uint8_t table); #define MAX_MIRRORS 32 typedef uint32_t mirror_mask_t; @@ -187,6 +188,7 @@ struct action_xlate_ctx { uint32_t priority; /* Current flow priority. 0 if none. */ struct flow base_flow; /* Flow at the last commit. */ uint32_t base_priority; /* Priority at the last commit. */ + uint8_t table_id; /* OpenFlow table ID where flow was found. */ }; static void action_xlate_ctx_init(struct action_xlate_ctx *, @@ -332,6 +334,8 @@ struct ofproto_dpif { /* Support for debugging async flow mods. */ struct list completions; + + bool has_bundle_action; /* True when the first bundle action appears. */ }; /* Defer flow mod completion until "ovs-appctl ofproto/unclog"? 
(Useful only @@ -416,7 +420,7 @@ dealloc(struct ofproto *ofproto_) } static int -construct(struct ofproto *ofproto_) +construct(struct ofproto *ofproto_, int *n_tablesp) { struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); const char *name = ofproto->up.name; @@ -461,12 +465,11 @@ construct(struct ofproto *ofproto_) list_init(&ofproto->completions); - ofproto->up.tables = xmalloc(sizeof *ofproto->up.tables); - classifier_init(&ofproto->up.tables[0]); - ofproto->up.n_tables = 1; - ofproto_dpif_unixctl_init(); + ofproto->has_bundle_action = false; + + *n_tablesp = 255; return 0; } @@ -487,14 +490,18 @@ destruct(struct ofproto *ofproto_) { struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); struct rule_dpif *rule, *next_rule; - struct cls_cursor cursor; + struct classifier *table; int i; complete_operations(ofproto); - cls_cursor_init(&cursor, &ofproto->up.tables[0], NULL); - CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) { - ofproto_rule_destroy(&rule->up); + OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) { + struct cls_cursor cursor; + + cls_cursor_init(&cursor, table, NULL); + CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) { + ofproto_rule_destroy(&rule->up); + } } for (i = 0; i < MAX_MIRRORS; i++) { @@ -709,6 +716,7 @@ port_construct(struct ofport *port_) port->bundle = NULL; port->cfm = NULL; port->tag = tag_create_random(); + port->may_enable = true; if (ofproto->sflow) { dpif_sflow_add_port(ofproto->sflow, port->odp_port, @@ -1261,6 +1269,7 @@ mirror_set(struct ofproto *ofproto_, void *aux, mirror = ofproto->mirrors[idx] = xzalloc(sizeof *mirror); mirror->ofproto = ofproto; mirror->idx = idx; + mirror->aux = aux; mirror->out_vlan = -1; mirror->name = NULL; } @@ -1436,6 +1445,14 @@ port_run(struct ofport_dpif *ofport) enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport); } + if (ofport->may_enable != enable) { + struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto); + + if (ofproto->has_bundle_action) { + ofproto->need_revalidate = true; + } + } + ofport->may_enable = enable; } @@ -1638,7 +1655,7 @@ handle_miss_upcall(struct ofproto_dpif *ofproto, struct dpif_upcall *upcall) facet = facet_lookup_valid(ofproto, &flow); if (!facet) { - struct rule_dpif *rule = rule_dpif_lookup(ofproto, &flow); + struct rule_dpif *rule = rule_dpif_lookup(ofproto, &flow, 0); if (!rule) { /* Don't send a packet-in if OFPPC_NO_PACKET_IN asserted. */ struct ofport_dpif *port = get_ofp_port(ofproto, flow.in_port); @@ -1732,7 +1749,7 @@ static int expire(struct ofproto_dpif *ofproto) { struct rule_dpif *rule, *next_rule; - struct cls_cursor cursor; + struct classifier *table; int dp_max_idle; /* Update stats for each flow in the datapath. */ @@ -1743,9 +1760,13 @@ expire(struct ofproto_dpif *ofproto) expire_facets(ofproto, dp_max_idle); /* Expire OpenFlow flows whose idle_timeout or hard_timeout has passed. */ - cls_cursor_init(&cursor, &ofproto->up.tables[0], NULL); - CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) { - rule_expire(rule); + OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) { + struct cls_cursor cursor; + + cls_cursor_init(&cursor, table, NULL); + CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) { + rule_expire(rule); + } } /* All outstanding data in existing flows has been accounted, so it's a @@ -1856,11 +1877,12 @@ facet_max_idle(const struct ofproto_dpif *ofproto) * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each. Each facet * that is installed in the kernel gets dropped in the appropriate bucket. 
* After the histogram has been built, we compute the cutoff so that only - * the most-recently-used 1% of facets (but at least 1000 flows) are kept - * cached. At least the most-recently-used bucket of facets is kept, so - * actually an arbitrary number of facets can be kept in any given - * expiration run (though the next run will delete most of those unless - * they receive additional data). + * the most-recently-used 1% of facets (but at least + * ofproto->up.flow_eviction_threshold flows) are kept cached. At least + * the most-recently-used bucket of facets is kept, so actually an + * arbitrary number of facets can be kept in any given expiration run + * (though the next run will delete most of those unless they receive + * additional data). * * This requires a second pass through the facets, in addition to the pass * made by update_stats(), because the former function never looks @@ -1875,7 +1897,7 @@ facet_max_idle(const struct ofproto_dpif *ofproto) int i; total = hmap_count(&ofproto->facets); - if (total <= 1000) { + if (total <= ofproto->up.flow_eviction_threshold) { return N_BUCKETS * BUCKET_WIDTH; } @@ -1893,7 +1915,8 @@ facet_max_idle(const struct ofproto_dpif *ofproto) subtotal = bucket = 0; do { subtotal += buckets[bucket++]; - } while (bucket < N_BUCKETS && subtotal < MAX(1000, total / 100)); + } while (bucket < N_BUCKETS && + subtotal < MAX(ofproto->up.flow_eviction_threshold, total / 100)); if (VLOG_IS_DBG_ENABLED()) { struct ds s; @@ -2032,7 +2055,7 @@ execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow, struct ofpbuf *packet) { if (actions_len == NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t)) - && odp_actions->nla_type == ODP_ACTION_ATTR_CONTROLLER) { + && odp_actions->nla_type == ODP_ACTION_ATTR_USERSPACE) { /* As an optimization, avoid a round-trip from userspace to kernel to * userspace. This also avoids possibly filling up kernel packet * buffers along the way. */ @@ -2412,7 +2435,7 @@ facet_revalidate(struct ofproto_dpif *ofproto, struct facet *facet) COVERAGE_INC(facet_revalidate); /* Determine the new rule. */ - new_rule = rule_dpif_lookup(ofproto, &facet->flow); + new_rule = rule_dpif_lookup(ofproto, &facet->flow, 0); if (!new_rule) { /* No new rule, so delete the facet. */ facet_remove(ofproto, facet); @@ -2570,10 +2593,11 @@ flow_push_stats(const struct rule_dpif *rule, /* Rules. */ static struct rule_dpif * -rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow) +rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow, + uint8_t table_id) { return rule_dpif_cast(rule_from_cls_rule( - classifier_lookup(&ofproto->up.tables[0], + classifier_lookup(&ofproto->up.tables[table_id], flow))); } @@ -2695,7 +2719,7 @@ rule_execute(struct rule *rule_, struct flow *flow, struct ofpbuf *packet) /* Otherwise, if 'rule' is in fact the correct rule for 'packet', then * create a new facet for it and use that. 
*/ - if (rule_dpif_lookup(ofproto, flow) == rule) { + if (rule_dpif_lookup(ofproto, flow, 0) == rule) { facet = facet_create(rule, flow, packet); facet_execute(ofproto, facet, packet); facet_install(ofproto, facet, true); @@ -2770,7 +2794,7 @@ send_packet(struct ofproto_dpif *ofproto, uint32_t odp_port, static void do_xlate_actions(const union ofp_action *in, size_t n_in, struct action_xlate_ctx *ctx); -static bool xlate_normal(struct action_xlate_ctx *); +static void xlate_normal(struct action_xlate_ctx *); static void commit_odp_actions(struct action_xlate_ctx *ctx) @@ -2794,6 +2818,11 @@ commit_odp_actions(struct action_xlate_ctx *ctx) base->nw_dst = flow->nw_dst; } + if (base->nw_tos != flow->nw_tos) { + nl_msg_put_u8(odp_actions, ODP_ACTION_ATTR_SET_NW_TOS, flow->nw_tos); + base->nw_tos = flow->nw_tos; + } + if (base->vlan_tci != flow->vlan_tci) { if (!(flow->vlan_tci & htons(VLAN_CFI))) { nl_msg_put_flag(odp_actions, ODP_ACTION_ATTR_STRIP_VLAN); @@ -2862,18 +2891,23 @@ add_output_action(struct action_xlate_ctx *ctx, uint16_t ofp_port) } static void -xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port) +xlate_table_action(struct action_xlate_ctx *ctx, + uint16_t in_port, uint8_t table_id) { if (ctx->recurse < MAX_RESUBMIT_RECURSION) { struct rule_dpif *rule; uint16_t old_in_port; + uint8_t old_table_id; + + old_table_id = ctx->table_id; + ctx->table_id = table_id; /* Look up a flow with 'in_port' as the input port. Then restore the * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will * have surprising behavior). */ old_in_port = ctx->flow.in_port; ctx->flow.in_port = in_port; - rule = rule_dpif_lookup(ctx->ofproto, &ctx->flow); + rule = rule_dpif_lookup(ctx->ofproto, &ctx->flow, table_id); ctx->flow.in_port = old_in_port; if (ctx->resubmit_hook) { @@ -2885,14 +2919,31 @@ xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port) do_xlate_actions(rule->up.actions, rule->up.n_actions, ctx); ctx->recurse--; } + + ctx->table_id = old_table_id; } else { static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1); - VLOG_ERR_RL(&recurse_rl, "NXAST_RESUBMIT recursed over %d times", + VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times", MAX_RESUBMIT_RECURSION); } } +static void +xlate_resubmit_table(struct action_xlate_ctx *ctx, + const struct nx_action_resubmit *nar) +{ + uint16_t in_port; + uint8_t table_id; + + in_port = (nar->in_port == htons(OFPP_IN_PORT) + ? ctx->flow.in_port + : ntohs(nar->in_port)); + table_id = nar->table == 255 ? 
ctx->table_id : nar->table; + + xlate_table_action(ctx, in_port, table_id); +} + static void flood_packets(struct action_xlate_ctx *ctx, ovs_be32 mask) { @@ -2923,7 +2974,7 @@ xlate_output_action__(struct action_xlate_ctx *ctx, add_output_action(ctx, ctx->flow.in_port); break; case OFPP_TABLE: - xlate_table_action(ctx, ctx->flow.in_port); + xlate_table_action(ctx, ctx->flow.in_port, ctx->table_id); break; case OFPP_NORMAL: xlate_normal(ctx); @@ -2936,7 +2987,7 @@ xlate_output_action__(struct action_xlate_ctx *ctx, break; case OFPP_CONTROLLER: commit_odp_actions(ctx); - nl_msg_put_u64(ctx->odp_actions, ODP_ACTION_ATTR_CONTROLLER, max_len); + nl_msg_put_u64(ctx->odp_actions, ODP_ACTION_ATTR_USERSPACE, max_len); break; case OFPP_LOCAL: add_output_action(ctx, OFPP_LOCAL); @@ -3047,6 +3098,28 @@ xlate_autopath(struct action_xlate_ctx *ctx, autopath_execute(naa, &ctx->flow, ofp_port); } +static bool +slave_enabled_cb(uint16_t ofp_port, void *ofproto_) +{ + struct ofproto_dpif *ofproto = ofproto_; + struct ofport_dpif *port; + + switch (ofp_port) { + case OFPP_IN_PORT: + case OFPP_TABLE: + case OFPP_NORMAL: + case OFPP_FLOOD: + case OFPP_ALL: + case OFPP_LOCAL: + return true; + case OFPP_CONTROLLER: /* Not supported by the bundle action. */ + return false; + default: + port = get_ofp_port(ofproto, ofp_port); + return port ? port->may_enable : false; + } +} + static void do_xlate_actions(const union ofp_action *in, size_t n_in, struct action_xlate_ctx *ctx) @@ -3072,6 +3145,7 @@ do_xlate_actions(const union ofp_action *in, size_t n_in, const struct nx_action_set_queue *nasq; const struct nx_action_multipath *nam; const struct nx_action_autopath *naa; + const struct nx_action_bundle *nab; enum ofputil_action_code code; ovs_be64 tun_id; @@ -3115,7 +3189,7 @@ do_xlate_actions(const union ofp_action *in, size_t n_in, break; case OFPUTIL_OFPAT_SET_NW_TOS: - ctx->flow.nw_tos = ia->nw_tos.nw_tos; + ctx->flow.nw_tos = ia->nw_tos.nw_tos & IP_DSCP_MASK; break; case OFPUTIL_OFPAT_SET_TP_SRC: @@ -3132,7 +3206,11 @@ do_xlate_actions(const union ofp_action *in, size_t n_in, case OFPUTIL_NXAST_RESUBMIT: nar = (const struct nx_action_resubmit *) ia; - xlate_table_action(ctx, ntohs(nar->in_port)); + xlate_table_action(ctx, ntohs(nar->in_port), ctx->table_id); + break; + + case OFPUTIL_NXAST_RESUBMIT_TABLE: + xlate_resubmit_table(ctx, (const struct nx_action_resubmit *) ia); break; case OFPUTIL_NXAST_SET_TUNNEL: @@ -3178,6 +3256,21 @@ do_xlate_actions(const union ofp_action *in, size_t n_in, naa = (const struct nx_action_autopath *) ia; xlate_autopath(ctx, naa); break; + + case OFPUTIL_NXAST_BUNDLE: + ctx->ofproto->has_bundle_action = true; + nab = (const struct nx_action_bundle *) ia; + xlate_output_action__(ctx, bundle_execute(nab, &ctx->flow, + slave_enabled_cb, + ctx->ofproto), 0); + break; + + case OFPUTIL_NXAST_BUNDLE_LOAD: + ctx->ofproto->has_bundle_action = true; + nab = (const struct nx_action_bundle *) ia; + bundle_execute_load(nab, &ctx->flow, slave_enabled_cb, + ctx->ofproto); + break; } } } @@ -3207,6 +3300,7 @@ xlate_actions(struct action_xlate_ctx *ctx, ctx->priority = 0; ctx->base_priority = 0; ctx->base_flow = ctx->flow; + ctx->table_id = 0; if (process_special(ctx->ofproto, &ctx->flow, ctx->packet)) { ctx->may_set_up_flow = false; @@ -3319,7 +3413,8 @@ dst_is_duplicate(const struct dst_set *set, const struct dst *test) static bool ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan) { - return bundle->vlan < 0 && vlan_bitmap_contains(bundle->trunks, vlan); + return (bundle->vlan < 0 
+ && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan))); } static bool @@ -3365,7 +3460,7 @@ compose_dsts(struct action_xlate_ctx *ctx, uint16_t vlan, static bool vlan_is_mirrored(const struct ofmirror *m, int vlan) { - return vlan_bitmap_contains(m->vlans, vlan); + return !m->vlans || bitmap_is_set(m->vlans, vlan); } /* Returns true if a packet with Ethernet destination MAC 'dst' may be mirrored @@ -3715,10 +3810,7 @@ is_admissible(struct ofproto_dpif *ofproto, const struct flow *flow, return true; } -/* If the composed actions may be applied to any packet in the given 'flow', - * returns true. Otherwise, the actions should only be applied to 'packet', or - * not at all, if 'packet' was NULL. */ -static bool +static void xlate_normal(struct action_xlate_ctx *ctx) { struct ofbundle *in_bundle; @@ -3749,7 +3841,8 @@ xlate_normal(struct action_xlate_ctx *ctx) * of time where we could learn from a packet reflected on a bond and * blackhole packets before the learning table is updated to reflect * the correct port. */ - return false; + ctx->may_set_up_flow = false; + return; } else { out_bundle = OFBUNDLE_FLOOD; } @@ -3763,8 +3856,6 @@ done: if (in_bundle) { compose_actions(ctx, vlan, in_bundle, out_bundle); } - - return true; } static bool @@ -3863,7 +3954,8 @@ struct ofproto_trace { }; static void -trace_format_rule(struct ds *result, int level, const struct rule *rule) +trace_format_rule(struct ds *result, uint8_t table_id, int level, + const struct rule_dpif *rule) { ds_put_char_multiple(result, '\t', level); if (!rule) { @@ -3871,14 +3963,14 @@ trace_format_rule(struct ds *result, int level, const struct rule *rule) return; } - ds_put_format(result, "Rule: cookie=%#"PRIx64" ", - ntohll(rule->flow_cookie)); - cls_rule_format(&rule->cr, result); + ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ", + table_id, ntohll(rule->up.flow_cookie)); + cls_rule_format(&rule->up.cr, result); ds_put_char(result, '\n'); ds_put_char_multiple(result, '\t', level); ds_put_cstr(result, "OpenFlow "); - ofp_print_actions(result, rule->actions, rule->n_actions); + ofp_print_actions(result, rule->up.actions, rule->up.n_actions); ds_put_char(result, '\n'); } @@ -3905,33 +3997,78 @@ trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule) ds_put_char(result, '\n'); trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace); - trace_format_rule(result, ctx->recurse + 1, &rule->up); + trace_format_rule(result, ctx->table_id, ctx->recurse + 1, rule); } static void ofproto_unixctl_trace(struct unixctl_conn *conn, const char *args_, void *aux OVS_UNUSED) { - char *dpname, *in_port_s, *tun_id_s, *packet_s; + char *dpname, *arg1, *arg2, *arg3; char *args = xstrdup(args_); char *save_ptr = NULL; struct ofproto_dpif *ofproto; - struct ofpbuf packet; + struct ofpbuf odp_key; + struct ofpbuf *packet; struct rule_dpif *rule; struct ds result; struct flow flow; - uint16_t in_port; - ovs_be64 tun_id; char *s; - ofpbuf_init(&packet, strlen(args) / 2); + packet = NULL; + ofpbuf_init(&odp_key, 0); ds_init(&result); dpname = strtok_r(args, " ", &save_ptr); - tun_id_s = strtok_r(NULL, " ", &save_ptr); - in_port_s = strtok_r(NULL, " ", &save_ptr); - packet_s = strtok_r(NULL, "", &save_ptr); /* Get entire rest of line. */ - if (!dpname || !in_port_s || !packet_s) { + arg1 = strtok_r(NULL, " ", &save_ptr); + arg2 = strtok_r(NULL, " ", &save_ptr); + arg3 = strtok_r(NULL, "", &save_ptr); /* Get entire rest of line. 
*/ + if (dpname && arg1 && !arg2 && !arg3) { + /* ofproto/trace dpname flow */ + int error; + + /* Convert string to ODP key. */ + ofpbuf_init(&odp_key, 0); + error = odp_flow_key_from_string(arg1, &odp_key); + if (error) { + unixctl_command_reply(conn, 501, "Bad flow syntax"); + goto exit; + } + + /* Convert odp_key to flow. */ + error = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow); + if (error) { + unixctl_command_reply(conn, 501, "Invalid flow"); + goto exit; + } + } else if (dpname && arg1 && arg2 && arg3) { + /* ofproto/trace dpname tun_id in_port packet */ + uint16_t in_port; + ovs_be64 tun_id; + + tun_id = htonll(strtoull(arg1, NULL, 0)); + in_port = ofp_port_to_odp_port(atoi(arg2)); + + packet = ofpbuf_new(strlen(args) / 2); + arg3 = ofpbuf_put_hex(packet, arg3, NULL); + arg3 += strspn(arg3, " "); + if (*arg3 != '\0') { + unixctl_command_reply(conn, 501, "Trailing garbage in command"); + goto exit; + } + if (packet->size < ETH_HEADER_LEN) { + unixctl_command_reply(conn, 501, + "Packet data too short for Ethernet"); + goto exit; + } + + ds_put_cstr(&result, "Packet: "); + s = ofp_packet_to_string(packet->data, packet->size, packet->size); + ds_put_cstr(&result, s); + free(s); + + flow_extract(packet, tun_id, in_port, &flow); + } else { unixctl_command_reply(conn, 501, "Bad command syntax"); goto exit; } @@ -3943,39 +4080,19 @@ ofproto_unixctl_trace(struct unixctl_conn *conn, const char *args_, goto exit; } - tun_id = htonll(strtoull(tun_id_s, NULL, 0)); - in_port = ofp_port_to_odp_port(atoi(in_port_s)); - - packet_s = ofpbuf_put_hex(&packet, packet_s, NULL); - packet_s += strspn(packet_s, " "); - if (*packet_s != '\0') { - unixctl_command_reply(conn, 501, "Trailing garbage in command"); - goto exit; - } - if (packet.size < ETH_HEADER_LEN) { - unixctl_command_reply(conn, 501, "Packet data too short for Ethernet"); - goto exit; - } - - ds_put_cstr(&result, "Packet: "); - s = ofp_packet_to_string(packet.data, packet.size, packet.size); - ds_put_cstr(&result, s); - free(s); - - flow_extract(&packet, tun_id, in_port, &flow); ds_put_cstr(&result, "Flow: "); flow_format(&result, &flow); ds_put_char(&result, '\n'); - rule = rule_dpif_lookup(ofproto, &flow); - trace_format_rule(&result, 0, &rule->up); + rule = rule_dpif_lookup(ofproto, &flow, 0); + trace_format_rule(&result, 0, 0, rule); if (rule) { struct ofproto_trace trace; struct ofpbuf *odp_actions; trace.result = &result; trace.flow = flow; - action_xlate_ctx_init(&trace.ctx, ofproto, &flow, &packet); + action_xlate_ctx_init(&trace.ctx, ofproto, &flow, packet); trace.ctx.resubmit_hook = trace_resubmit; odp_actions = xlate_actions(&trace.ctx, rule->up.actions, rule->up.n_actions); @@ -3985,13 +4102,23 @@ ofproto_unixctl_trace(struct unixctl_conn *conn, const char *args_, ds_put_cstr(&result, "Datapath actions: "); format_odp_actions(&result, odp_actions->data, odp_actions->size); ofpbuf_delete(odp_actions); + + if (!trace.ctx.may_set_up_flow) { + if (packet) { + ds_put_cstr(&result, "\nThis flow is not cachable."); + } else { + ds_put_cstr(&result, "\nThe datapath actions are incomplete--" + "for complete actions, please supply a packet."); + } + } } unixctl_command_reply(conn, 200, ds_cstr(&result)); exit: ds_destroy(&result); - ofpbuf_uninit(&packet); + ofpbuf_delete(packet); + ofpbuf_uninit(&odp_key); free(args); }
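
The facet_max_idle() hunks above replace the hard-coded 1000-flow floor with ofproto->up.flow_eviction_threshold when choosing how long idle facets may stay cached: facets are placed in a histogram of idle-time buckets, and buckets are accumulated starting from the most recently used until at least MAX(flow_eviction_threshold, 1% of all facets) are covered. The following is a minimal, self-contained sketch of that cutoff walk, written to match the loop visible in the diff. The bucket count, bucket width, sample histogram, and the helper name max_idle_from_histogram are illustrative assumptions for the sketch only, not values or names taken from ofproto-dpif.

/* Standalone sketch of the facet_max_idle() cutoff computation shown in the
 * diff above.  N_BUCKETS, BUCKET_WIDTH, and the sample histogram are
 * assumptions made for illustration, not the values used by ofproto-dpif. */
#include <stdio.h>

#define N_BUCKETS 10              /* Assumed bucket count for this sketch. */
#define BUCKET_WIDTH 100          /* Assumed bucket width in milliseconds. */
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Returns the idle time (in ms) after which a facet would be expired, given a
 * histogram of facet counts indexed by idle-time bucket (bucket 0 holds the
 * most recently used facets). */
static int
max_idle_from_histogram(const int buckets[N_BUCKETS], int total,
                        int flow_eviction_threshold)
{
    int subtotal, bucket;

    /* Below the eviction threshold, keep everything for the full window. */
    if (total <= flow_eviction_threshold) {
        return N_BUCKETS * BUCKET_WIDTH;
    }

    /* Accumulate buckets from most recently used until we have kept at least
     * MAX(flow_eviction_threshold, 1% of all facets), as in the diff. */
    subtotal = bucket = 0;
    do {
        subtotal += buckets[bucket++];
    } while (bucket < N_BUCKETS
             && subtotal < MAX(flow_eviction_threshold, total / 100));

    return bucket * BUCKET_WIDTH;
}

int
main(void)
{
    /* Example histogram: most facets were used very recently. */
    const int buckets[N_BUCKETS] = { 500, 300, 100, 50, 20, 10, 10, 5, 3, 2 };
    int total = 0;

    for (int i = 0; i < N_BUCKETS; i++) {
        total += buckets[i];
    }

    printf("cutoff with threshold 1000: %d ms\n",
           max_idle_from_histogram(buckets, total, 1000));
    printf("cutoff with threshold 100:  %d ms\n",
           max_idle_from_histogram(buckets, total, 100));
    return 0;
}

Because the walk always takes at least the first bucket, a single expiration run can still retain an arbitrary number of facets, as the revised comment in the diff notes; the threshold only moves the floor that was previously fixed at 1000 flows.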