X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=ofproto%2Fofproto-dpif.c;h=88ec2c0a577cfb009e9494be72ba7312dc45e4ca;hb=6d324ae5dbe13066aa60d71c4e5d2beaaab6af21;hp=9d2f5ba4dd8728af4918a09741d77be8e99ba81e;hpb=1fa96cf431bc0bdfd81aeab50e1344355f2fb73e;p=sliver-openvswitch.git diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c index 9d2f5ba4d..88ec2c0a5 100644 --- a/ofproto/ofproto-dpif.c +++ b/ofproto/ofproto-dpif.c @@ -32,6 +32,7 @@ #include "fail-open.h" #include "hmapx.h" #include "lacp.h" +#include "learn.h" #include "mac-learning.h" #include "multipath.h" #include "netdev.h" @@ -60,10 +61,14 @@ COVERAGE_DEFINE(facet_invalidated); COVERAGE_DEFINE(facet_revalidate); COVERAGE_DEFINE(facet_unexpected); -/* Maximum depth of flow table recursion (due to NXAST_RESUBMIT actions) in a +/* Maximum depth of flow table recursion (due to resubmit actions) in a * flow translation. */ #define MAX_RESUBMIT_RECURSION 16 +/* Number of implemented OpenFlow tables. */ +enum { N_TABLES = 255 }; +BUILD_ASSERT_DECL(N_TABLES >= 1 && N_TABLES <= 255); + struct ofport_dpif; struct ofproto_dpif; @@ -88,6 +93,8 @@ struct rule_dpif { uint64_t packet_count; /* Number of packets received. */ uint64_t byte_count; /* Number of bytes received. */ + tag_type tag; /* Caches rule_calculate_tag() result. */ + struct list facets; /* List of "struct facet"s. */ }; @@ -96,8 +103,8 @@ static struct rule_dpif *rule_dpif_cast(const struct rule *rule) return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL; } -static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *ofproto, - const struct flow *flow); +static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *, + const struct flow *, uint8_t table); #define MAX_MIRRORS 32 typedef uint32_t mirror_mask_t; @@ -166,6 +173,12 @@ struct action_xlate_ctx { * revalidating without a packet to refer to. */ const struct ofpbuf *packet; + /* Should OFPP_NORMAL MAC learning and NXAST_LEARN actions execute? We + * want to execute them if we are actually processing a packet, or if we + * are accounting for packets that the datapath has processed, but not if + * we are just revalidating. */ + bool may_learn; + /* If nonnull, called just before executing a resubmit action. * * This is normally null so the client has to set it manually after @@ -176,9 +189,11 @@ struct action_xlate_ctx { * to look at them after it returns. */ struct ofpbuf *odp_actions; /* Datapath actions. */ - tag_type tags; /* Tags associated with OFPP_NORMAL actions. */ + tag_type tags; /* Tags associated with actions. */ bool may_set_up_flow; /* True ordinarily; false if the actions must * be reassessed for every packet. */ + bool has_learn; /* Actions include NXAST_LEARN? */ + bool has_normal; /* Actions output to OFPP_NORMAL? */ uint16_t nf_output_iface; /* Output interface index for NetFlow. */ /* xlate_actions() initializes and uses these members, but the client has no @@ -188,6 +203,7 @@ struct action_xlate_ctx { uint32_t priority; /* Current flow priority. 0 if none. */ struct flow base_flow; /* Flow at the last commit. */ uint32_t base_priority; /* Priority at the last commit. */ + uint8_t table_id; /* OpenFlow table ID where flow was found. */ }; static void action_xlate_ctx_init(struct action_xlate_ctx *, @@ -206,12 +222,8 @@ struct facet { * dpif_execute(). * * - Do include packets and bytes that were obtained from the datapath - * when a flow was deleted (e.g. dpif_flow_del()) or when its - * statistics were reset (e.g. dpif_flow_put() with + * when its statistics were reset (e.g. 
dpif_flow_put() with * DPIF_FP_ZERO_STATS). - * - * - Do not include any packets or bytes that can currently be obtained - * from the datapath by, e.g., dpif_flow_get(). */ uint64_t packet_count; /* Number of packets received. */ uint64_t byte_count; /* Number of bytes received. */ @@ -223,10 +235,7 @@ struct facet { uint64_t rs_byte_count; /* Bytes pushed to resubmit children. */ long long int rs_used; /* Used time pushed to resubmit children. */ - /* Number of bytes passed to account_cb. This may include bytes that can - * currently obtained from the datapath (thus, it can be greater than - * byte_count). */ - uint64_t accounted_bytes; + uint64_t accounted_bytes; /* Bytes processed by facet_account(). */ struct hmap_node hmap_node; /* In owning ofproto's 'facets' hmap. */ struct list list_node; /* In owning rule's 'facets' list. */ @@ -235,6 +244,8 @@ struct facet { bool installed; /* Installed in datapath? */ bool may_install; /* True ordinarily; false if actions must * be reassessed for every packet. */ + bool has_learn; /* Actions include NXAST_LEARN? */ + bool has_normal; /* Actions output to OFPP_NORMAL? */ size_t actions_len; /* Number of bytes in actions[]. */ struct nlattr *actions; /* Datapath actions. */ tag_type tags; /* Tags. */ @@ -268,10 +279,10 @@ static void facet_update_time(struct ofproto_dpif *, struct facet *, long long int used); static void facet_update_stats(struct ofproto_dpif *, struct facet *, const struct dpif_flow_stats *); +static void facet_reset_counters(struct facet *); static void facet_reset_dp_stats(struct facet *, struct dpif_flow_stats *); static void facet_push_stats(struct facet *); -static void facet_account(struct ofproto_dpif *, struct facet *, - uint64_t extra_bytes); +static void facet_account(struct ofproto_dpif *, struct facet *); static bool facet_is_controller_flow(struct facet *); @@ -279,6 +290,11 @@ static void flow_push_stats(const struct rule_dpif *, struct flow *, uint64_t packets, uint64_t bytes, long long int used); +static uint32_t rule_calculate_tag(const struct flow *, + const struct flow_wildcards *, + uint32_t basis); +static void rule_invalidate(const struct rule_dpif *); + struct ofport_dpif { struct ofport up; @@ -307,6 +323,17 @@ struct dpif_completion { struct ofoperation *op; }; +/* Extra information about a classifier table. + * Currently used just for optimized flow revalidation. */ +struct table_dpif { + /* If either of these is nonnull, then this table has a form that allows + * flows to be tagged to avoid revalidating most flows for the most common + * kinds of flow table changes. */ + struct cls_table *catchall_table; /* Table that wildcards all fields. */ + struct cls_table *other_table; /* Table with any other wildcard set. */ + uint32_t basis; /* Keeps each table's tags separate. */ +}; + struct ofproto_dpif { struct ofproto up; struct dpif *dpif; @@ -328,6 +355,9 @@ struct ofproto_dpif { /* Facets. */ struct hmap facets; + + /* Revalidation. 
*/ + struct table_dpif tables[N_TABLES]; bool need_revalidate; struct tag_set revalidate_set; @@ -419,7 +449,7 @@ dealloc(struct ofproto *ofproto_) } static int -construct(struct ofproto *ofproto_) +construct(struct ofproto *ofproto_, int *n_tablesp) { struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); const char *name = ofproto->up.name; @@ -459,19 +489,24 @@ construct(struct ofproto *ofproto_) timer_set_duration(&ofproto->next_expiration, 1000); hmap_init(&ofproto->facets); + + for (i = 0; i < N_TABLES; i++) { + struct table_dpif *table = &ofproto->tables[i]; + + table->catchall_table = NULL; + table->other_table = NULL; + table->basis = random_uint32(); + } ofproto->need_revalidate = false; tag_set_init(&ofproto->revalidate_set); list_init(&ofproto->completions); - ofproto->up.tables = xmalloc(sizeof *ofproto->up.tables); - classifier_init(&ofproto->up.tables[0]); - ofproto->up.n_tables = 1; - ofproto_dpif_unixctl_init(); ofproto->has_bundle_action = false; + *n_tablesp = N_TABLES; return 0; } @@ -492,14 +527,18 @@ destruct(struct ofproto *ofproto_) { struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); struct rule_dpif *rule, *next_rule; - struct cls_cursor cursor; + struct classifier *table; int i; complete_operations(ofproto); - cls_cursor_init(&cursor, &ofproto->up.tables[0], NULL); - CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) { - ofproto_rule_destroy(&rule->up); + OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) { + struct cls_cursor cursor; + + cls_cursor_init(&cursor, table, NULL); + CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) { + ofproto_rule_destroy(&rule->up); + } } for (i = 0; i < MAX_MIRRORS; i++) { @@ -564,6 +603,8 @@ run(struct ofproto *ofproto_) bundle_run(bundle); } + mac_learning_run(ofproto->ml, &ofproto->revalidate_set); + /* Now revalidate if there's anything to do. */ if (ofproto->need_revalidate || !tag_set_is_empty(&ofproto->revalidate_set)) { @@ -611,6 +652,7 @@ wait(struct ofproto *ofproto_) HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) { bundle_wait(bundle); } + mac_learning_wait(ofproto->ml); if (ofproto->need_revalidate) { /* Shouldn't happen, but if it does just go around again. */ VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()"); @@ -662,7 +704,7 @@ static void get_tables(struct ofproto *ofproto_, struct ofp_table_stats *ots) { struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); - struct odp_stats s; + struct ovs_dp_stats s; strcpy(ots->name, "classifier"); @@ -714,6 +756,7 @@ port_construct(struct ofport *port_) port->bundle = NULL; port->cfm = NULL; port->tag = tag_create_random(); + port->may_enable = true; if (ofproto->sflow) { dpif_sflow_add_port(ofproto->sflow, port->odp_port, @@ -814,6 +857,20 @@ get_cfm_fault(const struct ofport *ofport_) return ofport->cfm ? cfm_get_fault(ofport->cfm) : -1; } + +static int +get_cfm_remote_mpids(const struct ofport *ofport_, const uint64_t **rmps, + size_t *n_rmps) +{ + struct ofport_dpif *ofport = ofport_dpif_cast(ofport_); + + if (ofport->cfm) { + cfm_get_remote_mpids(ofport->cfm, rmps, n_rmps); + return 0; + } else { + return -1; + } +} /* Bundles. 
*/ @@ -1112,7 +1169,7 @@ bundle_remove(struct ofport *port_) } static void -send_pdu_cb(void *port_, const struct lacp_pdu *pdu) +send_pdu_cb(void *port_, const void *pdu, size_t pdu_size) { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10); struct ofport_dpif *port = port_; @@ -1121,13 +1178,14 @@ send_pdu_cb(void *port_, const struct lacp_pdu *pdu) error = netdev_get_etheraddr(port->up.netdev, ea); if (!error) { - struct lacp_pdu *packet_pdu; struct ofpbuf packet; + void *packet_pdu; ofpbuf_init(&packet, 0); packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP, - sizeof *packet_pdu); - *packet_pdu = *pdu; + pdu_size); + memcpy(packet_pdu, pdu, pdu_size); + error = netdev_send(port->up.netdev, &packet); if (error) { VLOG_WARN_RL(&rl, "port %s: sending LACP PDU on iface %s failed " @@ -1266,6 +1324,7 @@ mirror_set(struct ofproto *ofproto_, void *aux, mirror = ofproto->mirrors[idx] = xzalloc(sizeof *mirror); mirror->ofproto = ofproto; mirror->idx = idx; + mirror->aux = aux; mirror->out_vlan = -1; mirror->name = NULL; } @@ -1391,6 +1450,14 @@ is_mirror_output_bundle(struct ofproto *ofproto_, void *aux) struct ofbundle *bundle = bundle_lookup(ofproto, aux); return bundle && bundle->mirror_out != 0; } + +static void +forward_bpdu_changed(struct ofproto *ofproto_) +{ + struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); + /* Revalidate cached flows whenever forward_bpdu option changes. */ + ofproto->need_revalidate = true; +} /* Ports. */ @@ -1605,19 +1672,21 @@ static bool process_special(struct ofproto_dpif *ofproto, const struct flow *flow, const struct ofpbuf *packet) { - if (cfm_should_process_flow(flow)) { - struct ofport_dpif *ofport = get_ofp_port(ofproto, flow->in_port); - if (packet && ofport && ofport->cfm) { + struct ofport_dpif *ofport = get_ofp_port(ofproto, flow->in_port); + + if (!ofport) { + return false; + } + + if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) { + if (packet) { cfm_process_heartbeat(ofport->cfm, packet); } return true; - } else if (flow->dl_type == htons(ETH_TYPE_LACP)) { - struct ofport_dpif *port = get_ofp_port(ofproto, flow->in_port); - if (packet && port && port->bundle && port->bundle->lacp) { - const struct lacp_pdu *pdu = parse_lacp_packet(packet); - if (pdu) { - lacp_process_pdu(port->bundle->lacp, port, pdu); - } + } else if (ofport->bundle && ofport->bundle->lacp + && flow->dl_type == htons(ETH_TYPE_LACP)) { + if (packet) { + lacp_process_packet(ofport->bundle->lacp, ofport, packet); } return true; } @@ -1646,12 +1715,12 @@ handle_miss_upcall(struct ofproto_dpif *ofproto, struct dpif_upcall *upcall) /* Check with in-band control to see if this packet should be sent * to the local port regardless of the flow table. */ if (connmgr_msg_in_hook(ofproto->up.connmgr, &flow, upcall->packet)) { - send_packet(ofproto, ODPP_LOCAL, upcall->packet); + send_packet(ofproto, OVSP_LOCAL, upcall->packet); } facet = facet_lookup_valid(ofproto, &flow); if (!facet) { - struct rule_dpif *rule = rule_dpif_lookup(ofproto, &flow); + struct rule_dpif *rule = rule_dpif_lookup(ofproto, &flow, 0); if (!rule) { /* Don't send a packet-in if OFPPC_NO_PACKET_IN asserted. */ struct ofport_dpif *port = get_ofp_port(ofproto, flow.in_port); @@ -1745,7 +1814,7 @@ static int expire(struct ofproto_dpif *ofproto) { struct rule_dpif *rule, *next_rule; - struct cls_cursor cursor; + struct classifier *table; int dp_max_idle; /* Update stats for each flow in the datapath. 
*/ @@ -1756,9 +1825,13 @@ expire(struct ofproto_dpif *ofproto) expire_facets(ofproto, dp_max_idle); /* Expire OpenFlow flows whose idle_timeout or hard_timeout has passed. */ - cls_cursor_init(&cursor, &ofproto->up.tables[0], NULL); - CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) { - rule_expire(rule); + OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) { + struct cls_cursor cursor; + + cls_cursor_init(&cursor, table, NULL); + CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) { + rule_expire(rule); + } } /* All outstanding data in existing flows has been accounted, so it's a @@ -1805,7 +1878,7 @@ update_stats(struct ofproto_dpif *p) ds_init(&s); odp_flow_key_format(key, key_len, &s); - VLOG_WARN_RL(&rl, "failed to convert ODP flow key to flow: %s", + VLOG_WARN_RL(&rl, "failed to convert datapath flow key to flow: %s", ds_cstr(&s)); ds_destroy(&s); @@ -1832,7 +1905,7 @@ update_stats(struct ofproto_dpif *p) facet->dp_byte_count = stats->n_bytes; facet_update_time(p, facet, stats->used); - facet_account(p, facet, stats->n_bytes); + facet_account(p, facet); facet_push_stats(facet); } else { /* There's a flow in the datapath that we know nothing about. @@ -1869,11 +1942,12 @@ facet_max_idle(const struct ofproto_dpif *ofproto) * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each. Each facet * that is installed in the kernel gets dropped in the appropriate bucket. * After the histogram has been built, we compute the cutoff so that only - * the most-recently-used 1% of facets (but at least 1000 flows) are kept - * cached. At least the most-recently-used bucket of facets is kept, so - * actually an arbitrary number of facets can be kept in any given - * expiration run (though the next run will delete most of those unless - * they receive additional data). + * the most-recently-used 1% of facets (but at least + * ofproto->up.flow_eviction_threshold flows) are kept cached. At least + * the most-recently-used bucket of facets is kept, so actually an + * arbitrary number of facets can be kept in any given expiration run + * (though the next run will delete most of those unless they receive + * additional data). * * This requires a second pass through the facets, in addition to the pass * made by update_stats(), because the former function never looks @@ -1888,7 +1962,7 @@ facet_max_idle(const struct ofproto_dpif *ofproto) int i; total = hmap_count(&ofproto->facets); - if (total <= 1000) { + if (total <= ofproto->up.flow_eviction_threshold) { return N_BUCKETS * BUCKET_WIDTH; } @@ -1906,7 +1980,8 @@ facet_max_idle(const struct ofproto_dpif *ofproto) subtotal = bucket = 0; do { subtotal += buckets[bucket++]; - } while (bucket < N_BUCKETS && subtotal < MAX(1000, total / 100)); + } while (bucket < N_BUCKETS && + subtotal < MAX(ofproto->up.flow_eviction_threshold, total / 100)); if (VLOG_IS_DBG_ENABLED()) { struct ds s; @@ -1978,7 +2053,7 @@ rule_expire(struct rule_dpif *rule) /* Has 'rule' expired? 
*/ now = time_msec(); if (rule->up.hard_timeout - && now > rule->up.created + rule->up.hard_timeout * 1000) { + && now > rule->up.modified + rule->up.hard_timeout * 1000) { reason = OFPRR_HARD_TIMEOUT; } else if (rule->up.idle_timeout && list_is_empty(&rule->facets) && now > rule->used + rule->up.idle_timeout * 1000) { @@ -2045,7 +2120,7 @@ execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow, struct ofpbuf *packet) { if (actions_len == NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t)) - && odp_actions->nla_type == ODP_ACTION_ATTR_CONTROLLER) { + && odp_actions->nla_type == OVS_ACTION_ATTR_USERSPACE) { /* As an optimization, avoid a round-trip from userspace to kernel to * userspace. This also avoids possibly filling up kernel packet * buffers along the way. */ @@ -2087,8 +2162,8 @@ execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow, * applying flow_extract() to 'packet' would yield the same flow as * 'facet->flow'. * - * 'facet' must have accurately composed ODP actions; that is, it must not be - * in need of revalidation. + * 'facet' must have accurately composed datapath actions; that is, it must + * not be in need of revalidation. * * Takes ownership of 'packet'. */ static void @@ -2124,7 +2199,7 @@ facet_remove(struct ofproto_dpif *ofproto, struct facet *facet) facet_free(facet); } -/* Composes the ODP actions for 'facet' based on its rule's actions. */ +/* Composes the datapath actions for 'facet' based on its rule's actions. */ static void facet_make_actions(struct ofproto_dpif *p, struct facet *facet, const struct ofpbuf *packet) @@ -2137,6 +2212,8 @@ facet_make_actions(struct ofproto_dpif *p, struct facet *facet, odp_actions = xlate_actions(&ctx, rule->up.actions, rule->up.n_actions); facet->tags = ctx.tags; facet->may_install = ctx.may_set_up_flow; + facet->has_learn = ctx.has_learn; + facet->has_normal = ctx.has_normal; facet->nf_flow.output_iface = ctx.nf_output_iface; if (facet->actions_len != odp_actions->size @@ -2205,40 +2282,32 @@ vlan_tci_to_openflow_vlan(ovs_be16 vlan_tci) } static void -facet_account(struct ofproto_dpif *ofproto, - struct facet *facet, uint64_t extra_bytes) +facet_account(struct ofproto_dpif *ofproto, struct facet *facet) { - uint64_t total_bytes, n_bytes; - struct ofbundle *in_bundle; + uint64_t n_bytes; const struct nlattr *a; - tag_type dummy = 0; unsigned int left; ovs_be16 vlan_tci; - int vlan; - total_bytes = facet->byte_count + extra_bytes; - if (total_bytes <= facet->accounted_bytes) { + if (facet->byte_count <= facet->accounted_bytes) { return; } - n_bytes = total_bytes - facet->accounted_bytes; - facet->accounted_bytes = total_bytes; + n_bytes = facet->byte_count - facet->accounted_bytes; + facet->accounted_bytes = facet->byte_count; - /* Test that 'tags' is nonzero to ensure that only flows that include an - * OFPP_NORMAL action are used for learning and bond slave rebalancing. - * This works because OFPP_NORMAL always sets a nonzero tag value. - * - * Feed information from the active flows back into the learning table to + /* Feed information from the active flows back into the learning table to * ensure that table is always in sync with what is actually flowing * through the datapath. 
*/ - if (!facet->tags - || !is_admissible(ofproto, &facet->flow, false, &dummy, - &vlan, &in_bundle)) { - return; - } + if (facet->has_learn || facet->has_normal) { + struct action_xlate_ctx ctx; - update_learning_table(ofproto, &facet->flow, vlan, in_bundle); + action_xlate_ctx_init(&ctx, ofproto, &facet->flow, NULL); + ctx.may_learn = true; + ofpbuf_delete(xlate_actions(&ctx, facet->rule->up.actions, + facet->rule->up.n_actions)); + } - if (!ofproto->has_bonded_bundles) { + if (!facet->has_normal || !ofproto->has_bonded_bundles) { return; } @@ -2252,7 +2321,7 @@ facet_account(struct ofproto_dpif *ofproto, struct ofport_dpif *port; switch (nl_attr_type(a)) { - case ODP_ACTION_ATTR_OUTPUT: + case OVS_ACTION_ATTR_OUTPUT: port = get_odp_port(ofproto, nl_attr_get_u32(a)); if (port && port->bundle && port->bundle->bond) { bond_account(port->bundle->bond, &facet->flow, @@ -2260,11 +2329,11 @@ facet_account(struct ofproto_dpif *ofproto, } break; - case ODP_ACTION_ATTR_STRIP_VLAN: + case OVS_ACTION_ATTR_POP_VLAN: vlan_tci = htons(0); break; - case ODP_ACTION_ATTR_SET_DL_TCI: + case OVS_ACTION_ATTR_PUSH_VLAN: vlan_tci = nl_attr_get_be16(a); break; } @@ -2337,7 +2406,7 @@ facet_flush_stats(struct ofproto_dpif *ofproto, struct facet *facet) assert(!facet->dp_packet_count); facet_push_stats(facet); - facet_account(ofproto, facet, 0); + facet_account(ofproto, facet); if (ofproto->netflow && !facet_is_controller_flow(facet)) { struct ofexpired expired; @@ -2353,11 +2422,7 @@ facet_flush_stats(struct ofproto_dpif *ofproto, struct facet *facet) /* Reset counters to prevent double counting if 'facet' ever gets * reinstalled. */ - facet->packet_count = 0; - facet->byte_count = 0; - facet->rs_packet_count = 0; - facet->rs_byte_count = 0; - facet->accounted_bytes = 0; + facet_reset_counters(facet); netflow_flow_clear(&facet->nf_flow); } @@ -2425,14 +2490,14 @@ facet_revalidate(struct ofproto_dpif *ofproto, struct facet *facet) COVERAGE_INC(facet_revalidate); /* Determine the new rule. */ - new_rule = rule_dpif_lookup(ofproto, &facet->flow); + new_rule = rule_dpif_lookup(ofproto, &facet->flow, 0); if (!new_rule) { /* No new rule, so delete the facet. */ facet_remove(ofproto, facet); return false; } - /* Calculate new ODP actions. + /* Calculate new datapath actions. * * We do not modify any 'facet' state yet, because we might need to, e.g., * emit a NetFlow expiration and, if so, we need to have the old state @@ -2444,8 +2509,8 @@ facet_revalidate(struct ofproto_dpif *ofproto, struct facet *facet) || memcmp(facet->actions, odp_actions->data, facet->actions_len)); - /* If the ODP actions changed or the installability changed, then we need - * to talk to the datapath. */ + /* If the datapath actions changed or the installability changed, + * then we need to talk to the datapath. 
*/ if (actions_changed || ctx.may_set_up_flow != facet->installed) { if (ctx.may_set_up_flow) { struct dpif_flow_stats stats; @@ -2466,6 +2531,8 @@ facet_revalidate(struct ofproto_dpif *ofproto, struct facet *facet) facet->tags = ctx.tags; facet->nf_flow.output_iface = ctx.nf_output_iface; facet->may_install = ctx.may_set_up_flow; + facet->has_learn = ctx.has_learn; + facet->has_normal = ctx.has_normal; if (actions_changed) { free(facet->actions); facet->actions_len = odp_actions->size; @@ -2519,6 +2586,16 @@ facet_update_stats(struct ofproto_dpif *ofproto, struct facet *facet, } } +static void +facet_reset_counters(struct facet *facet) +{ + facet->packet_count = 0; + facet->byte_count = 0; + facet->rs_packet_count = 0; + facet->rs_byte_count = 0; + facet->accounted_bytes = 0; +} + static void facet_push_stats(struct facet *facet) { @@ -2583,10 +2660,15 @@ flow_push_stats(const struct rule_dpif *rule, /* Rules. */ static struct rule_dpif * -rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow) +rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow, + uint8_t table_id) { + if (table_id >= N_TABLES) { + return NULL; + } + return rule_dpif_cast(rule_from_cls_rule( - classifier_lookup(&ofproto->up.tables[0], + classifier_lookup(&ofproto->up.tables[table_id], flow))); } @@ -2595,7 +2677,7 @@ complete_operation(struct rule_dpif *rule) { struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto); - ofproto->need_revalidate = true; + rule_invalidate(rule); if (clogged) { struct dpif_completion *c = xmalloc(sizeof *c); c->op = rule->up.pending; @@ -2625,6 +2707,7 @@ rule_construct(struct rule *rule_) struct rule_dpif *rule = rule_dpif_cast(rule_); struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto); struct rule_dpif *victim; + uint8_t table_id; int error; error = validate_actions(rule->up.actions, rule->up.n_actions, @@ -2644,6 +2727,13 @@ rule_construct(struct rule *rule_) rule->facets = victim->facets; list_moved(&rule->facets); LIST_FOR_EACH (facet, list_node, &rule->facets) { + /* XXX: We're only clearing our local counters here. It's possible + * that quite a few packets are unaccounted for in the datapath + * statistics. These will be accounted to the new rule instead of + * cleared as required. This could be fixed by clearing out the + * datapath statistics for this facet, but currently it doesn't + * seem worth it. */ + facet_reset_counters(facet); facet->rule = rule; } } else { @@ -2651,6 +2741,12 @@ rule_construct(struct rule *rule_) list_init(&rule->facets); } + table_id = rule->up.table_id; + rule->tag = (victim ? victim->tag + : table_id == 0 ? 0 + : rule_calculate_tag(&rule->up.cr.flow, &rule->up.cr.wc, + ofproto->tables[table_id].basis)); + complete_operation(rule); return 0; } @@ -2708,7 +2804,7 @@ rule_execute(struct rule *rule_, struct flow *flow, struct ofpbuf *packet) /* Otherwise, if 'rule' is in fact the correct rule for 'packet', then * create a new facet for it and use that. 
*/ - if (rule_dpif_lookup(ofproto, flow) == rule) { + if (rule_dpif_lookup(ofproto, flow, 0) == rule) { facet = facet_create(rule, flow, packet); facet_execute(ofproto, facet, packet); facet_install(ofproto, facet, true); @@ -2765,7 +2861,7 @@ send_packet(struct ofproto_dpif *ofproto, uint32_t odp_port, odp_flow_key_from_flow(&key, &flow); ofpbuf_init(&odp_actions, 32); - nl_msg_put_u32(&odp_actions, ODP_ACTION_ATTR_OUTPUT, odp_port); + nl_msg_put_u32(&odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port); error = dpif_execute(ofproto->dpif, key.data, key.size, odp_actions.data, odp_actions.size, @@ -2779,11 +2875,11 @@ send_packet(struct ofproto_dpif *ofproto, uint32_t odp_port, return error; } -/* OpenFlow to ODP action translation. */ +/* OpenFlow to datapath action translation. */ static void do_xlate_actions(const union ofp_action *in, size_t n_in, struct action_xlate_ctx *ctx); -static bool xlate_normal(struct action_xlate_ctx *); +static void xlate_normal(struct action_xlate_ctx *); static void commit_odp_actions(struct action_xlate_ctx *ctx) @@ -2793,58 +2889,66 @@ commit_odp_actions(struct action_xlate_ctx *ctx) struct ofpbuf *odp_actions = ctx->odp_actions; if (base->tun_id != flow->tun_id) { - nl_msg_put_be64(odp_actions, ODP_ACTION_ATTR_SET_TUNNEL, flow->tun_id); + nl_msg_put_be64(odp_actions, OVS_ACTION_ATTR_SET_TUNNEL, flow->tun_id); base->tun_id = flow->tun_id; } if (base->nw_src != flow->nw_src) { - nl_msg_put_be32(odp_actions, ODP_ACTION_ATTR_SET_NW_SRC, flow->nw_src); + nl_msg_put_be32(odp_actions, OVS_ACTION_ATTR_SET_NW_SRC, flow->nw_src); base->nw_src = flow->nw_src; } if (base->nw_dst != flow->nw_dst) { - nl_msg_put_be32(odp_actions, ODP_ACTION_ATTR_SET_NW_DST, flow->nw_dst); + nl_msg_put_be32(odp_actions, OVS_ACTION_ATTR_SET_NW_DST, flow->nw_dst); base->nw_dst = flow->nw_dst; } + if (base->nw_tos != flow->nw_tos) { + nl_msg_put_u8(odp_actions, OVS_ACTION_ATTR_SET_NW_TOS, flow->nw_tos); + base->nw_tos = flow->nw_tos; + } + if (base->vlan_tci != flow->vlan_tci) { if (!(flow->vlan_tci & htons(VLAN_CFI))) { - nl_msg_put_flag(odp_actions, ODP_ACTION_ATTR_STRIP_VLAN); + nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN); } else { - nl_msg_put_be16(odp_actions, ODP_ACTION_ATTR_SET_DL_TCI, + if (base->vlan_tci != htons(0)) { + nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN); + } + nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_PUSH_VLAN, flow->vlan_tci & ~htons(VLAN_CFI)); } base->vlan_tci = flow->vlan_tci; } if (base->tp_src != flow->tp_src) { - nl_msg_put_be16(odp_actions, ODP_ACTION_ATTR_SET_TP_SRC, flow->tp_src); + nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_SET_TP_SRC, flow->tp_src); base->tp_src = flow->tp_src; } if (base->tp_dst != flow->tp_dst) { - nl_msg_put_be16(odp_actions, ODP_ACTION_ATTR_SET_TP_DST, flow->tp_dst); + nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_SET_TP_DST, flow->tp_dst); base->tp_dst = flow->tp_dst; } if (!eth_addr_equals(base->dl_src, flow->dl_src)) { - nl_msg_put_unspec(odp_actions, ODP_ACTION_ATTR_SET_DL_SRC, + nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_SET_DL_SRC, flow->dl_src, ETH_ADDR_LEN); memcpy(base->dl_src, flow->dl_src, ETH_ADDR_LEN); } if (!eth_addr_equals(base->dl_dst, flow->dl_dst)) { - nl_msg_put_unspec(odp_actions, ODP_ACTION_ATTR_SET_DL_DST, + nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_SET_DL_DST, flow->dl_dst, ETH_ADDR_LEN); memcpy(base->dl_dst, flow->dl_dst, ETH_ADDR_LEN); } if (ctx->base_priority != ctx->priority) { if (ctx->priority) { - nl_msg_put_u32(odp_actions, ODP_ACTION_ATTR_SET_PRIORITY, + 
nl_msg_put_u32(odp_actions, OVS_ACTION_ATTR_SET_PRIORITY, ctx->priority); } else { - nl_msg_put_flag(odp_actions, ODP_ACTION_ATTR_POP_PRIORITY); + nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_PRIORITY); } ctx->base_priority = ctx->priority; } @@ -2870,23 +2974,42 @@ add_output_action(struct action_xlate_ctx *ctx, uint16_t ofp_port) } commit_odp_actions(ctx); - nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_OUTPUT, odp_port); + nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port); ctx->nf_output_iface = ofp_port; } static void -xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port) +xlate_table_action(struct action_xlate_ctx *ctx, + uint16_t in_port, uint8_t table_id) { if (ctx->recurse < MAX_RESUBMIT_RECURSION) { + struct ofproto_dpif *ofproto = ctx->ofproto; struct rule_dpif *rule; uint16_t old_in_port; + uint8_t old_table_id; + + old_table_id = ctx->table_id; + ctx->table_id = table_id; - /* Look up a flow with 'in_port' as the input port. Then restore the - * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will - * have surprising behavior). */ + /* Look up a flow with 'in_port' as the input port. */ old_in_port = ctx->flow.in_port; ctx->flow.in_port = in_port; - rule = rule_dpif_lookup(ctx->ofproto, &ctx->flow); + rule = rule_dpif_lookup(ofproto, &ctx->flow, table_id); + + /* Tag the flow. */ + if (table_id > 0 && table_id < N_TABLES) { + struct table_dpif *table = &ofproto->tables[table_id]; + if (table->other_table) { + ctx->tags |= (rule + ? rule->tag + : rule_calculate_tag(&ctx->flow, + &table->other_table->wc, + table->basis)); + } + } + + /* Restore the original input port. Otherwise OFPP_NORMAL and + * OFPP_IN_PORT will have surprising behavior. */ ctx->flow.in_port = old_in_port; if (ctx->resubmit_hook) { @@ -2898,14 +3021,31 @@ xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port) do_xlate_actions(rule->up.actions, rule->up.n_actions, ctx); ctx->recurse--; } + + ctx->table_id = old_table_id; } else { static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1); - VLOG_ERR_RL(&recurse_rl, "NXAST_RESUBMIT recursed over %d times", + VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times", MAX_RESUBMIT_RECURSION); } } +static void +xlate_resubmit_table(struct action_xlate_ctx *ctx, + const struct nx_action_resubmit *nar) +{ + uint16_t in_port; + uint8_t table_id; + + in_port = (nar->in_port == htons(OFPP_IN_PORT) + ? ctx->flow.in_port + : ntohs(nar->in_port)); + table_id = nar->table == 255 ? 
ctx->table_id : nar->table; + + xlate_table_action(ctx, in_port, table_id); +} + static void flood_packets(struct action_xlate_ctx *ctx, ovs_be32 mask) { @@ -2915,7 +3055,7 @@ flood_packets(struct action_xlate_ctx *ctx, ovs_be32 mask) HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) { uint16_t ofp_port = ofport->up.ofp_port; if (ofp_port != ctx->flow.in_port && !(ofport->up.opp.config & mask)) { - nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_OUTPUT, + nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, ofport->odp_port); } } @@ -2936,7 +3076,7 @@ xlate_output_action__(struct action_xlate_ctx *ctx, add_output_action(ctx, ctx->flow.in_port); break; case OFPP_TABLE: - xlate_table_action(ctx, ctx->flow.in_port); + xlate_table_action(ctx, ctx->flow.in_port, ctx->table_id); break; case OFPP_NORMAL: xlate_normal(ctx); @@ -2949,7 +3089,7 @@ xlate_output_action__(struct action_xlate_ctx *ctx, break; case OFPP_CONTROLLER: commit_odp_actions(ctx); - nl_msg_put_u64(ctx->odp_actions, ODP_ACTION_ATTR_CONTROLLER, max_len); + nl_msg_put_u64(ctx->odp_actions, OVS_ACTION_ATTR_USERSPACE, max_len); break; case OFPP_LOCAL: add_output_action(ctx, OFPP_LOCAL); @@ -2973,6 +3113,19 @@ xlate_output_action__(struct action_xlate_ctx *ctx, } } +static void +xlate_output_reg_action(struct action_xlate_ctx *ctx, + const struct nx_action_output_reg *naor) +{ + uint64_t ofp_port; + + ofp_port = nxm_read_field_bits(naor->src, naor->ofs_nbits, &ctx->flow); + + if (ofp_port <= UINT16_MAX) { + xlate_output_action__(ctx, ofp_port, ntohs(naor->max_len)); + } +} + static void xlate_output_action(struct action_xlate_ctx *ctx, const struct ofp_action_output *oao) @@ -2996,14 +3149,14 @@ xlate_enqueue_action(struct action_xlate_ctx *ctx, return; } - /* Figure out ODP output port. */ + /* Figure out datapath output port. */ ofp_port = ntohs(oae->port); if (ofp_port == OFPP_IN_PORT) { ofp_port = ctx->flow.in_port; } odp_port = ofp_port_to_odp_port(ofp_port); - /* Add ODP actions. */ + /* Add datapath actions. 
*/ ctx_priority = ctx->priority; ctx->priority = priority; add_output_action(ctx, odp_port); @@ -3082,6 +3235,26 @@ slave_enabled_cb(uint16_t ofp_port, void *ofproto_) } } +static void +xlate_learn_action(struct action_xlate_ctx *ctx, + const struct nx_action_learn *learn) +{ + static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1); + struct ofputil_flow_mod fm; + int error; + + learn_execute(learn, &ctx->flow, &fm); + + error = ofproto_flow_mod(&ctx->ofproto->up, &fm); + if (error && !VLOG_DROP_WARN(&rl)) { + char *msg = ofputil_error_to_string(error); + VLOG_WARN("learning action failed to modify flow table (%s)", msg); + free(msg); + } + + free(fm.actions); +} + static void do_xlate_actions(const union ofp_action *in, size_t n_in, struct action_xlate_ctx *ctx) @@ -3108,6 +3281,7 @@ do_xlate_actions(const union ofp_action *in, size_t n_in, const struct nx_action_multipath *nam; const struct nx_action_autopath *naa; const struct nx_action_bundle *nab; + const struct nx_action_output_reg *naor; enum ofputil_action_code code; ovs_be64 tun_id; @@ -3168,7 +3342,11 @@ do_xlate_actions(const union ofp_action *in, size_t n_in, case OFPUTIL_NXAST_RESUBMIT: nar = (const struct nx_action_resubmit *) ia; - xlate_table_action(ctx, ntohs(nar->in_port)); + xlate_table_action(ctx, ntohs(nar->in_port), ctx->table_id); + break; + + case OFPUTIL_NXAST_RESUBMIT_TABLE: + xlate_resubmit_table(ctx, (const struct nx_action_resubmit *) ia); break; case OFPUTIL_NXAST_SET_TUNNEL: @@ -3222,6 +3400,25 @@ do_xlate_actions(const union ofp_action *in, size_t n_in, slave_enabled_cb, ctx->ofproto), 0); break; + + case OFPUTIL_NXAST_BUNDLE_LOAD: + ctx->ofproto->has_bundle_action = true; + nab = (const struct nx_action_bundle *) ia; + bundle_execute_load(nab, &ctx->flow, slave_enabled_cb, + ctx->ofproto); + break; + + case OFPUTIL_NXAST_OUTPUT_REG: + naor = (const struct nx_action_output_reg *) ia; + xlate_output_reg_action(ctx, naor); + break; + + case OFPUTIL_NXAST_LEARN: + ctx->has_learn = true; + if (ctx->may_learn) { + xlate_learn_action(ctx, (const struct nx_action_learn *) ia); + } + break; } } } @@ -3234,6 +3431,7 @@ action_xlate_ctx_init(struct action_xlate_ctx *ctx, ctx->ofproto = ofproto; ctx->flow = *flow; ctx->packet = packet; + ctx->may_learn = packet != NULL; ctx->resubmit_hook = NULL; } @@ -3246,11 +3444,15 @@ xlate_actions(struct action_xlate_ctx *ctx, ctx->odp_actions = ofpbuf_new(512); ctx->tags = 0; ctx->may_set_up_flow = true; + ctx->has_learn = false; + ctx->has_normal = false; ctx->nf_output_iface = NF_OUT_DROP; ctx->recurse = 0; ctx->priority = 0; ctx->base_priority = 0; ctx->base_flow = ctx->flow; + ctx->base_flow.tun_id = 0; + ctx->table_id = 0; if (process_special(ctx->ofproto, &ctx->flow, ctx->packet)) { ctx->may_set_up_flow = false; @@ -3363,7 +3565,8 @@ dst_is_duplicate(const struct dst_set *set, const struct dst *test) static bool ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan) { - return bundle->vlan < 0 && vlan_bitmap_contains(bundle->trunks, vlan); + return (bundle->vlan < 0 + && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan))); } static bool @@ -3409,7 +3612,7 @@ compose_dsts(struct action_xlate_ctx *ctx, uint16_t vlan, static bool vlan_is_mirrored(const struct ofmirror *m, int vlan) { - return vlan_bitmap_contains(m->vlans, vlan); + return !m->vlans || bitmap_is_set(m->vlans, vlan); } /* Returns true if a packet with Ethernet destination MAC 'dst' may be mirrored @@ -3543,7 +3746,7 @@ compose_actions(struct action_xlate_ctx *ctx, uint16_t vlan, 
continue; } nl_msg_put_u32(ctx->odp_actions, - ODP_ACTION_ATTR_OUTPUT, dst->port->odp_port); + OVS_ACTION_ATTR_OUTPUT, dst->port->odp_port); } /* Then output the rest. */ @@ -3554,18 +3757,22 @@ compose_actions(struct action_xlate_ctx *ctx, uint16_t vlan, } if (dst->vlan != cur_vlan) { if (dst->vlan == OFP_VLAN_NONE) { - nl_msg_put_flag(ctx->odp_actions, ODP_ACTION_ATTR_STRIP_VLAN); + nl_msg_put_flag(ctx->odp_actions, OVS_ACTION_ATTR_POP_VLAN); } else { ovs_be16 tci; + + if (cur_vlan != OFP_VLAN_NONE) { + nl_msg_put_flag(ctx->odp_actions, OVS_ACTION_ATTR_POP_VLAN); + } tci = htons(dst->vlan & VLAN_VID_MASK); tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK); nl_msg_put_be16(ctx->odp_actions, - ODP_ACTION_ATTR_SET_DL_TCI, tci); + OVS_ACTION_ATTR_PUSH_VLAN, tci); } cur_vlan = dst->vlan; } nl_msg_put_u32(ctx->odp_actions, - ODP_ACTION_ATTR_OUTPUT, dst->port->odp_port); + OVS_ACTION_ATTR_OUTPUT, dst->port->odp_port); } dst_set_free(&set); @@ -3711,6 +3918,7 @@ is_admissible(struct ofproto_dpif *ofproto, const struct flow *flow, "port %"PRIu16, ofproto->up.name, flow->in_port); } + *vlanp = -1; return false; } *vlanp = vlan = flow_get_vlan(ofproto, flow, in_bundle, have_packet); @@ -3718,8 +3926,10 @@ is_admissible(struct ofproto_dpif *ofproto, const struct flow *flow, return false; } - /* Drop frames for reserved multicast addresses. */ - if (eth_addr_is_reserved(flow->dl_dst)) { + /* Drop frames for reserved multicast addresses + * only if forward_bpdu option is absent. */ + if (eth_addr_is_reserved(flow->dl_dst) && + !ofproto->up.forward_bpdu) { return false; } @@ -3759,10 +3969,7 @@ is_admissible(struct ofproto_dpif *ofproto, const struct flow *flow, return true; } -/* If the composed actions may be applied to any packet in the given 'flow', - * returns true. Otherwise, the actions should only be applied to 'packet', or - * not at all, if 'packet' was NULL. */ -static bool +static void xlate_normal(struct action_xlate_ctx *ctx) { struct ofbundle *in_bundle; @@ -3770,6 +3977,8 @@ xlate_normal(struct action_xlate_ctx *ctx) struct mac_entry *mac; int vlan; + ctx->has_normal = true; + /* Check whether we should drop packets in this flow. */ if (!is_admissible(ctx->ofproto, &ctx->flow, ctx->packet != NULL, &ctx->tags, &vlan, &in_bundle)) { @@ -3777,8 +3986,8 @@ xlate_normal(struct action_xlate_ctx *ctx) goto done; } - /* Learn source MAC (but don't try to learn from revalidation). */ - if (ctx->packet) { + /* Learn source MAC. */ + if (ctx->may_learn) { update_learning_table(ctx->ofproto, &ctx->flow, vlan, in_bundle); } @@ -3793,7 +4002,8 @@ xlate_normal(struct action_xlate_ctx *ctx) * of time where we could learn from a packet reflected on a bond and * blackhole packets before the learning table is updated to reflect * the correct port. */ - return false; + ctx->may_set_up_flow = false; + return; } else { out_bundle = OFBUNDLE_FLOOD; } @@ -3807,8 +4017,120 @@ done: if (in_bundle) { compose_actions(ctx, vlan, in_bundle, out_bundle); } +} + +/* Optimized flow revalidation. + * + * It's a difficult problem, in general, to tell which facets need to have + * their actions recalculated whenever the OpenFlow flow table changes. We + * don't try to solve that general problem: for most kinds of OpenFlow flow + * table changes, we recalculate the actions for every facet. This is + * relatively expensive, but it's good enough if the OpenFlow flow table + * doesn't change very often. 
+ * + * However, we can expect one particular kind of OpenFlow flow table change to + * happen frequently: changes caused by MAC learning. To avoid wasting a lot + * of CPU on revalidating every facet whenever MAC learning modifies the flow + * table, we add a special case that applies to flow tables in which every rule + * has the same form (that is, the same wildcards), except that the table is + * also allowed to have a single "catch-all" flow that matches all packets. We + * optimize this case by tagging all of the facets that resubmit into the table + * and invalidating the same tag whenever a flow changes in that table. The + * end result is that we revalidate just the facets that need it (and sometimes + * a few more, but not all of the facets or even all of the facets that + * resubmit to the table modified by MAC learning). */ + +/* Calculates the tag to use for 'flow' and wildcards 'wc' when it is inserted + * into an OpenFlow table with the given 'basis'. */ +static uint32_t +rule_calculate_tag(const struct flow *flow, const struct flow_wildcards *wc, + uint32_t secret) +{ + if (flow_wildcards_is_catchall(wc)) { + return 0; + } else { + struct flow tag_flow = *flow; + flow_zero_wildcards(&tag_flow, wc); + return tag_create_deterministic(flow_hash(&tag_flow, secret)); + } +} - return true; +/* Following a change to OpenFlow table 'table_id' in 'ofproto', update the + * taggability of that table. + * + * This function must be called after *each* change to a flow table. If you + * skip calling it on some changes then the pointer comparisons at the end can + * be invalid if you get unlucky. For example, if a flow removal causes a + * cls_table to be destroyed and then a flow insertion causes a cls_table with + * different wildcards to be created with the same address, then this function + * will incorrectly skip revalidation. */ +static void +table_update_taggable(struct ofproto_dpif *ofproto, uint8_t table_id) +{ + struct table_dpif *table = &ofproto->tables[table_id]; + const struct classifier *cls = &ofproto->up.tables[table_id]; + struct cls_table *catchall, *other; + struct cls_table *t; + + catchall = other = NULL; + + switch (hmap_count(&cls->tables)) { + case 0: + /* We could tag this OpenFlow table but it would make the logic a + * little harder and it's a corner case that doesn't seem worth it + * yet. */ + break; + + case 1: + case 2: + HMAP_FOR_EACH (t, hmap_node, &cls->tables) { + if (cls_table_is_catchall(t)) { + catchall = t; + } else if (!other) { + other = t; + } else { + /* Indicate that we can't tag this by setting both tables to + * NULL. (We know that 'catchall' is already NULL.) */ + other = NULL; + } + } + break; + + default: + /* Can't tag this table. */ + break; + } + + if (table->catchall_table != catchall || table->other_table != other) { + table->catchall_table = catchall; + table->other_table = other; + ofproto->need_revalidate = true; + } +} + +/* Given 'rule' that has changed in some way (either it is a rule being + * inserted, a rule being deleted, or a rule whose actions are being + * modified), marks facets for revalidation to ensure that packets will be + * forwarded correctly according to the new state of the flow table. + * + * This function must be called after *each* change to a flow table. See + * the comment on table_update_taggable() for more information. 
*/ +static void +rule_invalidate(const struct rule_dpif *rule) +{ + struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto); + + table_update_taggable(ofproto, rule->up.table_id); + + if (!ofproto->need_revalidate) { + struct table_dpif *table = &ofproto->tables[rule->up.table_id]; + + if (table->other_table && rule->tag) { + tag_set_add(&ofproto->revalidate_set, rule->tag); + } else { + ofproto->need_revalidate = true; + } + } } static bool @@ -3907,7 +4229,8 @@ struct ofproto_trace { }; static void -trace_format_rule(struct ds *result, int level, const struct rule *rule) +trace_format_rule(struct ds *result, uint8_t table_id, int level, + const struct rule_dpif *rule) { ds_put_char_multiple(result, '\t', level); if (!rule) { @@ -3915,14 +4238,14 @@ trace_format_rule(struct ds *result, int level, const struct rule *rule) return; } - ds_put_format(result, "Rule: cookie=%#"PRIx64" ", - ntohll(rule->flow_cookie)); - cls_rule_format(&rule->cr, result); + ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ", + table_id, ntohll(rule->up.flow_cookie)); + cls_rule_format(&rule->up.cr, result); ds_put_char(result, '\n'); ds_put_char_multiple(result, '\t', level); ds_put_cstr(result, "OpenFlow "); - ofp_print_actions(result, rule->actions, rule->n_actions); + ofp_print_actions(result, rule->up.actions, rule->up.n_actions); ds_put_char(result, '\n'); } @@ -3941,6 +4264,20 @@ trace_format_flow(struct ds *result, int level, const char *title, ds_put_char(result, '\n'); } +static void +trace_format_regs(struct ds *result, int level, const char *title, + struct ofproto_trace *trace) +{ + size_t i; + + ds_put_char_multiple(result, '\t', level); + ds_put_format(result, "%s:", title); + for (i = 0; i < FLOW_N_REGS; i++) { + ds_put_format(result, " reg%zu=0x%"PRIx32, i, trace->flow.regs[i]); + } + ds_put_char(result, '\n'); +} + static void trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule) { @@ -3949,33 +4286,85 @@ trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule) ds_put_char(result, '\n'); trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace); - trace_format_rule(result, ctx->recurse + 1, &rule->up); + trace_format_regs(result, ctx->recurse + 1, "Resubmitted regs", trace); + trace_format_rule(result, ctx->table_id, ctx->recurse + 1, rule); } static void ofproto_unixctl_trace(struct unixctl_conn *conn, const char *args_, void *aux OVS_UNUSED) { - char *dpname, *in_port_s, *tun_id_s, *packet_s; + char *dpname, *arg1, *arg2, *arg3; char *args = xstrdup(args_); char *save_ptr = NULL; struct ofproto_dpif *ofproto; - struct ofpbuf packet; + struct ofpbuf odp_key; + struct ofpbuf *packet; struct rule_dpif *rule; struct ds result; struct flow flow; - uint16_t in_port; - ovs_be64 tun_id; char *s; - ofpbuf_init(&packet, strlen(args) / 2); + packet = NULL; + ofpbuf_init(&odp_key, 0); ds_init(&result); dpname = strtok_r(args, " ", &save_ptr); - tun_id_s = strtok_r(NULL, " ", &save_ptr); - in_port_s = strtok_r(NULL, " ", &save_ptr); - packet_s = strtok_r(NULL, "", &save_ptr); /* Get entire rest of line. */ - if (!dpname || !in_port_s || !packet_s) { + arg1 = strtok_r(NULL, " ", &save_ptr); + arg2 = strtok_r(NULL, " ", &save_ptr); + arg3 = strtok_r(NULL, "", &save_ptr); /* Get entire rest of line. */ + if (dpname && arg1 && (!arg2 || !strcmp(arg2, "-generate")) && !arg3) { + /* ofproto/trace dpname flow [-generate] */ + int error; + + /* Convert string to datapath key. 
*/ + ofpbuf_init(&odp_key, 0); + error = odp_flow_key_from_string(arg1, &odp_key); + if (error) { + unixctl_command_reply(conn, 501, "Bad flow syntax"); + goto exit; + } + + /* Convert odp_key to flow. */ + error = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow); + if (error) { + unixctl_command_reply(conn, 501, "Invalid flow"); + goto exit; + } + + /* Generate a packet, if requested. */ + if (arg2) { + packet = ofpbuf_new(0); + flow_compose(packet, &flow); + } + } else if (dpname && arg1 && arg2 && arg3) { + /* ofproto/trace dpname tun_id in_port packet */ + uint16_t in_port; + ovs_be64 tun_id; + + tun_id = htonll(strtoull(arg1, NULL, 0)); + in_port = ofp_port_to_odp_port(atoi(arg2)); + + packet = ofpbuf_new(strlen(args) / 2); + arg3 = ofpbuf_put_hex(packet, arg3, NULL); + arg3 += strspn(arg3, " "); + if (*arg3 != '\0') { + unixctl_command_reply(conn, 501, "Trailing garbage in command"); + goto exit; + } + if (packet->size < ETH_HEADER_LEN) { + unixctl_command_reply(conn, 501, + "Packet data too short for Ethernet"); + goto exit; + } + + ds_put_cstr(&result, "Packet: "); + s = ofp_packet_to_string(packet->data, packet->size, packet->size); + ds_put_cstr(&result, s); + free(s); + + flow_extract(packet, tun_id, in_port, &flow); + } else { unixctl_command_reply(conn, 501, "Bad command syntax"); goto exit; } @@ -3987,39 +4376,19 @@ ofproto_unixctl_trace(struct unixctl_conn *conn, const char *args_, goto exit; } - tun_id = htonll(strtoull(tun_id_s, NULL, 0)); - in_port = ofp_port_to_odp_port(atoi(in_port_s)); - - packet_s = ofpbuf_put_hex(&packet, packet_s, NULL); - packet_s += strspn(packet_s, " "); - if (*packet_s != '\0') { - unixctl_command_reply(conn, 501, "Trailing garbage in command"); - goto exit; - } - if (packet.size < ETH_HEADER_LEN) { - unixctl_command_reply(conn, 501, "Packet data too short for Ethernet"); - goto exit; - } - - ds_put_cstr(&result, "Packet: "); - s = ofp_packet_to_string(packet.data, packet.size, packet.size); - ds_put_cstr(&result, s); - free(s); - - flow_extract(&packet, tun_id, in_port, &flow); ds_put_cstr(&result, "Flow: "); flow_format(&result, &flow); ds_put_char(&result, '\n'); - rule = rule_dpif_lookup(ofproto, &flow); - trace_format_rule(&result, 0, &rule->up); + rule = rule_dpif_lookup(ofproto, &flow, 0); + trace_format_rule(&result, 0, 0, rule); if (rule) { struct ofproto_trace trace; struct ofpbuf *odp_actions; trace.result = &result; trace.flow = flow; - action_xlate_ctx_init(&trace.ctx, ofproto, &flow, &packet); + action_xlate_ctx_init(&trace.ctx, ofproto, &flow, packet); trace.ctx.resubmit_hook = trace_resubmit; odp_actions = xlate_actions(&trace.ctx, rule->up.actions, rule->up.n_actions); @@ -4029,13 +4398,23 @@ ofproto_unixctl_trace(struct unixctl_conn *conn, const char *args_, ds_put_cstr(&result, "Datapath actions: "); format_odp_actions(&result, odp_actions->data, odp_actions->size); ofpbuf_delete(odp_actions); + + if (!trace.ctx.may_set_up_flow) { + if (packet) { + ds_put_cstr(&result, "\nThis flow is not cachable."); + } else { + ds_put_cstr(&result, "\nThe datapath actions are incomplete--" + "for complete actions, please supply a packet."); + } + } } unixctl_command_reply(conn, 200, ds_cstr(&result)); exit: ds_destroy(&result); - ofpbuf_uninit(&packet); + ofpbuf_delete(packet); + ofpbuf_uninit(&odp_key); free(args); } @@ -4115,9 +4494,11 @@ const struct ofproto_class ofproto_dpif_class = { set_sflow, set_cfm, get_cfm_fault, + get_cfm_remote_mpids, bundle_set, bundle_remove, mirror_set, set_flood_vlans, is_mirror_output_bundle, + 
forward_bpdu_changed, };
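
The heart of this patch is the tag-based revalidation scheme described in the
"Optimized flow revalidation" comment above rule_calculate_tag(): a facet is
tagged with a hash of only the fields that a resubmitted-to table actually
matches on, so a MAC-learning flow_mod invalidates just the facets whose tag
matches the changed rule's tag.  The standalone sketch below models that idea
outside of Open vSwitch.  All names here (mock_flow, mock_wc, mock_hash,
mock_calculate_tag) are hypothetical stand-ins, and the toy FNV-1a hash takes
the place of OVS's flow_hash() and tag_create_deterministic().

/* Standalone model of the patch's rule_calculate_tag() idea. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct mock_flow {              /* Stand-in for struct flow. */
    uint32_t src_ip, dst_ip;
    uint16_t tp_src, tp_dst;
};

struct mock_wc {                /* Stand-in for struct flow_wildcards: */
    struct mock_flow mask;      /* all-ones fields are matched, all-zero
                                 * fields are wildcarded. */
};

/* Toy 32-bit FNV-1a hash; the real code uses flow_hash(flow, basis). */
static uint32_t
mock_hash(const void *p, size_t n, uint32_t basis)
{
    const uint8_t *b = p;
    uint32_t h = 2166136261u ^ basis;

    while (n--) {
        h = (h ^ *b++) * 16777619u;
    }
    return h;
}

/* Models rule_calculate_tag(): zero the wildcarded fields, then hash what
 * remains with the table's per-table 'basis' so that identical flows in
 * different tables get different tags.  (The real function also returns 0
 * for a catch-all rule.) */
static uint32_t
mock_calculate_tag(const struct mock_flow *flow, const struct mock_wc *wc,
                   uint32_t basis)
{
    struct mock_flow masked = *flow;

    masked.src_ip &= wc->mask.src_ip;
    masked.dst_ip &= wc->mask.dst_ip;
    masked.tp_src &= wc->mask.tp_src;
    masked.tp_dst &= wc->mask.tp_dst;
    return mock_hash(&masked, sizeof masked, basis);
}

int
main(void)
{
    /* A table whose rules all match on dst_ip only, much as a MAC learning
     * table matches only on VLAN and dl_dst. */
    struct mock_wc wc = { .mask = { .dst_ip = 0xffffffffu } };
    uint32_t basis = 0x12345678;        /* random_uint32() in the patch */

    /* A facet's flow, tagged when it resubmits into the table... */
    struct mock_flow pkt = { .src_ip = 0x0a000001, .dst_ip = 0x0a000002,
                             .tp_src = 1234, .tp_dst = 80 };
    uint32_t facet_tag = mock_calculate_tag(&pkt, &wc, basis);

    /* ...and a new rule for the same dst_ip, tagged at insertion time.
     * The tags collide, so only facets carrying this tag need to be
     * revalidated, not every facet in the switch. */
    struct mock_flow rule = { .dst_ip = 0x0a000002 };
    uint32_t rule_tag = mock_calculate_tag(&rule, &wc, basis);

    printf("facet tag %#"PRIx32", rule tag %#"PRIx32" -> %s\n",
           facet_tag, rule_tag,
           facet_tag == rule_tag ? "revalidate matching facets"
                                 : "no overlap");
    return 0;
}

This is also why table_update_taggable() insists that a table contain at most
one non-catch-all wildcard form: with two different masks in play there would
be no single masked hash that rule_invalidate() could compute for a changed
rule, so it falls back to full revalidation instead.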
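
The other tunable this patch threads through is flow eviction:
facet_max_idle() now keeps at least ofproto->up.flow_eviction_threshold
facets instead of a hard-coded 1000.  A minimal sketch of its cutoff
computation follows, with hypothetical bucket contents; the real function
uses 256 buckets of 100 ms each and fills them from every installed facet's
'used' timestamp.

#include <stdio.h>

enum { N_BUCKETS = 8, BUCKET_WIDTH = 100 };     /* real code: 256 x 100 ms */

/* Accumulate buckets from most- to least-recently used until they cover at
 * least MAX(threshold, 1% of all facets); facets idle longer than the
 * returned cutoff become candidates for expiration. */
static int
max_idle_ms(const unsigned buckets[N_BUCKETS], unsigned total,
            unsigned threshold)
{
    unsigned floor = threshold > total / 100 ? threshold : total / 100;
    unsigned subtotal = 0;
    int bucket = 0;

    if (total <= threshold) {
        return N_BUCKETS * BUCKET_WIDTH;        /* keep everything */
    }
    do {
        subtotal += buckets[bucket++];
    } while (bucket < N_BUCKETS && subtotal < floor);
    return bucket * BUCKET_WIDTH;
}

int
main(void)
{
    /* 10,000 facets: a small hot set plus one big stale bucket. */
    unsigned buckets[N_BUCKETS] = { 150, 60, 40, 30, 20, 9000, 400, 300 };

    printf("cutoff: %d ms\n", max_idle_ms(buckets, 10000, 1000));
    return 0;
}

With these numbers the five most-recent buckets hold only 300 facets, below
the 1000-facet floor, so the sixth bucket is included and the cutoff lands at
600 ms; raising flow_eviction_threshold keeps more of the long tail cached.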