X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=ofproto%2Fofproto-dpif.c;h=6af49747fa851f14958e18a32e02d3ee4b224fa9;hb=a6b7506dab305d91fc5f2ac6416a714e5fa09dd4;hp=5e0a7f0aa38b38b70ef7a62bbbcfd1d794f1e92d;hpb=a1aeea86475db086ce95679962fb6d03d0a645f3;p=sliver-openvswitch.git

diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index 5e0a7f0aa..6af49747f 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -25,6 +25,7 @@
 #include "bond.h"
 #include "bundle.h"
 #include "byte-order.h"
+#include "connectivity.h"
 #include "connmgr.h"
 #include "coverage.h"
 #include "cfm.h"
@@ -57,6 +58,7 @@
 #include "ofproto-dpif-upcall.h"
 #include "ofproto-dpif-xlate.h"
 #include "poll-loop.h"
+#include "seq.h"
 #include "simap.h"
 #include "smap.h"
 #include "timer.h"
@@ -284,8 +286,6 @@ struct facet {
     long long int prev_used;     /* Used time from last stats push. */
 
     /* Accounting. */
-    uint64_t accounted_bytes;    /* Bytes processed by facet_account(). */
-    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
     uint16_t tcp_flags;          /* TCP flags seen for this 'rule'. */
 
     struct xlate_out xout;
@@ -317,7 +317,6 @@ static void flow_push_stats(struct ofproto_dpif *, struct flow *,
                             struct dpif_flow_stats *, bool may_learn);
 static void facet_push_stats(struct facet *, bool may_learn);
 static void facet_learn(struct facet *);
-static void facet_account(struct facet *);
 static void push_all_stats(void);
 
 static bool facet_is_controller_flow(struct facet *);
@@ -389,8 +388,6 @@ static void port_run(struct ofport_dpif *);
 static int set_bfd(struct ofport *, const struct smap *);
 static int set_cfm(struct ofport *, const struct cfm_settings *);
 static void ofport_update_peer(struct ofport_dpif *);
-static void run_fast_rl(void);
-static int run_fast(struct ofproto *);
 
 struct dpif_completion {
     struct list list_node;
@@ -422,11 +419,6 @@ COVERAGE_DEFINE(rev_flow_table);
 COVERAGE_DEFINE(rev_mac_learning);
 COVERAGE_DEFINE(rev_inconsistency);
 
-struct avg_subfacet_rates {
-    double add_rate;   /* Moving average of new flows created per minute. */
-    double del_rate;   /* Moving average of flows deleted per minute. */
-};
-
 /* All datapaths of a given type share a single dpif backer instance. */
 struct dpif_backer {
     char *type;
@@ -436,7 +428,7 @@ struct dpif_backer {
     struct timer next_expiration;
 
     struct ovs_rwlock odp_to_ofport_lock;
-    struct hmap odp_to_ofport_map OVS_GUARDED; /* ODP port to ofport map. */
+    struct hmap odp_to_ofport_map OVS_GUARDED; /* Contains "struct ofport"s. */
 
     struct simap tnl_backers;      /* Set of dpif ports backing tunnels. */
 
@@ -456,7 +448,6 @@ struct dpif_backer {
      * exposed via "ovs-appctl dpif/show". The goal is to learn about
      * traffic patterns in ways that we can use later to improve Open vSwitch
      * performance in new situations. */
-    long long int created;           /* Time when it is created. */
     unsigned max_n_subfacet;         /* Maximum number of flows */
     unsigned avg_n_subfacet;         /* Average number of flows. */
     long long int avg_subfacet_life; /* Average life span of subfacets. */
@@ -487,6 +478,7 @@ struct ofproto_dpif {
     struct hmap bundles;        /* Contains "struct ofbundle"s. */
     struct mac_learning *ml;
     bool has_bonded_bundles;
+    bool lacp_enabled;
     struct mbridge *mbridge;
 
     /* Facets. */
@@ -511,6 +503,7 @@ struct ofproto_dpif {
     struct sset ghost_ports;       /* Ports with no datapath port. */
     struct sset port_poll_set;     /* Queued names for port_poll() reply. */
     int port_poll_errno;           /* Last errno for port_poll() reply. */
+    uint64_t change_seq;           /* Connectivity status changes. */
 
     /* Per ofproto's dpif stats. */
     uint64_t n_hit;
@@ -549,9 +542,6 @@ static void handle_upcalls(struct dpif_backer *);
 /* Flow expiration. */
 static int expire(struct dpif_backer *);
 
-/* NetFlow. */
-static void send_netflow_active_timeouts(struct ofproto_dpif *);
-
 /* Global variables. */
 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
 
@@ -679,6 +669,8 @@ type_run(const char *type)
 
     dpif_run(backer->dpif);
 
+    handle_upcalls(backer);
+
     /* The most natural place to push facet statistics is when they're pulled
      * from the datapath. However, when there are many flows in the datapath,
      * this expensive operation can occur so frequently, that it reduces our
@@ -806,10 +798,9 @@ type_run(const char *type)
                               ofproto->no_packet_in_rule, ofproto->ml,
                               ofproto->stp, ofproto->mbridge,
                               ofproto->sflow, ofproto->ipfix,
-                              ofproto->up.frag_handling,
+                              ofproto->netflow, ofproto->up.frag_handling,
                               ofproto->up.forward_bpdu,
-                              connmgr_has_in_band(ofproto->up.connmgr),
-                              ofproto->netflow != NULL);
+                              connmgr_has_in_band(ofproto->up.connmgr));
 
             HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                 xlate_bundle_set(ofproto, bundle, bundle->name,
@@ -840,7 +831,6 @@ type_run(const char *type)
             ovs_rwlock_unlock(&ofproto->facets.rwlock);
             CLS_CURSOR_FOR_EACH_SAFE (facet, next, cr, &cursor) {
                 facet_revalidate(facet);
-                run_fast_rl();
             }
         }
 
@@ -1007,44 +997,6 @@ process_dpif_port_error(struct dpif_backer *backer, int error)
     }
 }
 
-static int
-dpif_backer_run_fast(struct dpif_backer *backer)
-{
-    handle_upcalls(backer);
-
-    return 0;
-}
-
-static int
-type_run_fast(const char *type)
-{
-    struct dpif_backer *backer;
-
-    backer = shash_find_data(&all_dpif_backers, type);
-    if (!backer) {
-        /* This is not necessarily a problem, since backers are only
-         * created on demand. */
-        return 0;
-    }
-
-    return dpif_backer_run_fast(backer);
-}
-
-static void
-run_fast_rl(void)
-{
-    static long long int port_rl = LLONG_MIN;
-
-    if (time_msec() >= port_rl) {
-        struct ofproto_dpif *ofproto;
-
-        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
-            run_fast(&ofproto->up);
-        }
-        port_rl = time_msec() + 200;
-    }
-}
-
 static void
 type_wait(const char *type)
 {
@@ -1225,7 +1177,6 @@ open_dpif_backer(const char *type, struct dpif_backer **backerp)
     backer->n_handler_threads = n_handler_threads;
 
     backer->max_n_subfacet = 0;
-    backer->created = time_msec();
    backer->avg_n_subfacet = 0;
     backer->avg_subfacet_life = 0;
 
@@ -1252,6 +1203,7 @@ construct(struct ofproto *ofproto_)
     ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
     ofproto->mbridge = mbridge_create();
     ofproto->has_bonded_bundles = false;
+    ofproto->lacp_enabled = false;
     ovs_mutex_init(&ofproto->stats_mutex);
     ovs_mutex_init(&ofproto->vsp_mutex);
 
@@ -1269,6 +1221,7 @@ construct(struct ofproto *ofproto_)
     sset_init(&ofproto->ghost_ports);
     sset_init(&ofproto->port_poll_set);
     ofproto->port_poll_errno = 0;
+    ofproto->change_seq = 0;
 
     SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
         struct iface_hint *iface_hint = node->data;
@@ -1426,7 +1379,7 @@ destruct(struct ofproto *ofproto_)
 
     mbridge_unref(ofproto->mbridge);
 
-    netflow_destroy(ofproto->netflow);
+    netflow_unref(ofproto->netflow);
     dpif_sflow_unref(ofproto->sflow);
     hmap_destroy(&ofproto->bundles);
     mac_learning_unref(ofproto->ml);
@@ -1446,37 +1399,11 @@ destruct(struct ofproto *ofproto_)
     close_dpif_backer(ofproto->backer);
 }
 
-static int
-run_fast(struct ofproto *ofproto_)
-{
-    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
-    struct ofproto_packet_in *pin, *next_pin;
-    struct list pins;
-
-    /* Do not perform any periodic activity required by 'ofproto' while
-     * waiting for flow restore to complete. */
-    if (ofproto_get_flow_restore_wait()) {
-        return 0;
-    }
-
-    guarded_list_pop_all(&ofproto->pins, &pins);
-    LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
-        connmgr_send_packet_in(ofproto->up.connmgr, pin);
-        list_remove(&pin->list_node);
-        free(CONST_CAST(void *, pin->up.packet));
-        free(pin);
-    }
-
-    return 0;
-}
-
 static int
 run(struct ofproto *ofproto_)
 {
     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
-    struct ofport_dpif *ofport;
-    struct ofbundle *bundle;
-    int error;
+    uint64_t new_seq;
 
     if (mbridge_need_revalidate(ofproto->mbridge)) {
         ofproto->backer->need_revalidate = REV_RECONFIGURE;
@@ -1485,21 +1412,23 @@ run(struct ofproto *ofproto_)
         ovs_rwlock_unlock(&ofproto->ml->rwlock);
     }
 
-    /* Do not perform any periodic activity below required by 'ofproto' while
+    /* Do not perform any periodic activity required by 'ofproto' while
      * waiting for flow restore to complete. */
-    if (ofproto_get_flow_restore_wait()) {
-        return 0;
-    }
+    if (!ofproto_get_flow_restore_wait()) {
+        struct ofproto_packet_in *pin, *next_pin;
+        struct list pins;
 
-    error = run_fast(ofproto_);
-    if (error) {
-        return error;
+        guarded_list_pop_all(&ofproto->pins, &pins);
+        LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
+            connmgr_send_packet_in(ofproto->up.connmgr, pin);
+            list_remove(&pin->list_node);
+            free(CONST_CAST(void *, pin->up.packet));
+            free(pin);
+        }
     }
 
     if (ofproto->netflow) {
-        if (netflow_run(ofproto->netflow)) {
-            send_netflow_active_timeouts(ofproto);
-        }
+        netflow_run(ofproto->netflow);
     }
     if (ofproto->sflow) {
         dpif_sflow_run(ofproto->sflow);
@@ -1508,11 +1437,22 @@ run(struct ofproto *ofproto_)
         dpif_ipfix_run(ofproto->ipfix);
     }
 
-    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
-        port_run(ofport);
+    new_seq = seq_read(connectivity_seq_get());
+    if (ofproto->change_seq != new_seq) {
+        struct ofport_dpif *ofport;
+
+        HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
+            port_run(ofport);
+        }
+
+        ofproto->change_seq = new_seq;
     }
-    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
-        bundle_run(bundle);
+    if (ofproto->lacp_enabled || ofproto->has_bonded_bundles) {
+        struct ofbundle *bundle;
+
+        HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
+            bundle_run(bundle);
+        }
     }
 
     stp_run(ofproto);
@@ -1552,7 +1492,6 @@ static void
 wait(struct ofproto *ofproto_)
 {
     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
-    struct ofbundle *bundle;
     if (ofproto_get_flow_restore_wait()) {
         return;
     }
@@ -1564,8 +1503,12 @@ wait(struct ofproto *ofproto_)
     if (ofproto->ipfix) {
         dpif_ipfix_wait(ofproto->ipfix);
     }
-    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
-        bundle_wait(bundle);
+    if (ofproto->lacp_enabled || ofproto->has_bonded_bundles) {
+        struct ofbundle *bundle;
+
+        HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
+            bundle_wait(bundle);
+        }
     }
     if (ofproto->netflow) {
         netflow_wait(ofproto->netflow);
@@ -2179,6 +2122,24 @@ get_stp_port_status(struct ofport *ofport_,
     s->state = stp_port_get_state(sp);
     s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000;
     s->role = stp_port_get_role(sp);
+
+    return 0;
+}
+
+static int
+get_stp_port_stats(struct ofport *ofport_,
+                   struct ofproto_port_stp_stats *s)
+{
+    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
+    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+    struct stp_port *sp = ofport->stp_port;
+
+    if (!ofproto->stp || !sp) {
+        s->enabled = false;
+        return 0;
+    }
+
+    s->enabled = true;
     stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count);
     return 0;
 }
@@ -2449,6 +2410,7 @@ bundle_set(struct ofproto *ofproto_, void *aux,
 
     /* LACP. */
     if (s->lacp) {
+        ofproto->lacp_enabled = true;
         if (!bundle->lacp) {
             ofproto->backer->need_revalidate = REV_RECONFIGURE;
             bundle->lacp = lacp_create();
@@ -2796,13 +2758,6 @@ get_ofp_port(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port)
     return ofport ? ofport_dpif_cast(ofport) : NULL;
 }
 
-static struct ofport_dpif *
-get_odp_port(const struct ofproto_dpif *ofproto, odp_port_t odp_port)
-{
-    struct ofport_dpif *port = odp_port_to_ofport(ofproto->backer, odp_port);
-    return port && &ofproto->up == port->up.ofproto ? port : NULL;
-}
-
 static void
 ofproto_port_from_dpif_port(struct ofproto_dpif *ofproto,
                             struct ofproto_port *ofproto_port,
@@ -3558,10 +3513,8 @@ update_subfacet_stats(struct subfacet *subfacet,
     subfacet->dp_byte_count = stats->n_bytes;
 
     subfacet_update_stats(subfacet, &diff);
-    if (facet->accounted_bytes < facet->byte_count) {
+    if (diff.n_packets) {
         facet_learn(facet);
-        facet_account(facet);
-        facet->accounted_bytes = facet->byte_count;
     }
 }
 
@@ -3630,7 +3583,6 @@ update_stats(struct dpif_backer *backer)
             delete_unexpected_flow(backer, key, key_len);
             break;
         }
-        run_fast_rl();
     }
     dpif_flow_dump_done(&dump);
 }
@@ -3823,8 +3775,6 @@ facet_create(const struct flow_miss *miss)
     facet->learn_rl = time_msec() + 500;
 
     list_init(&facet->subfacets);
-    netflow_flow_init(&facet->nf_flow);
-    netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
 
     xlate_out_copy(&facet->xout, &miss->xout);
 
@@ -3834,7 +3784,6 @@ facet_create(const struct flow_miss *miss)
     classifier_insert(&ofproto->facets, &facet->cr);
     ovs_rwlock_unlock(&ofproto->facets.rwlock);
 
-    facet->nf_flow.output_iface = facet->xout.nf_output_iface;
     return facet;
 }
 
@@ -3954,54 +3903,6 @@ facet_learn(struct facet *facet)
     facet_push_stats(facet, true);
 }
 
-static void
-facet_account(struct facet *facet)
-{
-    const struct nlattr *a;
-    unsigned int left;
-    ovs_be16 vlan_tci;
-    uint64_t n_bytes;
-
-    if (!facet->xout.has_normal || !facet->ofproto->has_bonded_bundles) {
-        return;
-    }
-    n_bytes = facet->byte_count - facet->accounted_bytes;
-
-    /* This loop feeds byte counters to bond_account() for rebalancing to use
-     * as a basis. We also need to track the actual VLAN on which the packet
-     * is going to be sent to ensure that it matches the one passed to
-     * bond_choose_output_slave(). (Otherwise, we will account to the wrong
-     * hash bucket.)
-     *
-     * We use the actions from an arbitrary subfacet because they should all
-     * be equally valid for our purpose. */
-    vlan_tci = facet->flow.vlan_tci;
-    NL_ATTR_FOR_EACH_UNSAFE (a, left, facet->xout.odp_actions.data,
-                             facet->xout.odp_actions.size) {
-        const struct ovs_action_push_vlan *vlan;
-        struct ofport_dpif *port;
-
-        switch (nl_attr_type(a)) {
-        case OVS_ACTION_ATTR_OUTPUT:
-            port = get_odp_port(facet->ofproto, nl_attr_get_odp_port(a));
-            if (port && port->bundle && port->bundle->bond) {
-                bond_account(port->bundle->bond, &facet->flow,
-                             vlan_tci_to_vid(vlan_tci), n_bytes);
-            }
-            break;
-
-        case OVS_ACTION_ATTR_POP_VLAN:
-            vlan_tci = htons(0);
-            break;
-
-        case OVS_ACTION_ATTR_PUSH_VLAN:
-            vlan = nl_attr_get(a);
-            vlan_tci = vlan->vlan_tci;
-            break;
-        }
-    }
-}
-
 /* Returns true if the only action for 'facet' is to send to the controller.
  * (We don't report NetFlow expiration messages for such facets because they
  * are just part of the control logic for the network, not real traffic). */
@@ -4048,25 +3949,15 @@ facet_flush_stats(struct facet *facet)
     }
 
     facet_push_stats(facet, false);
-    if (facet->accounted_bytes < facet->byte_count) {
-        facet_account(facet);
-        facet->accounted_bytes = facet->byte_count;
-    }
 
     if (ofproto->netflow && !facet_is_controller_flow(facet)) {
-        struct ofexpired expired;
-        expired.flow = facet->flow;
-        expired.packet_count = facet->packet_count;
-        expired.byte_count = facet->byte_count;
-        expired.used = facet->used;
-        netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
+        netflow_expire(ofproto->netflow, &facet->flow);
+        netflow_flow_clear(ofproto->netflow, &facet->flow);
     }
 
     /* Reset counters to prevent double counting if 'facet' ever gets
      * reinstalled. */
     facet_reset_counters(facet);
-
-    netflow_flow_clear(&facet->nf_flow);
     facet->tcp_flags = 0;
 }
 
@@ -4181,7 +4072,7 @@ facet_revalidate(struct facet *facet)
 
     error = xlate_receive(ofproto->backer, NULL, subfacet->key,
                           subfacet->key_len, &recv_flow, NULL,
-                          &recv_ofproto, NULL);
+                          &recv_ofproto, NULL, NULL, NULL, NULL);
     if (error
         || recv_ofproto != ofproto
         || facet != facet_find(ofproto, &recv_flow)) {
@@ -4201,6 +4092,10 @@ facet_revalidate(struct facet *facet)
     xlate_in_init(&xin, ofproto, &facet->flow, new_rule, 0, NULL);
     xlate_actions(&xin, &xout);
     flow_wildcards_or(&xout.wc, &xout.wc, &wc);
+    /* Make sure non -packet fields are not masked. If not cleared,
+     * the memcmp() below may fail, causing an otherwise valid facet
+     * to be removed. */
+    flow_wildcards_clear_non_packet_fields(&xout.wc);
 
     /* A facet's slow path reason should only change under dramatic
      * circumstances. Rather than try to update everything, it's simpler to
@@ -4242,7 +4137,6 @@ facet_revalidate(struct facet *facet)
     facet->xout.has_fin_timeout = xout.has_fin_timeout;
     facet->xout.nf_output_iface = xout.nf_output_iface;
     facet->xout.mirrors = xout.mirrors;
-    facet->nf_flow.output_iface = facet->xout.nf_output_iface;
 
     ovs_mutex_lock(&new_rule->up.mutex);
     facet->used = MAX(facet->used, new_rule->up.created);
@@ -4260,24 +4154,14 @@ facet_reset_counters(struct facet *facet)
     facet->byte_count = 0;
     facet->prev_packet_count = 0;
     facet->prev_byte_count = 0;
-    facet->accounted_bytes = 0;
 }
 
 static void
 flow_push_stats(struct ofproto_dpif *ofproto, struct flow *flow,
                 struct dpif_flow_stats *stats, bool may_learn)
 {
-    struct ofport_dpif *in_port;
     struct xlate_in xin;
 
-    in_port = get_ofp_port(ofproto, flow->in_port.ofp_port);
-    if (in_port && in_port->is_tunnel) {
-        netdev_vport_inc_rx(in_port->up.netdev, stats);
-        if (in_port->bfd) {
-            bfd_account_rx(in_port->bfd, stats);
-        }
-    }
-
     xlate_in_init(&xin, ofproto, flow, NULL, stats->tcp_flags, NULL);
     xin.resubmit_stats = stats;
     xin.may_learn = may_learn;
@@ -4302,18 +4186,12 @@ facet_push_stats(struct facet *facet, bool may_learn)
         facet->prev_packet_count = facet->packet_count;
         facet->prev_byte_count = facet->byte_count;
         facet->prev_used = facet->used;
-
-        netflow_flow_update_time(facet->ofproto->netflow, &facet->nf_flow,
-                                 facet->used);
-        netflow_flow_update_flags(&facet->nf_flow, facet->tcp_flags);
-        mirror_update_stats(facet->ofproto->mbridge, facet->xout.mirrors,
-                            stats.n_packets, stats.n_bytes);
         flow_push_stats(facet->ofproto, &facet->flow, &stats, may_learn);
     }
 }
 
 static void
-push_all_stats__(bool run_fast)
+push_all_stats(void)
 {
     static long long int rl = LLONG_MIN;
     struct ofproto_dpif *ofproto;
@@ -4330,9 +4208,6 @@ push_all_stats__(bool run_fast)
         cls_cursor_init(&cursor, &ofproto->facets, NULL);
         CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
             facet_push_stats(facet, false);
-            if (run_fast) {
-                run_fast_rl();
-            }
         }
         ovs_rwlock_unlock(&ofproto->facets.rwlock);
     }
@@ -4340,12 +4215,6 @@ push_all_stats__(bool run_fast)
     rl = time_msec() + 100;
 }
 
-static void
-push_all_stats(void)
-{
-    push_all_stats__(true);
-}
-
 void
 rule_dpif_credit_stats(struct rule_dpif *rule,
                        const struct dpif_flow_stats *stats)
@@ -4496,7 +4365,6 @@ subfacet_destroy_batch(struct dpif_backer *backer,
         subfacet_reset_dp_stats(subfacets[i], &stats[i]);
         subfacets[i]->path = SF_NOT_INSTALLED;
         subfacet_destroy(subfacets[i]);
-        run_fast_rl();
     }
 }
 
@@ -4779,11 +4647,7 @@ rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
 {
     struct rule_dpif *rule = rule_dpif_cast(rule_);
 
-    /* push_all_stats() can handle flow misses which, when using the learn
-     * action, can cause rules to be added and deleted. This can corrupt our
-     * caller's datastructures which assume that rule_get_stats() doesn't have
-     * an impact on the flow table. To be safe, we disable miss handling. */
-    push_all_stats__(false);
+    push_all_stats();
 
     /* Start from historical data for 'rule' itself that are no longer tracked
      * in facets. This counts, for example, facets that have expired. */
@@ -5057,7 +4921,7 @@ set_netflow(struct ofproto *ofproto_,
         return netflow_set_options(ofproto->netflow, netflow_options);
     } else if (ofproto->netflow) {
         ofproto->backer->need_revalidate = REV_RECONFIGURE;
-        netflow_destroy(ofproto->netflow);
+        netflow_unref(ofproto->netflow);
         ofproto->netflow = NULL;
     }
 
@@ -5072,46 +4936,6 @@ get_netflow_ids(const struct ofproto *ofproto_,
 
     dpif_get_netflow_ids(ofproto->backer->dpif, engine_type, engine_id);
 }
-
-static void
-send_active_timeout(struct ofproto_dpif *ofproto, struct facet *facet)
-{
-    if (!facet_is_controller_flow(facet) &&
-        netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) {
-        struct subfacet *subfacet;
-        struct ofexpired expired;
-
-        LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
-            if (subfacet->path == SF_FAST_PATH) {
-                struct dpif_flow_stats stats;
-
-                subfacet_install(subfacet, &facet->xout.odp_actions,
-                                 &stats);
-                subfacet_update_stats(subfacet, &stats);
-            }
-        }
-
-        expired.flow = facet->flow;
-        expired.packet_count = facet->packet_count;
-        expired.byte_count = facet->byte_count;
-        expired.used = facet->used;
-        netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
-    }
-}
-
-static void
-send_netflow_active_timeouts(struct ofproto_dpif *ofproto)
-{
-    struct cls_cursor cursor;
-    struct facet *facet;
-
-    ovs_rwlock_rdlock(&ofproto->facets.rwlock);
-    cls_cursor_init(&cursor, &ofproto->facets, NULL);
-    CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
-        send_active_timeout(ofproto, facet);
-    }
-    ovs_rwlock_unlock(&ofproto->facets.rwlock);
-}
 
 static struct ofproto_dpif *
 ofproto_dpif_lookup(const char *name)
@@ -5374,7 +5198,7 @@ parse_flow_and_packet(int argc, const char *argv[],
         }
 
         if (xlate_receive(backer, NULL, odp_key.data, odp_key.size, flow,
-                          NULL, ofprotop, NULL)) {
+                          NULL, ofprotop, NULL, NULL, NULL, NULL)) {
             error = "Invalid datapath flow";
             goto exit;
         }
@@ -6314,14 +6138,12 @@ const struct ofproto_class ofproto_dpif_class = {
     del,
     port_open_type,
     type_run,
-    type_run_fast,
    type_wait,
     alloc,
     construct,
     destruct,
     dealloc,
     run,
-    run_fast,
     wait,
     get_memory_usage,
     flush,
@@ -6367,6 +6189,7 @@ const struct ofproto_class ofproto_dpif_class = {
     get_stp_status,
     set_stp_port,
    get_stp_port_status,
+    get_stp_port_stats,
     set_queues,
     bundle_set,
     bundle_remove,
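
Note on the main run() change above: the per-iteration port_run() walk is replaced by a check of the global connectivity sequence number (seq_read() on connectivity_seq_get(), pulled in via the new "seq.h" and "connectivity.h" includes), so ports are only re-examined when carrier, BFD, CFM, LACP, or STP status has actually changed; the bundle_run() walk is separately gated on the new lacp_enabled flag and has_bonded_bundles. Below is a minimal sketch of that polling idiom, outside the patch, for illustration only: seq_read(), seq_wait(), and connectivity_seq_get() are the real OVS helpers, while the example_* functions and the cached last_seq variable are hypothetical names, and the seq_wait() counterpart shows how a poll-loop user of the API would normally register a wakeup (this particular patch only caches the value in ofproto->change_seq).

/* Minimal sketch (not part of the patch) of the connectivity-seq idiom.
 * Assumes the OVS headers "seq.h" and "connectivity.h". */
#include "connectivity.h"
#include "seq.h"

static uint64_t last_seq;   /* Illustrative: last connectivity change acted on. */

static void
example_run(void)
{
    uint64_t new_seq = seq_read(connectivity_seq_get());

    if (new_seq != last_seq) {
        /* Carrier, BFD, CFM, LACP or STP status changed somewhere; this is
         * the point at which the patch walks the ports and calls port_run(). */
        last_seq = new_seq;
    }
}

static void
example_wait(void)
{
    /* Wake the poll loop only when connectivity changes again, instead of
     * polling every iteration. */
    seq_wait(connectivity_seq_get(), last_seq);
}

The design point is that a shared, monotonically increasing sequence number turns "did anything change since I last looked?" into a single integer comparison, which is what lets run() skip the O(n-ports) walk on idle iterations.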