diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index 8c7164b2b..80c7c4c1e 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -52,6 +52,7 @@
 #include "ofproto-dpif-ipfix.h"
 #include "ofproto-dpif-mirror.h"
 #include "ofproto-dpif-sflow.h"
+#include "ofproto-dpif-upcall.h"
 #include "ofproto-dpif-xlate.h"
 #include "poll-loop.h"
 #include "simap.h"
@@ -71,6 +72,8 @@ COVERAGE_DEFINE(facet_revalidate);
 COVERAGE_DEFINE(facet_unexpected);
 COVERAGE_DEFINE(facet_suppress);
 COVERAGE_DEFINE(subfacet_install_fail);
+COVERAGE_DEFINE(packet_in_overflow);
+COVERAGE_DEFINE(flow_mod_overflow);

 /* Number of implemented OpenFlow tables. */
 enum { N_TABLES = 255 };
@@ -80,12 +83,7 @@ BUILD_ASSERT_DECL(N_TABLES >= 2 && N_TABLES <= 255);
 struct flow_miss;
 struct facet;

-static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *,
-                                          const struct flow *,
-                                          struct flow_wildcards *wc);
-
 static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes);
-static void rule_invalidate(const struct rule_dpif *);

 struct ofbundle {
     struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
@@ -169,8 +167,7 @@ struct subfacet {

 #define SUBFACET_DESTROY_MAX_BATCH 50

-static struct subfacet *subfacet_create(struct facet *, struct flow_miss *miss,
-                                        long long int now);
+static struct subfacet *subfacet_create(struct facet *, struct flow_miss *);
 static struct subfacet *subfacet_find(struct dpif_backer *,
                                       const struct nlattr *key, size_t key_len,
                                       uint32_t key_hash);
@@ -246,7 +243,6 @@ struct facet {
     uint8_t tcp_flags;           /* TCP flags seen for this 'rule'. */

     struct xlate_out xout;
-    bool fail_open;              /* Facet matched the fail open rule. */

     /* Storage for a single subfacet, to reduce malloc() time and space
      * overhead.  (A facet always has at least one subfacet and in the common
@@ -258,9 +254,7 @@ struct facet {
     long long int learn_rl;      /* Rate limiter for facet_learn(). */
 };

-static struct facet *facet_create(const struct flow_miss *, struct rule_dpif *,
-                                  struct xlate_out *,
-                                  struct dpif_flow_stats *);
+static struct facet *facet_create(const struct flow_miss *);
 static void facet_remove(struct facet *);
 static void facet_free(struct facet *);

@@ -273,6 +267,8 @@ static bool facet_check_consistency(struct facet *);
 static void facet_flush_stats(struct facet *);

 static void facet_reset_counters(struct facet *);
+static void flow_push_stats(struct ofproto_dpif *, struct flow *,
+                            struct dpif_flow_stats *, bool may_learn);
 static void facet_push_stats(struct facet *, bool may_learn);
 static void facet_learn(struct facet *);
 static void facet_account(struct facet *);
@@ -289,7 +285,6 @@ struct ofport_dpif {
     struct list bundle_node;    /* In struct ofbundle's "ports" list. */
     struct cfm *cfm;            /* Connectivity Fault Management, if any. */
     struct bfd *bfd;            /* BFD, if any. */
-    tag_type tag;               /* Tag associated with this port. */
     bool may_enable;            /* May be enabled in bonds. */
     bool is_tunnel;             /* This port is a tunnel. */
     long long int carrier_seq;  /* Carrier status changes. */
@@ -300,7 +295,9 @@ struct ofport_dpif {
     enum stp_state stp_state;   /* Always STP_DISABLED if STP not in use. */
     long long int stp_state_entered;

-    struct hmap priorities;     /* Map of attached 'priority_to_dscp's. */
+    /* Queue to DSCP mapping. 
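The hunk above replaces the per-port 'priorities' hmap with the flat 'qdscp' array declared just below. Here is a minimal freestanding sketch of how a lookup over such an array can work; the two-field layout of struct ofproto_port_queue is inferred from the rewritten set_queues() later in this diff, and qdscp_lookup() itself is a hypothetical helper, not a function added by this patch:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct ofproto_port_queue {
    uint32_t queue;             /* Queue ID. */
    uint8_t dscp;               /* DSCP bits for traffic on that queue. */
};

/* Returns true and sets '*dscp' if 'queue' has a mapping.  A linear scan
 * is enough: a port rarely carries more than a handful of queues, which
 * is presumably why the patch can drop the hash map. */
static bool
qdscp_lookup(const struct ofproto_port_queue *qdscp, size_t n_qdscp,
             uint32_t queue, uint8_t *dscp)
{
    size_t i;

    for (i = 0; i < n_qdscp; i++) {
        if (qdscp[i].queue == queue) {
            *dscp = qdscp[i].dscp;
            return true;
        }
    }
    return false;
}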
*/ + struct ofproto_port_queue *qdscp; + size_t n_qdscp; /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.) * @@ -312,16 +309,6 @@ struct ofport_dpif { int vlandev_vid; }; -/* Node in 'ofport_dpif''s 'priorities' map. Used to maintain a map from - * 'priority' (the datapath's term for QoS queue) to the dscp bits which all - * traffic egressing the 'ofport' with that priority should be marked with. */ -struct priority_to_dscp { - struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'priorities' map. */ - uint32_t priority; /* Priority of this queue (see struct flow). */ - - uint8_t dscp; /* DSCP bits to mark outgoing traffic with. */ -}; - /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.) * * This is deprecated. It is only for compatibility with broken device drivers @@ -336,7 +323,6 @@ struct vlan_splinter { int vid; }; -static bool vsp_adjust_flow(const struct ofproto_dpif *, struct flow *); static void vsp_remove(struct ofport_dpif *); static void vsp_add(struct ofport_dpif *, ofp_port_t realdev_ofp_port, int vid); @@ -357,26 +343,15 @@ static void port_run_fast(struct ofport_dpif *); static void port_wait(struct ofport_dpif *); static int set_bfd(struct ofport *, const struct smap *); static int set_cfm(struct ofport *, const struct cfm_settings *); -static void ofport_clear_priorities(struct ofport_dpif *); static void ofport_update_peer(struct ofport_dpif *); static void run_fast_rl(void); +static int run_fast(struct ofproto *); struct dpif_completion { struct list list_node; struct ofoperation *op; }; -/* Extra information about a classifier table. - * Currently used just for optimized flow revalidation. */ -struct table_dpif { - /* If either of these is nonnull, then this table has a form that allows - * flows to be tagged to avoid revalidating most flows for the most common - * kinds of flow table changes. */ - struct cls_table *catchall_table; /* Table that wildcards all fields. */ - struct cls_table *other_table; /* Table with any other wildcard set. */ - uint32_t basis; /* Keeps each table's tags separate. */ -}; - /* Reasons that we might need to revalidate every facet, and corresponding * coverage counters. * @@ -388,25 +363,20 @@ struct table_dpif { enum revalidate_reason { REV_RECONFIGURE = 1, /* Switch configuration changed. */ REV_STP, /* Spanning tree protocol port status change. */ + REV_BOND, /* Bonding changed. */ REV_PORT_TOGGLED, /* Port enabled or disabled by CFM, LACP, ...*/ REV_FLOW_TABLE, /* Flow table changed. */ + REV_MAC_LEARNING, /* Mac learning changed. */ REV_INCONSISTENCY /* Facet self-check failed. */ }; COVERAGE_DEFINE(rev_reconfigure); COVERAGE_DEFINE(rev_stp); +COVERAGE_DEFINE(rev_bond); COVERAGE_DEFINE(rev_port_toggled); COVERAGE_DEFINE(rev_flow_table); +COVERAGE_DEFINE(rev_mac_learning); COVERAGE_DEFINE(rev_inconsistency); -/* Drop keys are odp flow keys which have drop flows installed in the kernel. - * These are datapath flows which have no associated ofproto, if they did we - * would use facets. */ -struct drop_key { - struct hmap_node hmap_node; - struct nlattr *key; - size_t key_len; -}; - struct avg_subfacet_rates { double add_rate; /* Moving average of new flows created per minute. */ double del_rate; /* Moving average of flows deleted per minute. */ @@ -417,14 +387,16 @@ struct dpif_backer { char *type; int refcount; struct dpif *dpif; + struct udpif *udpif; struct timer next_expiration; - struct hmap odp_to_ofport_map; /* ODP port to ofport mapping. 
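A pattern this patch applies repeatedly: a member annotated OVS_GUARDED plus a dedicated ovs_rwlock, as with the odp_to_ofport map declared just below, where lookups take the lock shared and port additions or removals take it exclusive. A freestanding sketch of the same discipline, with raw pthreads standing in for the ovs_rwlock_*() wrappers; the port_map type is invented for illustration:

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

#define PORT_MAP_MAX 64

struct port_map {
    pthread_rwlock_t lock;             /* Init with pthread_rwlock_init(). */
    uint32_t odp_port[PORT_MAP_MAX];   /* Guarded by 'lock'. */
    int ofp_port[PORT_MAP_MAX];        /* Guarded by 'lock'. */
    size_t n;                          /* Guarded by 'lock'. */
};

static int
port_map_lookup(struct port_map *map, uint32_t odp_port)
{
    int result = -1;
    size_t i;

    pthread_rwlock_rdlock(&map->lock);  /* Shared: readers may overlap. */
    for (i = 0; i < map->n; i++) {
        if (map->odp_port[i] == odp_port) {
            result = map->ofp_port[i];
            break;
        }
    }
    pthread_rwlock_unlock(&map->lock);
    return result;
}

static void
port_map_add(struct port_map *map, uint32_t odp_port, int ofp_port)
{
    pthread_rwlock_wrlock(&map->lock);  /* Exclusive: blocks all readers. */
    if (map->n < PORT_MAP_MAX) {
        map->odp_port[map->n] = odp_port;
        map->ofp_port[map->n] = ofp_port;
        map->n++;
    }
    pthread_rwlock_unlock(&map->lock);
}

OVS_GUARDED additionally lets Clang's thread-safety analysis warn at compile time when a guarded member is touched without its lock, which is why the patch prefers annotations over comments.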
*/ + + struct ovs_rwlock odp_to_ofport_lock; + struct hmap odp_to_ofport_map OVS_GUARDED; /* ODP port to ofport map. */ struct simap tnl_backers; /* Set of dpif ports backing tunnels. */ /* Facet revalidation flags applying to facets which use this backer. */ enum revalidate_reason need_revalidate; /* Revalidate every facet. */ - struct tag_set revalidate_set; /* Revalidate only matching facets. */ struct hmap drop_keys; /* Set of dropped odp keys. */ bool recv_set_enable; /* Enables or disables receiving packets. */ @@ -457,14 +429,15 @@ struct dpif_backer { /* Number of subfacets added or deleted from 'created' to 'last_minute.' */ unsigned long long int total_subfacet_add_count; unsigned long long int total_subfacet_del_count; + + /* Number of upcall handling threads. */ + unsigned int n_handler_threads; }; /* All existing ofproto_backer instances, indexed by ofproto->up.type. */ static struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers); static void drop_key_clear(struct dpif_backer *); -static struct ofport_dpif * -odp_port_to_ofport(const struct dpif_backer *, odp_port_t odp_port); static void update_moving_averages(struct dpif_backer *backer); struct ofproto_dpif { @@ -490,9 +463,6 @@ struct ofproto_dpif { struct classifier facets; /* Contains 'struct facet's. */ long long int consistency_rl; - /* Revalidation. */ - struct table_dpif tables[N_TABLES]; - /* Support for debugging async flow mods. */ struct list completions; @@ -504,8 +474,9 @@ struct ofproto_dpif { long long int stp_last_tick; /* VLAN splinters. */ - struct hmap realdev_vid_map; /* (realdev,vid) -> vlandev. */ - struct hmap vlandev_map; /* vlandev -> (realdev,vid). */ + struct ovs_mutex vsp_mutex; + struct hmap realdev_vid_map OVS_GUARDED; /* (realdev,vid) -> vlandev. */ + struct hmap vlandev_map OVS_GUARDED; /* vlandev -> (realdev,vid). */ /* Ports. */ struct sset ports; /* Set of standard port names. */ @@ -516,6 +487,15 @@ struct ofproto_dpif { /* Per ofproto's dpif stats. */ uint64_t n_hit; uint64_t n_missed; + + /* Work queues. */ + struct ovs_mutex flow_mod_mutex; + struct list flow_mods OVS_GUARDED; + size_t n_flow_mods OVS_GUARDED; + + struct ovs_mutex pin_mutex; + struct list pins OVS_GUARDED; + size_t n_pins OVS_GUARDED; }; /* Defer flow mod completion until "ovs-appctl ofproto/unclog"? (Useful only @@ -540,10 +520,11 @@ ofproto_dpif_cast(const struct ofproto *ofproto) static struct ofport_dpif *get_ofp_port(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port); +static void ofproto_trace(struct ofproto_dpif *, const struct flow *, + const struct ofpbuf *packet, struct ds *); /* Upcalls. */ -#define FLOW_MISS_MAX_BATCH 50 -static int handle_upcalls(struct dpif_backer *, unsigned int max_batch); +static void handle_upcalls(struct dpif_backer *); /* Flow expiration. */ static int expire(struct dpif_backer *); @@ -560,18 +541,43 @@ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); /* Initial mappings of port to bridge mappings. */ static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports); -int +/* Executes and takes ownership of 'fm'. 
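The 'flow_mods' and 'pins' queues declared earlier in this hunk are OVS intrusive lists: the 'struct list' node lives inside each element (fm->list_node and pin->list_node below), so pushing and removing are O(1) and allocation-free. A freestanding sketch of the circular, sentinel-based scheme, simplified from lib/list.h:

struct list {
    struct list *prev, *next;
};

static void
list_init(struct list *list)
{
    list->prev = list->next = list;     /* Empty list: the sentinel points
                                         * at itself, so no NULL checks. */
}

static void
list_push_back(struct list *list, struct list *elem)
{
    elem->prev = list->prev;            /* Splice in just before sentinel. */
    elem->next = list;
    list->prev->next = elem;
    list->prev = elem;
}

static void
list_remove(struct list *elem)
{
    elem->prev->next = elem->next;      /* O(1): no head needed to unlink. */
    elem->next->prev = elem->prev;
}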
*/ +void ofproto_dpif_flow_mod(struct ofproto_dpif *ofproto, struct ofputil_flow_mod *fm) { - return ofproto_flow_mod(&ofproto->up, fm); + ovs_mutex_lock(&ofproto->flow_mod_mutex); + if (ofproto->n_flow_mods > 1024) { + ovs_mutex_unlock(&ofproto->flow_mod_mutex); + COVERAGE_INC(flow_mod_overflow); + free(fm->ofpacts); + free(fm); + return; + } + + list_push_back(&ofproto->flow_mods, &fm->list_node); + ofproto->n_flow_mods++; + ovs_mutex_unlock(&ofproto->flow_mod_mutex); } +/* Appends 'pin' to the queue of "packet ins" to be sent to the controller. + * Takes ownership of 'pin' and pin->packet. */ void ofproto_dpif_send_packet_in(struct ofproto_dpif *ofproto, struct ofputil_packet_in *pin) { - connmgr_send_packet_in(ofproto->up.connmgr, pin); + ovs_mutex_lock(&ofproto->pin_mutex); + if (ofproto->n_pins > 1024) { + ovs_mutex_unlock(&ofproto->pin_mutex); + COVERAGE_INC(packet_in_overflow); + free(CONST_CAST(void *, pin->packet)); + free(pin); + return; + } + + list_push_back(&ofproto->pins, &pin->list_node); + ofproto->n_pins++; + ovs_mutex_unlock(&ofproto->pin_mutex); } /* Factory functions. */ @@ -693,17 +699,25 @@ type_run(const char *type) error = dpif_recv_set(backer->dpif, backer->recv_set_enable); if (error) { + udpif_recv_set(backer->udpif, 0, false); VLOG_ERR("Failed to enable receiving packets in dpif."); return error; } + udpif_recv_set(backer->udpif, n_handler_threads, + backer->recv_set_enable); dpif_flow_flush(backer->dpif); backer->need_revalidate = REV_RECONFIGURE; } - if (backer->need_revalidate - || !tag_set_is_empty(&backer->revalidate_set)) { - struct tag_set revalidate_set = backer->revalidate_set; - bool need_revalidate = backer->need_revalidate; + /* If the n_handler_threads is reconfigured, call udpif_recv_set() + * to reset the handler threads. */ + if (backer->n_handler_threads != n_handler_threads) { + udpif_recv_set(backer->udpif, n_handler_threads, + backer->recv_set_enable); + backer->n_handler_threads = n_handler_threads; + } + + if (backer->need_revalidate) { struct ofproto_dpif *ofproto; struct simap_node *node; struct simap tmp_backers; @@ -764,68 +778,73 @@ type_run(const char *type) switch (backer->need_revalidate) { case REV_RECONFIGURE: COVERAGE_INC(rev_reconfigure); break; case REV_STP: COVERAGE_INC(rev_stp); break; + case REV_BOND: COVERAGE_INC(rev_bond); break; case REV_PORT_TOGGLED: COVERAGE_INC(rev_port_toggled); break; case REV_FLOW_TABLE: COVERAGE_INC(rev_flow_table); break; + case REV_MAC_LEARNING: COVERAGE_INC(rev_mac_learning); break; case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break; } - - if (backer->need_revalidate) { - /* Clear the drop_keys in case we should now be accepting some - * formerly dropped flows. */ - drop_key_clear(backer); - } - - /* Clear the revalidation flags. */ - tag_set_init(&backer->revalidate_set); backer->need_revalidate = 0; + /* Clear the drop_keys in case we should now be accepting some + * formerly dropped flows. 
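ofproto_dpif_flow_mod() and ofproto_dpif_send_packet_in() above share one shape: a mutex-guarded list with a hard cap of 1024 entries whose producers drop work and bump a coverage counter instead of blocking when the consumer falls behind. The consumer, run_fast() further down, steals the entire list under the mutex and processes it unlocked. A self-contained sketch of that pair; 'struct work' and the counter names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
    struct work *next;
    int payload;
};

static pthread_mutex_t q_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct work *q_head, *q_tail;   /* Guarded by q_mutex. */
static size_t q_len;                   /* Guarded by q_mutex. */
static size_t q_overflows;             /* Guarded by q_mutex; cf.
                                        * COVERAGE_INC(flow_mod_overflow). */
enum { Q_CAP = 1024 };                 /* Same cap as above. */

static void
enqueue(struct work *w)
{
    pthread_mutex_lock(&q_mutex);
    if (q_len >= Q_CAP) {
        q_overflows++;                 /* Overflow: drop, don't block. */
        pthread_mutex_unlock(&q_mutex);
        free(w);
        return;
    }
    w->next = NULL;
    if (q_tail) {
        q_tail->next = w;
    } else {
        q_head = w;
    }
    q_tail = w;
    q_len++;
    pthread_mutex_unlock(&q_mutex);
}

static void
drain(void)
{
    struct work *batch;

    pthread_mutex_lock(&q_mutex);
    batch = q_head;                    /* Steal the whole queue... */
    q_head = q_tail = NULL;
    q_len = 0;
    pthread_mutex_unlock(&q_mutex);

    while (batch) {                    /* ...and process it unlocked. */
        struct work *next = batch->next;
        printf("work item %d\n", batch->payload);
        free(batch);
        batch = next;
    }
}

Swapping the whole list out keeps the critical section O(1), so the upcall handler threads that produce this work are never stalled behind flow-table processing.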
*/ + drop_key_clear(backer); + HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) { struct facet *facet, *next; + struct ofport_dpif *ofport; struct cls_cursor cursor; + struct ofbundle *bundle; if (ofproto->backer != backer) { continue; } - if (need_revalidate) { - struct ofport_dpif *ofport; - struct ofbundle *bundle; - - xlate_ofproto_set(ofproto, ofproto->up.name, ofproto->ml, - ofproto->mbridge, ofproto->sflow, - ofproto->ipfix, ofproto->up.frag_handling, - ofproto->up.forward_bpdu, - connmgr_has_in_band(ofproto->up.connmgr), - ofproto->netflow != NULL, - ofproto->stp != NULL); - - HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) { - xlate_bundle_set(ofproto, bundle, bundle->name, - bundle->vlan_mode, bundle->vlan, - bundle->trunks, bundle->use_priority_tags, - bundle->bond, bundle->lacp, - bundle->floodable); - } + ovs_rwlock_wrlock(&xlate_rwlock); + xlate_ofproto_set(ofproto, ofproto->up.name, + ofproto->backer->dpif, ofproto->miss_rule, + ofproto->no_packet_in_rule, ofproto->ml, + ofproto->stp, ofproto->mbridge, + ofproto->sflow, ofproto->ipfix, + ofproto->up.frag_handling, + ofproto->up.forward_bpdu, + connmgr_has_in_band(ofproto->up.connmgr), + ofproto->netflow != NULL); - HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) { - xlate_ofport_set(ofproto, ofport->bundle, ofport, - ofport->up.ofp_port, ofport->odp_port, - ofport->up.netdev, ofport->cfm, - ofport->bfd, ofport->peer, - ofport->up.pp.config, ofport->stp_state, - ofport->is_tunnel, ofport->may_enable); - } + HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) { + xlate_bundle_set(ofproto, bundle, bundle->name, + bundle->vlan_mode, bundle->vlan, + bundle->trunks, bundle->use_priority_tags, + bundle->bond, bundle->lacp, + bundle->floodable); } + HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) { + int stp_port = ofport->stp_port + ? stp_port_no(ofport->stp_port) + : 0; + xlate_ofport_set(ofproto, ofport->bundle, ofport, + ofport->up.ofp_port, ofport->odp_port, + ofport->up.netdev, ofport->cfm, + ofport->bfd, ofport->peer, stp_port, + ofport->qdscp, ofport->n_qdscp, + ofport->up.pp.config, ofport->is_tunnel, + ofport->may_enable); + } + ovs_rwlock_unlock(&xlate_rwlock); + + /* Only ofproto-dpif cares about the facet classifier so we just + * lock cls_cursor_init() to appease the thread safety analysis. */ + ovs_rwlock_rdlock(&ofproto->facets.rwlock); cls_cursor_init(&cursor, &ofproto->facets, NULL); + ovs_rwlock_unlock(&ofproto->facets.rwlock); CLS_CURSOR_FOR_EACH_SAFE (facet, next, cr, &cursor) { - if (need_revalidate - || tag_set_intersects(&revalidate_set, facet->xout.tags)) { - facet_revalidate(facet); - run_fast_rl(); - } + facet_revalidate(facet); + run_fast_rl(); } } + + udpif_revalidate(backer->udpif); } if (!backer->recv_set_enable) { @@ -962,10 +981,12 @@ process_dpif_port_change(struct dpif_backer *backer, const char *devname) /* 'ofport''s datapath port number has changed from * 'ofport->odp_port' to 'port.port_no'. Update our internal data * structures to match. 
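CLS_CURSOR_FOR_EACH_SAFE above matters because facet_revalidate() may delete the facet under the cursor. Every '_SAFE' iterator in the tree reduces to the same idea: capture the successor before visiting the node. A freestanding version over a singly linked list; all names are illustrative:

#include <stdbool.h>
#include <stdlib.h>

struct node {
    struct node *next;
    int key;
};

/* Walks the list even though 'should_delete' may condemn the node under
 * the cursor: the successor is captured before each visit, so unlinking
 * and freeing the current node cannot break the traversal. */
static void
visit_all_safe(struct node **head, bool (*should_delete)(const struct node *))
{
    struct node **prev = head;
    struct node *node = *head;

    while (node) {
        struct node *next = node->next;     /* Saved before any deletion. */

        if (should_delete(node)) {
            *prev = next;
            free(node);
        } else {
            prev = &node->next;
        }
        node = next;
    }
}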
*/ + ovs_rwlock_wrlock(&backer->odp_to_ofport_lock); hmap_remove(&backer->odp_to_ofport_map, &ofport->odp_port_node); ofport->odp_port = port.port_no; hmap_insert(&backer->odp_to_ofport_map, &ofport->odp_port_node, hash_odp_port(port.port_no)); + ovs_rwlock_unlock(&backer->odp_to_ofport_lock); backer->need_revalidate = REV_RECONFIGURE; } } @@ -987,32 +1008,10 @@ process_dpif_port_error(struct dpif_backer *backer, int error) } static int -dpif_backer_run_fast(struct dpif_backer *backer, int max_batch) +dpif_backer_run_fast(struct dpif_backer *backer) { - unsigned int work; - - /* If recv_set_enable is false, we should not handle upcalls. */ - if (!backer->recv_set_enable) { - return 0; - } - - /* Handle one or more batches of upcalls, until there's nothing left to do - * or until we do a fixed total amount of work. - * - * We do work in batches because it can be much cheaper to set up a number - * of flows and fire off their patches all at once. We do multiple batches - * because in some cases handling a packet can cause another packet to be - * queued almost immediately as part of the return flow. Both - * optimizations can make major improvements on some benchmarks and - * presumably for real traffic as well. */ - work = 0; - while (work < max_batch) { - int retval = handle_upcalls(backer, max_batch - work); - if (retval <= 0) { - return -retval; - } - work += retval; - } + udpif_run(backer->udpif); + handle_upcalls(backer); return 0; } @@ -1029,44 +1028,22 @@ type_run_fast(const char *type) return 0; } - return dpif_backer_run_fast(backer, FLOW_MISS_MAX_BATCH); + return dpif_backer_run_fast(backer); } static void run_fast_rl(void) { static long long int port_rl = LLONG_MIN; - static unsigned int backer_rl = 0; if (time_msec() >= port_rl) { struct ofproto_dpif *ofproto; - struct ofport_dpif *ofport; HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) { - - HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) { - port_run_fast(ofport); - } + run_fast(&ofproto->up); } port_rl = time_msec() + 200; } - - /* XXX: We have to be careful not to do too much work in this function. If - * we call dpif_backer_run_fast() too often, or with too large a batch, - * performance improves signifcantly, but at a cost. It's possible for the - * number of flows in the datapath to increase without bound, and for poll - * loops to take 10s of seconds. The correct solution to this problem, - * long term, is to separate flow miss handling into it's own thread so it - * isn't affected by revalidations, and expirations. Until then, this is - * the best we can do. */ - if (++backer_rl >= 10) { - struct shash_node *node; - - backer_rl = 0; - SHASH_FOR_EACH (node, &all_dpif_backers) { - dpif_backer_run_fast(node->data, 1); - } - } } static void @@ -1086,6 +1063,8 @@ type_wait(const char *type) } timer_wait(&backer->next_expiration); + dpif_wait(backer->dpif); + udpif_wait(backer->udpif); } /* Basic life-cycle. 
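run_fast_rl() above keeps the usual OVS time-based throttle: record when work is next permitted and do nothing until time_msec() passes that point. A freestanding version of the idiom, with a POSIX monotonic clock standing in for OVS's time_msec():

#include <stdbool.h>
#include <time.h>

static long long int
now_msec(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (long long int) ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Returns true at most once per 'interval_msec'. */
static bool
throttle_due(long long int *next_run, long long int interval_msec)
{
    long long int now = now_msec();

    if (now >= *next_run) {
        *next_run = now + interval_msec;    /* e.g. 200 ms, as above. */
        return true;
    }
    return false;
}

Starting '*next_run' at LLONG_MIN, as 'port_rl' does above, guarantees the first call runs immediately.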
*/ @@ -1121,10 +1100,12 @@ close_dpif_backer(struct dpif_backer *backer) hmap_destroy(&backer->drop_keys); simap_destroy(&backer->tnl_backers); + ovs_rwlock_destroy(&backer->odp_to_ofport_lock); hmap_destroy(&backer->odp_to_ofport_map); node = shash_find(&all_dpif_backers, backer->type); free(backer->type); shash_delete(&all_dpif_backers, node); + udpif_destroy(backer->udpif); dpif_close(backer->dpif); ovs_assert(hmap_is_empty(&backer->subfacets)); @@ -1194,17 +1175,18 @@ open_dpif_backer(const char *type, struct dpif_backer **backerp) free(backer); return error; } + backer->udpif = udpif_create(backer, backer->dpif); backer->type = xstrdup(type); backer->governor = NULL; backer->refcount = 1; hmap_init(&backer->odp_to_ofport_map); + ovs_rwlock_init(&backer->odp_to_ofport_lock); hmap_init(&backer->drop_keys); hmap_init(&backer->subfacets); timer_set_duration(&backer->next_expiration, 1000); backer->need_revalidate = 0; simap_init(&backer->tnl_backers); - tag_set_init(&backer->revalidate_set); backer->recv_set_enable = !ofproto_get_flow_restore_wait(); *backerp = backer; @@ -1241,6 +1223,9 @@ open_dpif_backer(const char *type, struct dpif_backer **backerp) close_dpif_backer(backer); return error; } + udpif_recv_set(backer->udpif, n_handler_threads, + backer->recv_set_enable); + backer->n_handler_threads = n_handler_threads; backer->max_n_subfacet = 0; backer->created = time_msec(); @@ -1263,9 +1248,8 @@ construct(struct ofproto *ofproto_) { struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); struct shash_node *node, *next; - odp_port_t max_ports; + uint32_t max_ports; int error; - int i; error = open_dpif_backer(ofproto->up.type, &ofproto->backer); if (error) { @@ -1273,8 +1257,7 @@ construct(struct ofproto *ofproto_) } max_ports = dpif_get_max_ports(ofproto->backer->dpif); - ofproto_init_max_ports(ofproto_, u16_to_ofp(MIN(odp_to_u32(max_ports), - ofp_to_u16(OFPP_MAX)))); + ofproto_init_max_ports(ofproto_, MIN(max_ports, ofp_to_u16(OFPP_MAX))); ofproto->netflow = NULL; ofproto->sflow = NULL; @@ -1284,19 +1267,24 @@ construct(struct ofproto *ofproto_) ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME); ofproto->mbridge = mbridge_create(); ofproto->has_bonded_bundles = false; + ovs_mutex_init(&ofproto->vsp_mutex); classifier_init(&ofproto->facets); ofproto->consistency_rl = LLONG_MIN; - for (i = 0; i < N_TABLES; i++) { - struct table_dpif *table = &ofproto->tables[i]; + list_init(&ofproto->completions); - table->catchall_table = NULL; - table->other_table = NULL; - table->basis = random_uint32(); - } + ovs_mutex_init(&ofproto->flow_mod_mutex); + ovs_mutex_lock(&ofproto->flow_mod_mutex); + list_init(&ofproto->flow_mods); + ofproto->n_flow_mods = 0; + ovs_mutex_unlock(&ofproto->flow_mod_mutex); - list_init(&ofproto->completions); + ovs_mutex_init(&ofproto->pin_mutex); + ovs_mutex_lock(&ofproto->pin_mutex); + list_init(&ofproto->pins); + ofproto->n_pins = 0; + ovs_mutex_unlock(&ofproto->pin_mutex); ofproto_dpif_unixctl_init(); @@ -1369,9 +1357,12 @@ add_internal_flow(struct ofproto_dpif *ofproto, int id, return error; } - *rulep = rule_dpif_lookup_in_table(ofproto, &fm.match.flow, NULL, - TBL_INTERNAL); - ovs_assert(*rulep != NULL); + if (rule_dpif_lookup_in_table(ofproto, &fm.match.flow, NULL, TBL_INTERNAL, + rulep)) { + ovs_rwlock_unlock(&(*rulep)->up.evict); + } else { + NOT_REACHED(); + } return 0; } @@ -1428,10 +1419,14 @@ destruct(struct ofproto *ofproto_) { struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); struct rule_dpif *rule, *next_rule; + struct 
ofputil_packet_in *pin, *next_pin; + struct ofputil_flow_mod *fm, *next_fm; struct oftable *table; ofproto->backer->need_revalidate = REV_RECONFIGURE; + ovs_rwlock_wrlock(&xlate_rwlock); xlate_remove_ofproto(ofproto); + ovs_rwlock_unlock(&xlate_rwlock); hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node); complete_operations(ofproto); @@ -1439,11 +1434,33 @@ destruct(struct ofproto *ofproto_) OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) { struct cls_cursor cursor; + ovs_rwlock_wrlock(&table->cls.rwlock); cls_cursor_init(&cursor, &table->cls, NULL); CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) { - ofproto_rule_destroy(&rule->up); + ofproto_rule_destroy(&ofproto->up, &table->cls, &rule->up); } + ovs_rwlock_unlock(&table->cls.rwlock); + } + + ovs_mutex_lock(&ofproto->flow_mod_mutex); + LIST_FOR_EACH_SAFE (fm, next_fm, list_node, &ofproto->flow_mods) { + list_remove(&fm->list_node); + ofproto->n_flow_mods--; + free(fm->ofpacts); + free(fm); + } + ovs_mutex_unlock(&ofproto->flow_mod_mutex); + ovs_mutex_destroy(&ofproto->flow_mod_mutex); + + ovs_mutex_lock(&ofproto->pin_mutex); + LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &ofproto->pins) { + list_remove(&pin->list_node); + ofproto->n_pins--; + free(CONST_CAST(void *, pin->packet)); + free(pin); } + ovs_mutex_unlock(&ofproto->pin_mutex); + ovs_mutex_destroy(&ofproto->pin_mutex); mbridge_unref(ofproto->mbridge); @@ -1461,6 +1478,8 @@ destruct(struct ofproto *ofproto_) sset_destroy(&ofproto->ghost_ports); sset_destroy(&ofproto->port_poll_set); + ovs_mutex_destroy(&ofproto->vsp_mutex); + close_dpif_backer(ofproto->backer); } @@ -1468,6 +1487,9 @@ static int run_fast(struct ofproto *ofproto_) { struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); + struct ofputil_packet_in *pin, *next_pin; + struct ofputil_flow_mod *fm, *next_fm; + struct list flow_mods, pins; struct ofport_dpif *ofport; /* Do not perform any periodic activity required by 'ofproto' while @@ -1476,6 +1498,47 @@ run_fast(struct ofproto *ofproto_) return 0; } + ovs_mutex_lock(&ofproto->flow_mod_mutex); + if (ofproto->n_flow_mods) { + flow_mods = ofproto->flow_mods; + list_moved(&flow_mods); + list_init(&ofproto->flow_mods); + ofproto->n_flow_mods = 0; + } else { + list_init(&flow_mods); + } + ovs_mutex_unlock(&ofproto->flow_mod_mutex); + + LIST_FOR_EACH_SAFE (fm, next_fm, list_node, &flow_mods) { + int error = ofproto_flow_mod(&ofproto->up, fm); + if (error && !VLOG_DROP_WARN(&rl)) { + VLOG_WARN("learning action failed to modify flow table (%s)", + ofperr_get_name(error)); + } + + list_remove(&fm->list_node); + free(fm->ofpacts); + free(fm); + } + + ovs_mutex_lock(&ofproto->pin_mutex); + if (ofproto->n_pins) { + pins = ofproto->pins; + list_moved(&pins); + list_init(&ofproto->pins); + ofproto->n_pins = 0; + } else { + list_init(&pins); + } + ovs_mutex_unlock(&ofproto->pin_mutex); + + LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) { + connmgr_send_packet_in(ofproto->up.connmgr, pin); + list_remove(&pin->list_node); + free(CONST_CAST(void *, pin->packet)); + free(pin); + } + HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) { port_run_fast(ofport); } @@ -1497,7 +1560,9 @@ run(struct ofproto *ofproto_) if (mbridge_need_revalidate(ofproto->mbridge)) { ofproto->backer->need_revalidate = REV_RECONFIGURE; - mac_learning_flush(ofproto->ml, NULL); + ovs_rwlock_wrlock(&ofproto->ml->rwlock); + mac_learning_flush(ofproto->ml); + ovs_rwlock_unlock(&ofproto->ml->rwlock); } /* Do not perform any periodic activity below required by 'ofproto' while @@ 
-1519,6 +1584,9 @@ run(struct ofproto *ofproto_) if (ofproto->sflow) { dpif_sflow_run(ofproto->sflow); } + if (ofproto->ipfix) { + dpif_ipfix_run(ofproto->ipfix); + } HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) { port_run(ofport); @@ -1528,9 +1596,14 @@ run(struct ofproto *ofproto_) } stp_run(ofproto); - mac_learning_run(ofproto->ml, &ofproto->backer->revalidate_set); + ovs_rwlock_wrlock(&ofproto->ml->rwlock); + if (mac_learning_run(ofproto->ml)) { + ofproto->backer->need_revalidate = REV_MAC_LEARNING; + } + ovs_rwlock_unlock(&ofproto->ml->rwlock); /* Check the consistency of a random facet, to aid debugging. */ + ovs_rwlock_rdlock(&ofproto->facets.rwlock); if (time_msec() >= ofproto->consistency_rl && !classifier_is_empty(&ofproto->facets) && !ofproto->backer->need_revalidate) { @@ -1546,13 +1619,11 @@ run(struct ofproto *ofproto_) hmap_node); facet = CONTAINER_OF(cr, struct facet, cr); - if (!tag_set_intersects(&ofproto->backer->revalidate_set, - facet->xout.tags)) { - if (!facet_check_consistency(facet)) { - ofproto->backer->need_revalidate = REV_INCONSISTENCY; - } + if (!facet_check_consistency(facet)) { + ofproto->backer->need_revalidate = REV_INCONSISTENCY; } } + ovs_rwlock_unlock(&ofproto->facets.rwlock); return 0; } @@ -1572,13 +1643,11 @@ wait(struct ofproto *ofproto_) return; } - dpif_wait(ofproto->backer->dpif); - dpif_recv_wait(ofproto->backer->dpif); if (ofproto->sflow) { dpif_sflow_wait(ofproto->sflow); } - if (!tag_set_is_empty(&ofproto->backer->revalidate_set)) { - poll_immediate_wake(); + if (ofproto->ipfix) { + dpif_ipfix_wait(ofproto->ipfix); } HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) { port_wait(ofport); @@ -1589,7 +1658,9 @@ wait(struct ofproto *ofproto_) if (ofproto->netflow) { netflow_wait(ofproto->netflow); } + ovs_rwlock_rdlock(&ofproto->ml->rwlock); mac_learning_wait(ofproto->ml); + ovs_rwlock_unlock(&ofproto->ml->rwlock); stp_wait(ofproto); if (ofproto->backer->need_revalidate) { /* Shouldn't happen, but if it does just go around again. 
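The consistency block above is a cheap randomized self-test: at most once per interval it picks one random facet, checks it with facet_check_consistency(), and escalates to a full REV_INCONSISTENCY revalidation only on failure, so debugging coverage costs O(1) per run() pass. The general shape, freestanding; the item type and period are illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct item;                            /* Opaque: whatever gets checked. */

/* Verifies one randomly chosen element at most once per 'period_msec'.
 * Returns false only when a check actually ran and failed, mirroring how
 * the code above flags REV_INCONSISTENCY. */
static bool
spot_check(struct item **items, size_t n,
           long long int now_msec, long long int *next_check,
           long long int period_msec,
           bool (*consistent)(const struct item *))
{
    if (!n || now_msec < *next_check) {
        return true;                    /* Nothing due this pass. */
    }
    *next_check = now_msec + period_msec;
    return consistent(items[rand() % n]);
}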
*/ @@ -1606,12 +1677,16 @@ get_memory_usage(const struct ofproto *ofproto_, struct simap *usage) size_t n_subfacets = 0; struct facet *facet; + ovs_rwlock_rdlock(&ofproto->facets.rwlock); simap_increase(usage, "facets", classifier_count(&ofproto->facets)); + ovs_rwlock_unlock(&ofproto->facets.rwlock); + ovs_rwlock_rdlock(&ofproto->facets.rwlock); cls_cursor_init(&cursor, &ofproto->facets, NULL); CLS_CURSOR_FOR_EACH (facet, cr, &cursor) { n_subfacets += list_size(&facet->subfacets); } + ovs_rwlock_unlock(&ofproto->facets.rwlock); simap_increase(usage, "subfacets", n_subfacets); } @@ -1713,13 +1788,13 @@ port_construct(struct ofport *port_) port->bundle = NULL; port->cfm = NULL; port->bfd = NULL; - port->tag = tag_create_random(); port->may_enable = true; port->stp_port = NULL; port->stp_state = STP_DISABLED; port->is_tunnel = false; port->peer = NULL; - hmap_init(&port->priorities); + port->qdscp = NULL; + port->n_qdscp = 0; port->realdev_ofp_port = 0; port->vlandev_vid = 0; port->carrier_seq = netdev_get_carrier_resets(netdev); @@ -1758,8 +1833,10 @@ port_construct(struct ofport *port_) return EBUSY; } + ovs_rwlock_wrlock(&ofproto->backer->odp_to_ofport_lock); hmap_insert(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node, hash_odp_port(port->odp_port)); + ovs_rwlock_unlock(&ofproto->backer->odp_to_ofport_lock); } dpif_port_destroy(&dpif_port); @@ -1780,7 +1857,9 @@ port_destruct(struct ofport *port_) const char *dp_port_name; ofproto->backer->need_revalidate = REV_RECONFIGURE; + ovs_rwlock_wrlock(&xlate_rwlock); xlate_ofport_remove(port); + ovs_rwlock_unlock(&xlate_rwlock); dp_port_name = netdev_vport_get_dpif_port(port->up.netdev, namebuf, sizeof namebuf); @@ -1800,7 +1879,9 @@ port_destruct(struct ofport *port_) } if (port->odp_port != ODPP_NONE && !port->is_tunnel) { + ovs_rwlock_wrlock(&ofproto->backer->odp_to_ofport_lock); hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node); + ovs_rwlock_unlock(&ofproto->backer->odp_to_ofport_lock); } tnl_port_del(port); @@ -1813,8 +1894,7 @@ port_destruct(struct ofport *port_) dpif_sflow_del_port(ofproto->sflow, port->odp_port); } - ofport_clear_priorities(port); - hmap_destroy(&port->priorities); + free(port->qdscp); } static void @@ -1830,6 +1910,10 @@ port_modified(struct ofport *port_) cfm_set_netdev(port->cfm, port->up.netdev); } + if (port->bfd) { + bfd_set_netdev(port->bfd, port->up.netdev); + } + if (port->is_tunnel && tnl_port_reconfigure(port, port->up.netdev, port->odp_port)) { ofproto_dpif_cast(port->up.ofproto)->backer->need_revalidate = @@ -1894,20 +1978,25 @@ set_ipfix( { struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); struct dpif_ipfix *di = ofproto->ipfix; + bool has_options = bridge_exporter_options || flow_exporters_options; - if (bridge_exporter_options || flow_exporters_options) { - if (!di) { - di = ofproto->ipfix = dpif_ipfix_create(); - } + if (has_options && !di) { + di = ofproto->ipfix = dpif_ipfix_create(); + } + + if (di) { + /* Call set_options in any case to cleanly flush the flow + * caches in the last exporters that are to be destroyed. 
*/ dpif_ipfix_set_options( di, bridge_exporter_options, flow_exporters_options, n_flow_exporters_options); - } else { - if (di) { + + if (!has_options) { dpif_ipfix_unref(di); ofproto->ipfix = NULL; } } + return 0; } @@ -1964,7 +2053,8 @@ set_bfd(struct ofport *ofport_, const struct smap *cfg) struct bfd *old; old = ofport->bfd; - ofport->bfd = bfd_configure(old, netdev_get_name(ofport->up.netdev), cfg); + ofport->bfd = bfd_configure(old, netdev_get_name(ofport->up.netdev), + cfg, ofport->up.netdev); if (ofport->bfd != old) { ofproto->backer->need_revalidate = REV_RECONFIGURE; } @@ -2042,7 +2132,7 @@ set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s) set_stp_port(ofport, NULL); } - stp_destroy(ofproto->stp); + stp_unref(ofproto->stp); ofproto->stp = NULL; } @@ -2088,8 +2178,9 @@ update_stp_port_state(struct ofport_dpif *ofport) if (stp_learn_in_state(ofport->stp_state) != stp_learn_in_state(state)) { /* xxx Learning action flows should also be flushed. */ - mac_learning_flush(ofproto->ml, - &ofproto->backer->revalidate_set); + ovs_rwlock_wrlock(&ofproto->ml->rwlock); + mac_learning_flush(ofproto->ml); + ovs_rwlock_unlock(&ofproto->ml->rwlock); } fwd_change = stp_forward_in_state(ofport->stp_state) != stp_forward_in_state(state); @@ -2194,7 +2285,9 @@ stp_run(struct ofproto_dpif *ofproto) } if (stp_check_and_reset_fdb_flush(ofproto->stp)) { - mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set); + ovs_rwlock_wrlock(&ofproto->ml->rwlock); + mac_learning_flush(ofproto->ml); + ovs_rwlock_unlock(&ofproto->ml->rwlock); } } } @@ -2206,129 +2299,25 @@ stp_wait(struct ofproto_dpif *ofproto) poll_timer_wait(1000); } } - -/* Returns true if STP should process 'flow'. Sets fields in 'wc' that - * were used to make the determination.*/ -bool -stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc) -{ - memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst); - return eth_addr_equals(flow->dl_dst, eth_addr_stp); -} - -void -stp_process_packet(const struct ofport_dpif *ofport, - const struct ofpbuf *packet) -{ - struct ofpbuf payload = *packet; - struct eth_header *eth = payload.data; - struct stp_port *sp = ofport->stp_port; - - /* Sink packets on ports that have STP disabled when the bridge has - * STP enabled. */ - if (!sp || stp_port_get_state(sp) == STP_DISABLED) { - return; - } - - /* Trim off padding on payload. */ - if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) { - payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN; - } - - if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) { - stp_received_bpdu(sp, payload.data, payload.size); - } -} -int -ofproto_dpif_queue_to_priority(const struct ofproto_dpif *ofproto, - uint32_t queue_id, uint32_t *priority) -{ - return dpif_queue_to_priority(ofproto->backer->dpif, queue_id, priority); -} - -static struct priority_to_dscp * -get_priority(const struct ofport_dpif *ofport, uint32_t priority) -{ - struct priority_to_dscp *pdscp; - uint32_t hash; - - hash = hash_int(priority, 0); - HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &ofport->priorities) { - if (pdscp->priority == priority) { - return pdscp; - } - } - return NULL; -} - -bool -ofproto_dpif_dscp_from_priority(const struct ofport_dpif *ofport, - uint32_t priority, uint8_t *dscp) -{ - struct priority_to_dscp *pdscp = get_priority(ofport, priority); - *dscp = pdscp ? 
pdscp->dscp : 0; - return pdscp != NULL; -} - -static void -ofport_clear_priorities(struct ofport_dpif *ofport) -{ - struct priority_to_dscp *pdscp, *next; - - HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &ofport->priorities) { - hmap_remove(&ofport->priorities, &pdscp->hmap_node); - free(pdscp); - } -} - static int -set_queues(struct ofport *ofport_, - const struct ofproto_port_queue *qdscp_list, +set_queues(struct ofport *ofport_, const struct ofproto_port_queue *qdscp, size_t n_qdscp) { struct ofport_dpif *ofport = ofport_dpif_cast(ofport_); struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto); - struct hmap new = HMAP_INITIALIZER(&new); - size_t i; - for (i = 0; i < n_qdscp; i++) { - struct priority_to_dscp *pdscp; - uint32_t priority; - uint8_t dscp; - - dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK; - if (dpif_queue_to_priority(ofproto->backer->dpif, qdscp_list[i].queue, - &priority)) { - continue; - } - - pdscp = get_priority(ofport, priority); - if (pdscp) { - hmap_remove(&ofport->priorities, &pdscp->hmap_node); - } else { - pdscp = xmalloc(sizeof *pdscp); - pdscp->priority = priority; - pdscp->dscp = dscp; - ofproto->backer->need_revalidate = REV_RECONFIGURE; - } - - if (pdscp->dscp != dscp) { - pdscp->dscp = dscp; - ofproto->backer->need_revalidate = REV_RECONFIGURE; - } - - hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0)); - } - - if (!hmap_is_empty(&ofport->priorities)) { - ofport_clear_priorities(ofport); + if (ofport->n_qdscp != n_qdscp + || (n_qdscp && memcmp(ofport->qdscp, qdscp, + n_qdscp * sizeof *qdscp))) { ofproto->backer->need_revalidate = REV_RECONFIGURE; + free(ofport->qdscp); + ofport->qdscp = n_qdscp + ? xmemdup(qdscp, n_qdscp * sizeof *qdscp) + : NULL; + ofport->n_qdscp = n_qdscp; } - hmap_swap(&new, &ofport->priorities); - hmap_destroy(&new); - return 0; } @@ -2351,6 +2340,7 @@ bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos) struct mac_entry *mac, *next_mac; ofproto->backer->need_revalidate = REV_RECONFIGURE; + ovs_rwlock_wrlock(&ml->rwlock); LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) { if (mac->port.p == bundle) { if (all_ofprotos) { @@ -2360,11 +2350,12 @@ bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos) if (o != ofproto) { struct mac_entry *e; - e = mac_learning_lookup(o->ml, mac->mac, mac->vlan, - NULL); + ovs_rwlock_wrlock(&o->ml->rwlock); + e = mac_learning_lookup(o->ml, mac->mac, mac->vlan); if (e) { mac_learning_expire(o->ml, e); } + ovs_rwlock_unlock(&o->ml->rwlock); } } } @@ -2372,6 +2363,7 @@ bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos) mac_learning_expire(ml, mac); } } + ovs_rwlock_unlock(&ml->rwlock); } static struct ofbundle * @@ -2437,7 +2429,7 @@ bundle_add_port(struct ofbundle *bundle, ofp_port_t ofp_port, if (port->bundle != bundle) { bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE; if (port->bundle) { - bundle_del_port(port); + bundle_remove(&port->up); } port->bundle = bundle; @@ -2468,7 +2460,9 @@ bundle_destroy(struct ofbundle *bundle) ofproto = bundle->ofproto; mbridge_unregister_bundle(ofproto->mbridge, bundle->aux); + ovs_rwlock_wrlock(&xlate_rwlock); xlate_bundle_remove(bundle); + ovs_rwlock_unlock(&xlate_rwlock); LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) { bundle_del_port(port); @@ -2715,6 +2709,7 @@ bundle_send_learning_packets(struct ofbundle *bundle) struct mac_entry *e; error = n_packets = n_errors = 0; + ovs_rwlock_rdlock(&ofproto->ml->rwlock); LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) { if (e->port.p 
!= bundle) { struct ofpbuf *learning_packet; @@ -2737,6 +2732,7 @@ bundle_send_learning_packets(struct ofbundle *bundle) n_packets++; } } + ovs_rwlock_unlock(&ofproto->ml->rwlock); if (n_errors) { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); @@ -2762,8 +2758,10 @@ bundle_run(struct ofbundle *bundle) bond_slave_set_may_enable(bundle->bond, port, port->may_enable); } - bond_run(bundle->bond, &bundle->ofproto->backer->revalidate_set, - lacp_status(bundle->lacp)); + if (bond_run(bundle->bond, lacp_status(bundle->lacp))) { + bundle->ofproto->backer->need_revalidate = REV_BOND; + } + if (bond_should_send_learning_packets(bundle->bond)) { bundle_send_learning_packets(bundle); } @@ -2829,9 +2827,11 @@ static int set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans) { struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); + ovs_rwlock_wrlock(&ofproto->ml->rwlock); if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) { - mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set); + mac_learning_flush(ofproto->ml); } + ovs_rwlock_unlock(&ofproto->ml->rwlock); return 0; } @@ -2855,8 +2855,10 @@ set_mac_table_config(struct ofproto *ofproto_, unsigned int idle_time, size_t max_entries) { struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); + ovs_rwlock_wrlock(&ofproto->ml->rwlock); mac_learning_set_idle_time(ofproto->ml, idle_time); mac_learning_set_max_entries(ofproto->ml, max_entries); + ovs_rwlock_unlock(&ofproto->ml->rwlock); } /* Ports. */ @@ -2890,7 +2892,7 @@ ofport_update_peer(struct ofport_dpif *ofport) { const struct ofproto_dpif *ofproto; struct dpif_backer *backer; - const char *peer_name; + char *peer_name; if (!netdev_vport_is_patch(ofport->up.netdev)) { return; @@ -2912,7 +2914,7 @@ ofport_update_peer(struct ofport_dpif *ofport) HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) { struct ofport *peer_ofport; struct ofport_dpif *peer; - const char *peer_peer; + char *peer_peer; if (ofproto->backer != backer) { continue; @@ -2930,9 +2932,11 @@ ofport_update_peer(struct ofport_dpif *ofport) ofport->peer = peer; ofport->peer->peer = ofport; } + free(peer_peer); - return; + break; } + free(peer_name); } static void @@ -2963,6 +2967,8 @@ port_run(struct ofport_dpif *ofport) long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev); bool carrier_changed = carrier_seq != ofport->carrier_seq; bool enable = netdev_get_carrier(ofport->up.netdev); + bool cfm_enable = false; + bool bfd_enable = false; ofport->carrier_seq = carrier_seq; @@ -2972,16 +2978,20 @@ port_run(struct ofport_dpif *ofport) int cfm_opup = cfm_get_opup(ofport->cfm); cfm_run(ofport->cfm); - enable = enable && !cfm_get_fault(ofport->cfm); + cfm_enable = !cfm_get_fault(ofport->cfm); if (cfm_opup >= 0) { - enable = enable && cfm_opup; + cfm_enable = cfm_enable && cfm_opup; } } if (ofport->bfd) { bfd_run(ofport->bfd); - enable = enable && bfd_forwarding(ofport->bfd); + bfd_enable = bfd_forwarding(ofport->bfd); + } + + if (ofport->bfd || ofport->cfm) { + enable = enable && (cfm_enable || bfd_enable); } if (ofport->bundle) { @@ -3255,26 +3265,6 @@ port_is_lacp_current(const struct ofport *ofport_) /* Upcall handling. */ -/* Flow miss batching. - * - * Some dpifs implement operations faster when you hand them off in a batch. - * To allow batching, "struct flow_miss" queues the dpif-related work needed - * for a given flow. Each "struct flow_miss" corresponds to sending one or - * more packets, plus possibly installing the flow in the dpif. 
- * - * So far we only batch the operations that affect flow setup time the most. - * It's possible to batch more than that, but the benefit might be minimal. */ -struct flow_miss { - struct hmap_node hmap_node; - struct ofproto_dpif *ofproto; - struct flow flow; - enum odp_key_fitness key_fitness; - const struct nlattr *key; - size_t key_len; - struct list packets; - enum dpif_upcall_type upcall_type; -}; - struct flow_miss_op { struct dpif_op dpif_op; @@ -3290,96 +3280,6 @@ struct flow_miss_op { struct subfacet *subfacet; }; -/* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each - * OpenFlow controller as necessary according to their individual - * configurations. */ -static void -send_packet_in_miss(struct ofproto_dpif *ofproto, const struct ofpbuf *packet, - const struct flow *flow) -{ - struct ofputil_packet_in pin; - - pin.packet = packet->data; - pin.packet_len = packet->size; - pin.reason = OFPR_NO_MATCH; - pin.controller_id = 0; - - pin.table_id = 0; - pin.cookie = 0; - - pin.send_len = 0; /* not used for flow table misses */ - - flow_get_metadata(flow, &pin.fmd); - - connmgr_send_packet_in(ofproto->up.connmgr, &pin); -} - -static struct flow_miss * -flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto, - const struct flow *flow, uint32_t hash) -{ - struct flow_miss *miss; - - HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) { - if (miss->ofproto == ofproto && flow_equal(&miss->flow, flow)) { - return miss; - } - } - - return NULL; -} - -/* Partially Initializes 'op' as an "execute" operation for 'miss' and - * 'packet'. The caller must initialize op->actions and op->actions_len. If - * 'miss' is associated with a subfacet the caller must also initialize the - * returned op->subfacet, and if anything needs to be freed after processing - * the op, the caller must initialize op->garbage also. */ -static void -init_flow_miss_execute_op(struct flow_miss *miss, struct ofpbuf *packet, - struct flow_miss_op *op) -{ - if (miss->flow.in_port.ofp_port - != vsp_realdev_to_vlandev(miss->ofproto, miss->flow.in_port.ofp_port, - miss->flow.vlan_tci)) { - /* This packet was received on a VLAN splinter port. We - * added a VLAN to the packet to make the packet resemble - * the flow, but the actions were composed assuming that - * the packet contained no VLAN. So, we must remove the - * VLAN header from the packet before trying to execute the - * actions. */ - eth_pop_vlan(packet); - } - - op->subfacet = NULL; - op->xout_garbage = false; - op->dpif_op.type = DPIF_OP_EXECUTE; - op->dpif_op.u.execute.key = miss->key; - op->dpif_op.u.execute.key_len = miss->key_len; - op->dpif_op.u.execute.packet = packet; - ofpbuf_use_stack(&op->mask, &op->maskbuf, sizeof op->maskbuf); -} - -/* Helper for handle_flow_miss_without_facet() and - * handle_flow_miss_with_facet(). */ -static void -handle_flow_miss_common(struct ofproto_dpif *ofproto, struct ofpbuf *packet, - const struct flow *flow, bool fail_open) -{ - if (fail_open) { - /* - * Extra-special case for fail-open mode. - * - * We are in fail-open mode and the packet matched the fail-open - * rule, but we are connected to a controller too. We should send - * the packet up to the controller in the hope that it will try to - * set up a flow and thereby allow us to exit fail-open. - * - * See the top-level comment in fail-open.c for more information. 
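flow_miss_should_make_facet(), just below, now hashes the masked flow (flow_hash_in_wildcards() over miss->xout.wc) and asks the backer's governor whether this flow has earned a datapath entry, so short-lived flows never pay facet setup cost. The real policy lives in ofproto-dpif-governor.c; the toy sketch here only illustrates the idea of saturating per-bucket packet counters, and ignores hash collisions and aging:

#include <stdbool.h>
#include <stdint.h>

#define GOVERNOR_BUCKETS 4096

struct governor_sketch {
    uint8_t count[GOVERNOR_BUCKETS];    /* Saturating per-hash counters. */
};

/* Credits 'n_packets' to the flow's bucket and reports whether the flow
 * has crossed 'threshold' and so deserves a real datapath flow. */
static bool
should_install_flow(struct governor_sketch *g, uint32_t flow_hash,
                    unsigned int n_packets, unsigned int threshold)
{
    uint8_t *count = &g->count[flow_hash % GOVERNOR_BUCKETS];

    *count = n_packets < (unsigned int) (UINT8_MAX - *count)
             ? *count + n_packets
             : UINT8_MAX;               /* Saturate instead of wrapping. */
    return *count >= threshold;
}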
- */ - send_packet_in_miss(ofproto, packet, flow); - } -} - /* Figures out whether a flow that missed in 'ofproto', whose details are in * 'miss' masked by 'wc', is likely to be worth tracking in detail in userspace * and (usually) installing a datapath flow. The answer is usually "yes" (a @@ -3388,7 +3288,7 @@ handle_flow_miss_common(struct ofproto_dpif *ofproto, struct ofpbuf *packet, * flows we impose some heuristics to decide which flows are likely to be worth * tracking. */ static bool -flow_miss_should_make_facet(struct flow_miss *miss, struct flow_wildcards *wc) +flow_miss_should_make_facet(struct flow_miss *miss) { struct dpif_backer *backer = miss->ofproto->backer; uint32_t hash; @@ -3413,96 +3313,33 @@ flow_miss_should_make_facet(struct flow_miss *miss, struct flow_wildcards *wc) backer->governor = governor_create(); } - hash = flow_hash_in_wildcards(&miss->flow, wc, 0); + hash = flow_hash_in_wildcards(&miss->flow, &miss->xout.wc, 0); return governor_should_install_flow(backer->governor, hash, list_size(&miss->packets)); } -/* Handles 'miss' without creating a facet or subfacet or creating any datapath - * flow. 'miss->flow' must have matched 'rule' and been xlated into 'xout'. - * May add an "execute" operation to 'ops' and increment '*n_ops'. */ -static void -handle_flow_miss_without_facet(struct rule_dpif *rule, struct xlate_out *xout, - struct flow_miss *miss, - struct flow_miss_op *ops, size_t *n_ops) -{ - struct ofpbuf *packet; - - LIST_FOR_EACH (packet, list_node, &miss->packets) { - - COVERAGE_INC(facet_suppress); - - handle_flow_miss_common(miss->ofproto, packet, &miss->flow, - rule->up.cr.priority == FAIL_OPEN_PRIORITY); - - if (xout->slow) { - struct xlate_in xin; - - xlate_in_init(&xin, miss->ofproto, &miss->flow, rule, 0, packet); - xlate_actions_for_side_effects(&xin); - } - - if (xout->odp_actions.size) { - struct flow_miss_op *op = &ops[*n_ops]; - struct dpif_execute *execute = &op->dpif_op.u.execute; - - init_flow_miss_execute_op(miss, packet, op); - xlate_out_copy(&op->xout, xout); - execute->actions = op->xout.odp_actions.data; - execute->actions_len = op->xout.odp_actions.size; - op->xout_garbage = true; - - (*n_ops)++; - } - } -} - /* Handles 'miss', which matches 'facet'. May add any required datapath * operations to 'ops', incrementing '*n_ops' for each new op. * - * All of the packets in 'miss' are considered to have arrived at time 'now'. - * This is really important only for new facets: if we just called time_msec() - * here, then the new subfacet or its packets could look (occasionally) as - * though it was used some time after the facet was used. That can make a - * one-packet flow look like it has a nonzero duration, which looks odd in - * e.g. NetFlow statistics. - * - * If non-null, 'stats' will be folded into 'facet'. */ + * All of the packets in 'miss' are considered to have arrived at time + * 'miss->stats.used'. This is really important only for new facets: if we + * just called time_msec() here, then the new subfacet or its packets could + * look (occasionally) as though it was used some time after the facet was + * used. That can make a one-packet flow look like it has a nonzero duration, + * which looks odd in e.g. NetFlow statistics. */ static void handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet, - long long int now, struct dpif_flow_stats *stats, struct flow_miss_op *ops, size_t *n_ops) { enum subfacet_path want_path; struct subfacet *subfacet; - struct ofpbuf *packet; - want_path = facet->xout.slow ? 
SF_SLOW_PATH : SF_FAST_PATH; - - LIST_FOR_EACH (packet, list_node, &miss->packets) { - struct flow_miss_op *op = &ops[*n_ops]; - - handle_flow_miss_common(miss->ofproto, packet, &miss->flow, - facet->fail_open); - - if (want_path != SF_FAST_PATH) { - struct rule_dpif *rule; - struct xlate_in xin; + facet->packet_count += miss->stats.n_packets; + facet->prev_packet_count += miss->stats.n_packets; + facet->byte_count += miss->stats.n_bytes; + facet->prev_byte_count += miss->stats.n_bytes; - rule = rule_dpif_lookup(facet->ofproto, &facet->flow, NULL); - xlate_in_init(&xin, facet->ofproto, &miss->flow, rule, 0, packet); - xlate_actions_for_side_effects(&xin); - } - - if (facet->xout.odp_actions.size) { - struct dpif_execute *execute = &op->dpif_op.u.execute; - - init_flow_miss_execute_op(miss, packet, op); - execute->actions = facet->xout.odp_actions.data, - execute->actions_len = facet->xout.odp_actions.size; - (*n_ops)++; - } - } + want_path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH; /* Don't install the flow if it's the result of the "userspace" * action for an already installed facet. This can occur when a @@ -3511,21 +3348,11 @@ handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet, * be rejected as overlapping by the datapath. */ if (miss->upcall_type == DPIF_UC_ACTION && !list_is_empty(&facet->subfacets)) { - if (stats) { - facet->used = MAX(facet->used, stats->used); - facet->packet_count += stats->n_packets; - facet->byte_count += stats->n_bytes; - facet->tcp_flags |= stats->tcp_flags; - } return; } - subfacet = subfacet_create(facet, miss, now); - if (stats) { - subfacet_update_stats(subfacet, stats); - } - - if (miss->upcall_type == DPIF_UC_MISS || subfacet->path != want_path) { + subfacet = subfacet_create(facet, miss); + if (subfacet->path != want_path) { struct flow_miss_op *op = &ops[(*n_ops)++]; struct dpif_flow_put *put = &op->dpif_op.u.flow_put; @@ -3564,55 +3391,25 @@ static void handle_flow_miss(struct flow_miss *miss, struct flow_miss_op *ops, size_t *n_ops) { - struct ofproto_dpif *ofproto = miss->ofproto; - struct dpif_flow_stats stats__; - struct dpif_flow_stats *stats = &stats__; - struct ofpbuf *packet; struct facet *facet; - long long int now; - now = time_msec(); - memset(stats, 0, sizeof *stats); - stats->used = now; - LIST_FOR_EACH (packet, list_node, &miss->packets) { - stats->tcp_flags |= packet_get_tcp_flags(packet, &miss->flow); - stats->n_bytes += packet->size; - stats->n_packets++; - } + miss->ofproto->n_missed += list_size(&miss->packets); - facet = facet_lookup_valid(ofproto, &miss->flow); + facet = facet_lookup_valid(miss->ofproto, &miss->flow); if (!facet) { - struct flow_wildcards wc; - struct rule_dpif *rule; - struct xlate_out xout; - struct xlate_in xin; - - flow_wildcards_init_catchall(&wc); - rule = rule_dpif_lookup(ofproto, &miss->flow, &wc); - rule_credit_stats(rule, stats); - - xlate_in_init(&xin, ofproto, &miss->flow, rule, stats->tcp_flags, - NULL); - xin.resubmit_stats = stats; - xin.may_learn = true; - xlate_actions(&xin, &xout); - flow_wildcards_or(&xout.wc, &xout.wc, &wc); - /* There does not exist a bijection between 'struct flow' and datapath * flow keys with fitness ODP_FIT_TO_LITTLE. This breaks a fundamental * assumption used throughout the facet and subfacet handling code. * Since we have to handle these misses in userspace anyway, we simply * skip facet creation, avoiding the problem altogether. 
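The stats folding at the top of handle_flow_miss_with_facet() above, where packet_count is bumped alongside prev_packet_count, supports delta pushing: facet_push_stats() later credits rules, NetFlow, and mirrors only with what changed since the previous push. A freestanding sketch of that snapshot-and-diff accounting:

#include <stdint.h>

struct counters {
    uint64_t packets, bytes;            /* Totals observed so far. */
    uint64_t prev_packets, prev_bytes;  /* Totals already pushed. */
};

/* Pushes only the growth since the last call, then snapshots, so repeated
 * pushes never double-count and a miss folded in between pushes is
 * credited exactly once. */
static void
push_deltas(struct counters *c,
            void (*credit)(uint64_t n_packets, uint64_t n_bytes))
{
    credit(c->packets - c->prev_packets, c->bytes - c->prev_bytes);
    c->prev_packets = c->packets;
    c->prev_bytes = c->bytes;
}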
*/ if (miss->key_fitness == ODP_FIT_TOO_LITTLE - || !flow_miss_should_make_facet(miss, &xout.wc)) { - handle_flow_miss_without_facet(rule, &xout, miss, ops, n_ops); + || !flow_miss_should_make_facet(miss)) { return; } - facet = facet_create(miss, rule, &xout, stats); - stats = NULL; + facet = facet_create(miss); } - handle_flow_miss_with_facet(miss, facet, now, stats, ops, n_ops); + handle_flow_miss_with_facet(miss, facet, ops, n_ops); } static struct drop_key * @@ -3651,196 +3448,24 @@ drop_key_clear(struct dpif_backer *backer) } hmap_remove(&backer->drop_keys, &drop_key->hmap_node); - free(drop_key->key); - free(drop_key); - } -} - -/* Given a datpath, packet, and flow metadata ('backer', 'packet', and 'key' - * respectively), populates 'flow' with the result of odp_flow_key_to_flow(). - * Optionally, if nonnull, populates 'fitnessp' with the fitness of 'flow' as - * returned by odp_flow_key_to_flow(). Also, optionally populates 'ofproto' - * with the ofproto_dpif, and 'odp_in_port' with the datapath in_port, that - * 'packet' ingressed. - * - * If 'ofproto' is nonnull, requires 'flow''s in_port to exist. Otherwise sets - * 'flow''s in_port to OFPP_NONE. - * - * This function does post-processing on data returned from - * odp_flow_key_to_flow() to help make VLAN splinters transparent to the rest - * of the upcall processing logic. In particular, if the extracted in_port is - * a VLAN splinter port, it replaces flow->in_port by the "real" port, sets - * flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes - * a VLAN header onto 'packet' (if it is nonnull). - * - * Similarly, this function also includes some logic to help with tunnels. It - * may modify 'flow' as necessary to make the tunneling implementation - * transparent to the upcall processing logic. - * - * Returns 0 if successful, ENODEV if the parsed flow has no associated ofport, - * or some other positive errno if there are other problems. */ -static int -ofproto_receive(const struct dpif_backer *backer, struct ofpbuf *packet, - const struct nlattr *key, size_t key_len, - struct flow *flow, enum odp_key_fitness *fitnessp, - struct ofproto_dpif **ofproto, odp_port_t *odp_in_port) -{ - const struct ofport_dpif *port; - enum odp_key_fitness fitness; - int error = ENODEV; - - fitness = odp_flow_key_to_flow(key, key_len, flow); - if (fitness == ODP_FIT_ERROR) { - error = EINVAL; - goto exit; - } - - if (odp_in_port) { - *odp_in_port = flow->in_port.odp_port; + drop_key_destroy(drop_key); } - port = (tnl_port_should_receive(flow) - ? tnl_port_receive(flow) - : odp_port_to_ofport(backer, flow->in_port.odp_port)); - flow->in_port.ofp_port = port ? port->up.ofp_port : OFPP_NONE; - if (!port) { - goto exit; - } - - /* XXX: Since the tunnel module is not scoped per backer, for a tunnel port - * it's theoretically possible that we'll receive an ofport belonging to an - * entirely different datapath. In practice, this can't happen because no - * platforms has two separate datapaths which each support tunneling. */ - ovs_assert(ofproto_dpif_cast(port->up.ofproto)->backer == backer); - - if (vsp_adjust_flow(ofproto_dpif_cast(port->up.ofproto), flow)) { - if (packet) { - /* Make the packet resemble the flow, so that it gets sent to - * an OpenFlow controller properly, so that it looks correct - * for sFlow, and so that flow_extract() will get the correct - * vlan_tci if it is called on 'packet'. 
- * - * The allocated space inside 'packet' probably also contains - * 'key', that is, both 'packet' and 'key' are probably part of - * a struct dpif_upcall (see the large comment on that - * structure definition), so pushing data on 'packet' is in - * general not a good idea since it could overwrite 'key' or - * free it as a side effect. However, it's OK in this special - * case because we know that 'packet' is inside a Netlink - * attribute: pushing 4 bytes will just overwrite the 4-byte - * "struct nlattr", which is fine since we don't need that - * header anymore. */ - eth_push_vlan(packet, flow->vlan_tci); - } - /* We can't reproduce 'key' from 'flow'. */ - fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness; - } - error = 0; - - if (ofproto) { - *ofproto = ofproto_dpif_cast(port->up.ofproto); - } - -exit: - if (fitnessp) { - *fitnessp = fitness; - } - return error; + udpif_drop_key_clear(backer->udpif); } static void -handle_miss_upcalls(struct dpif_backer *backer, struct dpif_upcall *upcalls, - size_t n_upcalls) +handle_flow_misses(struct dpif_backer *backer, struct flow_miss_batch *fmb) { - struct dpif_upcall *upcall; + struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH]; + struct dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH]; struct flow_miss *miss; - struct flow_miss misses[FLOW_MISS_MAX_BATCH]; - struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH * 2]; - struct dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2]; - struct hmap todo; - int n_misses; - size_t n_ops; - size_t i; - - if (!n_upcalls) { - return; - } - - /* Construct the to-do list. - * - * This just amounts to extracting the flow from each packet and sticking - * the packets that have the same flow in the same "flow_miss" structure so - * that we can process them together. */ - hmap_init(&todo); - n_misses = 0; - for (upcall = upcalls; upcall < &upcalls[n_upcalls]; upcall++) { - struct flow_miss *miss = &misses[n_misses]; - struct flow_miss *existing_miss; - struct ofproto_dpif *ofproto; - odp_port_t odp_in_port; - struct flow flow; - uint32_t hash; - int error; - - error = ofproto_receive(backer, upcall->packet, upcall->key, - upcall->key_len, &flow, &miss->key_fitness, - &ofproto, &odp_in_port); - if (error == ENODEV) { - struct drop_key *drop_key; - - /* Received packet on datapath port for which we couldn't - * associate an ofproto. This can happen if a port is removed - * while traffic is being received. Print a rate-limited message - * in case it happens frequently. Install a drop flow so - * that future packets of the flow are inexpensively dropped - * in the kernel. */ - VLOG_INFO_RL(&rl, "received packet on unassociated datapath port " - "%"PRIu32, odp_in_port); - - drop_key = drop_key_lookup(backer, upcall->key, upcall->key_len); - if (!drop_key) { - drop_key = xmalloc(sizeof *drop_key); - drop_key->key = xmemdup(upcall->key, upcall->key_len); - drop_key->key_len = upcall->key_len; - - hmap_insert(&backer->drop_keys, &drop_key->hmap_node, - hash_bytes(drop_key->key, drop_key->key_len, 0)); - dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY, - drop_key->key, drop_key->key_len, - NULL, 0, NULL, 0, NULL); - } - continue; - } - if (error) { - continue; - } - - ofproto->n_missed++; - flow_extract(upcall->packet, flow.skb_priority, flow.skb_mark, - &flow.tunnel, &flow.in_port, &miss->flow); - - /* Add other packets to a to-do list. 
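The deleted code here grouped all packets that share a flow into a single flow_miss so that translation and dpif setup run once per flow rather than once per packet; after this patch the same batching happens in the new ofproto-dpif-upcall module's flow_miss_batch. A freestanding, simplified sketch of the group-by idiom (a faithful version must also compare full flows when hashes collide, as flow_miss_find() did):

#include <stdint.h>
#include <stdlib.h>

#define TODO_BUCKETS 64

struct miss {
    struct miss *next;                  /* Hash chain. */
    uint32_t flow_hash;                 /* Stands in for the full flow. */
    unsigned int n_packets;             /* Packets coalesced so far. */
};

/* Finds the existing miss for 'flow_hash' in 'todo', or creates one.
 * 'todo' is an array of TODO_BUCKETS chain heads, initially all NULL.
 * (xmalloc-style: assumes allocation succeeds.) */
static struct miss *
todo_add(struct miss **todo, uint32_t flow_hash)
{
    struct miss **bucket = &todo[flow_hash % TODO_BUCKETS];
    struct miss *m;

    for (m = *bucket; m; m = m->next) {
        if (m->flow_hash == flow_hash) {
            m->n_packets++;             /* Same flow: batch, don't re-add. */
            return m;
        }
    }

    m = calloc(1, sizeof *m);
    m->flow_hash = flow_hash;
    m->n_packets = 1;
    m->next = *bucket;
    *bucket = m;
    return m;
}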
*/ - hash = flow_hash(&miss->flow, 0); - existing_miss = flow_miss_find(&todo, ofproto, &miss->flow, hash); - if (!existing_miss) { - hmap_insert(&todo, &miss->hmap_node, hash); - miss->ofproto = ofproto; - miss->key = upcall->key; - miss->key_len = upcall->key_len; - miss->upcall_type = upcall->type; - list_init(&miss->packets); - - n_misses++; - } else { - miss = existing_miss; - } - list_push_back(&miss->packets, &upcall->packet->list_node); - } + size_t n_ops, i; /* Process each element in the to-do list, constructing the set of * operations to batch. */ n_ops = 0; - HMAP_FOR_EACH (miss, hmap_node, &todo) { + HMAP_FOR_EACH (miss, hmap_node, &fmb->misses) { handle_flow_miss(miss, flow_miss_ops, &n_ops); } ovs_assert(n_ops <= ARRAY_SIZE(flow_miss_ops)); @@ -3873,66 +3498,6 @@ handle_miss_upcalls(struct dpif_backer *backer, struct dpif_upcall *upcalls, subfacet->path = SF_NOT_INSTALLED; } - - /* Free memory. */ - if (flow_miss_ops[i].xout_garbage) { - xlate_out_uninit(&flow_miss_ops[i].xout); - } - } - hmap_destroy(&todo); -} - -static enum { SFLOW_UPCALL, MISS_UPCALL, BAD_UPCALL, FLOW_SAMPLE_UPCALL, - IPFIX_UPCALL } -classify_upcall(const struct dpif_upcall *upcall) -{ - size_t userdata_len; - union user_action_cookie cookie; - - /* First look at the upcall type. */ - switch (upcall->type) { - case DPIF_UC_ACTION: - break; - - case DPIF_UC_MISS: - return MISS_UPCALL; - - case DPIF_N_UC_TYPES: - default: - VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type); - return BAD_UPCALL; - } - - /* "action" upcalls need a closer look. */ - if (!upcall->userdata) { - VLOG_WARN_RL(&rl, "action upcall missing cookie"); - return BAD_UPCALL; - } - userdata_len = nl_attr_get_size(upcall->userdata); - if (userdata_len < sizeof cookie.type - || userdata_len > sizeof cookie) { - VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %zu", - userdata_len); - return BAD_UPCALL; - } - memset(&cookie, 0, sizeof cookie); - memcpy(&cookie, nl_attr_get(upcall->userdata), userdata_len); - if (userdata_len == sizeof cookie.sflow - && cookie.type == USER_ACTION_COOKIE_SFLOW) { - return SFLOW_UPCALL; - } else if (userdata_len == sizeof cookie.slow_path - && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) { - return MISS_UPCALL; - } else if (userdata_len == sizeof cookie.flow_sample - && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) { - return FLOW_SAMPLE_UPCALL; - } else if (userdata_len == sizeof cookie.ipfix - && cookie.type == USER_ACTION_COOKIE_IPFIX) { - return IPFIX_UPCALL; - } else { - VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16 - " and size %zu", cookie.type, userdata_len); - return BAD_UPCALL; } } @@ -3945,8 +3510,8 @@ handle_sflow_upcall(struct dpif_backer *backer, struct flow flow; odp_port_t odp_in_port; - if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len, - &flow, NULL, &ofproto, &odp_in_port) + if (xlate_receive(backer, upcall->packet, upcall->key, upcall->key_len, + &flow, NULL, &ofproto, &odp_in_port) || !ofproto->sflow) { return; } @@ -3965,8 +3530,8 @@ handle_flow_sample_upcall(struct dpif_backer *backer, union user_action_cookie cookie; struct flow flow; - if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len, - &flow, NULL, &ofproto, NULL) + if (xlate_receive(backer, upcall->packet, upcall->key, upcall->key_len, + &flow, NULL, &ofproto, NULL) || !ofproto->ipfix) { return; } @@ -3990,8 +3555,8 @@ handle_ipfix_upcall(struct dpif_backer *backer, struct ofproto_dpif *ofproto; struct flow flow; - if (ofproto_receive(backer, 
upcall->packet, upcall->key, upcall->key_len, - &flow, NULL, &ofproto, NULL) + if (xlate_receive(backer, upcall->packet, upcall->key, upcall->key_len, + &flow, NULL, &ofproto, NULL) || !ofproto->ipfix) { return; } @@ -4001,66 +3566,64 @@ handle_ipfix_upcall(struct dpif_backer *backer, dpif_ipfix_bridge_sample(ofproto->ipfix, upcall->packet, &flow); } -static int -handle_upcalls(struct dpif_backer *backer, unsigned int max_batch) +static void +handle_upcalls(struct dpif_backer *backer) { - struct dpif_upcall misses[FLOW_MISS_MAX_BATCH]; - struct ofpbuf miss_bufs[FLOW_MISS_MAX_BATCH]; - uint64_t miss_buf_stubs[FLOW_MISS_MAX_BATCH][4096 / 8]; + struct flow_miss_batch *fmb; int n_processed; - int n_misses; - int i; - - ovs_assert(max_batch <= FLOW_MISS_MAX_BATCH); - n_misses = 0; - for (n_processed = 0; n_processed < max_batch; n_processed++) { - struct dpif_upcall *upcall = &misses[n_misses]; - struct ofpbuf *buf = &miss_bufs[n_misses]; - int error; + for (n_processed = 0; n_processed < FLOW_MISS_MAX_BATCH; n_processed++) { + struct upcall *upcall = upcall_next(backer->udpif); - ofpbuf_use_stub(buf, miss_buf_stubs[n_misses], - sizeof miss_buf_stubs[n_misses]); - error = dpif_recv(backer->dpif, upcall, buf); - if (error) { - ofpbuf_uninit(buf); + if (!upcall) { break; } - switch (classify_upcall(upcall)) { - case MISS_UPCALL: - /* Handle it later. */ - n_misses++; - break; - + switch (upcall->type) { case SFLOW_UPCALL: - handle_sflow_upcall(backer, upcall); - ofpbuf_uninit(buf); + handle_sflow_upcall(backer, &upcall->dpif_upcall); break; case FLOW_SAMPLE_UPCALL: - handle_flow_sample_upcall(backer, upcall); - ofpbuf_uninit(buf); + handle_flow_sample_upcall(backer, &upcall->dpif_upcall); break; case IPFIX_UPCALL: - handle_ipfix_upcall(backer, upcall); - ofpbuf_uninit(buf); + handle_ipfix_upcall(backer, &upcall->dpif_upcall); break; case BAD_UPCALL: - ofpbuf_uninit(buf); break; + + case MISS_UPCALL: + NOT_REACHED(); } + + upcall_destroy(upcall); } - /* Handle deferred MISS_UPCALL processing. */ - handle_miss_upcalls(backer, misses, n_misses); - for (i = 0; i < n_misses; i++) { - ofpbuf_uninit(&miss_bufs[i]); + for (n_processed = 0; n_processed < FLOW_MISS_MAX_BATCH; n_processed++) { + struct drop_key *drop_key = drop_key_next(backer->udpif); + if (!drop_key) { + break; + } + + if (!drop_key_lookup(backer, drop_key->key, drop_key->key_len)) { + hmap_insert(&backer->drop_keys, &drop_key->hmap_node, + hash_bytes(drop_key->key, drop_key->key_len, 0)); + dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY, + drop_key->key, drop_key->key_len, + NULL, 0, NULL, 0, NULL); + } else { + drop_key_destroy(drop_key); + } } - return n_processed; + fmb = flow_miss_batch_next(backer->udpif); + if (fmb) { + handle_flow_misses(backer, fmb); + flow_miss_batch_destroy(fmb); + } } /* Flow expiration. */ @@ -4121,10 +3684,12 @@ expire(struct dpif_backer *backer) /* Expire OpenFlow flows whose idle_timeout or hard_timeout * has passed. */ + ovs_mutex_lock(&ofproto->up.expirable_mutex); LIST_FOR_EACH_SAFE (rule, next_rule, expirable, &ofproto->up.expirable) { rule_expire(rule_dpif_cast(rule)); } + ovs_mutex_unlock(&ofproto->up.expirable_mutex); /* All outstanding data in existing flows has been accounted, so it's a * good time to do bond rebalancing. 
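
The reworked handle_upcalls() above turns this thread into a consumer of queues that the new ofproto-dpif-upcall module fills. The bounded-drain idiom it uses is worth calling out on its own; a stripped-down sketch, assuming the upcall_next()/upcall_destroy() pair from ofproto-dpif-upcall.h, with dispatch() standing in for the per-type switch:

/* Bounded drain of a udpif queue, as in handle_upcalls() above: take at
 * most FLOW_MISS_MAX_BATCH items per poll-loop iteration so that one
 * busy queue cannot starve the rest of the main loop. */
static void
drain_upcalls(struct udpif *udpif, void (*dispatch)(struct upcall *))
{
    int i;

    for (i = 0; i < FLOW_MISS_MAX_BATCH; i++) {
        struct upcall *upcall = upcall_next(udpif);

        if (!upcall) {
            break;              /* Queue empty: yield to the poll loop. */
        }
        dispatch(upcall);
        upcall_destroy(upcall);
    }
}
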
*/ @@ -4133,7 +3698,7 @@ expire(struct dpif_backer *backer) HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) { if (bundle->bond) { - bond_rebalance(bundle->bond, &backer->revalidate_set); + bond_rebalance(bundle->bond); } } } @@ -4386,6 +3951,7 @@ expire_subfacets(struct dpif_backer *backer, int dp_max_idle) static void rule_expire(struct rule_dpif *rule) { + uint16_t idle_timeout, hard_timeout; long long int now; uint8_t reason; @@ -4394,22 +3960,27 @@ rule_expire(struct rule_dpif *rule) return; } + ovs_mutex_lock(&rule->up.timeout_mutex); + hard_timeout = rule->up.hard_timeout; + idle_timeout = rule->up.idle_timeout; + ovs_mutex_unlock(&rule->up.timeout_mutex); + /* Has 'rule' expired? */ now = time_msec(); - if (rule->up.hard_timeout - && now > rule->up.modified + rule->up.hard_timeout * 1000) { + if (hard_timeout && now > rule->up.modified + hard_timeout * 1000) { reason = OFPRR_HARD_TIMEOUT; - } else if (rule->up.idle_timeout - && now > rule->up.used + rule->up.idle_timeout * 1000) { + } else if (idle_timeout && now > rule->up.used + idle_timeout * 1000) { reason = OFPRR_IDLE_TIMEOUT; } else { return; } - COVERAGE_INC(ofproto_dpif_expired); + if (!ovs_rwlock_trywrlock(&rule->up.evict)) { + COVERAGE_INC(ofproto_dpif_expired); - /* Get rid of the rule. */ - ofproto_rule_expire(&rule->up, reason); + /* Get rid of the rule. */ + ofproto_rule_expire(&rule->up, reason); + } } /* Facets. */ @@ -4426,8 +3997,7 @@ rule_expire(struct rule_dpif *rule) * The facet will initially have no subfacets. The caller should create (at * least) one subfacet with subfacet_create(). */ static struct facet * -facet_create(const struct flow_miss *miss, struct rule_dpif *rule, - struct xlate_out *xout, struct dpif_flow_stats *stats) +facet_create(const struct flow_miss *miss) { struct ofproto_dpif *ofproto = miss->ofproto; struct facet *facet; @@ -4435,10 +4005,7 @@ facet_create(const struct flow_miss *miss, struct rule_dpif *rule, facet = xzalloc(sizeof *facet); facet->ofproto = miss->ofproto; - facet->packet_count = facet->prev_packet_count = stats->n_packets; - facet->byte_count = facet->prev_byte_count = stats->n_bytes; - facet->tcp_flags = stats->tcp_flags; - facet->used = stats->used; + facet->used = miss->stats.used; facet->flow = miss->flow; facet->learn_rl = time_msec() + 500; @@ -4446,15 +4013,15 @@ facet_create(const struct flow_miss *miss, struct rule_dpif *rule, netflow_flow_init(&facet->nf_flow); netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used); - xlate_out_copy(&facet->xout, xout); + xlate_out_copy(&facet->xout, &miss->xout); match_init(&match, &facet->flow, &facet->xout.wc); cls_rule_init(&facet->cr, &match, OFP_DEFAULT_PRIORITY); + ovs_rwlock_wrlock(&ofproto->facets.rwlock); classifier_insert(&ofproto->facets, &facet->cr); + ovs_rwlock_unlock(&ofproto->facets.rwlock); facet->nf_flow.output_iface = facet->xout.nf_output_iface; - facet->fail_open = rule->up.cr.priority == FAIL_OPEN_PRIORITY; - return facet; } @@ -4518,7 +4085,9 @@ facet_remove(struct facet *facet) &facet->subfacets) { subfacet_destroy__(subfacet); } + ovs_rwlock_wrlock(&facet->ofproto->facets.rwlock); classifier_remove(&facet->ofproto->facets, &facet->cr); + ovs_rwlock_unlock(&facet->ofproto->facets.rwlock); cls_rule_destroy(&facet->cr); facet_free(facet); } @@ -4602,16 +4171,19 @@ facet_is_controller_flow(struct facet *facet) { if (facet) { struct ofproto_dpif *ofproto = facet->ofproto; - const struct rule_dpif *rule = rule_dpif_lookup(ofproto, &facet->flow, - NULL); - const struct ofpact *ofpacts = 
rule->up.ofpacts; - size_t ofpacts_len = rule->up.ofpacts_len; - - if (ofpacts_len > 0 && - ofpacts->type == OFPACT_CONTROLLER && - ofpact_next(ofpacts) >= ofpact_end(ofpacts, ofpacts_len)) { - return true; - } + const struct ofpact *ofpacts; + struct rule_dpif *rule; + size_t ofpacts_len; + bool is_controller; + + rule_dpif_lookup(ofproto, &facet->flow, NULL, &rule); + ofpacts_len = rule->up.ofpacts_len; + ofpacts = rule->up.ofpacts; + is_controller = ofpacts_len > 0 + && ofpacts->type == OFPACT_CONTROLLER + && ofpact_next(ofpacts) >= ofpact_end(ofpacts, ofpacts_len); + rule_release(rule); + return is_controller; } return false; } @@ -4662,7 +4234,11 @@ facet_flush_stats(struct facet *facet) static struct facet * facet_find(struct ofproto_dpif *ofproto, const struct flow *flow) { - struct cls_rule *cr = classifier_lookup(&ofproto->facets, flow, NULL); + struct cls_rule *cr; + + ovs_rwlock_rdlock(&ofproto->facets.rwlock); + cr = classifier_lookup(&ofproto->facets, flow, NULL); + ovs_rwlock_unlock(&ofproto->facets.rwlock); return cr ? CONTAINER_OF(cr, struct facet, cr) : NULL; } @@ -4677,9 +4253,7 @@ facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow) facet = facet_find(ofproto, flow); if (facet - && (ofproto->backer->need_revalidate - || tag_set_intersects(&ofproto->backer->revalidate_set, - facet->xout.tags)) + && ofproto->backer->need_revalidate && !facet_revalidate(facet)) { return NULL; } @@ -4696,17 +4270,16 @@ facet_check_consistency(struct facet *facet) struct xlate_in xin; struct rule_dpif *rule; - bool ok, fail_open; + bool ok; /* Check the datapath actions for consistency. */ - rule = rule_dpif_lookup(facet->ofproto, &facet->flow, NULL); + rule_dpif_lookup(facet->ofproto, &facet->flow, NULL, &rule); xlate_in_init(&xin, facet->ofproto, &facet->flow, rule, 0, NULL); xlate_actions(&xin, &xout); + rule_release(rule); - fail_open = rule->up.cr.priority == FAIL_OPEN_PRIORITY; ok = ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions) - && facet->xout.slow == xout.slow - && facet->fail_open == fail_open; + && facet->xout.slow == xout.slow; if (!ok && !VLOG_DROP_WARN(&rl)) { struct ds s = DS_EMPTY_INITIALIZER; @@ -4727,10 +4300,6 @@ facet_check_consistency(struct facet *facet) ds_put_format(&s, " slow path incorrect. should be %d", xout.slow); } - if (facet->fail_open != fail_open) { - ds_put_format(&s, " fail open incorrect. should be %s", - fail_open ? "true" : "false"); - } ds_destroy(&s); } xlate_out_uninit(&xout); @@ -4747,7 +4316,7 @@ facet_check_consistency(struct facet *facet) * where it is and recompiles its actions anyway. * * - If any of 'facet''s subfacets correspond to a new flow according to - * ofproto_receive(), 'facet' is removed. + * xlate_receive(), 'facet' is removed. * * Returns true if 'facet' is still valid. False if 'facet' was removed. */ static bool @@ -4770,9 +4339,9 @@ facet_revalidate(struct facet *facet) struct flow recv_flow; int error; - error = ofproto_receive(ofproto->backer, NULL, subfacet->key, - subfacet->key_len, &recv_flow, NULL, - &recv_ofproto, NULL); + error = xlate_receive(ofproto->backer, NULL, subfacet->key, + subfacet->key_len, &recv_flow, NULL, + &recv_ofproto, NULL); if (error || recv_ofproto != ofproto || facet != facet_find(ofproto, &recv_flow)) { @@ -4782,7 +4351,7 @@ facet_revalidate(struct facet *facet) } flow_wildcards_init_catchall(&wc); - new_rule = rule_dpif_lookup(ofproto, &facet->flow, &wc); + rule_dpif_lookup(ofproto, &facet->flow, &wc, &new_rule); /* Calculate new datapath actions. 
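
A recurring change in this hunk is the new calling convention for rule lookup: rule_dpif_lookup() now returns its result through an out-parameter while holding a read lock on the rule's 'evict' rwlock, so every lookup must be paired with rule_release(). A minimal sketch of the discipline, using only the functions introduced by this patch:

/* Sketch of the lookup/release pairing introduced here: the looked-up
 * rule cannot be evicted until rule_release() drops the read lock, so
 * keep the critical section short. */
static void
with_rule(struct ofproto_dpif *ofproto, const struct flow *flow)
{
    struct rule_dpif *rule;

    rule_dpif_lookup(ofproto, flow, NULL, &rule);
    /* ... use 'rule'; it is protected from eviction here ... */
    rule_release(rule);         /* Tolerates NULL; drops the lock. */
}
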
* @@ -4805,6 +4374,7 @@ facet_revalidate(struct facet *facet) || memcmp(&facet->xout.wc, &xout.wc, sizeof xout.wc)) { facet_remove(facet); xlate_out_uninit(&xout); + rule_release(new_rule); return false; } @@ -4826,7 +4396,6 @@ facet_revalidate(struct facet *facet) } /* Update 'facet' now that we've taken care of all the old state. */ - facet->xout.tags = xout.tags; facet->xout.slow = xout.slow; facet->xout.has_learn = xout.has_learn; facet->xout.has_normal = xout.has_normal; @@ -4835,9 +4404,9 @@ facet_revalidate(struct facet *facet) facet->xout.mirrors = xout.mirrors; facet->nf_flow.output_iface = facet->xout.nf_output_iface; facet->used = MAX(facet->used, new_rule->up.created); - facet->fail_open = new_rule->up.cr.priority == FAIL_OPEN_PRIORITY; xlate_out_uninit(&xout); + rule_release(new_rule); return true; } @@ -4851,6 +4420,28 @@ facet_reset_counters(struct facet *facet) facet->accounted_bytes = 0; } +static void +flow_push_stats(struct ofproto_dpif *ofproto, struct flow *flow, + struct dpif_flow_stats *stats, bool may_learn) +{ + struct ofport_dpif *in_port; + struct rule_dpif *rule; + struct xlate_in xin; + + in_port = get_ofp_port(ofproto, flow->in_port.ofp_port); + if (in_port && in_port->is_tunnel) { + netdev_vport_inc_rx(in_port->up.netdev, stats); + } + + rule_dpif_lookup(ofproto, flow, NULL, &rule); + rule_credit_stats(rule, stats); + xlate_in_init(&xin, ofproto, flow, rule, stats->tcp_flags, NULL); + xin.resubmit_stats = stats; + xin.may_learn = may_learn; + xlate_actions_for_side_effects(&xin); + rule_release(rule); +} + static void facet_push_stats(struct facet *facet, bool may_learn) { @@ -4866,33 +4457,16 @@ facet_push_stats(struct facet *facet, bool may_learn) stats.tcp_flags = facet->tcp_flags; if (may_learn || stats.n_packets || facet->used > facet->prev_used) { - struct ofproto_dpif *ofproto = facet->ofproto; - struct ofport_dpif *in_port; - struct rule_dpif *rule; - struct xlate_in xin; - facet->prev_packet_count = facet->packet_count; facet->prev_byte_count = facet->byte_count; facet->prev_used = facet->used; - in_port = get_ofp_port(ofproto, facet->flow.in_port.ofp_port); - if (in_port && in_port->is_tunnel) { - netdev_vport_inc_rx(in_port->up.netdev, &stats); - } - - rule = rule_dpif_lookup(ofproto, &facet->flow, NULL); - rule_credit_stats(rule, &stats); - netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, + netflow_flow_update_time(facet->ofproto->netflow, &facet->nf_flow, facet->used); netflow_flow_update_flags(&facet->nf_flow, facet->tcp_flags); - mirror_update_stats(ofproto->mbridge, facet->xout.mirrors, + mirror_update_stats(facet->ofproto->mbridge, facet->xout.mirrors, stats.n_packets, stats.n_bytes); - - xlate_in_init(&xin, ofproto, &facet->flow, rule, stats.tcp_flags, - NULL); - xin.resubmit_stats = &stats; - xin.may_learn = may_learn; - xlate_actions_for_side_effects(&xin); + flow_push_stats(facet->ofproto, &facet->flow, &stats, may_learn); } } @@ -4910,6 +4484,7 @@ push_all_stats__(bool run_fast) struct cls_cursor cursor; struct facet *facet; + ovs_rwlock_rdlock(&ofproto->facets.rwlock); cls_cursor_init(&cursor, &ofproto->facets, NULL); CLS_CURSOR_FOR_EACH (facet, cr, &cursor) { facet_push_stats(facet, false); @@ -4917,6 +4492,7 @@ push_all_stats__(bool run_fast) run_fast_rl(); } } + ovs_rwlock_unlock(&ofproto->facets.rwlock); } rl = time_msec() + 100; @@ -4931,9 +4507,11 @@ push_all_stats(void) void rule_credit_stats(struct rule_dpif *rule, const struct dpif_flow_stats *stats) { + ovs_mutex_lock(&rule->stats_mutex); rule->packet_count += 
stats->n_packets; rule->byte_count += stats->n_bytes; ofproto_rule_update_used(&rule->up, stats->used); + ovs_mutex_unlock(&rule->stats_mutex); } /* Subfacets. */ @@ -4960,8 +4538,7 @@ subfacet_find(struct dpif_backer *backer, const struct nlattr *key, * existing subfacet if there is one, otherwise creates and returns a * new subfacet. */ static struct subfacet * -subfacet_create(struct facet *facet, struct flow_miss *miss, - long long int now) +subfacet_create(struct facet *facet, struct flow_miss *miss) { struct dpif_backer *backer = miss->ofproto->backer; enum odp_key_fitness key_fitness = miss->key_fitness; @@ -4995,8 +4572,8 @@ subfacet_create(struct facet *facet, struct flow_miss *miss, subfacet->key_fitness = key_fitness; subfacet->key = xmemdup(key, key_len); subfacet->key_len = key_len; - subfacet->used = now; - subfacet->created = now; + subfacet->used = miss->stats.used; + subfacet->created = subfacet->used; subfacet->dp_packet_count = 0; subfacet->dp_byte_count = 0; subfacet->path = SF_NOT_INSTALLED; @@ -5189,31 +4766,39 @@ subfacet_update_stats(struct subfacet *subfacet, /* Lookup 'flow' in 'ofproto''s classifier. If 'wc' is non-null, sets * the fields that were relevant as part of the lookup. */ -static struct rule_dpif * +void rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow, - struct flow_wildcards *wc) + struct flow_wildcards *wc, struct rule_dpif **rule) { - struct rule_dpif *rule; + struct ofport_dpif *port; - rule = rule_dpif_lookup_in_table(ofproto, flow, wc, 0); - if (rule) { - return rule; + if (rule_dpif_lookup_in_table(ofproto, flow, wc, 0, rule)) { + return; + } + port = get_ofp_port(ofproto, flow->in_port.ofp_port); + if (!port) { + VLOG_WARN_RL(&rl, "packet-in on unknown OpenFlow port %"PRIu16, + flow->in_port.ofp_port); } - return rule_dpif_miss_rule(ofproto, flow); + *rule = choose_miss_rule(port ? port->up.pp.config : 0, ofproto->miss_rule, + ofproto->no_packet_in_rule); + ovs_rwlock_rdlock(&(*rule)->up.evict); } -struct rule_dpif * +bool rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, const struct flow *flow, struct flow_wildcards *wc, - uint8_t table_id) + uint8_t table_id, struct rule_dpif **rule) + OVS_TRY_RDLOCK(true, (*rule)->up.evict) { struct cls_rule *cls_rule; struct classifier *cls; bool frag; + *rule = NULL; if (table_id >= N_TABLES) { - return NULL; + return false; } if (wc) { @@ -5222,6 +4807,7 @@ rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, } cls = &ofproto->up.tables[table_id].cls; + ovs_rwlock_rdlock(&cls->rwlock); frag = (flow->nw_frag & FLOW_NW_FRAG_ANY) != 0; if (frag && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) { /* We must pretend that transport ports are unavailable. */ @@ -5237,25 +4823,35 @@ rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, } else { cls_rule = classifier_lookup(cls, flow, wc); } - return rule_dpif_cast(rule_from_cls_rule(cls_rule)); + + *rule = rule_dpif_cast(rule_from_cls_rule(cls_rule)); + if (*rule && ovs_rwlock_tryrdlock(&(*rule)->up.evict)) { + /* The rule is in the process of being removed. Best we can do is + * pretend it isn't there. */ + *rule = NULL; + } + ovs_rwlock_unlock(&cls->rwlock); + + return *rule != NULL; } +/* Given a port configuration (specified as zero if there's no port), chooses + * which of 'miss_rule' and 'no_packet_in_rule' should be used in case of a + * flow table miss. 
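
choose_miss_rule(), defined just below, collapses the old rule_dpif_miss_rule() branching into a single expression over the port's config bits. A usage sketch mirroring how rule_dpif_lookup() above calls it, where a vanished port contributes an empty config; note that the real caller additionally read-locks the returned rule:

/* Usage sketch for choose_miss_rule(): a port that no longer exists is
 * treated as having no special config, so the plain miss rule applies. */
static struct rule_dpif *
miss_rule_for_port(struct ofproto_dpif *ofproto,
                   const struct ofport_dpif *port)
{
    enum ofputil_port_config config = port ? port->up.pp.config : 0;

    return choose_miss_rule(config, ofproto->miss_rule,
                            ofproto->no_packet_in_rule);
}
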
*/ struct rule_dpif * -rule_dpif_miss_rule(struct ofproto_dpif *ofproto, const struct flow *flow) +choose_miss_rule(enum ofputil_port_config config, struct rule_dpif *miss_rule, + struct rule_dpif *no_packet_in_rule) { - struct ofport_dpif *port; - - port = get_ofp_port(ofproto, flow->in_port.ofp_port); - if (!port) { - VLOG_WARN_RL(&rl, "packet-in on unknown OpenFlow port %"PRIu16, - flow->in_port.ofp_port); - return ofproto->miss_rule; - } + return config & OFPUTIL_PC_NO_PACKET_IN ? no_packet_in_rule : miss_rule; +} - if (port->up.pp.config & OFPUTIL_PC_NO_PACKET_IN) { - return ofproto->no_packet_in_rule; +void +rule_release(struct rule_dpif *rule) + OVS_NO_THREAD_SAFETY_ANALYSIS +{ + if (rule) { + ovs_rwlock_unlock(&rule->up.evict); } - return ofproto->miss_rule; } static void @@ -5263,7 +4859,7 @@ complete_operation(struct rule_dpif *rule) { struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto); - rule_invalidate(rule); + ofproto->backer->need_revalidate = REV_FLOW_TABLE; if (clogged) { struct dpif_completion *c = xmalloc(sizeof *c); c->op = rule->up.pending; @@ -5291,35 +4887,21 @@ static enum ofperr rule_construct(struct rule *rule_) { struct rule_dpif *rule = rule_dpif_cast(rule_); - struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto); - struct rule_dpif *victim; - uint8_t table_id; - + ovs_mutex_init(&rule->stats_mutex); + ovs_mutex_lock(&rule->stats_mutex); rule->packet_count = 0; rule->byte_count = 0; - - table_id = rule->up.table_id; - victim = rule_dpif_cast(ofoperation_get_victim(rule->up.pending)); - if (victim) { - rule->tag = victim->tag; - } else if (table_id == 0) { - rule->tag = 0; - } else { - struct flow flow; - - miniflow_expand(&rule->up.cr.match.flow, &flow); - rule->tag = rule_calculate_tag(&flow, &rule->up.cr.match.mask, - ofproto->tables[table_id].basis); - } - + ovs_mutex_unlock(&rule->stats_mutex); complete_operation(rule); return 0; } static void -rule_destruct(struct rule *rule) +rule_destruct(struct rule *rule_) { - complete_operation(rule_dpif_cast(rule)); + struct rule_dpif *rule = rule_dpif_cast(rule_); + complete_operation(rule); + ovs_mutex_destroy(&rule->stats_mutex); } static void @@ -5335,8 +4917,10 @@ rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes) /* Start from historical data for 'rule' itself that are no longer tracked * in facets. This counts, for example, facets that have expired. 
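
The stats changes in this hunk all follow one discipline: rule->packet_count and rule->byte_count are now touched only under rule->stats_mutex, whether crediting (rule_credit_stats() above) or reading (rule_get_stats(), continued below). A compact sketch of both sides, using only the fields visible in this diff:

/* Writer side: fold a stats delta into the rule under its mutex, as
 * rule_credit_stats() does above. */
static void
rule_add_stats(struct rule_dpif *rule, uint64_t packets, uint64_t bytes)
{
    ovs_mutex_lock(&rule->stats_mutex);
    rule->packet_count += packets;
    rule->byte_count += bytes;
    ovs_mutex_unlock(&rule->stats_mutex);
}

/* Reader side: snapshot both counters atomically with respect to the
 * writer, as rule_get_stats() does below. */
static void
rule_snapshot_stats(struct rule_dpif *rule, uint64_t *packets,
                    uint64_t *bytes)
{
    ovs_mutex_lock(&rule->stats_mutex);
    *packets = rule->packet_count;
    *bytes = rule->byte_count;
    ovs_mutex_unlock(&rule->stats_mutex);
}
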
*/ + ovs_mutex_lock(&rule->stats_mutex); *packets = rule->packet_count; *bytes = rule->byte_count; + ovs_mutex_unlock(&rule->stats_mutex); } static void @@ -5462,158 +5046,16 @@ compose_slow_path(const struct ofproto_dpif *ofproto, const struct flow *flow, ODPP_NONE); odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path, &buf); } else { - put_userspace_action(ofproto, &buf, flow, &cookie, - sizeof cookie.slow_path); + odp_port_t odp_port; + uint32_t pid; + + odp_port = ofp_port_to_odp_port(ofproto, flow->in_port.ofp_port); + pid = dpif_port_get_pid(ofproto->backer->dpif, odp_port); + odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path, &buf); } *actionsp = buf.data; *actions_lenp = buf.size; } - -size_t -put_userspace_action(const struct ofproto_dpif *ofproto, - struct ofpbuf *odp_actions, - const struct flow *flow, - const union user_action_cookie *cookie, - const size_t cookie_size) -{ - uint32_t pid; - - pid = dpif_port_get_pid(ofproto->backer->dpif, - ofp_port_to_odp_port(ofproto, - flow->in_port.ofp_port)); - - return odp_put_userspace_action(pid, cookie, cookie_size, odp_actions); -} - -tag_type -calculate_flow_tag(struct ofproto_dpif *ofproto, const struct flow *flow, - uint8_t table_id, struct rule_dpif *rule) -{ - if (table_id > 0 && table_id < N_TABLES) { - struct table_dpif *table = &ofproto->tables[table_id]; - if (table->other_table) { - return (rule && rule->tag - ? rule->tag - : rule_calculate_tag(flow, &table->other_table->mask, - table->basis)); - } - } - - return 0; -} - -/* Optimized flow revalidation. - * - * It's a difficult problem, in general, to tell which facets need to have - * their actions recalculated whenever the OpenFlow flow table changes. We - * don't try to solve that general problem: for most kinds of OpenFlow flow - * table changes, we recalculate the actions for every facet. This is - * relatively expensive, but it's good enough if the OpenFlow flow table - * doesn't change very often. - * - * However, we can expect one particular kind of OpenFlow flow table change to - * happen frequently: changes caused by MAC learning. To avoid wasting a lot - * of CPU on revalidating every facet whenever MAC learning modifies the flow - * table, we add a special case that applies to flow tables in which every rule - * has the same form (that is, the same wildcards), except that the table is - * also allowed to have a single "catch-all" flow that matches all packets. We - * optimize this case by tagging all of the facets that resubmit into the table - * and invalidating the same tag whenever a flow changes in that table. The - * end result is that we revalidate just the facets that need it (and sometimes - * a few more, but not all of the facets or even all of the facets that - * resubmit to the table modified by MAC learning). */ - -/* Calculates the tag to use for 'flow' and mask 'mask' when it is inserted - * into an OpenFlow table with the given 'basis'. */ -tag_type -rule_calculate_tag(const struct flow *flow, const struct minimask *mask, - uint32_t secret) -{ - if (minimask_is_catchall(mask)) { - return 0; - } else { - uint32_t hash = flow_hash_in_minimask(flow, mask, secret); - return tag_create_deterministic(hash); - } -} - -/* Following a change to OpenFlow table 'table_id' in 'ofproto', update the - * taggability of that table. - * - * This function must be called after *each* change to a flow table. If you - * skip calling it on some changes then the pointer comparisons at the end can - * be invalid if you get unlucky. 
For example, if a flow removal causes a - * cls_table to be destroyed and then a flow insertion causes a cls_table with - * different wildcards to be created with the same address, then this function - * will incorrectly skip revalidation. */ -static void -table_update_taggable(struct ofproto_dpif *ofproto, uint8_t table_id) -{ - struct table_dpif *table = &ofproto->tables[table_id]; - const struct oftable *oftable = &ofproto->up.tables[table_id]; - struct cls_table *catchall, *other; - struct cls_table *t; - - catchall = other = NULL; - - switch (hmap_count(&oftable->cls.tables)) { - case 0: - /* We could tag this OpenFlow table but it would make the logic a - * little harder and it's a corner case that doesn't seem worth it - * yet. */ - break; - - case 1: - case 2: - HMAP_FOR_EACH (t, hmap_node, &oftable->cls.tables) { - if (cls_table_is_catchall(t)) { - catchall = t; - } else if (!other) { - other = t; - } else { - /* Indicate that we can't tag this by setting both tables to - * NULL. (We know that 'catchall' is already NULL.) */ - other = NULL; - } - } - break; - - default: - /* Can't tag this table. */ - break; - } - - if (table->catchall_table != catchall || table->other_table != other) { - table->catchall_table = catchall; - table->other_table = other; - ofproto->backer->need_revalidate = REV_FLOW_TABLE; - } -} - -/* Given 'rule' that has changed in some way (either it is a rule being - * inserted, a rule being deleted, or a rule whose actions are being - * modified), marks facets for revalidation to ensure that packets will be - * forwarded correctly according to the new state of the flow table. - * - * This function must be called after *each* change to a flow table. See - * the comment on table_update_taggable() for more information. */ -static void -rule_invalidate(const struct rule_dpif *rule) -{ - struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto); - - table_update_taggable(ofproto, rule->up.table_id); - - if (!ofproto->backer->need_revalidate) { - struct table_dpif *table = &ofproto->tables[rule->up.table_id]; - - if (table->other_table && rule->tag) { - tag_set_add(&ofproto->backer->revalidate_set, rule->tag); - } else { - ofproto->backer->need_revalidate = REV_FLOW_TABLE; - } - } -} static bool set_frag_handling(struct ofproto *ofproto_, @@ -5725,10 +5167,12 @@ send_netflow_active_timeouts(struct ofproto_dpif *ofproto) struct cls_cursor cursor; struct facet *facet; + ovs_rwlock_rdlock(&ofproto->facets.rwlock); cls_cursor_init(&cursor, &ofproto->facets, NULL); CLS_CURSOR_FOR_EACH (facet, cr, &cursor) { send_active_timeout(ofproto, facet); } + ovs_rwlock_unlock(&ofproto->facets.rwlock); } static struct ofproto_dpif * @@ -5757,10 +5201,14 @@ ofproto_unixctl_fdb_flush(struct unixctl_conn *conn, int argc, unixctl_command_reply_error(conn, "no such bridge"); return; } - mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set); + ovs_rwlock_wrlock(&ofproto->ml->rwlock); + mac_learning_flush(ofproto->ml); + ovs_rwlock_unlock(&ofproto->ml->rwlock); } else { HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) { - mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set); + ovs_rwlock_wrlock(&ofproto->ml->rwlock); + mac_learning_flush(ofproto->ml); + ovs_rwlock_unlock(&ofproto->ml->rwlock); } } @@ -5789,6 +5237,7 @@ ofproto_unixctl_fdb_show(struct unixctl_conn *conn, int argc OVS_UNUSED, } ds_put_cstr(&ds, " port VLAN MAC Age\n"); + ovs_rwlock_rdlock(&ofproto->ml->rwlock); LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) { struct 
ofbundle *bundle = e->port.p; char name[OFP_MAX_PORT_NAME_LEN]; @@ -5799,6 +5248,7 @@ ofproto_unixctl_fdb_show(struct unixctl_conn *conn, int argc OVS_UNUSED, name, e->vlan, ETH_ADDR_ARGS(e->mac), mac_entry_age(ofproto->ml, e)); } + ovs_rwlock_unlock(&ofproto->ml->rwlock); unixctl_command_reply(conn, ds_cstr(&ds)); ds_destroy(&ds); } @@ -5961,10 +5411,8 @@ ofproto_unixctl_trace(struct unixctl_conn *conn, int argc, const char *argv[], backer = node->data; } - /* Extract the ofproto_dpif object from the ofproto_receive() - * function. */ - if (ofproto_receive(backer, NULL, odp_key.data, - odp_key.size, &flow, NULL, &ofproto, NULL)) { + if (xlate_receive(backer, NULL, odp_key.data, odp_key.size, &flow, + NULL, &ofproto, NULL)) { unixctl_command_reply_error(conn, "Invalid datapath flow"); goto exit; } @@ -6000,7 +5448,7 @@ ofproto_unixctl_trace(struct unixctl_conn *conn, int argc, const char *argv[], /* Use the metadata from the flow and the packet argument * to reconstruct the flow. */ - flow_extract(packet, flow.skb_priority, flow.skb_mark, NULL, + flow_extract(packet, flow.skb_priority, flow.pkt_mark, NULL, &in_port_, &flow); } } @@ -6015,17 +5463,19 @@ exit: ofpbuf_uninit(&odp_mask); } -void +static void ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow, const struct ofpbuf *packet, struct ds *ds) { struct rule_dpif *rule; + struct flow_wildcards wc; ds_put_cstr(ds, "Flow: "); flow_format(ds, flow); ds_put_char(ds, '\n'); - rule = rule_dpif_lookup(ofproto, flow, NULL); + flow_wildcards_init_catchall(&wc); + rule_dpif_lookup(ofproto, flow, &wc, &rule); trace_format_rule(ds, 0, rule); if (rule == ofproto->miss_rule) { @@ -6055,6 +5505,7 @@ ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow, trace.xin.report_hook = trace_report; xlate_actions(&trace.xin, &trace.xout); + flow_wildcards_or(&trace.xout.wc, &trace.xout.wc, &wc); ds_put_char(ds, '\n'); trace_format_flow(ds, 0, "Final flow", &trace); @@ -6095,6 +5546,8 @@ ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow, xlate_out_uninit(&trace.xout); } + + rule_release(rule); } static void @@ -6123,12 +5576,14 @@ ofproto_dpif_self_check__(struct ofproto_dpif *ofproto, struct ds *reply) int errors; errors = 0; + ovs_rwlock_rdlock(&ofproto->facets.rwlock); cls_cursor_init(&cursor, &ofproto->facets, NULL); CLS_CURSOR_FOR_EACH (facet, cr, &cursor) { if (!facet_check_consistency(facet)) { errors++; } } + ovs_rwlock_unlock(&ofproto->facets.rwlock); if (errors) { ofproto->backer->need_revalidate = REV_INCONSISTENCY; } @@ -6349,6 +5804,7 @@ ofproto_unixctl_dpif_dump_megaflows(struct unixctl_conn *conn, return; } + ovs_rwlock_rdlock(&ofproto->facets.rwlock); cls_cursor_init(&cursor, &ofproto->facets, NULL); CLS_CURSOR_FOR_EACH (facet, cr, &cursor) { cls_rule_format(&facet->cr, &ds); @@ -6371,6 +5827,7 @@ ofproto_unixctl_dpif_dump_megaflows(struct unixctl_conn *conn, } ds_put_cstr(&ds, "\n"); } + ovs_rwlock_unlock(&ofproto->facets.rwlock); ds_chomp(&ds, '\n'); unixctl_command_reply(conn, ds_cstr(&ds)); @@ -6452,7 +5909,7 @@ ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn, } odp_flow_format(subfacet->key, subfacet->key_len, - mask.data, mask.size, &ds); + mask.data, mask.size, &ds, false); ds_put_format(&ds, ", packets:%"PRIu64", bytes:%"PRIu64", used:", subfacet->dp_packet_count, subfacet->dp_byte_count); @@ -6594,20 +6051,20 @@ hash_realdev_vid(ofp_port_t realdev_ofp_port, int vid) bool ofproto_has_vlan_splinters(const struct ofproto_dpif *ofproto) + OVS_EXCLUDED(ofproto->vsp_mutex) { - return 
!hmap_is_empty(&ofproto->realdev_vid_map); + bool ret; + + ovs_mutex_lock(&ofproto->vsp_mutex); + ret = !hmap_is_empty(&ofproto->realdev_vid_map); + ovs_mutex_unlock(&ofproto->vsp_mutex); + return ret; } -/* Returns the OFP port number of the Linux VLAN device that corresponds to - * 'vlan_tci' on the network device with port number 'realdev_ofp_port' in - * 'struct ofport_dpif'. For example, given 'realdev_ofp_port' of eth0 and - * 'vlan_tci' 9, it would return the port number of eth0.9. - * - * Unless VLAN splinters are enabled for port 'realdev_ofp_port', this - * function just returns its 'realdev_ofp_port' argument. */ -ofp_port_t -vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto, - ofp_port_t realdev_ofp_port, ovs_be16 vlan_tci) +static ofp_port_t +vsp_realdev_to_vlandev__(const struct ofproto_dpif *ofproto, + ofp_port_t realdev_ofp_port, ovs_be16 vlan_tci) + OVS_REQUIRES(ofproto->vsp_mutex) { if (!hmap_is_empty(&ofproto->realdev_vid_map)) { int vid = vlan_tci_to_vid(vlan_tci); @@ -6625,6 +6082,26 @@ vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto, return realdev_ofp_port; } +/* Returns the OFP port number of the Linux VLAN device that corresponds to + * 'vlan_tci' on the network device with port number 'realdev_ofp_port' in + * 'struct ofport_dpif'. For example, given 'realdev_ofp_port' of eth0 and + * 'vlan_tci' 9, it would return the port number of eth0.9. + * + * Unless VLAN splinters are enabled for port 'realdev_ofp_port', this + * function just returns its 'realdev_ofp_port' argument. */ +ofp_port_t +vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto, + ofp_port_t realdev_ofp_port, ovs_be16 vlan_tci) + OVS_EXCLUDED(ofproto->vsp_mutex) +{ + ofp_port_t ret; + + ovs_mutex_lock(&ofproto->vsp_mutex); + ret = vsp_realdev_to_vlandev__(ofproto, realdev_ofp_port, vlan_tci); + ovs_mutex_unlock(&ofproto->vsp_mutex); + return ret; +} + static struct vlan_splinter * vlandev_find(const struct ofproto_dpif *ofproto, ofp_port_t vlandev_ofp_port) { @@ -6653,6 +6130,7 @@ vlandev_find(const struct ofproto_dpif *ofproto, ofp_port_t vlandev_ofp_port) static ofp_port_t vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto, ofp_port_t vlandev_ofp_port, int *vid) + OVS_REQUIRES(ofproto->vsp_mutex) { if (!hmap_is_empty(&ofproto->vlandev_map)) { const struct vlan_splinter *vsp; @@ -6674,13 +6152,16 @@ vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto, * 'flow->vlan_tci' to the VLAN VID, and returns true. Otherwise (which is * always the case unless VLAN splinters are enabled), returns false without * making any changes. 
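
The VLAN-splinter hunks above all apply the same locking pattern: each map is guarded by ofproto->vsp_mutex, with a public wrapper annotated OVS_EXCLUDED that takes the mutex and delegates to a double-underscore internal variant annotated OVS_REQUIRES, which callers already holding the mutex (such as vsp_add()) use directly. A generic sketch of the shape; lookup_locked() and do_lookup() are hypothetical names:

/* Internal variant: caller must already hold the mutex.  The
 * OVS_REQUIRES annotation lets the thread-safety analysis verify it. */
static int
lookup_locked(struct ofproto_dpif *ofproto, int key)
    OVS_REQUIRES(ofproto->vsp_mutex)
{
    /* ... walk ofproto->vlandev_map or ofproto->realdev_vid_map ... */
    return key;
}

/* Public variant: acquires and releases the mutex around the real work,
 * so callers need not know about the lock at all. */
static int
do_lookup(struct ofproto_dpif *ofproto, int key)
    OVS_EXCLUDED(ofproto->vsp_mutex)
{
    int ret;

    ovs_mutex_lock(&ofproto->vsp_mutex);
    ret = lookup_locked(ofproto, key);
    ovs_mutex_unlock(&ofproto->vsp_mutex);
    return ret;
}
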
*/ -static bool +bool vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow) + OVS_EXCLUDED(ofproto->vsp_mutex) { ofp_port_t realdev; int vid; + ovs_mutex_lock(&ofproto->vsp_mutex); realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port.ofp_port, &vid); + ovs_mutex_unlock(&ofproto->vsp_mutex); if (!realdev) { return false; } @@ -6698,6 +6179,7 @@ vsp_remove(struct ofport_dpif *port) struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto); struct vlan_splinter *vsp; + ovs_mutex_lock(&ofproto->vsp_mutex); vsp = vlandev_find(ofproto, port->up.ofp_port); if (vsp) { hmap_remove(&ofproto->vlandev_map, &vsp->vlandev_node); @@ -6708,6 +6190,7 @@ vsp_remove(struct ofport_dpif *port) } else { VLOG_ERR("missing vlan device record"); } + ovs_mutex_unlock(&ofproto->vsp_mutex); } static void @@ -6715,24 +6198,27 @@ vsp_add(struct ofport_dpif *port, ofp_port_t realdev_ofp_port, int vid) { struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto); + ovs_mutex_lock(&ofproto->vsp_mutex); if (!vsp_vlandev_to_realdev(ofproto, port->up.ofp_port, NULL) - && (vsp_realdev_to_vlandev(ofproto, realdev_ofp_port, htons(vid)) + && (vsp_realdev_to_vlandev__(ofproto, realdev_ofp_port, htons(vid)) == realdev_ofp_port)) { struct vlan_splinter *vsp; vsp = xmalloc(sizeof *vsp); - hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node, - hash_ofp_port(port->up.ofp_port)); - hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node, - hash_realdev_vid(realdev_ofp_port, vid)); vsp->realdev_ofp_port = realdev_ofp_port; vsp->vlandev_ofp_port = port->up.ofp_port; vsp->vid = vid; port->realdev_ofp_port = realdev_ofp_port; + + hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node, + hash_ofp_port(port->up.ofp_port)); + hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node, + hash_realdev_vid(realdev_ofp_port, vid)); } else { VLOG_ERR("duplicate vlan device record"); } + ovs_mutex_unlock(&ofproto->vsp_mutex); } static odp_port_t @@ -6742,18 +6228,21 @@ ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port) return ofport ? ofport->odp_port : ODPP_NONE; } -static struct ofport_dpif * +struct ofport_dpif * odp_port_to_ofport(const struct dpif_backer *backer, odp_port_t odp_port) { struct ofport_dpif *port; + ovs_rwlock_rdlock(&backer->odp_to_ofport_lock); HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node, hash_odp_port(odp_port), &backer->odp_to_ofport_map) { if (port->odp_port == odp_port) { + ovs_rwlock_unlock(&backer->odp_to_ofport_lock); return port; } } + ovs_rwlock_unlock(&backer->odp_to_ofport_lock); return NULL; }
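
For completeness: the read-locked lookup in odp_port_to_ofport() above implies a matching write side wherever ports are added to the map. The insertion itself happens in port-construction code outside this hunk; a hypothetical sketch of what it presumably looks like, using only the lock, map, and hash helpers visible above:

/* Hypothetical write-side counterpart to odp_port_to_ofport(): insertions
 * into odp_to_ofport_map must take the rwlock for writing so concurrent
 * readers always see a consistent map. */
static void
backer_add_odp_port(struct dpif_backer *backer, struct ofport_dpif *port)
{
    ovs_rwlock_wrlock(&backer->odp_to_ofport_lock);
    hmap_insert(&backer->odp_to_ofport_map, &port->odp_port_node,
                hash_odp_port(port->odp_port));
    ovs_rwlock_unlock(&backer->odp_to_ofport_lock);
}
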