X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=lib%2Fdpif-netdev.c;h=b3a37426078a04082ff24c90a601dcd9f58b7608;hb=bdd534291ff72077390f7b1745c4443ad3833d99;hp=cc00f61c139cdf9e8e017e495f44185899354af4;hpb=8a4e3a858ae1feef4098837414665aa525f2d1e5;p=sliver-openvswitch.git

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index cc00f61c1..b3a374260 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -39,6 +39,7 @@
 #include "dynamic-string.h"
 #include "flow.h"
 #include "hmap.h"
+#include "latch.h"
 #include "list.h"
 #include "meta-flow.h"
 #include "netdev.h"
@@ -158,6 +159,11 @@ struct dp_netdev {
     struct ovs_rwlock port_rwlock;
     struct hmap ports OVS_GUARDED;
     struct seq *port_seq;       /* Incremented whenever a port changes. */
+
+    /* Forwarding threads. */
+    struct latch exit_latch;
+    struct dp_forwarder *forwarders;
+    size_t n_forwarders;
 };
 
 static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
@@ -281,6 +287,15 @@ struct dp_netdev_actions *dp_netdev_actions_ref(
     const struct dp_netdev_actions *);
 void dp_netdev_actions_unref(struct dp_netdev_actions *);
 
+/* A thread that receives packets from some ports, looks them up in the flow
+ * table, and executes the actions it finds. */
+struct dp_forwarder {
+    struct dp_netdev *dp;
+    pthread_t thread;
+    char *name;
+    uint32_t min_hash, max_hash;
+};
+
 /* Interface to netdev-based datapath. */
 struct dpif_netdev {
     struct dpif dpif;
@@ -317,6 +332,7 @@ static void dp_netdev_execute_actions(struct dp_netdev *dp,
 static void dp_netdev_port_input(struct dp_netdev *dp, struct ofpbuf *packet,
                                  struct pkt_metadata *)
     OVS_REQ_RDLOCK(dp->port_rwlock);
+static void dp_netdev_set_threads(struct dp_netdev *, int n);
 
 static struct dpif_netdev *
 dpif_netdev_cast(const struct dpif *dpif)
@@ -351,10 +367,17 @@ dpif_netdev_class_is_dummy(const struct dpif_class *class)
     return class != &dpif_netdev_class;
 }
 
+static bool
+dpif_netdev_class_is_planetlab(const struct dpif_class *class)
+{
+    return class == &dpif_planetlab_class;
+}
+
 static const char *
 dpif_netdev_port_open_type(const struct dpif_class *class, const char *type)
 {
     return strcmp(type, "internal") ? type
+           : dpif_netdev_class_is_planetlab(class) ? "pltap"
            : dpif_netdev_class_is_dummy(class) ? "dummy"
            : "tap";
 }
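
The exit_latch added to struct dp_netdev above is the shutdown channel for the
new forwarder threads: dp_netdev_set_threads() (added further down) sets it
and joins the workers, while each worker tests it with latch_is_set() and
registers it with latch_wait() before sleeping.  A minimal sketch of that
handshake — illustration only, not from the patch — assuming OVS's
lib/latch.h, lib/ovs-thread.h, and lib/poll-loop.h interfaces:

    #include "latch.h"
    #include "ovs-thread.h"
    #include "poll-loop.h"

    static void *
    worker_main(void *exit_latch_)
    {
        struct latch *exit_latch = exit_latch_;

        while (!latch_is_set(exit_latch)) {
            /* ... one round of non-blocking work ... */
            latch_wait(exit_latch);  /* Wake poll_block() once latch is set. */
            poll_block();
        }
        return NULL;
    }

    static void
    run_and_stop_one_worker(void)
    {
        struct latch exit_latch;
        pthread_t thread;

        latch_init(&exit_latch);
        xpthread_create(&thread, NULL, worker_main, &exit_latch);

        latch_set(&exit_latch);      /* Request exit... */
        xpthread_join(thread, NULL); /* ...wait for the worker to leave... */
        latch_poll(&exit_latch);     /* ...and clear the latch for reuse. */
        latch_destroy(&exit_latch);
    }
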
"dummy" : "tap"; } @@ -383,7 +406,8 @@ choose_port(struct dp_netdev *dp, const char *name) { uint32_t port_no; - if (dp->class != &dpif_netdev_class) { + if (dp->class != &dpif_netdev_class && + dp->class != &dpif_planetlab_class) { const char *p; int start_no = 0; @@ -453,6 +477,7 @@ create_dp_netdev(const char *name, const struct dpif_class *class, ovs_rwlock_init(&dp->port_rwlock); hmap_init(&dp->ports); dp->port_seq = seq_create(); + latch_init(&dp->exit_latch); ovs_rwlock_wrlock(&dp->port_rwlock); error = do_add_port(dp, name, "internal", ODPP_LOCAL); @@ -461,6 +486,7 @@ create_dp_netdev(const char *name, const struct dpif_class *class, dp_netdev_free(dp); return error; } + dp_netdev_set_threads(dp, 2); *dpp = dp; return 0; @@ -518,6 +544,9 @@ dp_netdev_free(struct dp_netdev *dp) shash_find_and_delete(&dp_netdevs, dp->name); + dp_netdev_set_threads(dp, 0); + free(dp->forwarders); + dp_netdev_flow_flush(dp); ovs_rwlock_wrlock(&dp->port_rwlock); HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) { @@ -539,6 +568,7 @@ dp_netdev_free(struct dp_netdev *dp) hmap_destroy(&dp->ports); atomic_flag_destroy(&dp->destroyed); ovs_refcount_destroy(&dp->ref_cnt); + latch_destroy(&dp->exit_latch); free(CONST_CAST(char *, dp->name)); free(dp); } @@ -586,9 +616,9 @@ dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats) { struct dp_netdev *dp = get_dp_netdev(dpif); - ovs_rwlock_rdlock(&dp->cls.rwlock); + fat_rwlock_rdlock(&dp->cls.rwlock); stats->n_flows = hmap_count(&dp->flow_table); - ovs_rwlock_unlock(&dp->cls.rwlock); + fat_rwlock_unlock(&dp->cls.rwlock); stats->n_hit = ovsthread_counter_read(dp->n_hit); stats->n_missed = ovsthread_counter_read(dp->n_missed); @@ -861,11 +891,11 @@ dp_netdev_flow_flush(struct dp_netdev *dp) struct dp_netdev_flow *netdev_flow, *next; ovs_mutex_lock(&dp->flow_mutex); - ovs_rwlock_wrlock(&dp->cls.rwlock); + fat_rwlock_wrlock(&dp->cls.rwlock); HMAP_FOR_EACH_SAFE (netdev_flow, next, node, &dp->flow_table) { dp_netdev_remove_flow(dp, netdev_flow); } - ovs_rwlock_unlock(&dp->cls.rwlock); + fat_rwlock_unlock(&dp->cls.rwlock); ovs_mutex_unlock(&dp->flow_mutex); } @@ -969,10 +999,10 @@ dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct flow *flow) { struct dp_netdev_flow *netdev_flow; - ovs_rwlock_rdlock(&dp->cls.rwlock); + fat_rwlock_rdlock(&dp->cls.rwlock); netdev_flow = dp_netdev_flow_cast(classifier_lookup(&dp->cls, flow, NULL)); dp_netdev_flow_ref(netdev_flow); - ovs_rwlock_unlock(&dp->cls.rwlock); + fat_rwlock_unlock(&dp->cls.rwlock); return netdev_flow; } @@ -1011,7 +1041,10 @@ dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len, struct flow *mask) { if (mask_key_len) { - if (odp_flow_key_to_mask(mask_key, mask_key_len, mask, flow)) { + enum odp_key_fitness fitness; + + fitness = odp_flow_key_to_mask(mask_key, mask_key_len, mask, flow); + if (fitness) { /* This should not happen: it indicates that * odp_flow_key_from_mask() and odp_flow_key_to_mask() * disagree on the acceptable form of a mask. 
@@ -1024,7 +1057,8 @@ dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len,
             ds_init(&s);
             odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s,
                             true);
-            VLOG_ERR("internal error parsing flow mask %s", ds_cstr(&s));
+            VLOG_ERR("internal error parsing flow mask %s (%s)",
+                     ds_cstr(&s), odp_key_fitness_to_string(fitness));
             ds_destroy(&s);
         }
 
@@ -1101,9 +1135,9 @@ dpif_netdev_flow_get(const struct dpif *dpif,
         return error;
     }
 
-    ovs_rwlock_rdlock(&dp->cls.rwlock);
+    fat_rwlock_rdlock(&dp->cls.rwlock);
     netdev_flow = dp_netdev_find_flow(dp, &key);
-    ovs_rwlock_unlock(&dp->cls.rwlock);
+    fat_rwlock_unlock(&dp->cls.rwlock);
 
     if (netdev_flow) {
         struct dp_netdev_actions *actions = NULL;
@@ -1152,13 +1186,13 @@ dp_netdev_flow_add(struct dp_netdev *dp, const struct flow *flow,
 
     match_init(&match, flow, wc);
     cls_rule_init(CONST_CAST(struct cls_rule *, &netdev_flow->cr),
                   &match, NETDEV_RULE_PRIORITY);
-    ovs_rwlock_wrlock(&dp->cls.rwlock);
+    fat_rwlock_wrlock(&dp->cls.rwlock);
     classifier_insert(&dp->cls,
                       CONST_CAST(struct cls_rule *, &netdev_flow->cr));
     hmap_insert(&dp->flow_table,
                 CONST_CAST(struct hmap_node *, &netdev_flow->node),
                 flow_hash(flow, 0));
-    ovs_rwlock_unlock(&dp->cls.rwlock);
+    fat_rwlock_unlock(&dp->cls.rwlock);
 
     ovs_mutex_unlock(&netdev_flow->mutex);
@@ -1259,7 +1293,7 @@ dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
     }
 
     ovs_mutex_lock(&dp->flow_mutex);
-    ovs_rwlock_wrlock(&dp->cls.rwlock);
+    fat_rwlock_wrlock(&dp->cls.rwlock);
     netdev_flow = dp_netdev_find_flow(dp, &key);
     if (netdev_flow) {
         if (del->stats) {
@@ -1271,7 +1305,7 @@ dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
     } else {
         error = ENOENT;
     }
-    ovs_rwlock_unlock(&dp->cls.rwlock);
+    fat_rwlock_unlock(&dp->cls.rwlock);
     ovs_mutex_unlock(&dp->flow_mutex);
 
     return error;
@@ -1310,13 +1344,13 @@ dpif_netdev_flow_dump_next(const struct dpif *dpif, void *state_,
     struct dp_netdev_flow *netdev_flow;
     struct hmap_node *node;
 
-    ovs_rwlock_rdlock(&dp->cls.rwlock);
+    fat_rwlock_rdlock(&dp->cls.rwlock);
     node = hmap_at_position(&dp->flow_table, &state->bucket, &state->offset);
     if (node) {
         netdev_flow = CONTAINER_OF(node, struct dp_netdev_flow, node);
         dp_netdev_flow_ref(netdev_flow);
     }
-    ovs_rwlock_unlock(&dp->cls.rwlock);
+    fat_rwlock_unlock(&dp->cls.rwlock);
     if (!node) {
         return EOF;
     }
@@ -1339,7 +1373,8 @@ dpif_netdev_flow_dump_next(const struct dpif *dpif, void *state_,
         ofpbuf_use_stack(&buf, &state->maskbuf, sizeof state->maskbuf);
         minimask_expand(&netdev_flow->cr.match.mask, &wc);
         odp_flow_key_from_mask(&buf, &wc.masks, &netdev_flow->flow,
-                               odp_to_u32(wc.masks.in_port.odp_port));
+                               odp_to_u32(wc.masks.in_port.odp_port),
+                               SIZE_MAX);
 
         *mask = buf.data;
         *mask_len = buf.size;
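
The next hunk adds dp_forwarder_main(), the per-thread receive loop that
replaces dpif_netdev_run()/dpif_netdev_wait() (deleted below).  Its shape is
the usual OVS poll-loop idiom: run a bounded batch of non-blocking work, then
either schedule an immediate re-run or register wakeup events and sleep.  A
skeleton of that shape — illustration only, not from the patch; try_work()
and work_wait() are hypothetical stand-ins — assuming lib/poll-loop.h:

    #include <stdbool.h>
    #include "poll-loop.h"

    bool try_work(void);            /* One non-blocking unit; true if useful. */
    void work_wait(void);           /* Register events that unblock work. */

    void
    worker_iteration(void)
    {
        bool did_work = false;
        int i;

        for (i = 0; i < 50; i++) {  /* Bounded batch, as in the patch. */
            if (!try_work()) {
                break;
            }
            did_work = true;
        }

        if (did_work) {
            poll_immediate_wake();  /* Re-run at once; more may be pending. */
        } else {
            work_wait();            /* e.g. netdev_rx_wait(), seq_wait(). */
        }
        poll_block();               /* Sleep until a registered event fires. */
    }
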
@@ -1521,6 +1556,126 @@ dp_netdev_actions_unref(struct dp_netdev_actions *actions)
     }
 }
 
+static void *
+dp_forwarder_main(void *f_)
+{
+    struct dp_forwarder *f = f_;
+    struct dp_netdev *dp = f->dp;
+    struct ofpbuf packet;
+
+    f->name = xasprintf("forwarder_%u", ovsthread_id_self());
+    set_subprogram_name("%s", f->name);
+
+    ofpbuf_init(&packet, 0);
+    while (!latch_is_set(&dp->exit_latch)) {
+        bool received_anything;
+        int i;
+
+        ovs_rwlock_rdlock(&dp->port_rwlock);
+        for (i = 0; i < 50; i++) {
+            struct dp_netdev_port *port;
+
+            received_anything = false;
+            HMAP_FOR_EACH (port, node, &f->dp->ports) {
+                if (port->rx
+                    && port->node.hash >= f->min_hash
+                    && port->node.hash <= f->max_hash) {
+                    int buf_size;
+                    int error;
+                    int mtu;
+
+                    if (netdev_get_mtu(port->netdev, &mtu)) {
+                        mtu = ETH_PAYLOAD_MAX;
+                    }
+                    buf_size = DP_NETDEV_HEADROOM + VLAN_ETH_HEADER_LEN + mtu;
+
+                    ofpbuf_clear(&packet);
+                    ofpbuf_reserve_with_tailroom(&packet, DP_NETDEV_HEADROOM,
+                                                 buf_size);
+
+                    error = netdev_rx_recv(port->rx, &packet);
+                    if (!error) {
+                        struct pkt_metadata md
+                            = PKT_METADATA_INITIALIZER(port->port_no);
+                        dp_netdev_port_input(dp, &packet, &md);
+
+                        received_anything = true;
+                    } else if (error != EAGAIN && error != EOPNOTSUPP) {
+                        static struct vlog_rate_limit rl
+                            = VLOG_RATE_LIMIT_INIT(1, 5);
+
+                        VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
+                                    netdev_get_name(port->netdev),
+                                    ovs_strerror(error));
+                    }
+                }
+            }
+
+            if (!received_anything) {
+                break;
+            }
+        }
+
+        if (received_anything) {
+            poll_immediate_wake();
+        } else {
+            struct dp_netdev_port *port;
+
+            HMAP_FOR_EACH (port, node, &f->dp->ports)
+                if (port->rx
+                    && port->node.hash >= f->min_hash
+                    && port->node.hash <= f->max_hash) {
+                    netdev_rx_wait(port->rx);
+                }
+            seq_wait(dp->port_seq, seq_read(dp->port_seq));
+            latch_wait(&dp->exit_latch);
+        }
+        ovs_rwlock_unlock(&dp->port_rwlock);
+
+        poll_block();
+    }
+    ofpbuf_uninit(&packet);
+
+    free(f->name);
+
+    return NULL;
+}
+
+static void
+dp_netdev_set_threads(struct dp_netdev *dp, int n)
+{
+    int i;
+
+    if (n == dp->n_forwarders) {
+        return;
+    }
+
+    /* Stop existing threads. */
+    latch_set(&dp->exit_latch);
+    for (i = 0; i < dp->n_forwarders; i++) {
+        struct dp_forwarder *f = &dp->forwarders[i];
+
+        xpthread_join(f->thread, NULL);
+    }
+    latch_poll(&dp->exit_latch);
+    free(dp->forwarders);
+
+    /* Start new threads. */
+    dp->forwarders = xmalloc(n * sizeof *dp->forwarders);
+    dp->n_forwarders = n;
+    for (i = 0; i < n; i++) {
+        struct dp_forwarder *f = &dp->forwarders[i];
+
+        f->dp = dp;
+        f->min_hash = UINT32_MAX / n * i;
+        f->max_hash = UINT32_MAX / n * (i + 1) - 1;
+        if (i == n - 1) {
+            f->max_hash = UINT32_MAX;
+        }
+        xpthread_create(&f->thread, NULL, dp_forwarder_main, f);
+    }
+}
+
 static void
 dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow,
                     const struct ofpbuf *packet)
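
dp_netdev_set_threads() above partitions the port hmap among the forwarders
by hash range: thread i owns node hashes in [UINT32_MAX / n * i,
UINT32_MAX / n * (i + 1) - 1], and the last thread's upper bound is forced to
UINT32_MAX so that integer-division rounding cannot leave a gap at the top of
the range.  A self-contained check of that arithmetic (illustration only, not
from the patch):

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        const int n = 3;            /* Number of forwarder threads. */
        uint32_t prev_max = 0;
        int i;

        for (i = 0; i < n; i++) {
            uint32_t min_hash = UINT32_MAX / n * i;
            uint32_t max_hash = (i == n - 1
                                 ? UINT32_MAX
                                 : UINT32_MAX / n * (i + 1) - 1);

            /* Ranges must tile [0, UINT32_MAX] with no gap or overlap. */
            assert(i == 0 ? min_hash == 0 : min_hash == prev_max + 1);
            prev_max = max_hash;

            printf("forwarder %d: [0x%08"PRIx32", 0x%08"PRIx32"]\n",
                   i, min_hash, max_hash);
        }
        assert(prev_max == UINT32_MAX);
        return 0;
    }
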
@@ -1564,74 +1719,6 @@ dp_netdev_port_input(struct dp_netdev *dp, struct ofpbuf *packet,
     }
 }
 
-static void
-dpif_netdev_run(struct dpif *dpif)
-{
-    struct dp_netdev_port *port;
-    struct dp_netdev *dp;
-    struct ofpbuf packet;
-
-    dp = get_dp_netdev(dpif);
-    ofpbuf_init(&packet, 0);
-
-    ovs_rwlock_rdlock(&dp->port_rwlock);
-    HMAP_FOR_EACH (port, node, &dp->ports) {
-        int buf_size;
-        int error;
-        int mtu;
-
-        error = netdev_get_mtu(port->netdev, &mtu);
-        if (error) {
-            mtu = ETH_PAYLOAD_MAX;
-        }
-        buf_size = DP_NETDEV_HEADROOM + VLAN_ETH_HEADER_LEN + mtu;
-
-        ofpbuf_clear(&packet);
-        ofpbuf_reserve_with_tailroom(&packet, DP_NETDEV_HEADROOM, buf_size);
-
-        error = port->rx ? netdev_rx_recv(port->rx, &packet) : EOPNOTSUPP;
-        if (!error) {
-            struct pkt_metadata md = PKT_METADATA_INITIALIZER(port->port_no);
-            dp_netdev_port_input(dp, &packet, &md);
-        } else if (error != EAGAIN && error != EOPNOTSUPP) {
-            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
-
-            VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
-                        netdev_get_name(port->netdev), ovs_strerror(error));
-        }
-    }
-    ovs_rwlock_unlock(&dp->port_rwlock);
-
-    ofpbuf_uninit(&packet);
-}
-
-static void
-dpif_netdev_wait(struct dpif *dpif)
-{
-    struct dp_netdev_port *port;
-
-    /* There is a race here, if thread A calls dpif_netdev_wait(dpif) and
-     * thread B calls dpif_port_add(dpif) or dpif_port_remove(dpif) before
-     * A makes it to poll_block().
-     *
-     * But I think it doesn't matter:
-     *
-     *     - In the dpif_port_add() case, A will not wake up when a packet
-     *       arrives on the new port, but this would also happen if the
-     *       ordering were reversed.
-     *
-     *     - In the dpif_port_remove() case, A might wake up spuriously, but
-     *       that is harmless. */
-
-    ovs_mutex_lock(&dp_netdev_mutex);
-    HMAP_FOR_EACH (port, node, &get_dp_netdev(dpif)->ports) {
-        if (port->rx) {
-            netdev_rx_wait(port->rx);
-        }
-    }
-    ovs_mutex_unlock(&dp_netdev_mutex);
-}
-
 static int
 dp_netdev_output_userspace(struct dp_netdev *dp, struct ofpbuf *packet,
                            int queue_no, const struct flow *flow,
@@ -1747,40 +1834,48 @@ dp_netdev_execute_actions(struct dp_netdev *dp, const struct flow *key,
     odp_execute_actions(&aux, packet, md, actions, actions_len, dp_execute_cb);
 }
 
+#define DPIF_NETDEV_CLASS_FUNCTIONS                     \
+    dpif_netdev_enumerate,                              \
+    dpif_netdev_port_open_type,                         \
+    dpif_netdev_open,                                   \
+    dpif_netdev_close,                                  \
+    dpif_netdev_destroy,                                \
+    NULL,                                               \
+    NULL,                                               \
+    dpif_netdev_get_stats,                              \
+    dpif_netdev_port_add,                               \
+    dpif_netdev_port_del,                               \
+    dpif_netdev_port_query_by_number,                   \
+    dpif_netdev_port_query_by_name,                     \
+    NULL,                       /* port_get_pid */      \
+    dpif_netdev_port_dump_start,                        \
+    dpif_netdev_port_dump_next,                         \
+    dpif_netdev_port_dump_done,                         \
+    dpif_netdev_port_poll,                              \
+    dpif_netdev_port_poll_wait,                         \
+    dpif_netdev_flow_get,                               \
+    dpif_netdev_flow_put,                               \
+    dpif_netdev_flow_del,                               \
+    dpif_netdev_flow_flush,                             \
+    dpif_netdev_flow_dump_start,                        \
+    dpif_netdev_flow_dump_next,                         \
+    dpif_netdev_flow_dump_done,                         \
+    dpif_netdev_execute,                                \
+    NULL,                       /* operate */           \
+    dpif_netdev_recv_set,                               \
+    dpif_netdev_queue_to_priority,                      \
+    dpif_netdev_recv,                                   \
+    dpif_netdev_recv_wait,                              \
+    dpif_netdev_recv_purge,                             \
+
 const struct dpif_class dpif_netdev_class = {
     "netdev",
-    dpif_netdev_enumerate,
-    dpif_netdev_port_open_type,
-    dpif_netdev_open,
-    dpif_netdev_close,
-    dpif_netdev_destroy,
-    dpif_netdev_run,
-    dpif_netdev_wait,
-    dpif_netdev_get_stats,
-    dpif_netdev_port_add,
-    dpif_netdev_port_del,
-    dpif_netdev_port_query_by_number,
-    dpif_netdev_port_query_by_name,
-    NULL,                       /* port_get_pid */
-    dpif_netdev_port_dump_start,
-    dpif_netdev_port_dump_next,
-    dpif_netdev_port_dump_done,
-    dpif_netdev_port_poll,
-    dpif_netdev_port_poll_wait,
-    dpif_netdev_flow_get,
-    dpif_netdev_flow_put,
-    dpif_netdev_flow_del,
-    dpif_netdev_flow_flush,
-    dpif_netdev_flow_dump_start,
-    dpif_netdev_flow_dump_next,
-    dpif_netdev_flow_dump_done,
-    dpif_netdev_execute,
-    NULL,                       /* operate */
-    dpif_netdev_recv_set,
-    dpif_netdev_queue_to_priority,
-    dpif_netdev_recv,
-    dpif_netdev_recv_wait,
-    dpif_netdev_recv_purge,
+    DPIF_NETDEV_CLASS_FUNCTIONS
+};
+
+const struct dpif_class dpif_planetlab_class = {
+    "planetlab",
+    DPIF_NETDEV_CLASS_FUNCTIONS
 };
 
 static void
@@ -1861,3 +1956,4 @@ dpif_dummy_register(bool override)
                          "DP PORT NEW-NUMBER",
                          3, 3, dpif_dummy_change_port_number, NULL);
 }
+
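
The DPIF_NETDEV_CLASS_FUNCTIONS macro above lets dpif_netdev_class and the new
dpif_planetlab_class share a single positional initializer list, so the two
function tables cannot drift apart as entries change.  A toy version of the
technique (illustration only, not from the patch); designated initializers
(.open = toy_open, ...) would be the more robust alternative, but the
positional form matches the existing dpif_class style:

    #include <stdio.h>

    struct toy_class {
        const char *name;
        int (*open)(void);
        void (*close)(void);
    };

    static int toy_open(void) { return 0; }
    static void toy_close(void) {}

    /* Shared tail of the initializer list; each class adds only its name.
     * The trailing comma the macro leaves behind is valid in C initializers. */
    #define TOY_CLASS_FUNCTIONS \
        toy_open,               \
        toy_close,

    static const struct toy_class netdev_class = {
        "netdev",
        TOY_CLASS_FUNCTIONS
    };

    static const struct toy_class planetlab_class = {
        "planetlab",
        TOY_CLASS_FUNCTIONS
    };

    int
    main(void)
    {
        printf("%s, %s\n", netdev_class.name, planetlab_class.name);
        return 0;
    }
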