X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=lib%2Fdpif-netdev.c;h=3d0c09fdec9d9ce51faea9489828a28b7591c0b2;hb=63be20bee256f305801c0674b29e5773355d2379;hp=443bbcb1334011e2137bc99516877660005f079a;hpb=679ba04cab922f230d6694e4c9fcf2e158b6a9e9;p=sliver-openvswitch.git

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 443bbcb13..3d0c09fde 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -49,6 +49,7 @@
 #include "odp-util.h"
 #include "ofp-print.h"
 #include "ofpbuf.h"
+#include "ovs-rcu.h"
 #include "packets.h"
 #include "poll-loop.h"
 #include "random.h"
@@ -73,7 +74,6 @@ enum { MAX_FLOWS = 65536 };     /* Maximum number of flows in flow table. */
 enum { DP_NETDEV_HEADROOM = 2 + VLAN_HEADER_LEN };
 
 /* Queues. */
-enum { N_QUEUES = 2 };          /* Number of queues for dpif_recv(). */
 enum { MAX_QUEUE_LEN = 128 };   /* Maximum number of packets per queue. */
 enum { QUEUE_MASK = MAX_QUEUE_LEN - 1 };
 BUILD_ASSERT_DECL(IS_POW2(MAX_QUEUE_LEN));
@@ -90,14 +90,17 @@ struct dp_netdev_upcall {
     struct ofpbuf buf;          /* ofpbuf instance for upcall.packet. */
 };
 
-/* A queue passing packets from a struct dp_netdev to its clients.
+/* A queue passing packets from a struct dp_netdev to its clients (handlers).
  *
  *
  * Thread-safety
  * =============
  *
- * Any access at all requires the owning 'dp_netdev''s queue_mutex. */
+ * Any access at all requires the owning 'dp_netdev''s queue_rwlock and
+ * its own mutex. */
 struct dp_netdev_queue {
+    struct ovs_mutex mutex;
+    struct seq *seq;      /* Incremented whenever a packet is queued. */
     struct dp_netdev_upcall upcalls[MAX_QUEUE_LEN] OVS_GUARDED;
     unsigned int head OVS_GUARDED;
     unsigned int tail OVS_GUARDED;
@@ -118,7 +121,7 @@ struct dp_netdev_queue {
  *    port_rwlock
  *    flow_mutex
  *    cls.rwlock
- *    queue_mutex
+ *    queue_rwlock
  */
 struct dp_netdev {
     const struct dpif_class *const class;
@@ -140,10 +143,12 @@ struct dp_netdev {
 
     /* Queues.
      *
-     * Everything in 'queues' is protected by 'queue_mutex'. */
-    struct ovs_mutex queue_mutex;
-    struct dp_netdev_queue queues[N_QUEUES];
-    struct seq *queue_seq;      /* Incremented whenever a packet is queued. */
+     * 'queue_rwlock' protects the modification of 'handler_queues' and
+     * 'n_handlers'.  The queue elements are protected by its
+     * 'handler_queues''s mutex. */
+    struct fat_rwlock queue_rwlock;
+    struct dp_netdev_queue *handler_queues;
+    uint32_t n_handlers;
 
     /* Statistics.
      *
@@ -245,12 +250,6 @@ struct dp_netdev_flow {
     const struct hmap_node node; /* In owning dp_netdev's 'flow_table'. */
     const struct flow flow;      /* The flow that created this entry. */
 
-    /* Number of references.
-     * The classifier owns one reference.
-     * Any thread trying to keep a rule from being freed should hold its own
-     * reference. */
-    struct ovs_refcount ref_cnt;
-
     /* Protects members marked OVS_GUARDED.
      *
      * Acquire after datapath's flow_mutex. */
@@ -266,12 +265,10 @@ struct dp_netdev_flow {
      * Reading 'actions' requires 'mutex'.
      * Writing 'actions' requires 'mutex' and (to allow for transactions) the
      * datapath's flow_mutex. */
-    struct dp_netdev_actions *actions OVS_GUARDED;
+    OVSRCU_TYPE(struct dp_netdev_actions *) actions;
 };
 
-static struct dp_netdev_flow *dp_netdev_flow_ref(
-    const struct dp_netdev_flow *);
-static void dp_netdev_flow_unref(struct dp_netdev_flow *);
+static void dp_netdev_flow_free(struct dp_netdev_flow *);
 
 /* Contained by struct dp_netdev_flow's 'stats' member.  */
 struct dp_netdev_flow_stats {
@@ -294,8 +291,6 @@ struct dp_netdev_flow_stats {
  * 'flow' is the dp_netdev_flow for which 'flow->actions == actions') or that
  * owns a reference to 'actions->ref_cnt' (or both). */
 struct dp_netdev_actions {
-    struct ovs_refcount ref_cnt;
-
     /* These members are immutable: they do not change during the struct's
      * lifetime.  */
     struct nlattr *actions;     /* Sequence of OVS_ACTION_ATTR_* attributes. */
@@ -304,9 +299,9 @@ struct dp_netdev_actions {
 
 struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *,
                                                    size_t);
-struct dp_netdev_actions *dp_netdev_actions_ref(
-    const struct dp_netdev_actions *);
-void dp_netdev_actions_unref(struct dp_netdev_actions *);
+struct dp_netdev_actions *dp_netdev_flow_get_actions(
+    const struct dp_netdev_flow *);
+static void dp_netdev_actions_free(struct dp_netdev_actions *);
 
 /* A thread that receives packets from some ports, looks them up in the flow
  * table, and executes the actions it finds. */
@@ -338,12 +333,15 @@ static int do_add_port(struct dp_netdev *dp, const char *devname,
     OVS_REQ_WRLOCK(dp->port_rwlock);
 static int do_del_port(struct dp_netdev *dp, odp_port_t port_no)
     OVS_REQ_WRLOCK(dp->port_rwlock);
+static void dp_netdev_destroy_all_queues(struct dp_netdev *dp)
+    OVS_REQ_WRLOCK(dp->queue_rwlock);
 static int dpif_netdev_open(const struct dpif_class *, const char *name,
                             bool create, struct dpif **);
 static int dp_netdev_output_userspace(struct dp_netdev *dp, struct ofpbuf *,
-                                      int queue_no, const struct flow *,
-                                      const struct nlattr *userdata)
-    OVS_EXCLUDED(dp->queue_mutex);
+                                      int queue_no, int type,
+                                      const struct flow *,
+                                      const struct nlattr *userdata)
+    OVS_EXCLUDED(dp->queue_rwlock);
 static void dp_netdev_execute_actions(struct dp_netdev *dp,
                                       const struct flow *, struct ofpbuf *,
                                       struct pkt_metadata *,
@@ -461,7 +459,6 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
 {
     struct dp_netdev *dp;
     int error;
-    int i;
 
     dp = xzalloc(sizeof *dp);
     shash_add(&dp_netdevs, name, dp);
@@ -475,13 +472,7 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
     classifier_init(&dp->cls, NULL);
     hmap_init(&dp->flow_table);
 
-    ovs_mutex_init(&dp->queue_mutex);
-    ovs_mutex_lock(&dp->queue_mutex);
-    for (i = 0; i < N_QUEUES; i++) {
-        dp->queues[i].head = dp->queues[i].tail = 0;
-    }
-    ovs_mutex_unlock(&dp->queue_mutex);
-    dp->queue_seq = seq_create();
+    fat_rwlock_init(&dp->queue_rwlock);
 
     ovsthread_stats_init(&dp->stats);
 
@@ -529,20 +520,21 @@ dpif_netdev_open(const struct dpif_class *class, const char *name,
 
 static void
 dp_netdev_purge_queues(struct dp_netdev *dp)
+    OVS_REQ_WRLOCK(dp->queue_rwlock)
 {
     int i;
 
-    ovs_mutex_lock(&dp->queue_mutex);
-    for (i = 0; i < N_QUEUES; i++) {
-        struct dp_netdev_queue *q = &dp->queues[i];
+    for (i = 0; i < dp->n_handlers; i++) {
+        struct dp_netdev_queue *q = &dp->handler_queues[i];
 
+        ovs_mutex_lock(&q->mutex);
         while (q->tail != q->head) {
             struct dp_netdev_upcall *u = &q->upcalls[q->tail++ & QUEUE_MASK];
             ofpbuf_uninit(&u->upcall.packet);
             ofpbuf_uninit(&u->buf);
         }
+        ovs_mutex_unlock(&q->mutex);
     }
-    ovs_mutex_unlock(&dp->queue_mutex);
 }
 
 /* Requires dp_netdev_mutex so that we can't get a new reference to 'dp'
@@ -573,9 +565,11 @@ dp_netdev_free(struct dp_netdev *dp)
     }
     ovsthread_stats_destroy(&dp->stats);
 
-    dp_netdev_purge_queues(dp);
-    seq_destroy(dp->queue_seq);
-    ovs_mutex_destroy(&dp->queue_mutex);
+    fat_rwlock_wrlock(&dp->queue_rwlock);
+    dp_netdev_destroy_all_queues(dp);
+    fat_rwlock_unlock(&dp->queue_rwlock);
+
+    fat_rwlock_destroy(&dp->queue_rwlock);
 
     classifier_destroy(&dp->cls);
     hmap_destroy(&dp->flow_table);
@@ -870,6 +864,24 @@ dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
     return error;
 }
 
+static void
+dp_netdev_flow_free(struct dp_netdev_flow *flow)
+{
+    struct dp_netdev_flow_stats *bucket;
+    size_t i;
+
+    OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &flow->stats) {
+        ovs_mutex_destroy(&bucket->mutex);
+        free_cacheline(bucket);
+    }
+    ovsthread_stats_destroy(&flow->stats);
+
+    cls_rule_destroy(CONST_CAST(struct cls_rule *, &flow->cr));
+    dp_netdev_actions_free(dp_netdev_flow_get_actions(flow));
+    ovs_mutex_destroy(&flow->mutex);
+    free(flow);
+}
+
 static void
 dp_netdev_remove_flow(struct dp_netdev *dp, struct dp_netdev_flow *flow)
     OVS_REQ_WRLOCK(dp->cls.rwlock)
@@ -880,39 +892,7 @@ dp_netdev_remove_flow(struct dp_netdev *dp, struct dp_netdev_flow *flow)
 
     classifier_remove(&dp->cls, cr);
     hmap_remove(&dp->flow_table, node);
-    dp_netdev_flow_unref(flow);
-}
-
-static struct dp_netdev_flow *
-dp_netdev_flow_ref(const struct dp_netdev_flow *flow_)
-{
-    struct dp_netdev_flow *flow = CONST_CAST(struct dp_netdev_flow *, flow_);
-    if (flow) {
-        ovs_refcount_ref(&flow->ref_cnt);
-    }
-    return flow;
-}
-
-static void
-dp_netdev_flow_unref(struct dp_netdev_flow *flow)
-{
-    if (flow && ovs_refcount_unref(&flow->ref_cnt) == 1) {
-        struct dp_netdev_flow_stats *bucket;
-        size_t i;
-
-        OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &flow->stats) {
-            ovs_mutex_destroy(&bucket->mutex);
-            free_cacheline(bucket);
-        }
-        ovsthread_stats_destroy(&flow->stats);
-
-        cls_rule_destroy(CONST_CAST(struct cls_rule *, &flow->cr));
-        ovs_mutex_lock(&flow->mutex);
-        dp_netdev_actions_unref(flow->actions);
-        ovs_mutex_unlock(&flow->mutex);
-        ovs_mutex_destroy(&flow->mutex);
-        free(flow);
-    }
+    ovsrcu_postpone(dp_netdev_flow_free, flow);
 }
 
 static void
@@ -1031,7 +1011,6 @@ dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct flow *flow)
 
     fat_rwlock_rdlock(&dp->cls.rwlock);
     netdev_flow = dp_netdev_flow_cast(classifier_lookup(&dp->cls, flow, NULL));
-    dp_netdev_flow_ref(netdev_flow);
    fat_rwlock_unlock(&dp->cls.rwlock);
 
     return netdev_flow;
@@ -1046,7 +1025,7 @@ dp_netdev_find_flow(const struct dp_netdev *dp, const struct flow *flow)
     HMAP_FOR_EACH_WITH_HASH (netdev_flow, node, flow_hash(flow, 0),
                              &dp->flow_table) {
         if (flow_equal(&netdev_flow->flow, flow)) {
-            return dp_netdev_flow_ref(netdev_flow);
+            return netdev_flow;
         }
     }
 
@@ -1177,25 +1156,17 @@ dpif_netdev_flow_get(const struct dpif *dpif,
     fat_rwlock_unlock(&dp->cls.rwlock);
 
     if (netdev_flow) {
-        struct dp_netdev_actions *actions = NULL;
-
         if (stats) {
             get_dpif_flow_stats(netdev_flow, stats);
         }
-
-        ovs_mutex_lock(&netdev_flow->mutex);
         if (actionsp) {
-            actions = dp_netdev_actions_ref(netdev_flow->actions);
-        }
-        ovs_mutex_unlock(&netdev_flow->mutex);
-
-        dp_netdev_flow_unref(netdev_flow);
+            struct dp_netdev_actions *actions;
 
-        if (actionsp) {
+            actions = dp_netdev_flow_get_actions(netdev_flow);
             *actionsp = ofpbuf_clone_data(actions->actions, actions->size);
-            dp_netdev_actions_unref(actions);
         }
-    } else {
+    } else {
         error = ENOENT;
     }
 
@@ -1214,14 +1185,13 @@ dp_netdev_flow_add(struct dp_netdev *dp, const struct flow *flow,
 
     netdev_flow = xzalloc(sizeof *netdev_flow);
     *CONST_CAST(struct flow *, &netdev_flow->flow) = *flow;
-    ovs_refcount_init(&netdev_flow->ref_cnt);
 
     ovs_mutex_init(&netdev_flow->mutex);
-    ovs_mutex_lock(&netdev_flow->mutex);
 
     ovsthread_stats_init(&netdev_flow->stats);
 
-    netdev_flow->actions = dp_netdev_actions_create(actions, actions_len);
+    ovsrcu_set(&netdev_flow->actions,
+               dp_netdev_actions_create(actions, actions_len));
 
     match_init(&match, flow, wc);
     cls_rule_init(CONST_CAST(struct cls_rule *, &netdev_flow->cr),
@@ -1234,8 +1204,6 @@ dp_netdev_flow_add(struct dp_netdev *dp, const struct flow *flow,
               flow_hash(flow, 0));
     fat_rwlock_unlock(&dp->cls.rwlock);
 
-    ovs_mutex_unlock(&netdev_flow->mutex);
-
     return 0;
 }
 
@@ -1300,10 +1268,8 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
 
             new_actions = dp_netdev_actions_create(put->actions,
                                                    put->actions_len);
 
-            ovs_mutex_lock(&netdev_flow->mutex);
-            old_actions = netdev_flow->actions;
-            netdev_flow->actions = new_actions;
-            ovs_mutex_unlock(&netdev_flow->mutex);
+            old_actions = dp_netdev_flow_get_actions(netdev_flow);
+            ovsrcu_set(&netdev_flow->actions, new_actions);
 
             if (put->stats) {
                 get_dpif_flow_stats(netdev_flow, put->stats);
@@ -1312,14 +1278,13 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
                 clear_stats(netdev_flow);
             }
 
-            dp_netdev_actions_unref(old_actions);
+            ovsrcu_postpone(dp_netdev_actions_free, old_actions);
         } else if (put->flags & DPIF_FP_CREATE) {
             error = EEXIST;
         } else {
             /* Overlapping flow. */
             error = EINVAL;
         }
-        dp_netdev_flow_unref(netdev_flow);
     }
     ovs_mutex_unlock(&dp->flow_mutex);
 
@@ -1347,7 +1312,6 @@ dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
             get_dpif_flow_stats(netdev_flow, del->stats);
         }
         dp_netdev_remove_flow(dp, netdev_flow);
-        dp_netdev_flow_unref(netdev_flow);
     } else {
         error = ENOENT;
     }
 
@@ -1385,7 +1349,6 @@ dpif_netdev_flow_dump_state_uninit(void *state_)
 {
     struct dp_netdev_flow_state *state = state_;
 
-    dp_netdev_actions_unref(state->actions);
     free(state);
 }
 
@@ -1402,6 +1365,7 @@ dpif_netdev_flow_dump_start(const struct dpif *dpif OVS_UNUSED, void **iterp)
     return 0;
 }
 
+/* XXX the caller must use 'actions' without quiescing */
 static int
 dpif_netdev_flow_dump_next(const struct dpif *dpif, void *iter_, void *state_,
                            const struct nlattr **key, size_t *key_len,
@@ -1424,7 +1388,6 @@ dpif_netdev_flow_dump_next(const struct dpif *dpif, void *iter_, void *state_,
     node = hmap_at_position(&dp->flow_table, &iter->bucket, &iter->offset);
     if (node) {
         netdev_flow = CONTAINER_OF(node, struct dp_netdev_flow, node);
-        dp_netdev_flow_ref(netdev_flow);
     }
     fat_rwlock_unlock(&dp->cls.rwlock);
     if (!node) {
@@ -1462,16 +1425,13 @@ dpif_netdev_flow_dump_next(const struct dpif *dpif, void *iter_, void *state_,
     }
 
     if (actions || stats) {
-        dp_netdev_actions_unref(state->actions);
         state->actions = NULL;
 
-        ovs_mutex_lock(&netdev_flow->mutex);
         if (actions) {
-            state->actions = dp_netdev_actions_ref(netdev_flow->actions);
+            state->actions = dp_netdev_flow_get_actions(netdev_flow);
             *actions = state->actions->actions;
             *actions_len = state->actions->size;
         }
-        ovs_mutex_unlock(&netdev_flow->mutex);
 
         if (stats) {
             get_dpif_flow_stats(netdev_flow, &state->stats);
@@ -1479,8 +1439,6 @@ dpif_netdev_flow_dump_next(const struct dpif *dpif, void *iter_, void *state_,
         }
     }
 
-    dp_netdev_flow_unref(netdev_flow);
-
     return 0;
 }
 
@@ -1517,9 +1475,77 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
     return 0;
 }
 
+static void
+dp_netdev_destroy_all_queues(struct dp_netdev *dp)
+    OVS_REQ_WRLOCK(dp->queue_rwlock)
+{
+    size_t i;
+
+    dp_netdev_purge_queues(dp);
+
+    for (i = 0; i < dp->n_handlers; i++) {
+        struct dp_netdev_queue *q = &dp->handler_queues[i];
+
+        ovs_mutex_destroy(&q->mutex);
+        seq_destroy(q->seq);
+    }
+    free(dp->handler_queues);
+    dp->handler_queues = NULL;
+    dp->n_handlers = 0;
+}
+
+static void
+dp_netdev_refresh_queues(struct dp_netdev *dp, uint32_t n_handlers)
+    OVS_REQ_WRLOCK(dp->queue_rwlock)
+{
+    if (dp->n_handlers != n_handlers) {
+        size_t i;
+
+        dp_netdev_destroy_all_queues(dp);
+
+        dp->n_handlers = n_handlers;
+        dp->handler_queues = xzalloc(n_handlers * sizeof *dp->handler_queues);
+
+        for (i = 0; i < n_handlers; i++) {
+            struct dp_netdev_queue *q = &dp->handler_queues[i];
+
+            ovs_mutex_init(&q->mutex);
+            q->seq = seq_create();
+        }
+    }
+}
+
+static int
+dpif_netdev_recv_set(struct dpif *dpif, bool enable)
+{
+    struct dp_netdev *dp = get_dp_netdev(dpif);
+
+    if ((dp->handler_queues != NULL) == enable) {
+        return 0;
+    }
+
+    fat_rwlock_wrlock(&dp->queue_rwlock);
+    if (!enable) {
+        dp_netdev_destroy_all_queues(dp);
+    } else {
+        dp_netdev_refresh_queues(dp, 1);
+    }
+    fat_rwlock_unlock(&dp->queue_rwlock);
+
+    return 0;
+}
+
 static int
-dpif_netdev_recv_set(struct dpif *dpif OVS_UNUSED, bool enable OVS_UNUSED)
+dpif_netdev_handlers_set(struct dpif *dpif, uint32_t n_handlers)
 {
+    struct dp_netdev *dp = get_dp_netdev(dpif);
+
+    fat_rwlock_wrlock(&dp->queue_rwlock);
+    if (dp->handler_queues) {
+        dp_netdev_refresh_queues(dp, n_handlers);
+    }
+    fat_rwlock_unlock(&dp->queue_rwlock);
+
     return 0;
 }
 
@@ -1531,62 +1557,86 @@ dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
     return 0;
 }
 
-static struct dp_netdev_queue *
-find_nonempty_queue(struct dp_netdev *dp)
-    OVS_REQUIRES(dp->queue_mutex)
+static bool
+dp_netdev_recv_check(const struct dp_netdev *dp, const uint32_t handler_id)
+    OVS_REQ_RDLOCK(dp->queue_rwlock)
 {
-    int i;
+    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
 
-    for (i = 0; i < N_QUEUES; i++) {
-        struct dp_netdev_queue *q = &dp->queues[i];
-        if (q->head != q->tail) {
-            return q;
-        }
+    if (!dp->handler_queues) {
+        VLOG_WARN_RL(&rl, "receiving upcall disabled");
+        return false;
     }
-    return NULL;
+
+    if (handler_id >= dp->n_handlers) {
+        VLOG_WARN_RL(&rl, "handler index out of bound");
+        return false;
+    }
+
+    return true;
 }
 
 static int
-dpif_netdev_recv(struct dpif *dpif, struct dpif_upcall *upcall,
-                 struct ofpbuf *buf)
+dpif_netdev_recv(struct dpif *dpif, uint32_t handler_id,
+                 struct dpif_upcall *upcall, struct ofpbuf *buf)
 {
     struct dp_netdev *dp = get_dp_netdev(dpif);
     struct dp_netdev_queue *q;
-    int error;
+    int error = 0;
+
+    fat_rwlock_rdlock(&dp->queue_rwlock);
+
+    if (!dp_netdev_recv_check(dp, handler_id)) {
+        error = EAGAIN;
+        goto out;
+    }
 
-    ovs_mutex_lock(&dp->queue_mutex);
-    q = find_nonempty_queue(dp);
-    if (q) {
+    q = &dp->handler_queues[handler_id];
+    ovs_mutex_lock(&q->mutex);
+    if (q->head != q->tail) {
         struct dp_netdev_upcall *u = &q->upcalls[q->tail++ & QUEUE_MASK];
 
         *upcall = u->upcall;
 
         ofpbuf_uninit(buf);
         *buf = u->buf;
-
-        error = 0;
     } else {
         error = EAGAIN;
    }
-    ovs_mutex_unlock(&dp->queue_mutex);
+    ovs_mutex_unlock(&q->mutex);
+
+out:
+    fat_rwlock_unlock(&dp->queue_rwlock);
 
     return error;
 }
 
 static void
-dpif_netdev_recv_wait(struct dpif *dpif)
+dpif_netdev_recv_wait(struct dpif *dpif, uint32_t handler_id)
 {
     struct dp_netdev *dp = get_dp_netdev(dpif);
+    struct dp_netdev_queue *q;
     uint64_t seq;
 
-    ovs_mutex_lock(&dp->queue_mutex);
-    seq = seq_read(dp->queue_seq);
-    if (find_nonempty_queue(dp)) {
+    fat_rwlock_rdlock(&dp->queue_rwlock);
+
+    if (!dp_netdev_recv_check(dp, handler_id)) {
+        goto out;
+    }
+
+    q = &dp->handler_queues[handler_id];
+    ovs_mutex_lock(&q->mutex);
+    seq = seq_read(q->seq);
+    if (q->head != q->tail) {
         poll_immediate_wake();
     } else {
-        seq_wait(dp->queue_seq, seq);
+        seq_wait(q->seq, seq);
    }
-    ovs_mutex_unlock(&dp->queue_mutex);
+
+    ovs_mutex_unlock(&q->mutex);
+
+out:
+    fat_rwlock_unlock(&dp->queue_rwlock);
 }
 
 static void
@@ -1594,7 +1644,9 @@ dpif_netdev_recv_purge(struct dpif *dpif)
 {
     struct dpif_netdev *dpif_netdev = dpif_netdev_cast(dpif);
 
+    fat_rwlock_wrlock(&dpif_netdev->dp->queue_rwlock);
     dp_netdev_purge_queues(dpif_netdev->dp);
+    fat_rwlock_unlock(&dpif_netdev->dp->queue_rwlock);
 }
 
 /* Creates and returns a new 'struct dp_netdev_actions', with a reference count
@@ -1606,35 +1658,23 @@ dp_netdev_actions_create(const struct nlattr *actions, size_t size)
 {
     struct dp_netdev_actions *netdev_actions;
 
     netdev_actions = xmalloc(sizeof *netdev_actions);
-    ovs_refcount_init(&netdev_actions->ref_cnt);
     netdev_actions->actions = xmemdup(actions, size);
     netdev_actions->size = size;
 
     return netdev_actions;
 }
 
-/* Increments 'actions''s refcount. */
 struct dp_netdev_actions *
-dp_netdev_actions_ref(const struct dp_netdev_actions *actions_)
+dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow)
 {
-    struct dp_netdev_actions *actions;
-
-    actions = CONST_CAST(struct dp_netdev_actions *, actions_);
-    if (actions) {
-        ovs_refcount_ref(&actions->ref_cnt);
-    }
-    return actions;
+    return ovsrcu_get(struct dp_netdev_actions *, &flow->actions);
 }
 
-/* Decrements 'actions''s refcount and frees 'actions' if the refcount reaches
- * 0. */
-void
-dp_netdev_actions_unref(struct dp_netdev_actions *actions)
+static void
+dp_netdev_actions_free(struct dp_netdev_actions *actions)
 {
-    if (actions && ovs_refcount_unref(&actions->ref_cnt) == 1) {
-        free(actions->actions);
-        free(actions);
-    }
+    free(actions->actions);
+    free(actions);
 }
 
 static void *
@@ -1767,9 +1807,10 @@ dp_netdev_flow_stats_new_cb(void)
 
 static void
 dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow,
-                    const struct ofpbuf *packet)
+                    const struct ofpbuf *packet,
+                    const struct flow *key)
 {
-    uint16_t tcp_flags = packet_get_tcp_flags(packet, &netdev_flow->flow);
+    uint16_t tcp_flags = ntohs(key->tcp_flags);
     long long int now = time_msec();
     struct dp_netdev_flow_stats *bucket;
 
@@ -1819,40 +1860,39 @@ dp_netdev_port_input(struct dp_netdev *dp, struct ofpbuf *packet,
     if (netdev_flow) {
         struct dp_netdev_actions *actions;
 
-        dp_netdev_flow_used(netdev_flow, packet);
-
-        ovs_mutex_lock(&netdev_flow->mutex);
-        actions = dp_netdev_actions_ref(netdev_flow->actions);
-        ovs_mutex_unlock(&netdev_flow->mutex);
+        dp_netdev_flow_used(netdev_flow, packet, &key);
 
+        actions = dp_netdev_flow_get_actions(netdev_flow);
         dp_netdev_execute_actions(dp, &key, packet, md, actions->actions,
                                   actions->size);
-        dp_netdev_actions_unref(actions);
-        dp_netdev_flow_unref(netdev_flow);
         dp_netdev_count_packet(dp, DP_STAT_HIT);
-    } else {
+    } else if (dp->handler_queues) {
         dp_netdev_count_packet(dp, DP_STAT_MISS);
-        dp_netdev_output_userspace(dp, packet, DPIF_UC_MISS, &key, NULL);
+        dp_netdev_output_userspace(dp, packet,
+                                   flow_hash_5tuple(&key, 0) % dp->n_handlers,
+                                   DPIF_UC_MISS, &key, NULL);
     }
 }
 
 static int
 dp_netdev_output_userspace(struct dp_netdev *dp, struct ofpbuf *packet,
-                           int queue_no, const struct flow *flow,
+                           int queue_no, int type, const struct flow *flow,
                            const struct nlattr *userdata)
-    OVS_EXCLUDED(dp->queue_mutex)
+    OVS_EXCLUDED(dp->queue_rwlock)
 {
-    struct dp_netdev_queue *q = &dp->queues[queue_no];
+    struct dp_netdev_queue *q;
     int error;
 
-    ovs_mutex_lock(&dp->queue_mutex);
+    fat_rwlock_rdlock(&dp->queue_rwlock);
+    q = &dp->handler_queues[queue_no];
+    ovs_mutex_lock(&q->mutex);
     if (q->head - q->tail < MAX_QUEUE_LEN) {
         struct dp_netdev_upcall *u = &q->upcalls[q->head++ & QUEUE_MASK];
         struct dpif_upcall *upcall = &u->upcall;
         struct ofpbuf *buf = &u->buf;
         size_t buf_size;
 
-        upcall->type = queue_no;
+        upcall->type = type;
 
         /* Allocate buffer big enough for everything. */
         buf_size = ODPUTIL_FLOW_KEY_BYTES;
@@ -1877,14 +1917,15 @@ dp_netdev_output_userspace(struct dp_netdev *dp, struct ofpbuf *packet,
         upcall->packet = *packet;
         ofpbuf_use(packet, NULL, 0);
 
-        seq_change(dp->queue_seq);
+        seq_change(q->seq);
 
         error = 0;
     } else {
         dp_netdev_count_packet(dp, DP_STAT_LOST);
         error = ENOBUFS;
     }
-    ovs_mutex_unlock(&dp->queue_mutex);
+    ovs_mutex_unlock(&q->mutex);
+    fat_rwlock_unlock(&dp->queue_rwlock);
 
     return error;
 }
@@ -1921,7 +1962,10 @@ dp_execute_cb(void *aux_, struct ofpbuf *packet,
         if (!may_steal) {
             packet = ofpbuf_clone_with_headroom(packet, DP_NETDEV_HEADROOM);
         }
-        dp_netdev_output_userspace(aux->dp, packet, DPIF_UC_ACTION, aux->key,
+        dp_netdev_output_userspace(aux->dp, packet,
+                                   flow_hash_5tuple(aux->key, 0)
+                                       % aux->dp->n_handlers,
+                                   DPIF_UC_ACTION, aux->key,
                                    userdata);
         if (!may_steal) {
             ofpbuf_uninit(packet);
@@ -1984,6 +2028,7 @@ const struct dpif_class dpif_netdev_class = {
     dpif_netdev_execute,
     NULL,                       /* operate */
     dpif_netdev_recv_set,
+    dpif_netdev_handlers_set,
    dpif_netdev_queue_to_priority,
    dpif_netdev_recv,
    dpif_netdev_recv_wait,
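
--

Two standalone sketches of the mechanisms in the patch above follow.  They
are illustrations written for this review, not Open vSwitch code, and every
identifier in them (demo_queue, demo_enqueue, actions_version, and so on)
is invented.

First, the per-handler queues.  The patch dispatches each upcall to one of
'n_handlers' queues by flow hash and keeps the existing ring arithmetic:
'head' and 'tail' run free as unsigned counters, are masked only on array
access ('& QUEUE_MASK'), and 'head - tail' gives the queue length even
across wraparound because unsigned subtraction is modulo 2^32.  A minimal
model of that arithmetic and of the hash dispatch, with the per-queue mutex
and 'seq' omitted:

/* Toy model of the queue arithmetic the patch keeps: a power-of-two ring
 * indexed by free-running 'head'/'tail' counters that are masked only on
 * array access, plus hash-based dispatch to one queue per handler. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { DEMO_QUEUE_LEN = 8 };    /* Must be a power of 2, like MAX_QUEUE_LEN. */
enum { DEMO_QUEUE_MASK = DEMO_QUEUE_LEN - 1 };

struct demo_queue {
    int packets[DEMO_QUEUE_LEN];
    unsigned int head;          /* Incremented on enqueue, never masked. */
    unsigned int tail;          /* Incremented on dequeue, never masked. */
};

/* 'head - tail' is the queue's length even after the counters wrap. */
static bool
demo_enqueue(struct demo_queue *q, int packet)
{
    if (q->head - q->tail < DEMO_QUEUE_LEN) {
        q->packets[q->head++ & DEMO_QUEUE_MASK] = packet;
        return true;
    }
    return false;               /* Full: the patch counts DP_STAT_LOST. */
}

static bool
demo_dequeue(struct demo_queue *q, int *packet)
{
    if (q->tail != q->head) {
        *packet = q->packets[q->tail++ & DEMO_QUEUE_MASK];
        return true;
    }
    return false;               /* Empty: dpif_netdev_recv() returns EAGAIN. */
}

int
main(void)
{
    enum { N_HANDLERS = 4 };
    struct demo_queue queues[N_HANDLERS] = {{{0}}};
    uint32_t hash;
    int packet;

    /* Stand-in for flow_hash_5tuple(&key, 0) % dp->n_handlers: packets of
     * one flow always land on the same handler's queue. */
    for (hash = 0; hash < 10; hash++) {
        if (!demo_enqueue(&queues[hash % N_HANDLERS], (int) hash)) {
            return 1;           /* Cannot happen: 10 packets over 4 queues. */
        }
    }

    while (demo_dequeue(&queues[1], &packet)) {
        printf("handler 1 got packet %d\n", packet);    /* Prints 1, 5, 9. */
    }
    return 0;
}

Second, the RCU conversion.  The refcount removal is sound because readers
now fetch a flow's actions with ovsrcu_get() (a lockless pointer load),
writers publish a replacement with ovsrcu_set(), and the displaced version
goes to ovsrcu_postpone(), which frees it only after every thread has
quiesced.  The sketch below models that ordering crudely with C11 atomics,
with pthread_join() as the bluntest possible stand-in for quiescence
detection:

/* Toy model of the ovsrcu_get()/ovsrcu_set()/ovsrcu_postpone() ordering:
 * readers load an atomic pointer without locks; the writer publishes a new
 * version and frees the old one only once no reader can still hold it. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct actions_version {
    int n_actions;              /* Stand-in for the real action list. */
};

static _Atomic(struct actions_version *) current_actions;

static void *
reader_main(void *arg)
{
    int i;

    (void) arg;
    for (i = 0; i < 1000000; i++) {
        /* Like ovsrcu_get(): a plain atomic load, no flow->mutex. */
        struct actions_version *v = atomic_load(&current_actions);
        if (v->n_actions < 0) {
            abort();            /* Would mean we read freed memory. */
        }
    }
    return NULL;
}

int
main(void)
{
    struct actions_version *v1 = malloc(sizeof *v1);
    struct actions_version *v2 = malloc(sizeof *v2);
    struct actions_version *old;
    pthread_t readers[4];
    int i;

    v1->n_actions = 1;
    v2->n_actions = 2;
    atomic_store(&current_actions, v1);

    for (i = 0; i < 4; i++) {
        pthread_create(&readers[i], NULL, reader_main, NULL);
    }

    /* Like ovsrcu_set(): new readers see v2; stragglers may still use v1. */
    old = atomic_exchange(&current_actions, v2);

    /* Like ovsrcu_postpone(dp_netdev_actions_free, old): defer the free
     * until no reader can still hold 'old'. */
    for (i = 0; i < 4; i++) {
        pthread_join(readers[i], NULL);
    }
    free(old);

    free(atomic_load(&current_actions));
    printf("swapped and reclaimed safely\n");
    return 0;
}

In the real library the grace period is asynchronous: ovsrcu_postpone()
queues the callback and it runs once all threads have quiesced, so datapath
threads never block waiting for readers the way this demo's join does.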