diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c
index 3f9ad58bc..717563a3f 100644
--- a/ofproto/ofproto-dpif-upcall.c
+++ b/ofproto/ofproto-dpif-upcall.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -21,77 +21,115 @@
 #include "connmgr.h"
 #include "coverage.h"
-#include "dynamic-string.h"
 #include "dpif.h"
+#include "dynamic-string.h"
 #include "fail-open.h"
 #include "guarded-list.h"
 #include "latch.h"
-#include "seq.h"
 #include "list.h"
 #include "netlink.h"
 #include "ofpbuf.h"
 #include "ofproto-dpif-ipfix.h"
 #include "ofproto-dpif-sflow.h"
+#include "ofproto-dpif-xlate.h"
+#include "ovs-rcu.h"
 #include "packets.h"
 #include "poll-loop.h"
+#include "seq.h"
+#include "unixctl.h"
 #include "vlog.h"

 #define MAX_QUEUE_LENGTH 512
+#define FLOW_MISS_MAX_BATCH 50
+#define REVALIDATE_MAX_BATCH 50

 VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);

-COVERAGE_DEFINE(drop_queue_overflow);
-COVERAGE_DEFINE(upcall_queue_overflow);
-COVERAGE_DEFINE(fmb_queue_overflow);
-COVERAGE_DEFINE(fmb_queue_revalidated);
+COVERAGE_DEFINE(upcall_duplicate_flow);

-/* A thread that processes each upcall handed to it by the dispatcher thread,
- * forwards the upcall's packet, and then queues it to the main ofproto_dpif
- * to possibly set up a kernel flow as a cache. */
+/* A thread that reads upcalls from dpif, forwards each upcall's packet,
+ * and possibly sets up a kernel flow as a cache. */
 struct handler {
     struct udpif *udpif;               /* Parent udpif. */
     pthread_t thread;                  /* Thread ID. */
+    char *name;                        /* Thread name. */
+    uint32_t handler_id;               /* Handler id. */
+};

-    struct ovs_mutex mutex;            /* Mutex guarding the following. */
-
-    /* Atomic queue of unprocessed upcalls. */
-    struct list upcalls OVS_GUARDED;
-    size_t n_upcalls OVS_GUARDED;
-
-    bool need_signal;                  /* Only changed by the dispatcher. */
+/* A thread that processes datapath flows, updates OpenFlow statistics, and
+ * updates or removes them if necessary. */
+struct revalidator {
+    struct udpif *udpif;               /* Parent udpif. */
+    char *name;                        /* Thread name. */

-    pthread_cond_t wake_cond;          /* Wakes 'thread' while holding
-                                          'mutex'. */
+    pthread_t thread;                  /* Thread ID. */
+    struct hmap *ukeys;                /* Points into udpif->ukeys for this
+                                          revalidator. Used for GC phase. */
 };

 /* An upcall handler for ofproto_dpif.
 *
- * udpif is implemented as a "dispatcher" thread that reads upcalls from the
- * kernel.  It processes each upcall just enough to figure out its next
- * destination.  For a "miss" upcall (MISS_UPCALL), this is one of several
- * "handler" threads (see struct handler).  Other upcalls are queued to the
- * main ofproto_dpif. */
+ * udpif keeps records of two kinds of logically separate units:
+ *
+ * upcall handling
+ * ---------------
+ *
+ *    - An array of 'struct handler's for upcall handling and flow
+ *      installation.
+ *
+ * flow revalidation
+ * -----------------
+ *
+ *    - Revalidation threads which read the datapath flow table and
+ *      maintain its flows.
+ */
 struct udpif {
+    struct list list_node;             /* In all_udpifs list.
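+                                        * ('all_udpifs' is walked by the
+                                        * "upcall/show" and related unixctl
+                                        * handlers at the end of this
+                                        * file.)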
*/ + struct dpif *dpif; /* Datapath handle. */ struct dpif_backer *backer; /* Opaque dpif_backer pointer. */ uint32_t secret; /* Random seed for upcall hash. */ - pthread_t dispatcher; /* Dispatcher thread ID. */ - struct handler *handlers; /* Upcall handlers. */ size_t n_handlers; - /* Queues to pass up to ofproto-dpif. */ - struct guarded_list drop_keys; /* "struct drop key"s. */ - struct guarded_list fmbs; /* "struct flow_miss_batch"es. */ + struct revalidator *revalidators; /* Flow revalidators. */ + size_t n_revalidators; - /* Number of times udpif_revalidate() has been called. */ - atomic_uint reval_seq; + struct latch exit_latch; /* Tells child threads to exit. */ - struct seq *wait_seq; + /* Revalidation. */ + struct seq *reval_seq; /* Incremented to force revalidation. */ + bool need_revalidate; /* As indicated by 'reval_seq'. */ + bool reval_exit; /* Set by leader on 'exit_latch. */ + pthread_barrier_t reval_barrier; /* Barrier used by revalidators. */ + struct dpif_flow_dump dump; /* DPIF flow dump state. */ + long long int dump_duration; /* Duration of the last flow dump. */ + struct seq *dump_seq; /* Increments each dump iteration. */ - struct latch exit_latch; /* Tells child threads to exit. */ + /* There are 'n_revalidators' ukey hmaps. Each revalidator retains a + * reference to one of these for garbage collection. + * + * During the flow dump phase, revalidators insert into these with a random + * distribution. During the garbage collection phase, each revalidator + * takes care of garbage collecting one of these hmaps. */ + struct { + struct ovs_mutex mutex; /* Guards the following. */ + struct hmap hmap OVS_GUARDED; /* Datapath flow keys. */ + } *ukeys; + + /* Datapath flow statistics. */ + unsigned int max_n_flows; + unsigned int avg_n_flows; + + /* Following fields are accessed and modified by different threads. */ + atomic_uint flow_limit; /* Datapath flow hard limit. */ + + /* n_flows_mutex prevents multiple threads updating these concurrently. */ + atomic_uint64_t n_flows; /* Number of flows in the datapath. */ + atomic_llong n_flows_timestamp; /* Last time n_flows was updated. */ + struct ovs_mutex n_flows_mutex; }; enum upcall_type { @@ -103,7 +141,6 @@ enum upcall_type { }; struct upcall { - struct list list_node; /* For queuing upcalls. */ struct flow_miss *flow_miss; /* This upcall's flow_miss. */ /* Raw upcall plus data for keeping track of the memory backing it. */ @@ -112,29 +149,124 @@ struct upcall { uint64_t upcall_stub[512 / 8]; /* Buffer to reduce need for malloc(). */ }; -static void upcall_destroy(struct upcall *); +/* 'udpif_key's are responsible for tracking the little bit of state udpif + * needs to do flow expiration which can't be pulled directly from the + * datapath. They may be created or maintained by any revalidator during + * the dump phase, but are owned by a single revalidator, and are destroyed + * by that revalidator during the garbage-collection phase. + * + * While some elements of a udpif_key are protected by a mutex, the ukey itself + * is not. Therefore it is not safe to destroy a udpif_key except when all + * revalidators are in garbage collection phase, or they aren't running. */ +struct udpif_key { + struct hmap_node hmap_node; /* In parent revalidator 'ukeys' map. */ + + /* These elements are read only once created, and therefore aren't + * protected by a mutex. */ + const struct nlattr *key; /* Datapath flow key. */ + size_t key_len; /* Length of 'key'. */ + + struct ovs_mutex mutex; /* Guards the following. 
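+                                        * (Taken by revalidators when
+                                        * updating stats or revalidating;
+                                        * see revalidate_ukey() and
+                                        * push_dump_ops__() below.)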
*/ + struct dpif_flow_stats stats OVS_GUARDED; /* Last known stats.*/ + long long int created OVS_GUARDED; /* Estimate of creation time. */ + bool mark OVS_GUARDED; /* For mark and sweep garbage + collection. */ + bool flow_exists OVS_GUARDED; /* Ensures flows are only deleted + once. */ + + struct xlate_cache *xcache OVS_GUARDED; /* Cache for xlate entries that + * are affected by this ukey. + * Used for stats and learning.*/ + struct odputil_keybuf key_buf; /* Memory for 'key'. */ +}; + +/* Flow miss batching. + * + * Some dpifs implement operations faster when you hand them off in a batch. + * To allow batching, "struct flow_miss" queues the dpif-related work needed + * for a given flow. Each "struct flow_miss" corresponds to sending one or + * more packets, plus possibly installing the flow in the dpif. */ +struct flow_miss { + struct hmap_node hmap_node; + struct ofproto_dpif *ofproto; + + struct flow flow; + const struct nlattr *key; + size_t key_len; + enum dpif_upcall_type upcall_type; + struct dpif_flow_stats stats; + odp_port_t odp_in_port; + + uint64_t slow_path_buf[128 / 8]; + struct odputil_keybuf mask_buf; + + struct xlate_out xout; + + bool put; +}; static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); - -static void recv_upcalls(struct udpif *); -static void handle_upcalls(struct udpif *, struct list *upcalls); -static void miss_destroy(struct flow_miss *); -static void *udpif_dispatcher(void *); +static struct list all_udpifs = LIST_INITIALIZER(&all_udpifs); + +static size_t read_upcalls(struct handler *, + struct upcall upcalls[FLOW_MISS_MAX_BATCH], + struct flow_miss miss_buf[FLOW_MISS_MAX_BATCH], + struct hmap *); +static void handle_upcalls(struct handler *, struct hmap *, struct upcall *, + size_t n_upcalls); +static void udpif_stop_threads(struct udpif *); +static void udpif_start_threads(struct udpif *, size_t n_handlers, + size_t n_revalidators); static void *udpif_upcall_handler(void *); +static void *udpif_revalidator(void *); +static uint64_t udpif_get_n_flows(struct udpif *); +static void revalidate(struct revalidator *); +static void revalidator_sweep(struct revalidator *); +static void revalidator_purge(struct revalidator *); +static void upcall_unixctl_show(struct unixctl_conn *conn, int argc, + const char *argv[], void *aux); +static void upcall_unixctl_disable_megaflows(struct unixctl_conn *, int argc, + const char *argv[], void *aux); +static void upcall_unixctl_enable_megaflows(struct unixctl_conn *, int argc, + const char *argv[], void *aux); +static void upcall_unixctl_set_flow_limit(struct unixctl_conn *conn, int argc, + const char *argv[], void *aux); + +static struct udpif_key *ukey_create(const struct nlattr *key, size_t key_len, + long long int used); +static void ukey_delete(struct revalidator *, struct udpif_key *); + +static atomic_bool enable_megaflows = ATOMIC_VAR_INIT(true); struct udpif * udpif_create(struct dpif_backer *backer, struct dpif *dpif) { + static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER; struct udpif *udpif = xzalloc(sizeof *udpif); + if (ovsthread_once_start(&once)) { + unixctl_command_register("upcall/show", "", 0, 0, upcall_unixctl_show, + NULL); + unixctl_command_register("upcall/disable-megaflows", "", 0, 0, + upcall_unixctl_disable_megaflows, NULL); + unixctl_command_register("upcall/enable-megaflows", "", 0, 0, + upcall_unixctl_enable_megaflows, NULL); + unixctl_command_register("upcall/set-flow-limit", "", 1, 1, + upcall_unixctl_set_flow_limit, NULL); + ovsthread_once_done(&once); + } + 
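+    /* These commands are reached through ovs-appctl, e.g. "ovs-appctl
+     * upcall/show"; the handlers are defined at the end of this file. */
+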
udpif->dpif = dpif; udpif->backer = backer; + atomic_init(&udpif->flow_limit, MIN(ofproto_flow_limit, 10000)); udpif->secret = random_uint32(); - udpif->wait_seq = seq_create(); + udpif->reval_seq = seq_create(); + udpif->dump_seq = seq_create(); latch_init(&udpif->exit_latch); - guarded_list_init(&udpif->drop_keys); - guarded_list_init(&udpif->fmbs); - atomic_init(&udpif->reval_seq, 0); + list_push_back(&all_udpifs, &udpif->list_node); + atomic_init(&udpif->n_flows, 0); + atomic_init(&udpif->n_flows_timestamp, LLONG_MIN); + ovs_mutex_init(&udpif->n_flows_mutex); return udpif; } @@ -142,105 +274,158 @@ udpif_create(struct dpif_backer *backer, struct dpif *dpif) void udpif_destroy(struct udpif *udpif) { - struct flow_miss_batch *fmb; - struct drop_key *drop_key; - - udpif_recv_set(udpif, 0, false); - - while ((drop_key = drop_key_next(udpif))) { - drop_key_destroy(drop_key); - } - - while ((fmb = flow_miss_batch_next(udpif))) { - flow_miss_batch_destroy(fmb); - } + udpif_stop_threads(udpif); - guarded_list_destroy(&udpif->drop_keys); - guarded_list_destroy(&udpif->fmbs); + list_remove(&udpif->list_node); latch_destroy(&udpif->exit_latch); - seq_destroy(udpif->wait_seq); + seq_destroy(udpif->reval_seq); + seq_destroy(udpif->dump_seq); + ovs_mutex_destroy(&udpif->n_flows_mutex); free(udpif); } -/* Tells 'udpif' to begin or stop handling flow misses depending on the value - * of 'enable'. 'n_handlers' is the number of upcall_handler threads to - * create. Passing 'n_handlers' as zero is equivalent to passing 'enable' as - * false. */ -void -udpif_recv_set(struct udpif *udpif, size_t n_handlers, bool enable) +/* Stops the handler and revalidator threads, must be enclosed in + * ovsrcu quiescent state unless when destroying udpif. */ +static void +udpif_stop_threads(struct udpif *udpif) { - n_handlers = enable ? n_handlers : 0; - - /* Stop the old threads (if any). */ - if (udpif->handlers && udpif->n_handlers != n_handlers) { + if (udpif && (udpif->n_handlers != 0 || udpif->n_revalidators != 0)) { size_t i; latch_set(&udpif->exit_latch); - /* Wake the handlers so they can exit. */ for (i = 0; i < udpif->n_handlers; i++) { struct handler *handler = &udpif->handlers[i]; - ovs_mutex_lock(&handler->mutex); - xpthread_cond_signal(&handler->wake_cond); - ovs_mutex_unlock(&handler->mutex); + xpthread_join(handler->thread, NULL); } - xpthread_join(udpif->dispatcher, NULL); - for (i = 0; i < udpif->n_handlers; i++) { - struct handler *handler = &udpif->handlers[i]; - struct upcall *miss, *next; + for (i = 0; i < udpif->n_revalidators; i++) { + xpthread_join(udpif->revalidators[i].thread, NULL); + } - xpthread_join(handler->thread, NULL); + for (i = 0; i < udpif->n_revalidators; i++) { + struct revalidator *revalidator = &udpif->revalidators[i]; - ovs_mutex_lock(&handler->mutex); - LIST_FOR_EACH_SAFE (miss, next, list_node, &handler->upcalls) { - list_remove(&miss->list_node); - upcall_destroy(miss); - } - ovs_mutex_unlock(&handler->mutex); - ovs_mutex_destroy(&handler->mutex); + /* Delete ukeys, and delete all flows from the datapath to prevent + * double-counting stats. 
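+             * (revalidator_purge() issues a DPIF_OP_FLOW_DEL for every
+             * remaining ukey via revalidator_sweep__().)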
*/ + revalidator_purge(revalidator); + free(revalidator->name); - xpthread_cond_destroy(&handler->wake_cond); + hmap_destroy(&udpif->ukeys[i].hmap); + ovs_mutex_destroy(&udpif->ukeys[i].mutex); + } + + for (i = 0; i < udpif->n_handlers; i++) { + free(udpif->handlers[i].name); } latch_poll(&udpif->exit_latch); + xpthread_barrier_destroy(&udpif->reval_barrier); + + free(udpif->revalidators); + udpif->revalidators = NULL; + udpif->n_revalidators = 0; + free(udpif->handlers); udpif->handlers = NULL; udpif->n_handlers = 0; + + free(udpif->ukeys); + udpif->ukeys = NULL; } +} - /* Start new threads (if necessary). */ - if (!udpif->handlers && n_handlers) { +/* Starts the handler and revalidator threads, must be enclosed in + * ovsrcu quiescent state. */ +static void +udpif_start_threads(struct udpif *udpif, size_t n_handlers, + size_t n_revalidators) +{ + if (udpif && n_handlers && n_revalidators) { size_t i; udpif->n_handlers = n_handlers; + udpif->n_revalidators = n_revalidators; + udpif->handlers = xzalloc(udpif->n_handlers * sizeof *udpif->handlers); for (i = 0; i < udpif->n_handlers; i++) { struct handler *handler = &udpif->handlers[i]; handler->udpif = udpif; - list_init(&handler->upcalls); - handler->need_signal = false; - xpthread_cond_init(&handler->wake_cond, NULL); - ovs_mutex_init(&handler->mutex); + handler->handler_id = i; xpthread_create(&handler->thread, NULL, udpif_upcall_handler, handler); } - xpthread_create(&udpif->dispatcher, NULL, udpif_dispatcher, udpif); + + xpthread_barrier_init(&udpif->reval_barrier, NULL, + udpif->n_revalidators); + udpif->reval_exit = false; + udpif->revalidators = xzalloc(udpif->n_revalidators + * sizeof *udpif->revalidators); + udpif->ukeys = xmalloc(sizeof *udpif->ukeys * n_revalidators); + for (i = 0; i < udpif->n_revalidators; i++) { + struct revalidator *revalidator = &udpif->revalidators[i]; + + revalidator->udpif = udpif; + hmap_init(&udpif->ukeys[i].hmap); + ovs_mutex_init(&udpif->ukeys[i].mutex); + revalidator->ukeys = &udpif->ukeys[i].hmap; + xpthread_create(&revalidator->thread, NULL, udpif_revalidator, + revalidator); + } } } +/* Tells 'udpif' how many threads it should use to handle upcalls. + * 'n_handlers' and 'n_revalidators' can never be zero. 'udpif''s + * datapath handle must have packet reception enabled before starting + * threads. */ void -udpif_wait(struct udpif *udpif) +udpif_set_threads(struct udpif *udpif, size_t n_handlers, + size_t n_revalidators) { - uint64_t seq = seq_read(udpif->wait_seq); - if (!guarded_list_is_empty(&udpif->drop_keys) || - !guarded_list_is_empty(&udpif->fmbs)) { - poll_immediate_wake(); - } else { - seq_wait(udpif->wait_seq, seq); + int error; + + ovs_assert(udpif); + ovs_assert(n_handlers && n_revalidators); + + ovsrcu_quiesce_start(); + if (udpif->n_handlers != n_handlers + || udpif->n_revalidators != n_revalidators) { + udpif_stop_threads(udpif); } + + error = dpif_handlers_set(udpif->dpif, n_handlers); + if (error) { + VLOG_ERR("failed to configure handlers in dpif %s: %s", + dpif_name(udpif->dpif), ovs_strerror(error)); + return; + } + + if (!udpif->handlers && !udpif->revalidators) { + udpif_start_threads(udpif, n_handlers, n_revalidators); + } + ovsrcu_quiesce_end(); +} + +/* Waits for all ongoing upcall translations to complete. This ensures that + * there are no transient references to any removed ofprotos (or other + * objects). In particular, this should be called after an ofproto is removed + * (e.g. via xlate_remove_ofproto()) but before it is destroyed. 
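+ * (Implemented by stopping and restarting the threads inside an ovsrcu
+ * quiescent window, which forces every handler and revalidator through a
+ * full pass of its main loop.)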
*/ +void +udpif_synchronize(struct udpif *udpif) +{ + /* This is stronger than necessary. It would be sufficient to ensure + * (somehow) that each handler and revalidator thread had passed through + * its main loop once. */ + size_t n_handlers = udpif->n_handlers; + size_t n_revalidators = udpif->n_revalidators; + + ovsrcu_quiesce_start(); + udpif_stop_threads(udpif); + udpif_start_threads(udpif, n_handlers, n_revalidators); + ovsrcu_quiesce_end(); } /* Notifies 'udpif' that something changed which may render previous @@ -248,187 +433,216 @@ udpif_wait(struct udpif *udpif) void udpif_revalidate(struct udpif *udpif) { - struct flow_miss_batch *fmb, *next_fmb; - unsigned int junk; - struct list fmbs; - - /* Since we remove each miss on revalidation, their statistics won't be - * accounted to the appropriate 'facet's in the upper layer. In most - * cases, this is alright because we've already pushed the stats to the - * relevant rules. */ - atomic_add(&udpif->reval_seq, 1, &junk); - - guarded_list_pop_all(&udpif->fmbs, &fmbs); - LIST_FOR_EACH_SAFE (fmb, next_fmb, list_node, &fmbs) { - list_remove(&fmb->list_node); - flow_miss_batch_destroy(fmb); - } - - udpif_drop_key_clear(udpif); + seq_change(udpif->reval_seq); } -/* Destroys and deallocates 'upcall'. */ -static void -upcall_destroy(struct upcall *upcall) +/* Returns a seq which increments every time 'udpif' pulls stats from the + * datapath. Callers can use this to get a sense of when might be a good time + * to do periodic work which relies on relatively up to date statistics. */ +struct seq * +udpif_dump_seq(struct udpif *udpif) { - if (upcall) { - ofpbuf_uninit(&upcall->upcall_buf); - free(upcall); - } + return udpif->dump_seq; } -/* Retrieves the next batch of processed flow misses for 'udpif' to install. - * The caller is responsible for destroying it with flow_miss_batch_destroy(). - */ -struct flow_miss_batch * -flow_miss_batch_next(struct udpif *udpif) +void +udpif_get_memory_usage(struct udpif *udpif, struct simap *usage) { - int i; - - for (i = 0; i < 50; i++) { - struct flow_miss_batch *next; - unsigned int reval_seq; - struct list *next_node; - - next_node = guarded_list_pop_front(&udpif->fmbs); - if (!next_node) { - break; - } + size_t i; - next = CONTAINER_OF(next_node, struct flow_miss_batch, list_node); - atomic_read(&udpif->reval_seq, &reval_seq); - if (next->reval_seq == reval_seq) { - return next; - } + simap_increase(usage, "handlers", udpif->n_handlers); - flow_miss_batch_destroy(next); + simap_increase(usage, "revalidators", udpif->n_revalidators); + for (i = 0; i < udpif->n_revalidators; i++) { + ovs_mutex_lock(&udpif->ukeys[i].mutex); + simap_increase(usage, "udpif keys", hmap_count(&udpif->ukeys[i].hmap)); + ovs_mutex_unlock(&udpif->ukeys[i].mutex); } - - return NULL; } -/* Destroys and deallocates 'fmb'. */ +/* Remove flows from a single datapath. 
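+ * (The threads are stopped before dpif_flow_flush() so that the
+ * per-revalidator ukey maps stay consistent with the emptied datapath,
+ * then restarted with the same thread counts.)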
*/ void -flow_miss_batch_destroy(struct flow_miss_batch *fmb) +udpif_flush(struct udpif *udpif) { - struct flow_miss *miss, *next; - struct upcall *upcall, *next_upcall; + size_t n_handlers, n_revalidators; - if (!fmb) { - return; - } + n_handlers = udpif->n_handlers; + n_revalidators = udpif->n_revalidators; - HMAP_FOR_EACH_SAFE (miss, next, hmap_node, &fmb->misses) { - hmap_remove(&fmb->misses, &miss->hmap_node); - miss_destroy(miss); - } + ovsrcu_quiesce_start(); - LIST_FOR_EACH_SAFE (upcall, next_upcall, list_node, &fmb->upcalls) { - list_remove(&upcall->list_node); - upcall_destroy(upcall); - } + udpif_stop_threads(udpif); + dpif_flow_flush(udpif->dpif); + udpif_start_threads(udpif, n_handlers, n_revalidators); - hmap_destroy(&fmb->misses); - free(fmb); + ovsrcu_quiesce_end(); } -/* Retrieves the next drop key which ofproto-dpif needs to process. The caller - * is responsible for destroying it with drop_key_destroy(). */ -struct drop_key * -drop_key_next(struct udpif *udpif) +/* Removes all flows from all datapaths. */ +static void +udpif_flush_all_datapaths(void) { - struct list *next = guarded_list_pop_front(&udpif->drop_keys); - return next ? CONTAINER_OF(next, struct drop_key, list_node) : NULL; -} + struct udpif *udpif; -/* Destroys and deallocates 'drop_key'. */ -void -drop_key_destroy(struct drop_key *drop_key) -{ - if (drop_key) { - free(drop_key->key); - free(drop_key); + LIST_FOR_EACH (udpif, list_node, &all_udpifs) { + udpif_flush(udpif); } } -/* Clears all drop keys waiting to be processed by drop_key_next(). */ -void -udpif_drop_key_clear(struct udpif *udpif) + +static uint64_t +udpif_get_n_flows(struct udpif *udpif) { - struct drop_key *drop_key, *next; - struct list list; - - guarded_list_pop_all(&udpif->drop_keys, &list); - LIST_FOR_EACH_SAFE (drop_key, next, list_node, &list) { - list_remove(&drop_key->list_node); - drop_key_destroy(drop_key); + long long int time, now; + uint64_t flow_count; + + now = time_msec(); + atomic_read(&udpif->n_flows_timestamp, &time); + if (time < now - 100 && !ovs_mutex_trylock(&udpif->n_flows_mutex)) { + struct dpif_dp_stats stats; + + atomic_store(&udpif->n_flows_timestamp, now); + dpif_get_dp_stats(udpif->dpif, &stats); + flow_count = stats.n_flows; + atomic_store(&udpif->n_flows, flow_count); + ovs_mutex_unlock(&udpif->n_flows_mutex); + } else { + atomic_read(&udpif->n_flows, &flow_count); } + return flow_count; } - -/* The dispatcher thread is responsible for receiving upcalls from the kernel, - * assigning them to a upcall_handler thread. */ + +/* The upcall handler thread tries to read a batch of FLOW_MISS_MAX_BATCH + * upcalls from dpif, processes the batch and installs corresponding flows + * in dpif. 
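+ * When no upcalls are pending, the thread sleeps on dpif_recv_wait() and
+ * the exit latch instead of spinning.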
*/ static void * -udpif_dispatcher(void *arg) +udpif_upcall_handler(void *arg) { - struct udpif *udpif = arg; + struct handler *handler = arg; + struct udpif *udpif = handler->udpif; + struct hmap misses = HMAP_INITIALIZER(&misses); + + handler->name = xasprintf("handler_%u", ovsthread_id_self()); + set_subprogram_name("%s", handler->name); + + while (!latch_is_set(&handler->udpif->exit_latch)) { + struct upcall upcalls[FLOW_MISS_MAX_BATCH]; + struct flow_miss miss_buf[FLOW_MISS_MAX_BATCH]; + struct flow_miss *miss; + size_t n_upcalls, i; + + n_upcalls = read_upcalls(handler, upcalls, miss_buf, &misses); + if (!n_upcalls) { + dpif_recv_wait(udpif->dpif, handler->handler_id); + latch_wait(&udpif->exit_latch); + poll_block(); + } else { + handle_upcalls(handler, &misses, upcalls, n_upcalls); - set_subprogram_name("dispatcher"); - while (!latch_is_set(&udpif->exit_latch)) { - recv_upcalls(udpif); - dpif_recv_wait(udpif->dpif); - latch_wait(&udpif->exit_latch); - poll_block(); + HMAP_FOR_EACH (miss, hmap_node, &misses) { + xlate_out_uninit(&miss->xout); + } + hmap_clear(&misses); + for (i = 0; i < n_upcalls; i++) { + ofpbuf_uninit(&upcalls[i].dpif_upcall.packet); + ofpbuf_uninit(&upcalls[i].upcall_buf); + } + } + coverage_clear(); } + hmap_destroy(&misses); return NULL; } -/* The miss handler thread is responsible for processing miss upcalls retrieved - * by the dispatcher thread. Once finished it passes the processed miss - * upcalls to ofproto-dpif where they're installed in the datapath. */ static void * -udpif_upcall_handler(void *arg) +udpif_revalidator(void *arg) { - struct handler *handler = arg; - - set_subprogram_name("upcall_%u", ovsthread_id_self()); + /* Used by all revalidators. */ + struct revalidator *revalidator = arg; + struct udpif *udpif = revalidator->udpif; + bool leader = revalidator == &udpif->revalidators[0]; + + /* Used only by the leader. */ + long long int start_time = 0; + uint64_t last_reval_seq = 0; + unsigned int flow_limit = 0; + size_t n_flows = 0; + + revalidator->name = xasprintf("revalidator_%u", ovsthread_id_self()); + set_subprogram_name("%s", revalidator->name); for (;;) { - struct list misses = LIST_INITIALIZER(&misses); - size_t i; + if (leader) { + uint64_t reval_seq; - ovs_mutex_lock(&handler->mutex); + reval_seq = seq_read(udpif->reval_seq); + udpif->need_revalidate = last_reval_seq != reval_seq; + last_reval_seq = reval_seq; - if (latch_is_set(&handler->udpif->exit_latch)) { - ovs_mutex_unlock(&handler->mutex); - return NULL; - } + n_flows = udpif_get_n_flows(udpif); + udpif->max_n_flows = MAX(n_flows, udpif->max_n_flows); + udpif->avg_n_flows = (udpif->avg_n_flows + n_flows) / 2; - if (!handler->n_upcalls) { - ovs_mutex_cond_wait(&handler->wake_cond, &handler->mutex); - } + /* Only the leader checks the exit latch to prevent a race where + * some threads think it's true and exit and others think it's + * false and block indefinitely on the reval_barrier */ + udpif->reval_exit = latch_is_set(&udpif->exit_latch); - for (i = 0; i < FLOW_MISS_MAX_BATCH; i++) { - if (handler->n_upcalls) { - handler->n_upcalls--; - list_push_back(&misses, list_pop_front(&handler->upcalls)); - } else { - break; + start_time = time_msec(); + if (!udpif->reval_exit) { + dpif_flow_dump_start(&udpif->dump, udpif->dpif); } } - ovs_mutex_unlock(&handler->mutex); - handle_upcalls(handler->udpif, &misses); + /* Wait for the leader to start the flow dump. 
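+         * (First of three 'reval_barrier' crossings per iteration; the
+         * others separate the dump from the sweep and the sweep from the
+         * leader's cleanup.)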
*/ + xpthread_barrier_wait(&udpif->reval_barrier); + if (udpif->reval_exit) { + break; + } + revalidate(revalidator); + + /* Wait for all flows to have been dumped before we garbage collect. */ + xpthread_barrier_wait(&udpif->reval_barrier); + revalidator_sweep(revalidator); + + /* Wait for all revalidators to finish garbage collection. */ + xpthread_barrier_wait(&udpif->reval_barrier); + + if (leader) { + long long int duration; + + dpif_flow_dump_done(&udpif->dump); + seq_change(udpif->dump_seq); + + duration = MAX(time_msec() - start_time, 1); + atomic_read(&udpif->flow_limit, &flow_limit); + udpif->dump_duration = duration; + if (duration > 2000) { + flow_limit /= duration / 1000; + } else if (duration > 1300) { + flow_limit = flow_limit * 3 / 4; + } else if (duration < 1000 && n_flows > 2000 + && flow_limit < n_flows * 1000 / duration) { + flow_limit += 1000; + } + flow_limit = MIN(ofproto_flow_limit, MAX(flow_limit, 1000)); + atomic_store(&udpif->flow_limit, flow_limit); - coverage_clear(); + if (duration > 2000) { + VLOG_INFO("Spent an unreasonably long %lldms dumping flows", + duration); + } + + poll_timer_wait_until(start_time + MIN(ofproto_max_idle, 500)); + seq_wait(udpif->reval_seq, last_reval_seq); + latch_wait(&udpif->exit_latch); + poll_block(); + } } + + return NULL; } -static void -miss_destroy(struct flow_miss *miss) -{ - xlate_out_uninit(&miss->xout); -} - static enum upcall_type classify_upcall(const struct upcall *upcall) { @@ -465,16 +679,16 @@ classify_upcall(const struct upcall *upcall) } memset(&cookie, 0, sizeof cookie); memcpy(&cookie, nl_attr_get(dpif_upcall->userdata), userdata_len); - if (userdata_len == sizeof cookie.sflow + if (userdata_len == MAX(8, sizeof cookie.sflow) && cookie.type == USER_ACTION_COOKIE_SFLOW) { return SFLOW_UPCALL; - } else if (userdata_len == sizeof cookie.slow_path + } else if (userdata_len == MAX(8, sizeof cookie.slow_path) && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) { return MISS_UPCALL; - } else if (userdata_len == sizeof cookie.flow_sample + } else if (userdata_len == MAX(8, sizeof cookie.flow_sample) && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) { return FLOW_SAMPLE_UPCALL; - } else if (userdata_len == sizeof cookie.ipfix + } else if (userdata_len == MAX(8, sizeof cookie.ipfix) && cookie.type == USER_ACTION_COOKIE_IPFIX) { return IPFIX_UPCALL; } else { @@ -484,88 +698,26 @@ classify_upcall(const struct upcall *upcall) } } +/* Calculates slow path actions for 'xout'. 'buf' must statically be + * initialized with at least 128 bytes of space. 
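+ * A typical call site (mirroring handle_upcalls() below) stacks the
+ * buffer:
+ *
+ *     uint64_t slow_path_buf[128 / 8];
+ *     struct ofpbuf buf;
+ *
+ *     ofpbuf_use_stack(&buf, slow_path_buf, sizeof slow_path_buf);
+ *     compose_slow_path(udpif, &miss->xout, &miss->flow,
+ *                       miss->odp_in_port, &buf);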
*/ static void -recv_upcalls(struct udpif *udpif) +compose_slow_path(struct udpif *udpif, struct xlate_out *xout, + struct flow *flow, odp_port_t odp_in_port, + struct ofpbuf *buf) { - int n; - - for (;;) { - uint32_t hash = udpif->secret; - struct handler *handler; - struct upcall *upcall; - size_t n_bytes, left; - struct nlattr *nla; - int error; - - upcall = xmalloc(sizeof *upcall); - ofpbuf_use_stub(&upcall->upcall_buf, upcall->upcall_stub, - sizeof upcall->upcall_stub); - error = dpif_recv(udpif->dpif, &upcall->dpif_upcall, - &upcall->upcall_buf); - if (error) { - upcall_destroy(upcall); - break; - } - - n_bytes = 0; - NL_ATTR_FOR_EACH (nla, left, upcall->dpif_upcall.key, - upcall->dpif_upcall.key_len) { - enum ovs_key_attr type = nl_attr_type(nla); - if (type == OVS_KEY_ATTR_IN_PORT - || type == OVS_KEY_ATTR_TCP - || type == OVS_KEY_ATTR_UDP) { - if (nl_attr_get_size(nla) == 4) { - hash = mhash_add(hash, nl_attr_get_u32(nla)); - n_bytes += 4; - } else { - VLOG_WARN_RL(&rl, - "Netlink attribute with incorrect size."); - } - } - } - hash = mhash_finish(hash, n_bytes); - - handler = &udpif->handlers[hash % udpif->n_handlers]; - - ovs_mutex_lock(&handler->mutex); - if (handler->n_upcalls < MAX_QUEUE_LENGTH) { - list_push_back(&handler->upcalls, &upcall->list_node); - if (handler->n_upcalls == 0) { - handler->need_signal = true; - } - handler->n_upcalls++; - if (handler->need_signal && - handler->n_upcalls >= FLOW_MISS_MAX_BATCH) { - handler->need_signal = false; - xpthread_cond_signal(&handler->wake_cond); - } - ovs_mutex_unlock(&handler->mutex); - if (!VLOG_DROP_DBG(&rl)) { - struct ds ds = DS_EMPTY_INITIALIZER; - - odp_flow_key_format(upcall->dpif_upcall.key, - upcall->dpif_upcall.key_len, - &ds); - VLOG_DBG("dispatcher: enqueue (%s)", ds_cstr(&ds)); - ds_destroy(&ds); - } - } else { - ovs_mutex_unlock(&handler->mutex); - COVERAGE_INC(upcall_queue_overflow); - upcall_destroy(upcall); - } - } - - for (n = 0; n < udpif->n_handlers; ++n) { - struct handler *handler = &udpif->handlers[n]; - - if (handler->need_signal) { - handler->need_signal = false; - ovs_mutex_lock(&handler->mutex); - xpthread_cond_signal(&handler->wake_cond); - ovs_mutex_unlock(&handler->mutex); - } - } + union user_action_cookie cookie; + odp_port_t port; + uint32_t pid; + + cookie.type = USER_ACTION_COOKIE_SLOW_PATH; + cookie.slow_path.unused = 0; + cookie.slow_path.reason = xout->slow; + + port = xout->slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP | SLOW_STP) + ? ODPP_NONE + : odp_in_port; + pid = dpif_port_get_pid(udpif->dpif, port, flow_hash_5tuple(flow, 0)); + odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path, buf); } static struct flow_miss * @@ -583,21 +735,24 @@ flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto, return NULL; } -static void -handle_upcalls(struct udpif *udpif, struct list *upcalls) +/* Reads and classifies upcalls. Returns the number of upcalls successfully + * read. */ +static size_t +read_upcalls(struct handler *handler, + struct upcall upcalls[FLOW_MISS_MAX_BATCH], + struct flow_miss miss_buf[FLOW_MISS_MAX_BATCH], + struct hmap *misses) { - struct dpif_op *opsp[FLOW_MISS_MAX_BATCH]; - struct dpif_op ops[FLOW_MISS_MAX_BATCH]; - struct upcall *upcall, *next; - struct flow_miss_batch *fmb; - size_t n_misses, n_ops, i; - struct flow_miss *miss; - unsigned int reval_seq; - enum upcall_type type; - bool fail_open; + struct udpif *udpif = handler->udpif; + size_t i; + size_t n_misses = 0; + size_t n_upcalls = 0; - /* Extract the flow from each upcall. 
Construct in fmb->misses a hash - * table that maps each unique flow to a 'struct flow_miss'. + /* + * Try reading FLOW_MISS_MAX_BATCH upcalls from dpif. + * + * Extract the flow from each upcall. Construct in 'misses' a hash table + * that maps each unique flow to a 'struct flow_miss'. * * Most commonly there is a single packet per flow_miss, but there are * several reasons why there might be more than one, e.g.: @@ -615,30 +770,36 @@ handle_upcalls(struct udpif *udpif, struct list *upcalls) * other end of the connection, which gives OVS a chance to set up a * datapath flow.) */ - fmb = xmalloc(sizeof *fmb); - atomic_read(&udpif->reval_seq, &fmb->reval_seq); - hmap_init(&fmb->misses); - list_init(&fmb->upcalls); - n_misses = 0; - LIST_FOR_EACH_SAFE (upcall, next, list_node, upcalls) { - struct dpif_upcall *dupcall = &upcall->dpif_upcall; - struct ofpbuf *packet = dupcall->packet; - struct flow_miss *miss = &fmb->miss_buf[n_misses]; + for (i = 0; i < FLOW_MISS_MAX_BATCH; i++) { + struct upcall *upcall = &upcalls[n_upcalls]; + struct flow_miss *miss = &miss_buf[n_misses]; + struct dpif_upcall *dupcall; + struct ofpbuf *packet; struct flow_miss *existing_miss; struct ofproto_dpif *ofproto; struct dpif_sflow *sflow; struct dpif_ipfix *ipfix; - odp_port_t odp_in_port; struct flow flow; + enum upcall_type type; + odp_port_t odp_in_port; int error; + ofpbuf_use_stub(&upcall->upcall_buf, upcall->upcall_stub, + sizeof upcall->upcall_stub); + error = dpif_recv(udpif->dpif, handler->handler_id, + &upcall->dpif_upcall, &upcall->upcall_buf); + if (error) { + ofpbuf_uninit(&upcall->upcall_buf); + break; + } + + dupcall = &upcall->dpif_upcall; + packet = &dupcall->packet; error = xlate_receive(udpif->backer, packet, dupcall->key, - dupcall->key_len, &flow, &miss->key_fitness, - &ofproto, &odp_in_port); + dupcall->key_len, &flow, + &ofproto, &ipfix, &sflow, NULL, &odp_in_port); if (error) { if (error == ENODEV) { - struct drop_key *drop_key; - /* Received packet on datapath port for which we couldn't * associate an ofproto. This can happen if a port is removed * while traffic is being received. Print a rate-limited @@ -647,37 +808,24 @@ handle_upcalls(struct udpif *udpif, struct list *upcalls) * in the kernel. 
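+                 * (The dpif_flow_put() below installs the drop by passing
+                 * a zero-length action list.)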
*/ VLOG_INFO_RL(&rl, "received packet on unassociated datapath " "port %"PRIu32, odp_in_port); - - drop_key = xmalloc(sizeof *drop_key); - drop_key->key = xmemdup(dupcall->key, dupcall->key_len); - drop_key->key_len = dupcall->key_len; - - if (guarded_list_push_back(&udpif->drop_keys, - &drop_key->list_node, - MAX_QUEUE_LENGTH)) { - seq_change(udpif->wait_seq); - } else { - COVERAGE_INC(drop_queue_overflow); - drop_key_destroy(drop_key); - } + dpif_flow_put(udpif->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY, + dupcall->key, dupcall->key_len, NULL, 0, NULL, 0, + NULL); } - list_remove(&upcall->list_node); - upcall_destroy(upcall); - continue; + goto destroy_upcall; } type = classify_upcall(upcall); if (type == MISS_UPCALL) { uint32_t hash; + struct pkt_metadata md = pkt_metadata_from_flow(&flow); - flow_extract(packet, flow.skb_priority, flow.pkt_mark, - &flow.tunnel, &flow.in_port, &miss->flow); - + flow_extract(packet, &md, &miss->flow); hash = flow_hash(&miss->flow, 0); - existing_miss = flow_miss_find(&fmb->misses, ofproto, &miss->flow, + existing_miss = flow_miss_find(misses, ofproto, &miss->flow, hash); if (!existing_miss) { - hmap_insert(&fmb->misses, &miss->hmap_node, hash); + hmap_insert(misses, &miss->hmap_node, hash); miss->ofproto = ofproto; miss->key = dupcall->key; miss->key_len = dupcall->key_len; @@ -686,42 +834,39 @@ handle_upcalls(struct udpif *udpif, struct list *upcalls) miss->stats.n_bytes = 0; miss->stats.used = time_msec(); miss->stats.tcp_flags = 0; - + miss->odp_in_port = odp_in_port; + miss->put = false; n_misses++; } else { miss = existing_miss; } - miss->stats.tcp_flags |= packet_get_tcp_flags(packet, &miss->flow); - miss->stats.n_bytes += packet->size; + miss->stats.tcp_flags |= ntohs(miss->flow.tcp_flags); + miss->stats.n_bytes += ofpbuf_size(packet); miss->stats.n_packets++; upcall->flow_miss = miss; + n_upcalls++; continue; } switch (type) { case SFLOW_UPCALL: - sflow = xlate_get_sflow(ofproto); if (sflow) { union user_action_cookie cookie; memset(&cookie, 0, sizeof cookie); memcpy(&cookie, nl_attr_get(dupcall->userdata), sizeof cookie.sflow); - dpif_sflow_received(sflow, dupcall->packet, &flow, odp_in_port, + dpif_sflow_received(sflow, packet, &flow, odp_in_port, &cookie); - dpif_sflow_unref(sflow); } break; case IPFIX_UPCALL: - ipfix = xlate_get_ipfix(ofproto); if (ipfix) { - dpif_ipfix_bridge_sample(ipfix, dupcall->packet, &flow); - dpif_ipfix_unref(ipfix); + dpif_ipfix_bridge_sample(ipfix, packet, &flow); } break; case FLOW_SAMPLE_UPCALL: - ipfix = xlate_get_ipfix(ofproto); if (ipfix) { union user_action_cookie cookie; @@ -731,24 +876,45 @@ handle_upcalls(struct udpif *udpif, struct list *upcalls) /* The flow reflects exactly the contents of the packet. * Sample the packet using it. 
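+             * (The cookie carries the collector set id, sampling
+             * probability, and observation domain/point ids used in the
+             * call below.)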
*/ - dpif_ipfix_flow_sample(ipfix, dupcall->packet, &flow, + dpif_ipfix_flow_sample(ipfix, packet, &flow, cookie.flow_sample.collector_set_id, cookie.flow_sample.probability, cookie.flow_sample.obs_domain_id, cookie.flow_sample.obs_point_id); - dpif_ipfix_unref(ipfix); } break; case BAD_UPCALL: break; case MISS_UPCALL: - NOT_REACHED(); + OVS_NOT_REACHED(); } - list_remove(&upcall->list_node); - upcall_destroy(upcall); + dpif_ipfix_unref(ipfix); + dpif_sflow_unref(sflow); + +destroy_upcall: + ofpbuf_uninit(&upcall->dpif_upcall.packet); + ofpbuf_uninit(&upcall->upcall_buf); } + return n_upcalls; +} + +static void +handle_upcalls(struct handler *handler, struct hmap *misses, + struct upcall *upcalls, size_t n_upcalls) +{ + struct udpif *udpif = handler->udpif; + struct dpif_op *opsp[FLOW_MISS_MAX_BATCH * 2]; + struct dpif_op ops[FLOW_MISS_MAX_BATCH * 2]; + struct flow_miss *miss; + size_t n_ops, i; + unsigned int flow_limit; + bool fail_open, may_put; + + atomic_read(&udpif->flow_limit, &flow_limit); + may_put = udpif_get_n_flows(udpif) < flow_limit; + /* Initialize each 'struct flow_miss's ->xout. * * We do this per-flow_miss rather than per-packet because, most commonly, @@ -757,13 +923,21 @@ handle_upcalls(struct udpif *udpif, struct list *upcalls) * We can't do this in the previous loop because we need the TCP flags for * all the packets in each miss. */ fail_open = false; - HMAP_FOR_EACH (miss, hmap_node, &fmb->misses) { + HMAP_FOR_EACH (miss, hmap_node, misses) { struct xlate_in xin; xlate_in_init(&xin, miss->ofproto, &miss->flow, NULL, miss->stats.tcp_flags, NULL); xin.may_learn = true; - xin.resubmit_stats = &miss->stats; + + if (miss->upcall_type == DPIF_UC_MISS) { + xin.resubmit_stats = &miss->stats; + } else { + /* For non-miss upcalls, there's a flow in the datapath which this + * packet was accounted to. Presumably the revalidators will deal + * with pushing its stats eventually. */ + } + xlate_actions(&xin, &miss->xout); fail_open = fail_open || miss->xout.fail_open; } @@ -781,9 +955,16 @@ handle_upcalls(struct udpif *udpif, struct list *upcalls) * The loop fills 'ops' with an array of operations to execute in the * datapath. */ n_ops = 0; - LIST_FOR_EACH (upcall, list_node, upcalls) { + for (i = 0; i < n_upcalls; i++) { + struct upcall *upcall = &upcalls[i]; struct flow_miss *miss = upcall->flow_miss; - struct ofpbuf *packet = upcall->dpif_upcall.packet; + struct ofpbuf *packet = &upcall->dpif_upcall.packet; + struct dpif_op *op; + ovs_be16 flow_vlan_tci; + + /* Save a copy of flow.vlan_tci in case it is changed to + * generate proper mega flow masks for VLAN splinter flows. */ + flow_vlan_tci = miss->flow.vlan_tci; if (miss->xout.slow) { struct xlate_in xin; @@ -792,76 +973,709 @@ handle_upcalls(struct udpif *udpif, struct list *upcalls) xlate_actions_for_side_effects(&xin); } - if (miss->xout.odp_actions.size) { - struct dpif_op *op; - - if (miss->flow.in_port.ofp_port - != vsp_realdev_to_vlandev(miss->ofproto, - miss->flow.in_port.ofp_port, - miss->flow.vlan_tci)) { - /* This packet was received on a VLAN splinter port. We - * added a VLAN to the packet to make the packet resemble - * the flow, but the actions were composed assuming that - * the packet contained no VLAN. So, we must remove the - * VLAN header from the packet before trying to execute the - * actions. */ + if (miss->flow.in_port.ofp_port + != vsp_realdev_to_vlandev(miss->ofproto, + miss->flow.in_port.ofp_port, + miss->flow.vlan_tci)) { + /* This packet was received on a VLAN splinter port. 
We + * added a VLAN to the packet to make the packet resemble + * the flow, but the actions were composed assuming that + * the packet contained no VLAN. So, we must remove the + * VLAN header from the packet before trying to execute the + * actions. */ + if (ofpbuf_size(&miss->xout.odp_actions)) { eth_pop_vlan(packet); } + /* Remove the flow vlan tags inserted by vlan splinter logic + * to ensure megaflow masks generated match the data path flow. */ + miss->flow.vlan_tci = 0; + } + + /* Do not install a flow into the datapath if: + * + * - The datapath already has too many flows. + * + * - An earlier iteration of this loop already put the same flow. + * + * - We received this packet via some flow installed in the kernel + * already. */ + if (may_put + && !miss->put + && upcall->dpif_upcall.type == DPIF_UC_MISS) { + struct ofpbuf mask; + bool megaflow; + + miss->put = true; + + atomic_read(&enable_megaflows, &megaflow); + ofpbuf_use_stack(&mask, &miss->mask_buf, sizeof miss->mask_buf); + if (megaflow) { + size_t max_mpls; + + max_mpls = ofproto_dpif_get_max_mpls_depth(miss->ofproto); + odp_flow_key_from_mask(&mask, &miss->xout.wc.masks, + &miss->flow, UINT32_MAX, max_mpls); + } + + op = &ops[n_ops++]; + op->type = DPIF_OP_FLOW_PUT; + op->u.flow_put.flags = DPIF_FP_CREATE | DPIF_FP_MODIFY; + op->u.flow_put.key = miss->key; + op->u.flow_put.key_len = miss->key_len; + op->u.flow_put.mask = ofpbuf_data(&mask); + op->u.flow_put.mask_len = ofpbuf_size(&mask); + op->u.flow_put.stats = NULL; + + if (!miss->xout.slow) { + op->u.flow_put.actions = ofpbuf_data(&miss->xout.odp_actions); + op->u.flow_put.actions_len = ofpbuf_size(&miss->xout.odp_actions); + } else { + struct ofpbuf buf; + + ofpbuf_use_stack(&buf, miss->slow_path_buf, + sizeof miss->slow_path_buf); + compose_slow_path(udpif, &miss->xout, &miss->flow, + miss->odp_in_port, &buf); + op->u.flow_put.actions = ofpbuf_data(&buf); + op->u.flow_put.actions_len = ofpbuf_size(&buf); + } + } + + /* + * The 'miss' may be shared by multiple upcalls. Restore + * the saved flow vlan_tci field before processing the next + * upcall. */ + miss->flow.vlan_tci = flow_vlan_tci; + + if (ofpbuf_size(&miss->xout.odp_actions)) { + op = &ops[n_ops++]; op->type = DPIF_OP_EXECUTE; - op->u.execute.key = miss->key; - op->u.execute.key_len = miss->key_len; op->u.execute.packet = packet; - op->u.execute.actions = miss->xout.odp_actions.data; - op->u.execute.actions_len = miss->xout.odp_actions.size; + odp_key_to_pkt_metadata(miss->key, miss->key_len, + &op->u.execute.md); + op->u.execute.actions = ofpbuf_data(&miss->xout.odp_actions); + op->u.execute.actions_len = ofpbuf_size(&miss->xout.odp_actions); op->u.execute.needs_help = (miss->xout.slow & SLOW_ACTION) != 0; } } - /* Execute batch. */ - for (i = 0; i < n_ops; i++) { - opsp[i] = &ops[i]; - } - dpif_operate(udpif->dpif, opsp, n_ops); - /* Special case for fail-open mode. * * If we are in fail-open mode, but we are connected to a controller too, * then we should send the packet up to the controller in the hope that it * will try to set up a flow and thereby allow us to exit fail-open. * - * See the top-level comment in fail-open.c for more information. */ + * See the top-level comment in fail-open.c for more information. + * + * Copy packets before they are modified by execution. 
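+     * (The xmemdup() below takes that copy; the batch is executed by
+     * dpif_operate() only at the end of this function.)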
*/ if (fail_open) { - LIST_FOR_EACH (upcall, list_node, upcalls) { + for (i = 0; i < n_upcalls; i++) { + struct upcall *upcall = &upcalls[i]; struct flow_miss *miss = upcall->flow_miss; - struct ofpbuf *packet = upcall->dpif_upcall.packet; + struct ofpbuf *packet = &upcall->dpif_upcall.packet; struct ofproto_packet_in *pin; pin = xmalloc(sizeof *pin); - pin->up.packet = xmemdup(packet->data, packet->size); - pin->up.packet_len = packet->size; + pin->up.packet = xmemdup(ofpbuf_data(packet), ofpbuf_size(packet)); + pin->up.packet_len = ofpbuf_size(packet); pin->up.reason = OFPR_NO_MATCH; pin->up.table_id = 0; pin->up.cookie = OVS_BE64_MAX; flow_get_metadata(&miss->flow, &pin->up.fmd); pin->send_len = 0; /* Not used for flow table misses. */ - pin->generated_by_table_miss = false; + pin->miss_type = OFPROTO_PACKET_IN_NO_MISS; ofproto_dpif_send_packet_in(miss->ofproto, pin); } } - list_move(&fmb->upcalls, upcalls); + /* Execute batch. */ + for (i = 0; i < n_ops; i++) { + opsp[i] = &ops[i]; + } + dpif_operate(udpif->dpif, opsp, n_ops); +} + +/* Must be called with udpif->ukeys[hash % udpif->n_revalidators].mutex. */ +static struct udpif_key * +ukey_lookup__(struct udpif *udpif, const struct nlattr *key, size_t key_len, + uint32_t hash) +{ + struct udpif_key *ukey; + struct hmap *hmap = &udpif->ukeys[hash % udpif->n_revalidators].hmap; + + HMAP_FOR_EACH_WITH_HASH (ukey, hmap_node, hash, hmap) { + if (ukey->key_len == key_len && !memcmp(ukey->key, key, key_len)) { + return ukey; + } + } + return NULL; +} + +static struct udpif_key * +ukey_lookup(struct udpif *udpif, const struct nlattr *key, size_t key_len, + uint32_t hash) +{ + struct udpif_key *ukey; + uint32_t idx = hash % udpif->n_revalidators; + + ovs_mutex_lock(&udpif->ukeys[idx].mutex); + ukey = ukey_lookup__(udpif, key, key_len, hash); + ovs_mutex_unlock(&udpif->ukeys[idx].mutex); + + return ukey; +} + +static struct udpif_key * +ukey_create(const struct nlattr *key, size_t key_len, long long int used) +{ + struct udpif_key *ukey = xmalloc(sizeof *ukey); + ovs_mutex_init(&ukey->mutex); + + ukey->key = (struct nlattr *) &ukey->key_buf; + memcpy(&ukey->key_buf, key, key_len); + ukey->key_len = key_len; + + ovs_mutex_lock(&ukey->mutex); + ukey->mark = false; + ukey->flow_exists = true; + ukey->created = used ? used : time_msec(); + memset(&ukey->stats, 0, sizeof ukey->stats); + ukey->xcache = NULL; + ovs_mutex_unlock(&ukey->mutex); + + return ukey; +} + +/* Checks for a ukey in 'udpif->ukeys' with the same 'ukey->key' and 'hash', + * and inserts 'ukey' if it does not exist. + * + * Returns true if 'ukey' was inserted into 'udpif->ukeys', false otherwise. 
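+ * ('hash' is expected to be computed over the flow key with
+ * 'udpif->secret', as in revalidate(); hash % n_revalidators also selects
+ * the shard and its mutex.)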
*/ +static bool +udpif_insert_ukey(struct udpif *udpif, struct udpif_key *ukey, uint32_t hash) +{ + struct udpif_key *duplicate; + uint32_t idx = hash % udpif->n_revalidators; + bool ok; + + ovs_mutex_lock(&udpif->ukeys[idx].mutex); + duplicate = ukey_lookup__(udpif, ukey->key, ukey->key_len, hash); + if (duplicate) { + ok = false; + } else { + hmap_insert(&udpif->ukeys[idx].hmap, &ukey->hmap_node, hash); + ok = true; + } + ovs_mutex_unlock(&udpif->ukeys[idx].mutex); + + return ok; +} + +static void +ukey_delete(struct revalidator *revalidator, struct udpif_key *ukey) + OVS_NO_THREAD_SAFETY_ANALYSIS +{ + if (revalidator) { + hmap_remove(revalidator->ukeys, &ukey->hmap_node); + } + xlate_cache_delete(ukey->xcache); + ovs_mutex_destroy(&ukey->mutex); + free(ukey); +} + +static bool +should_revalidate(uint64_t packets, long long int used) +{ + long long int metric, now, duration; + + /* Calculate the mean time between seeing these packets. If this + * exceeds the threshold, then delete the flow rather than performing + * costly revalidation for flows that aren't being hit frequently. + * + * This is targeted at situations where the dump_duration is high (~1s), + * and revalidation is triggered by a call to udpif_revalidate(). In + * these situations, revalidation of all flows causes fluctuations in the + * flow_limit due to the interaction with the dump_duration and max_idle. + * This tends to result in deletion of low-throughput flows anyway, so + * skip the revalidation and just delete those flows. */ + packets = MAX(packets, 1); + now = MAX(used, time_msec()); + duration = now - used; + metric = duration / packets; + + if (metric > 200) { + return false; + } + return true; +} + +static bool +revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey, + const struct nlattr *mask, size_t mask_len, + const struct nlattr *actions, size_t actions_len, + const struct dpif_flow_stats *stats) +{ + uint64_t slow_path_buf[128 / 8]; + struct xlate_out xout, *xoutp; + struct netflow *netflow; + struct ofproto_dpif *ofproto; + struct dpif_flow_stats push; + struct ofpbuf xout_actions; + struct flow flow, dp_mask; + uint32_t *dp32, *xout32; + odp_port_t odp_in_port; + struct xlate_in xin; + long long int last_used; + int error; + size_t i; + bool may_learn, ok; + + ok = false; + xoutp = NULL; + netflow = NULL; + + ovs_mutex_lock(&ukey->mutex); + last_used = ukey->stats.used; + push.used = stats->used; + push.tcp_flags = stats->tcp_flags; + push.n_packets = stats->n_packets > ukey->stats.n_packets + ? stats->n_packets - ukey->stats.n_packets + : 0; + push.n_bytes = stats->n_bytes > ukey->stats.n_bytes + ? stats->n_bytes - ukey->stats.n_bytes + : 0; + ukey->stats = *stats; + + if (!ukey->flow_exists) { + /* Don't bother revalidating if the flow was already deleted. 
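+         * ('flow_exists' is what guarantees each datapath flow is deleted
+         * only once; it is cleared when revalidation decides against
+         * keeping the flow.)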
*/ + goto exit; + } + + if (udpif->need_revalidate && last_used + && !should_revalidate(push.n_packets, last_used)) { + ok = false; + goto exit; + } + + if (!push.n_packets && !udpif->need_revalidate) { + ok = true; + goto exit; + } + + may_learn = push.n_packets > 0; + if (ukey->xcache && !udpif->need_revalidate) { + xlate_push_stats(ukey->xcache, may_learn, &push); + ok = true; + goto exit; + } + + error = xlate_receive(udpif->backer, NULL, ukey->key, ukey->key_len, &flow, + &ofproto, NULL, NULL, &netflow, &odp_in_port); + if (error) { + goto exit; + } + + if (udpif->need_revalidate) { + xlate_cache_clear(ukey->xcache); + } + if (!ukey->xcache) { + ukey->xcache = xlate_cache_new(); + } + + xlate_in_init(&xin, ofproto, &flow, NULL, push.tcp_flags, NULL); + xin.resubmit_stats = push.n_packets ? &push : NULL; + xin.xcache = ukey->xcache; + xin.may_learn = may_learn; + xin.skip_wildcards = !udpif->need_revalidate; + xlate_actions(&xin, &xout); + xoutp = &xout; + + if (!udpif->need_revalidate) { + ok = true; + goto exit; + } - atomic_read(&udpif->reval_seq, &reval_seq); - if (reval_seq != fmb->reval_seq) { - COVERAGE_INC(fmb_queue_revalidated); - flow_miss_batch_destroy(fmb); - } else if (!guarded_list_push_back(&udpif->fmbs, &fmb->list_node, - MAX_QUEUE_LENGTH)) { - COVERAGE_INC(fmb_queue_overflow); - flow_miss_batch_destroy(fmb); + if (!xout.slow) { + ofpbuf_use_const(&xout_actions, ofpbuf_data(&xout.odp_actions), + ofpbuf_size(&xout.odp_actions)); } else { - seq_change(udpif->wait_seq); + ofpbuf_use_stack(&xout_actions, slow_path_buf, sizeof slow_path_buf); + compose_slow_path(udpif, &xout, &flow, odp_in_port, &xout_actions); + } + + if (actions_len != ofpbuf_size(&xout_actions) + || memcmp(ofpbuf_data(&xout_actions), actions, actions_len)) { + goto exit; + } + + if (odp_flow_key_to_mask(mask, mask_len, &dp_mask, &flow) + == ODP_FIT_ERROR) { + goto exit; + } + + /* Since the kernel is free to ignore wildcarded bits in the mask, we can't + * directly check that the masks are the same. Instead we check that the + * mask in the kernel is more specific i.e. less wildcarded, than what + * we've calculated here. This guarantees we don't catch any packets we + * shouldn't with the megaflow. */ + dp32 = (uint32_t *) &dp_mask; + xout32 = (uint32_t *) &xout.wc.masks; + for (i = 0; i < FLOW_U32S; i++) { + if ((dp32[i] | xout32[i]) != dp32[i]) { + goto exit; + } + } + ok = true; + +exit: + ovs_mutex_unlock(&ukey->mutex); + if (netflow) { + if (!ok) { + netflow_expire(netflow, &flow); + netflow_flow_clear(netflow, &flow); + } + netflow_unref(netflow); + } + xlate_out_uninit(xoutp); + return ok; +} + +struct dump_op { + struct udpif_key *ukey; + struct dpif_flow_stats stats; /* Stats for 'op'. */ + struct dpif_op op; /* Flow del operation. 
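+                                        * Filled in by dump_op_init() with
+                                        * DPIF_OP_FLOW_DEL.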
*/ +}; + +static void +dump_op_init(struct dump_op *op, const struct nlattr *key, size_t key_len, + struct udpif_key *ukey) +{ + op->ukey = ukey; + op->op.type = DPIF_OP_FLOW_DEL; + op->op.u.flow_del.key = key; + op->op.u.flow_del.key_len = key_len; + op->op.u.flow_del.stats = &op->stats; +} + +static void +push_dump_ops__(struct udpif *udpif, struct dump_op *ops, size_t n_ops) +{ + struct dpif_op *opsp[REVALIDATE_MAX_BATCH]; + size_t i; + + ovs_assert(n_ops <= REVALIDATE_MAX_BATCH); + for (i = 0; i < n_ops; i++) { + opsp[i] = &ops[i].op; + } + dpif_operate(udpif->dpif, opsp, n_ops); + + for (i = 0; i < n_ops; i++) { + struct dump_op *op = &ops[i]; + struct dpif_flow_stats *push, *stats, push_buf; + + stats = op->op.u.flow_del.stats; + if (op->ukey) { + push = &push_buf; + ovs_mutex_lock(&op->ukey->mutex); + push->used = MAX(stats->used, op->ukey->stats.used); + push->tcp_flags = stats->tcp_flags | op->ukey->stats.tcp_flags; + push->n_packets = stats->n_packets - op->ukey->stats.n_packets; + push->n_bytes = stats->n_bytes - op->ukey->stats.n_bytes; + ovs_mutex_unlock(&op->ukey->mutex); + } else { + push = stats; + } + + if (push->n_packets || netflow_exists()) { + struct ofproto_dpif *ofproto; + struct netflow *netflow; + struct flow flow; + bool may_learn; + + may_learn = push->n_packets > 0; + if (op->ukey) { + ovs_mutex_lock(&op->ukey->mutex); + if (op->ukey->xcache) { + xlate_push_stats(op->ukey->xcache, may_learn, push); + ovs_mutex_unlock(&op->ukey->mutex); + continue; + } + ovs_mutex_unlock(&op->ukey->mutex); + } + + if (!xlate_receive(udpif->backer, NULL, op->op.u.flow_del.key, + op->op.u.flow_del.key_len, &flow, &ofproto, + NULL, NULL, &netflow, NULL)) { + struct xlate_in xin; + + xlate_in_init(&xin, ofproto, &flow, NULL, push->tcp_flags, + NULL); + xin.resubmit_stats = push->n_packets ? push : NULL; + xin.may_learn = may_learn; + xin.skip_wildcards = true; + xlate_actions_for_side_effects(&xin); + + if (netflow) { + netflow_expire(netflow, &flow); + netflow_flow_clear(netflow, &flow); + netflow_unref(netflow); + } + } + } + } +} + +static void +push_dump_ops(struct revalidator *revalidator, + struct dump_op *ops, size_t n_ops) +{ + int i; + + push_dump_ops__(revalidator->udpif, ops, n_ops); + for (i = 0; i < n_ops; i++) { + ukey_delete(revalidator, ops[i].ukey); + } +} + +static void +revalidate(struct revalidator *revalidator) +{ + struct udpif *udpif = revalidator->udpif; + + struct dump_op ops[REVALIDATE_MAX_BATCH]; + const struct nlattr *key, *mask, *actions; + size_t key_len, mask_len, actions_len; + const struct dpif_flow_stats *stats; + long long int now; + unsigned int flow_limit; + size_t n_ops; + void *state; + + n_ops = 0; + now = time_msec(); + atomic_read(&udpif->flow_limit, &flow_limit); + + dpif_flow_dump_state_init(udpif->dpif, &state); + while (dpif_flow_dump_next(&udpif->dump, state, &key, &key_len, &mask, + &mask_len, &actions, &actions_len, &stats)) { + struct udpif_key *ukey; + bool mark, may_destroy; + long long int used, max_idle; + uint32_t hash; + size_t n_flows; + + hash = hash_bytes(key, key_len, udpif->secret); + ukey = ukey_lookup(udpif, key, key_len, hash); + + used = stats->used; + if (!used && ukey) { + ovs_mutex_lock(&ukey->mutex); + + if (ukey->mark || !ukey->flow_exists) { + /* The flow has already been dumped. This can occasionally + * occur if the datapath is changed in the middle of a flow + * dump. Rather than perform the same work twice, skip the + * flow this time. 
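+                 * (Tracked by the 'upcall_duplicate_flow' coverage
+                 * counter.)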
*/ + ovs_mutex_unlock(&ukey->mutex); + COVERAGE_INC(upcall_duplicate_flow); + continue; + } + + used = ukey->created; + ovs_mutex_unlock(&ukey->mutex); + } + + n_flows = udpif_get_n_flows(udpif); + max_idle = ofproto_max_idle; + if (n_flows > flow_limit) { + max_idle = 100; + } + + if ((used && used < now - max_idle) || n_flows > flow_limit * 2) { + mark = false; + } else { + if (!ukey) { + ukey = ukey_create(key, key_len, used); + if (!udpif_insert_ukey(udpif, ukey, hash)) { + /* The same ukey has already been created. This means that + * another revalidator is processing this flow + * concurrently, so don't bother processing it. */ + ukey_delete(NULL, ukey); + continue; + } + } + + mark = revalidate_ukey(udpif, ukey, mask, mask_len, actions, + actions_len, stats); + } + + if (ukey) { + ovs_mutex_lock(&ukey->mutex); + ukey->mark = ukey->flow_exists = mark; + ovs_mutex_unlock(&ukey->mutex); + } + + if (!mark) { + dump_op_init(&ops[n_ops++], key, key_len, ukey); + } + + may_destroy = dpif_flow_dump_next_may_destroy_keys(&udpif->dump, + state); + + /* Only update 'now' immediately before 'buffer' will be updated. + * This gives us the current time relative to the time the datapath + * will write into 'stats'. */ + if (may_destroy) { + now = time_msec(); + } + + /* Only do a dpif_operate when we've hit our maximum batch, or when our + * memory is about to be clobbered by the next call to + * dpif_flow_dump_next(). */ + if (n_ops == REVALIDATE_MAX_BATCH || (n_ops && may_destroy)) { + push_dump_ops__(udpif, ops, n_ops); + n_ops = 0; + } + } + + if (n_ops) { + push_dump_ops__(udpif, ops, n_ops); + } + + dpif_flow_dump_state_uninit(udpif->dpif, state); +} + +static void +revalidator_sweep__(struct revalidator *revalidator, bool purge) + OVS_NO_THREAD_SAFETY_ANALYSIS +{ + struct dump_op ops[REVALIDATE_MAX_BATCH]; + struct udpif_key *ukey, *next; + size_t n_ops; + + n_ops = 0; + + /* During garbage collection, this revalidator completely owns its ukeys + * map, and therefore doesn't need to do any locking. */ + HMAP_FOR_EACH_SAFE (ukey, next, hmap_node, revalidator->ukeys) { + if (!purge && ukey->mark) { + ukey->mark = false; + } else if (!ukey->flow_exists) { + ukey_delete(revalidator, ukey); + } else { + struct dump_op *op = &ops[n_ops++]; + + /* If we have previously seen a flow in the datapath, but didn't + * see it during the most recent dump, delete it. This allows us + * to clean up the ukey and keep the statistics consistent. 
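+             * (push_dump_ops() pushes the final stats delta for each such
+             * flow and then frees its ukey.)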
*/ + dump_op_init(op, ukey->key, ukey->key_len, ukey); + if (n_ops == REVALIDATE_MAX_BATCH) { + push_dump_ops(revalidator, ops, n_ops); + n_ops = 0; + } + } + } + + if (n_ops) { + push_dump_ops(revalidator, ops, n_ops); + } +} + +static void +revalidator_sweep(struct revalidator *revalidator) +{ + revalidator_sweep__(revalidator, false); +} + +static void +revalidator_purge(struct revalidator *revalidator) +{ + revalidator_sweep__(revalidator, true); +} + +static void +upcall_unixctl_show(struct unixctl_conn *conn, int argc OVS_UNUSED, + const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED) +{ + struct ds ds = DS_EMPTY_INITIALIZER; + struct udpif *udpif; + + LIST_FOR_EACH (udpif, list_node, &all_udpifs) { + unsigned int flow_limit; + size_t i; + + atomic_read(&udpif->flow_limit, &flow_limit); + + ds_put_format(&ds, "%s:\n", dpif_name(udpif->dpif)); + ds_put_format(&ds, "\tflows : (current %"PRIu64")" + " (avg %u) (max %u) (limit %u)\n", udpif_get_n_flows(udpif), + udpif->avg_n_flows, udpif->max_n_flows, flow_limit); + ds_put_format(&ds, "\tdump duration : %lldms\n", udpif->dump_duration); + + ds_put_char(&ds, '\n'); + for (i = 0; i < n_revalidators; i++) { + struct revalidator *revalidator = &udpif->revalidators[i]; + + ovs_mutex_lock(&udpif->ukeys[i].mutex); + ds_put_format(&ds, "\t%s: (keys %"PRIuSIZE")\n", revalidator->name, + hmap_count(&udpif->ukeys[i].hmap)); + ovs_mutex_unlock(&udpif->ukeys[i].mutex); + } + } + + unixctl_command_reply(conn, ds_cstr(&ds)); + ds_destroy(&ds); +} + +/* Disable using the megaflows. + * + * This command is only needed for advanced debugging, so it's not + * documented in the man page. */ +static void +upcall_unixctl_disable_megaflows(struct unixctl_conn *conn, + int argc OVS_UNUSED, + const char *argv[] OVS_UNUSED, + void *aux OVS_UNUSED) +{ + atomic_store(&enable_megaflows, false); + udpif_flush_all_datapaths(); + unixctl_command_reply(conn, "megaflows disabled"); +} + +/* Re-enable using megaflows. + * + * This command is only needed for advanced debugging, so it's not + * documented in the man page. */ +static void +upcall_unixctl_enable_megaflows(struct unixctl_conn *conn, + int argc OVS_UNUSED, + const char *argv[] OVS_UNUSED, + void *aux OVS_UNUSED) +{ + atomic_store(&enable_megaflows, true); + udpif_flush_all_datapaths(); + unixctl_command_reply(conn, "megaflows enabled"); +} + +/* Set the flow limit. + * + * This command is only needed for advanced debugging, so it's not + * documented in the man page. */ +static void +upcall_unixctl_set_flow_limit(struct unixctl_conn *conn, + int argc OVS_UNUSED, + const char *argv[] OVS_UNUSED, + void *aux OVS_UNUSED) +{ + struct ds ds = DS_EMPTY_INITIALIZER; + struct udpif *udpif; + unsigned int flow_limit = atoi(argv[1]); + + LIST_FOR_EACH (udpif, list_node, &all_udpifs) { + atomic_store(&udpif->flow_limit, flow_limit); } + ds_put_format(&ds, "set flow_limit to %u\n", flow_limit); + unixctl_command_reply(conn, ds_cstr(&ds)); + ds_destroy(&ds); }
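
The adaptive flow-limit policy in udpif_revalidator() above is easier to see
in isolation. The following standalone sketch restates it; it is
illustrative only and not part of the patch. 'max_limit' stands in for the
ofproto_flow_limit global, and MIN()/MAX() are OVS's util.h macros:

    /* Recomputes the datapath flow limit after a flow dump that took
     * 'duration_ms' (clamped to >= 1 by the caller, as in the patch) and
     * found 'n_flows' flows. */
    static unsigned int
    adjust_flow_limit(unsigned int flow_limit, long long int duration_ms,
                      size_t n_flows, unsigned int max_limit)
    {
        if (duration_ms > 2000) {
            /* A dump this slow means the datapath is badly oversubscribed:
             * divide the limit by the dump time in seconds. */
            flow_limit /= duration_ms / 1000;
        } else if (duration_ms > 1300) {
            /* Getting slow: back off by a quarter. */
            flow_limit = flow_limit * 3 / 4;
        } else if (duration_ms < 1000 && n_flows > 2000
                   && flow_limit < n_flows * 1000 / duration_ms) {
            /* Fast dump of a busy datapath: room to grow. */
            flow_limit += 1000;
        }
        return MIN(max_limit, MAX(flow_limit, 1000));
    }

For example, a dump that takes 3500 ms divides the limit by 3, while a
600 ms dump over a datapath holding 5000 flows raises a 5000-flow limit by
1000 (5000 < 5000 * 1000 / 600, about 8333), capped at 'max_limit'.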