+static const char *
+port_open_type(const char *datapath_type, const char *port_type)
+{
+ return dpif_port_open_type(datapath_type, port_type);
+}
+
+/* Type functions. */
+
+static struct ofproto_dpif *
+lookup_ofproto_dpif_by_port_name(const char *name)
+{
+ struct ofproto_dpif *ofproto;
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ if (sset_contains(&ofproto->ports, name)) {
+ return ofproto;
+ }
+ }
+
+ return NULL;
+}
+
+static int
+type_run(const char *type)
+{
+ static long long int push_timer = LLONG_MIN;
+ struct dpif_backer *backer;
+ char *devname;
+ int error;
+
+ backer = shash_find_data(&all_dpif_backers, type);
+ if (!backer) {
+ /* This is not necessarily a problem, since backers are only
+ * created on demand. */
+ return 0;
+ }
+
+ dpif_run(backer->dpif);
+
+ /* The most natural place to push facet statistics is when they're pulled
+ * from the datapath. However, when there are many flows in the datapath,
+ * this expensive operation can occur so frequently that it reduces our
+ * ability to quickly set up flows. To reduce the cost, we push statistics
+ * here instead. */
+ if (time_msec() > push_timer) {
+ push_timer = time_msec() + 2000;
+ push_all_stats();
+ }
+
+ if (backer->need_revalidate
+ || !tag_set_is_empty(&backer->revalidate_set)) {
+ struct tag_set revalidate_set = backer->revalidate_set;
+ bool need_revalidate = backer->need_revalidate;
+ struct ofproto_dpif *ofproto;
+ struct simap_node *node;
+ struct simap tmp_backers;
+
+ /* Handle tunnel garbage collection. */
+ simap_init(&tmp_backers);
+ simap_swap(&backer->tnl_backers, &tmp_backers);
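+ /* All tunnel backers were just moved into 'tmp_backers'. Backers that
+ * are still in use are moved back to 'backer->tnl_backers' below;
+ * whatever is left over afterwards is stale and gets deleted from the
+ * datapath. */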
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ struct ofport_dpif *iter;
+
+ if (backer != ofproto->backer) {
+ continue;
+ }
+
+ HMAP_FOR_EACH (iter, up.hmap_node, &ofproto->up.ports) {
+ const char *dp_port;
+
+ if (!iter->tnl_port) {
+ continue;
+ }
+
+ dp_port = netdev_vport_get_dpif_port(iter->up.netdev);
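+ /* Reclaim the datapath port from 'tmp_backers' if this tunnel
+ * already had a backer; otherwise reuse another existing backer
+ * or add a new datapath port. */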
+ node = simap_find(&tmp_backers, dp_port);
+ if (node) {
+ simap_put(&backer->tnl_backers, dp_port, node->data);
+ simap_delete(&tmp_backers, node);
+ node = simap_find(&backer->tnl_backers, dp_port);
+ } else {
+ node = simap_find(&backer->tnl_backers, dp_port);
+ if (!node) {
+ uint32_t odp_port = UINT32_MAX;
+
+ if (!dpif_port_add(backer->dpif, iter->up.netdev,
+ &odp_port)) {
+ simap_put(&backer->tnl_backers, dp_port, odp_port);
+ node = simap_find(&backer->tnl_backers, dp_port);
+ }
+ }
+ }
+
+ iter->odp_port = node ? node->data : OVSP_NONE;
+ if (tnl_port_reconfigure(&iter->up, iter->odp_port,
+ &iter->tnl_port)) {
+ backer->need_revalidate = REV_RECONFIGURE;
+ }
+ }
+ }
+
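+ /* Anything still in 'tmp_backers' no longer backs a tunnel, so
+ * remove those ports from the datapath. */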
+ SIMAP_FOR_EACH (node, &tmp_backers) {
+ dpif_port_del(backer->dpif, node->data);
+ }
+ simap_destroy(&tmp_backers);
+
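+ /* Bump the coverage counter that matches the reason for
+ * revalidation. */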
+ switch (backer->need_revalidate) {
+ case REV_RECONFIGURE: COVERAGE_INC(rev_reconfigure); break;
+ case REV_STP: COVERAGE_INC(rev_stp); break;
+ case REV_PORT_TOGGLED: COVERAGE_INC(rev_port_toggled); break;
+ case REV_FLOW_TABLE: COVERAGE_INC(rev_flow_table); break;
+ case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
+ }
+
+ if (backer->need_revalidate) {
+ /* Clear the drop_keys in case we should now be accepting some
+ * formerly dropped flows. */
+ drop_key_clear(backer);
+ }
+
+ /* Clear the revalidation flags. */
+ tag_set_init(&backer->revalidate_set);
+ backer->need_revalidate = 0;
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ struct facet *facet, *next;
+
+ if (ofproto->backer != backer) {
+ continue;
+ }
+
+ HMAP_FOR_EACH_SAFE (facet, next, hmap_node, &ofproto->facets) {
+ if (need_revalidate
+ || tag_set_intersects(&revalidate_set, facet->tags)) {
+ facet_revalidate(facet);
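+ /* Revalidating many facets can take a while, so handle a
+ * rate-limited batch of upcalls along the way. */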
+ run_fast_rl();
+ }
+ }
+ }
+ }
+
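+ /* Expire old flows when the expiration timer fires. expire()
+ * returns the number of milliseconds to wait before expiring
+ * again. */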
+ if (timer_expired(&backer->next_expiration)) {
+ int delay = expire(backer);
+ timer_set_duration(&backer->next_expiration, delay);
+ }
+
+ /* Check for port changes in the dpif. */
+ while ((error = dpif_port_poll(backer->dpif, &devname)) == 0) {
+ struct ofproto_dpif *ofproto;
+ struct dpif_port port;
+
+ /* Don't report on the datapath's device. */
+ if (!strcmp(devname, dpif_base_name(backer->dpif))) {
+ goto next;
+ }
+
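+ /* Ports that back tunnels are managed by the tunnel garbage
+ * collection above, so don't report changes on them either. */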
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
+ &all_ofproto_dpifs) {
+ if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
+ goto next;
+ }
+ }
+
+ ofproto = lookup_ofproto_dpif_by_port_name(devname);
+ if (dpif_port_query_by_name(backer->dpif, devname, &port)) {
+ /* The port was removed. If we know which ofproto it
+ * belonged to, report it through poll_set(). If we don't,
+ * it may be notifying us of a removal we initiated, so
+ * ignore it. If there's a pending ENOBUFS, let it stand,
+ * since everything will be reevaluated. */
+ if (ofproto && ofproto->port_poll_errno != ENOBUFS) {
+ sset_add(&ofproto->port_poll_set, devname);
+ ofproto->port_poll_errno = 0;
+ }
+ } else if (!ofproto) {
+ /* The port was added, but we don't know with which
+ * ofproto we should associate it. Delete it. */
+ dpif_port_del(backer->dpif, port.port_no);
+ }
+ dpif_port_destroy(&port);
+
+ next:
+ free(devname);
+ }
+
+ if (error != EAGAIN) {
+ struct ofproto_dpif *ofproto;
+
+ /* There was some sort of error, so propagate it to all
+ * ofprotos that use this backer. */
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
+ &all_ofproto_dpifs) {
+ if (ofproto->backer == backer) {
+ sset_clear(&ofproto->port_poll_set);
+ ofproto->port_poll_errno = error;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+dpif_backer_run_fast(struct dpif_backer *backer, int max_batch)
+{
+ unsigned int work;
+
+ /* Handle one or more batches of upcalls, until there's nothing left to do
+ * or until we do a fixed total amount of work.
+ *
+ * We do work in batches because it can be much cheaper to set up a number
+ * of flows and fire off their packets all at once. We do multiple batches
+ * because in some cases handling a packet can cause another packet to be
+ * queued almost immediately as part of the return flow. Both
+ * optimizations can make major improvements on some benchmarks and
+ * presumably for real traffic as well. */
+ work = 0;
+ while (work < max_batch) {
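+ /* handle_upcalls() returns the number of upcalls it handled, or a
+ * negative errno value on error. */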
+ int retval = handle_upcalls(backer, max_batch - work);
+ if (retval <= 0) {
+ return -retval;
+ }
+ work += retval;
+ }
+
+ return 0;
+}
+
+static int
+type_run_fast(const char *type)
+{
+ struct dpif_backer *backer;
+
+ backer = shash_find_data(&all_dpif_backers, type);
+ if (!backer) {
+ /* This is not necessarily a problem, since backers are only
+ * created on demand. */
+ return 0;
+ }
+
+ return dpif_backer_run_fast(backer, FLOW_MISS_MAX_BATCH);
+}
+
+static void
+run_fast_rl(void)
+{
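+ /* 'port_rl' limits the per-port fast path to at most once every
+ * 200 ms; 'backer_rl' lets one small batch of upcalls through for
+ * every ten calls to this function. */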
+ static long long int port_rl = LLONG_MIN;
+ static unsigned int backer_rl = 0;
+
+ if (time_msec() >= port_rl) {
+ struct ofproto_dpif *ofproto;
+ struct ofport_dpif *ofport;
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
+ port_run_fast(ofport);
+ }
+ }
+ port_rl = time_msec() + 200;
+ }
+
+ /* XXX: We have to be careful not to do too much work in this function. If
+ * we call dpif_backer_run_fast() too often, or with too large a batch,
+ * performance improves significantly, but at a cost. It's possible for the
+ * number of flows in the datapath to increase without bound, and for poll
+ * loops to take tens of seconds. The correct long-term solution to this
+ * problem is to separate flow miss handling into its own thread so it
+ * isn't affected by revalidations and expirations. Until then, this is
+ * the best we can do. */
+ if (++backer_rl >= 10) {
+ struct shash_node *node;
+
+ backer_rl = 0;
+ SHASH_FOR_EACH (node, &all_dpif_backers) {
+ dpif_backer_run_fast(node->data, 1);
+ }
+ }
+}
+
+static void
+type_wait(const char *type)
+{
+ struct dpif_backer *backer;
+
+ backer = shash_find_data(&all_dpif_backers, type);
+ if (!backer) {
+ /* This is not necessarily a problem, since backers are only
+ * created on demand. */
+ return;
+ }
+
+ timer_wait(&backer->next_expiration);
+}
+\f