/*
- * Copyright (c) 2009, 2010, 2011, 2012 Nicira, Inc.
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <errno.h>
-#include "autopath.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
+#include "netdev-vport.h"
#include "netdev.h"
#include "netlink.h"
#include "nx-match.h"
#include "ofproto-dpif-sflow.h"
#include "poll-loop.h"
#include "simap.h"
+#include "smap.h"
#include "timer.h"
+#include "tunnel.h"
#include "unaligned.h"
#include "unixctl.h"
#include "vlan-bitmap.h"
struct ofport_dpif;
struct ofproto_dpif;
+struct flow_miss;
+struct facet;
struct rule_dpif {
struct rule up;
static void rule_credit_stats(struct rule_dpif *,
const struct dpif_flow_stats *);
-static void flow_push_stats(struct rule_dpif *, const struct flow *,
- const struct dpif_flow_stats *);
+static void flow_push_stats(struct facet *, const struct dpif_flow_stats *);
static tag_type rule_calculate_tag(const struct flow *,
const struct minimask *, uint32_t basis);
static void rule_invalidate(const struct rule_dpif *);
* this flow when actions change header fields. */
struct flow flow;
+ /* Stack for the push and pop actions.
+ * Each stack element is of type "union mf_subvalue". */
+ struct ofpbuf stack;
+ union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
+
/* The packet corresponding to 'flow', or a null pointer if we are
* revalidating without a packet to refer to. */
const struct ofpbuf *packet;
uint32_t orig_skb_priority; /* Priority when packet arrived. */
uint8_t table_id; /* OpenFlow table ID where flow was found. */
uint32_t sflow_n_outputs; /* Number of output ports. */
- uint16_t sflow_odp_port; /* Output port for composing sFlow action. */
+ uint32_t sflow_odp_port; /* Output port for composing sFlow action. */
uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
bool exit; /* No further actions should be processed. */
- struct flow orig_flow; /* Copy of original flow. */
+};
+
+/* Initial values of fields of the packet that may be changed during
+ * flow processing but are needed later. */
+struct initial_vals {
+ /* This is the value of vlan_tci in the packet as actually received from
+ * dpif. This is the same as the facet's flow.vlan_tci unless the packet
+ * was received via a VLAN splinter. In that case, this value is 0
+ * (because the packet as actually received from the dpif had no 802.1Q
+ * tag) but the facet's flow.vlan_tci is set to the VLAN that the splinter
+ * represents.
+ *
+ * This member should be removed when the VLAN splinters feature is no
+ * longer needed. */
+ ovs_be16 vlan_tci;
+
+ /* If received on a tunnel, the IP TOS value of the tunnel. */
+ uint8_t tunnel_ip_tos;
};
static void action_xlate_ctx_init(struct action_xlate_ctx *,
struct ofproto_dpif *, const struct flow *,
- ovs_be16 initial_tci, struct rule_dpif *,
+ const struct initial_vals *initial_vals,
+ struct rule_dpif *,
uint8_t tcp_flags, const struct ofpbuf *);
static void xlate_actions(struct action_xlate_ctx *,
const struct ofpact *ofpacts, size_t ofpacts_len,
static void xlate_actions_for_side_effects(struct action_xlate_ctx *,
const struct ofpact *ofpacts,
size_t ofpacts_len);
+static void xlate_table_action(struct action_xlate_ctx *, uint16_t in_port,
+ uint8_t table_id, bool may_packet_in);
static size_t put_userspace_action(const struct ofproto_dpif *,
struct ofpbuf *odp_actions,
struct list list_node; /* In struct facet's 'facets' list. */
struct facet *facet; /* Owning facet. */
- /* Key.
- *
- * To save memory in the common case, 'key' is NULL if 'key_fitness' is
- * ODP_FIT_PERFECT, that is, odp_flow_key_from_flow() can accurately
- * regenerate the ODP flow key from ->facet->flow. */
enum odp_key_fitness key_fitness;
struct nlattr *key;
int key_len;
long long int used; /* Time last used; time created if not used. */
+ long long int created; /* Time created. */
uint64_t dp_packet_count; /* Last known packet count in the datapath. */
uint64_t dp_byte_count; /* Last known byte count in the datapath. */
enum slow_path_reason slow; /* 0 if fast path may be used. */
enum subfacet_path path; /* Installed in datapath? */
- /* This value is normally the same as ->facet->flow.vlan_tci. Only VLAN
- * splinters can cause it to differ. This value should be removed when
- * the VLAN splinters feature is no longer needed. */
- ovs_be16 initial_tci; /* Initial VLAN TCI value. */
+ /* Initial values of the packet that may be needed later. */
+ struct initial_vals initial_vals;
+
+ /* Datapath port the packet arrived on. This is needed to remove
+ * flows for ports that are no longer part of the bridge. Once a
+ * port has been removed from the bridge, its OpenFlow port number
+ * (the only port identifier in the flow definition) can no longer
+ * be mapped back to a datapath port number, so we record the
+ * datapath port here. */
+ uint32_t odp_in_port;
};
-static struct subfacet *subfacet_create(struct facet *, enum odp_key_fitness,
- const struct nlattr *key,
- size_t key_len, ovs_be16 initial_tci,
+#define SUBFACET_DESTROY_MAX_BATCH 50
+
+static struct subfacet *subfacet_create(struct facet *, struct flow_miss *miss,
long long int now);
static struct subfacet *subfacet_find(struct ofproto_dpif *,
- const struct nlattr *key, size_t key_len);
+ const struct nlattr *key, size_t key_len,
+ uint32_t key_hash);
static void subfacet_destroy(struct subfacet *);
static void subfacet_destroy__(struct subfacet *);
-static void subfacet_get_key(struct subfacet *, struct odputil_keybuf *,
- struct ofpbuf *key);
+static void subfacet_destroy_batch(struct ofproto_dpif *,
+ struct subfacet **, int n);
static void subfacet_reset_dp_stats(struct subfacet *,
struct dpif_flow_stats *);
static void subfacet_update_time(struct subfacet *, long long int used);
/* Storage for a single subfacet, to reduce malloc() time and space
* overhead. (A facet always has at least one subfacet and in the common
- * case has exactly one subfacet.) */
+ * case has exactly one subfacet. However, 'one_subfacet' may not
+ * always be valid, since it could have been removed after newer
+ * subfacets were pushed onto the 'subfacets' list.) */
struct subfacet one_subfacet;
+
+ long long int learn_rl; /* Rate limiter for facet_learn(). */
};
static struct facet *facet_create(struct rule_dpif *,
static void facet_push_stats(struct facet *);
static void facet_learn(struct facet *);
static void facet_account(struct facet *);
+static void push_all_stats(void);
+
+static struct subfacet *facet_get_subfacet(struct facet *);
static bool facet_is_controller_flow(struct facet *);
struct ofport_dpif {
+ struct hmap_node odp_port_node; /* In dpif_backer's "odp_to_ofport_map". */
struct ofport up;
uint32_t odp_port;
struct list bundle_node; /* In struct ofbundle's "ports" list. */
struct cfm *cfm; /* Connectivity Fault Management, if any. */
tag_type tag; /* Tag associated with this port. */
- uint32_t bond_stable_id; /* stable_id to use as bond slave, or 0. */
bool may_enable; /* May be enabled in bonds. */
long long int carrier_seq; /* Carrier status changes. */
+ struct tnl_port *tnl_port; /* Tunnel handle, or null. */
/* Spanning tree. */
struct stp_port *stp_port; /* Spanning Tree Protocol, if any. */
static void vsp_remove(struct ofport_dpif *);
static void vsp_add(struct ofport_dpif *, uint16_t realdev_ofp_port, int vid);
+static uint32_t ofp_port_to_odp_port(const struct ofproto_dpif *,
+ uint16_t ofp_port);
+static uint16_t odp_port_to_ofp_port(const struct ofproto_dpif *,
+ uint32_t odp_port);
+
static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
{
- assert(ofport->ofproto->ofproto_class == &ofproto_dpif_class);
+ ovs_assert(ofport->ofproto->ofproto_class == &ofproto_dpif_class);
return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
}
static void port_wait(struct ofport_dpif *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_clear_priorities(struct ofport_dpif *);
+static void run_fast_rl(void);
struct dpif_completion {
struct list list_node;
COVERAGE_DEFINE(rev_flow_table);
COVERAGE_DEFINE(rev_inconsistency);
+/* Drop keys are odp flow keys that have drop flows installed in the kernel.
+ * These are datapath flows that have no associated ofproto; if they did, we
+ * would use facets. */
+struct drop_key {
+ struct hmap_node hmap_node; /* In dpif_backer's 'drop_keys' hmap. */
+ struct nlattr *key; /* ODP flow key. */
+ size_t key_len; /* Length of 'key' in bytes. */
+};
+
+/* All datapaths of a given type share a single dpif backer instance. */
+struct dpif_backer {
+ char *type; /* Datapath type (e.g. "system"). */
+ int refcount; /* Number of ofprotos using this backer. */
+ struct dpif *dpif; /* Shared datapath handle. */
+ struct timer next_expiration; /* When to next expire idle flows. */
+ struct hmap odp_to_ofport_map; /* ODP port to ofport mapping. */
+
+ struct simap tnl_backers; /* Set of dpif ports backing tunnels. */
+
+ /* Facet revalidation flags applying to facets which use this backer. */
+ enum revalidate_reason need_revalidate; /* Revalidate every facet. */
+ struct tag_set revalidate_set; /* Revalidate only matching facets. */
+
+ struct hmap drop_keys; /* Set of dropped odp keys. */
+};
+
+/* All existing dpif_backer instances, indexed by ofproto->up.type. */
+static struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);
+
+static void drop_key_clear(struct dpif_backer *);
+static struct ofport_dpif *
+odp_port_to_ofport(const struct dpif_backer *, uint32_t odp_port);
+
+static void dpif_stats_update_hit_count(struct ofproto_dpif *ofproto,
+ uint64_t delta);
+struct avg_subfacet_rates {
+ double add_rate; /* Moving average of new flows created per minute. */
+ double del_rate; /* Moving average of flows deleted per minute. */
+};
+static void show_dp_rates(struct ds *ds, const char *heading,
+ const struct avg_subfacet_rates *rates);
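+/* Computes an exponentially weighted moving average, folding 'new' into
+ * '*avg' as the newest, most heavily weighted sample; 'base' controls how
+ * quickly older samples decay. */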
+static void exp_mavg(double *avg, int base, double new);
+
struct ofproto_dpif {
struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
struct ofproto up;
- struct dpif *dpif;
+ struct dpif_backer *backer;
/* Special OpenFlow rules. */
struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
bool has_mirrors;
bool has_bonded_bundles;
- /* Expiration. */
- struct timer next_expiration;
-
/* Facets. */
struct hmap facets;
struct hmap subfacets;
struct governor *governor;
+ long long int consistency_rl; /* Time of next allowed consistency check. */
/* Revalidation. */
struct table_dpif tables[N_TABLES];
- enum revalidate_reason need_revalidate;
- struct tag_set revalidate_set;
/* Support for debugging async flow mods. */
struct list completions;
/* VLAN splinters. */
struct hmap realdev_vid_map; /* (realdev,vid) -> vlandev. */
struct hmap vlandev_map; /* vlandev -> (realdev,vid). */
+
+ /* Ports. */
+ struct sset ports; /* Set of standard port names. */
+ struct sset ghost_ports; /* Ports with no datapath port. */
+ struct sset port_poll_set; /* Queued names for port_poll() reply. */
+ int port_poll_errno; /* Last errno for port_poll() reply. */
+
+ /* Per-ofproto dpif statistics. */
+ uint64_t n_hit;
+ uint64_t n_missed;
+
+ /* Subfacet statistics.
+ *
+ * These keep track of the total number of subfacets added and deleted and
+ * flow life span. They are useful for computing the flow rate statistics
+ * exposed via "ovs-appctl dpif/show". The goal is to learn about
+ * traffic patterns in ways that we can use later to improve Open vSwitch
+ * performance in new situations. */
+ long long int created; /* Time (in ms) when this ofproto was created. */
+ unsigned int max_n_subfacet; /* Maximum number of subfacets observed. */
+
+ /* The average number of subfacets... */
+ struct avg_subfacet_rates hourly; /* ...over the last hour. */
+ struct avg_subfacet_rates daily; /* ...over the last day. */
+ long long int last_minute; /* Last time 'hourly' was updated. */
+
+ /* Number of subfacets added or deleted since 'last_minute'. */
+ unsigned int subfacet_add_count;
+ unsigned int subfacet_del_count;
+
+ /* Number of subfacets added or deleted from 'created' to 'last_minute'. */
+ unsigned long long int total_subfacet_add_count;
+ unsigned long long int total_subfacet_del_count;
+
+ /* Sum, over all subfacets that have been added and later deleted, of the
+ * number of milliseconds each one existed. */
+
+ /* Incremented by the number of currently existing subfacets, each
+ * time we pull statistics from the kernel. */
+ unsigned long long int total_subfacet_count;
+
+ /* Number of times we pull statistics from the kernel. */
+ unsigned long long int n_update_stats;
};
+static unsigned long long int avg_subfacet_life_span(
+ const struct ofproto_dpif *);
+static double avg_subfacet_count(const struct ofproto_dpif *ofproto);
+static void update_moving_averages(struct ofproto_dpif *ofproto);
+static void update_max_subfacet_count(struct ofproto_dpif *ofproto);
/* Defer flow mod completion until "ovs-appctl ofproto/unclog"? (Useful only
* for debugging the asynchronous flow_mod implementation.) */
static struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
{
- assert(ofproto->ofproto_class == &ofproto_dpif_class);
+ ovs_assert(ofproto->ofproto_class == &ofproto_dpif_class);
return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}
static struct ofport_dpif *get_odp_port(const struct ofproto_dpif *,
uint32_t odp_port);
static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
- const struct ofpbuf *, ovs_be16 initial_tci,
- struct ds *);
+ const struct ofpbuf *,
+ const struct initial_vals *, struct ds *);
/* Packet processing. */
static void update_learning_table(struct ofproto_dpif *,
struct ofbundle *);
/* Upcalls. */
#define FLOW_MISS_MAX_BATCH 50
-static int handle_upcalls(struct ofproto_dpif *, unsigned int max_batch);
+static int handle_upcalls(struct dpif_backer *, unsigned int max_batch);
/* Flow expiration. */
-static int expire(struct ofproto_dpif *);
+static int expire(struct dpif_backer *);
/* NetFlow. */
static void send_netflow_active_timeouts(struct ofproto_dpif *);
const struct flow *flow);
/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+
+/* Initial mappings of port names to bridges. */
+static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports);
\f
/* Factory functions. */
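+/* Implements the ofproto 'init' hook: saves a copy of the port-to-bridge
+ * hints in 'iface_hints' so that construct() can later claim ports that
+ * already exist in a datapath. */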
+static void
+init(const struct shash *iface_hints)
+{
+ struct shash_node *node;
+
+ /* Make a local copy, since we don't own 'iface_hints' elements. */
+ SHASH_FOR_EACH(node, iface_hints) {
+ const struct iface_hint *orig_hint = node->data;
+ struct iface_hint *new_hint = xmalloc(sizeof *new_hint);
+
+ new_hint->br_name = xstrdup(orig_hint->br_name);
+ new_hint->br_type = xstrdup(orig_hint->br_type);
+ new_hint->ofp_port = orig_hint->ofp_port;
+
+ shash_add(&init_ofp_ports, node->name, new_hint);
+ }
+}
+
static void
enumerate_types(struct sset *types)
{
static int
enumerate_names(const char *type, struct sset *names)
{
- return dp_enumerate_names(type, names);
+ struct ofproto_dpif *ofproto;
+
+ sset_clear(names);
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ if (strcmp(type, ofproto->up.type)) {
+ continue;
+ }
+ sset_add(names, ofproto->up.name);
+ }
+
+ return 0;
}
static int
return error;
}
\f
+static const char *
+port_open_type(const char *datapath_type, const char *port_type)
+{
+ return dpif_port_open_type(datapath_type, port_type);
+}
+
+/* Type functions. */
+
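+/* Returns the ofproto_dpif that owns the port named 'name', or NULL if no
+ * ofproto claims it. */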
+static struct ofproto_dpif *
+lookup_ofproto_dpif_by_port_name(const char *name)
+{
+ struct ofproto_dpif *ofproto;
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ if (sset_contains(&ofproto->ports, name)) {
+ return ofproto;
+ }
+ }
+
+ return NULL;
+}
+
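+/* Performs periodic maintenance for all datapaths of 'type': pushes facet
+ * statistics, garbage collects tunnel backers, revalidates facets, expires
+ * flows, and processes datapath port changes. */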
+static int
+type_run(const char *type)
+{
+ static long long int push_timer = LLONG_MIN;
+ struct dpif_backer *backer;
+ char *devname;
+ int error;
+
+ backer = shash_find_data(&all_dpif_backers, type);
+ if (!backer) {
+ /* This is not necessarily a problem, since backers are only
+ * created on demand. */
+ return 0;
+ }
+
+ dpif_run(backer->dpif);
+
+ /* The most natural place to push facet statistics is when they're pulled
+ * from the datapath. However, when there are many flows in the datapath,
+ * this expensive operation can occur so frequently that it reduces our
+ * ability to quickly set up flows. To reduce the cost, we push statistics
+ * here instead. */
+ if (time_msec() > push_timer) {
+ push_timer = time_msec() + 2000;
+ push_all_stats();
+ }
+
+ if (backer->need_revalidate
+ || !tag_set_is_empty(&backer->revalidate_set)) {
+ struct tag_set revalidate_set = backer->revalidate_set;
+ bool need_revalidate = backer->need_revalidate;
+ struct ofproto_dpif *ofproto;
+ struct simap_node *node;
+ struct simap tmp_backers;
+
+ /* Handle tunnel garbage collection: keep (or create) a datapath port
+ * for each dpif port that still backs a tunnel, moving it from
+ * 'tmp_backers' back into 'tnl_backers'; any port remaining in
+ * 'tmp_backers' afterward is unused and is deleted below. */
+ simap_init(&tmp_backers);
+ simap_swap(&backer->tnl_backers, &tmp_backers);
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ struct ofport_dpif *iter;
+
+ if (backer != ofproto->backer) {
+ continue;
+ }
+
+ HMAP_FOR_EACH (iter, up.hmap_node, &ofproto->up.ports) {
+ const char *dp_port;
+
+ if (!iter->tnl_port) {
+ continue;
+ }
+
+ dp_port = netdev_vport_get_dpif_port(iter->up.netdev);
+ node = simap_find(&tmp_backers, dp_port);
+ if (node) {
+ simap_put(&backer->tnl_backers, dp_port, node->data);
+ simap_delete(&tmp_backers, node);
+ node = simap_find(&backer->tnl_backers, dp_port);
+ } else {
+ node = simap_find(&backer->tnl_backers, dp_port);
+ if (!node) {
+ uint32_t odp_port = UINT32_MAX;
+
+ if (!dpif_port_add(backer->dpif, iter->up.netdev,
+ &odp_port)) {
+ simap_put(&backer->tnl_backers, dp_port, odp_port);
+ node = simap_find(&backer->tnl_backers, dp_port);
+ }
+ }
+ }
+
+ iter->odp_port = node ? node->data : OVSP_NONE;
+ if (tnl_port_reconfigure(&iter->up, iter->odp_port,
+ &iter->tnl_port)) {
+ backer->need_revalidate = REV_RECONFIGURE;
+ }
+ }
+ }
+
+ SIMAP_FOR_EACH (node, &tmp_backers) {
+ dpif_port_del(backer->dpif, node->data);
+ }
+ simap_destroy(&tmp_backers);
+
+ switch (backer->need_revalidate) {
+ case REV_RECONFIGURE: COVERAGE_INC(rev_reconfigure); break;
+ case REV_STP: COVERAGE_INC(rev_stp); break;
+ case REV_PORT_TOGGLED: COVERAGE_INC(rev_port_toggled); break;
+ case REV_FLOW_TABLE: COVERAGE_INC(rev_flow_table); break;
+ case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
+ }
+
+ if (backer->need_revalidate) {
+ /* Clear the drop_keys in case we should now be accepting some
+ * formerly dropped flows. */
+ drop_key_clear(backer);
+ }
+
+ /* Clear the revalidation flags. */
+ tag_set_init(&backer->revalidate_set);
+ backer->need_revalidate = 0;
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ struct facet *facet, *next;
+
+ if (ofproto->backer != backer) {
+ continue;
+ }
+
+ HMAP_FOR_EACH_SAFE (facet, next, hmap_node, &ofproto->facets) {
+ if (need_revalidate
+ || tag_set_intersects(&revalidate_set, facet->tags)) {
+ facet_revalidate(facet);
+ run_fast_rl();
+ }
+ }
+ }
+ }
+
+ if (timer_expired(&backer->next_expiration)) {
+ int delay = expire(backer);
+ timer_set_duration(&backer->next_expiration, delay);
+ }
+
+ /* Check for port changes in the dpif. */
+ while ((error = dpif_port_poll(backer->dpif, &devname)) == 0) {
+ struct ofproto_dpif *ofproto;
+ struct dpif_port port;
+
+ /* Don't report on the datapath's device. */
+ if (!strcmp(devname, dpif_base_name(backer->dpif))) {
+ goto next;
+ }
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
+ &all_ofproto_dpifs) {
+ if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
+ goto next;
+ }
+ }
+
+ ofproto = lookup_ofproto_dpif_by_port_name(devname);
+ if (dpif_port_query_by_name(backer->dpif, devname, &port)) {
+ /* The port was removed. If we know the datapath,
+ * report it through poll_set(). If we don't, it may be
+ * notifying us of a removal we initiated, so ignore it.
+ * If there's a pending ENOBUFS, let it stand, since
+ * everything will be reevaluated. */
+ if (ofproto && ofproto->port_poll_errno != ENOBUFS) {
+ sset_add(&ofproto->port_poll_set, devname);
+ ofproto->port_poll_errno = 0;
+ }
+ } else if (!ofproto) {
+ /* The port was added, but we don't know with which
+ * ofproto we should associate it. Delete it. */
+ dpif_port_del(backer->dpif, port.port_no);
+ }
+ dpif_port_destroy(&port);
+
+ next:
+ free(devname);
+ }
+
+ if (error != EAGAIN) {
+ struct ofproto_dpif *ofproto;
+
+ /* There was some sort of error, so propagate it to all
+ * ofprotos that use this backer. */
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
+ &all_ofproto_dpifs) {
+ if (ofproto->backer == backer) {
+ sset_clear(&ofproto->port_poll_set);
+ ofproto->port_poll_errno = error;
+ }
+ }
+ }
+
+ return 0;
+}
+
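+/* Handles upcalls from 'backer' in batches of at most 'max_batch' until none
+ * remain or the work budget is exhausted. Returns 0 if successful, otherwise
+ * a positive errno value. */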
+static int
+dpif_backer_run_fast(struct dpif_backer *backer, int max_batch)
+{
+ unsigned int work;
+
+ /* Handle one or more batches of upcalls, until there's nothing left to do
+ * or until we do a fixed total amount of work.
+ *
+ * We do work in batches because it can be much cheaper to set up a number
+ * of flows and fire off their packets all at once. We do multiple batches
+ * because in some cases handling a packet can cause another packet to be
+ * queued almost immediately as part of the return flow. Both
+ * optimizations can make major improvements on some benchmarks and
+ * presumably for real traffic as well. */
+ work = 0;
+ while (work < max_batch) {
+ int retval = handle_upcalls(backer, max_batch - work);
+ if (retval <= 0) {
+ return -retval;
+ }
+ work += retval;
+ }
+
+ return 0;
+}
+
+static int
+type_run_fast(const char *type)
+{
+ struct dpif_backer *backer;
+
+ backer = shash_find_data(&all_dpif_backers, type);
+ if (!backer) {
+ /* This is not necessarily a problem, since backers are only
+ * created on demand. */
+ return 0;
+ }
+
+ return dpif_backer_run_fast(backer, FLOW_MISS_MAX_BATCH);
+}
+
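+/* Rate-limited fast-path processing, suitable for calling from potentially
+ * long-running loops (such as facet revalidation) so that CFM packets and
+ * upcalls keep being handled. */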
+static void
+run_fast_rl(void)
+{
+ static long long int port_rl = LLONG_MIN;
+ static unsigned int backer_rl = 0;
+
+ if (time_msec() >= port_rl) {
+ struct ofproto_dpif *ofproto;
+ struct ofport_dpif *ofport;
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+
+ HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
+ port_run_fast(ofport);
+ }
+ }
+ port_rl = time_msec() + 200;
+ }
+
+ /* XXX: We have to be careful not to do too much work in this function. If
+ * we call dpif_backer_run_fast() too often, or with too large a batch,
+ * performance improves significantly, but at a cost. It's possible for the
+ * number of flows in the datapath to increase without bound, and for poll
+ * loops to take tens of seconds. The correct solution to this problem,
+ * long term, is to separate flow miss handling into its own thread so it
+ * isn't affected by revalidations and expirations. Until then, this is
+ * the best we can do. */
+ if (++backer_rl >= 10) {
+ struct shash_node *node;
+
+ backer_rl = 0;
+ SHASH_FOR_EACH (node, &all_dpif_backers) {
+ dpif_backer_run_fast(node->data, 1);
+ }
+ }
+}
+
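+/* Arranges for the poll loop to wake up when type_run() has work to do for
+ * datapaths of 'type' (currently just flow expiration). */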
+static void
+type_wait(const char *type)
+{
+ struct dpif_backer *backer;
+
+ backer = shash_find_data(&all_dpif_backers, type);
+ if (!backer) {
+ /* This is not necessarily a problem, since backers are only
+ * created on demand. */
+ return;
+ }
+
+ timer_wait(&backer->next_expiration);
+}
+\f
/* Basic life-cycle. */
static int add_internal_flows(struct ofproto_dpif *);
free(ofproto);
}
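+/* Releases a reference to 'backer'. When the last reference is released,
+ * destroys the backer's state and closes its dpif. */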
+static void
+close_dpif_backer(struct dpif_backer *backer)
+{
+ struct shash_node *node;
+
+ ovs_assert(backer->refcount > 0);
+
+ if (--backer->refcount) {
+ return;
+ }
+
+ drop_key_clear(backer);
+ hmap_destroy(&backer->drop_keys);
+
+ simap_destroy(&backer->tnl_backers);
+ hmap_destroy(&backer->odp_to_ofport_map);
+ node = shash_find(&all_dpif_backers, backer->type);
+ free(backer->type);
+ shash_delete(&all_dpif_backers, node);
+ dpif_close(backer->dpif);
+
+ free(backer);
+}
+
+/* Datapath port slated for removal. */
+struct odp_garbage {
+ struct list list_node; /* In 'garbage_list' in open_dpif_backer(). */
+ uint32_t odp_port; /* Datapath port number. */
+};
+
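+/* Opens the dpif backer shared by all datapaths of 'type', creating it (and
+ * removing stale datapaths) if it does not already exist, and stores it in
+ * '*backerp'. Returns 0 if successful, otherwise a positive errno value. */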
+static int
+open_dpif_backer(const char *type, struct dpif_backer **backerp)
+{
+ struct dpif_backer *backer;
+ struct dpif_port_dump port_dump;
+ struct dpif_port port;
+ struct shash_node *node;
+ struct list garbage_list;
+ struct odp_garbage *garbage, *next;
+ struct sset names;
+ char *backer_name;
+ const char *name;
+ int error;
+
+ backer = shash_find_data(&all_dpif_backers, type);
+ if (backer) {
+ backer->refcount++;
+ *backerp = backer;
+ return 0;
+ }
+
+ backer_name = xasprintf("ovs-%s", type);
+
+ /* Remove any existing datapaths, since we assume we're the only
+ * userspace entity controlling the datapath. */
+ sset_init(&names);
+ dp_enumerate_names(type, &names);
+ SSET_FOR_EACH(name, &names) {
+ struct dpif *old_dpif;
+
+ /* Don't remove our backer if it exists. */
+ if (!strcmp(name, backer_name)) {
+ continue;
+ }
+
+ if (dpif_open(name, type, &old_dpif)) {
+ VLOG_WARN("couldn't open old datapath %s to remove it", name);
+ } else {
+ dpif_delete(old_dpif);
+ dpif_close(old_dpif);
+ }
+ }
+ sset_destroy(&names);
+
+ backer = xmalloc(sizeof *backer);
+
+ error = dpif_create_and_open(backer_name, type, &backer->dpif);
+ free(backer_name);
+ if (error) {
+ VLOG_ERR("failed to open datapath of type %s: %s", type,
+ strerror(error));
+ free(backer);
+ return error;
+ }
+
+ backer->type = xstrdup(type);
+ backer->refcount = 1;
+ hmap_init(&backer->odp_to_ofport_map);
+ hmap_init(&backer->drop_keys);
+ timer_set_duration(&backer->next_expiration, 1000);
+ backer->need_revalidate = 0;
+ simap_init(&backer->tnl_backers);
+ tag_set_init(&backer->revalidate_set);
+ *backerp = backer;
+
+ dpif_flow_flush(backer->dpif);
+
+ /* Loop through the ports already on the datapath and remove any
+ * that we don't need anymore. */
+ list_init(&garbage_list);
+ dpif_port_dump_start(&port_dump, backer->dpif);
+ while (dpif_port_dump_next(&port_dump, &port)) {
+ node = shash_find(&init_ofp_ports, port.name);
+ if (!node && strcmp(port.name, dpif_base_name(backer->dpif))) {
+ garbage = xmalloc(sizeof *garbage);
+ garbage->odp_port = port.port_no;
+ list_push_front(&garbage_list, &garbage->list_node);
+ }
+ }
+ dpif_port_dump_done(&port_dump);
+
+ LIST_FOR_EACH_SAFE (garbage, next, list_node, &garbage_list) {
+ dpif_port_del(backer->dpif, garbage->odp_port);
+ list_remove(&garbage->list_node);
+ free(garbage);
+ }
+
+ shash_add(&all_dpif_backers, type, backer);
+
+ error = dpif_recv_set(backer->dpif, true);
+ if (error) {
+ VLOG_ERR("failed to listen on datapath of type %s: %s",
+ type, strerror(error));
+ close_dpif_backer(backer);
+ return error;
+ }
+
+ return error;
+}
+
static int
construct(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- const char *name = ofproto->up.name;
+ struct shash_node *node, *next;
int max_ports;
int error;
int i;
- error = dpif_create_and_open(name, ofproto->up.type, &ofproto->dpif);
+ error = open_dpif_backer(ofproto->up.type, &ofproto->backer);
if (error) {
- VLOG_ERR("failed to open datapath %s: %s", name, strerror(error));
return error;
}
- max_ports = dpif_get_max_ports(ofproto->dpif);
+ max_ports = dpif_get_max_ports(ofproto->backer->dpif);
ofproto_init_max_ports(ofproto_, MIN(max_ports, OFPP_MAX));
ofproto->n_matches = 0;
- dpif_flow_flush(ofproto->dpif);
- dpif_recv_purge(ofproto->dpif);
-
- error = dpif_recv_set(ofproto->dpif, true);
- if (error) {
- VLOG_ERR("failed to listen on datapath %s: %s", name, strerror(error));
- dpif_close(ofproto->dpif);
- return error;
- }
-
ofproto->netflow = NULL;
ofproto->sflow = NULL;
ofproto->stp = NULL;
}
ofproto->has_bonded_bundles = false;
- timer_set_duration(&ofproto->next_expiration, 1000);
-
hmap_init(&ofproto->facets);
hmap_init(&ofproto->subfacets);
ofproto->governor = NULL;
+ ofproto->consistency_rl = LLONG_MIN;
for (i = 0; i < N_TABLES; i++) {
struct table_dpif *table = &ofproto->tables[i];
table->other_table = NULL;
table->basis = random_uint32();
}
- ofproto->need_revalidate = 0;
- tag_set_init(&ofproto->revalidate_set);
list_init(&ofproto->completions);
hmap_init(&ofproto->vlandev_map);
hmap_init(&ofproto->realdev_vid_map);
+ sset_init(&ofproto->ports);
+ sset_init(&ofproto->ghost_ports);
+ sset_init(&ofproto->port_poll_set);
+ ofproto->port_poll_errno = 0;
+
+ SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
+ struct iface_hint *iface_hint = node->data;
+
+ if (!strcmp(iface_hint->br_name, ofproto->up.name)) {
+ /* Check if the datapath already has this port. */
+ if (dpif_port_exists(ofproto->backer->dpif, node->name)) {
+ sset_add(&ofproto->ports, node->name);
+ }
+
+ free(iface_hint->br_name);
+ free(iface_hint->br_type);
+ free(iface_hint);
+ shash_delete(&init_ofp_ports, node);
+ }
+ }
+
hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
hash_string(ofproto->up.name, 0));
memset(&ofproto->stats, 0, sizeof ofproto->stats);
error = add_internal_flows(ofproto);
ofproto->up.tables[TBL_INTERNAL].flags = OFTABLE_HIDDEN | OFTABLE_READONLY;
+ ofproto->n_hit = 0;
+ ofproto->n_missed = 0;
+
+ ofproto->max_n_subfacet = 0;
+ ofproto->created = time_msec();
+ ofproto->last_minute = ofproto->created;
+ memset(&ofproto->hourly, 0, sizeof ofproto->hourly);
+ memset(&ofproto->daily, 0, sizeof ofproto->daily);
+ ofproto->subfacet_add_count = 0;
+ ofproto->subfacet_del_count = 0;
+ ofproto->total_subfacet_add_count = 0;
+ ofproto->total_subfacet_del_count = 0;
+ ofproto->total_subfacet_life_span = 0;
+ ofproto->total_subfacet_count = 0;
+ ofproto->n_update_stats = 0;
+
return error;
}
}
*rulep = rule_dpif_lookup__(ofproto, &fm.match.flow, TBL_INTERNAL);
- assert(*rulep != NULL);
+ ovs_assert(*rulep != NULL);
return 0;
}
hmap_destroy(&ofproto->vlandev_map);
hmap_destroy(&ofproto->realdev_vid_map);
- dpif_close(ofproto->dpif);
+ sset_destroy(&ofproto->ports);
+ sset_destroy(&ofproto->ghost_ports);
+ sset_destroy(&ofproto->port_poll_set);
+
+ close_dpif_backer(ofproto->backer);
}
static int
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct ofport_dpif *ofport;
- unsigned int work;
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
port_run_fast(ofport);
}
- /* Handle one or more batches of upcalls, until there's nothing left to do
- * or until we do a fixed total amount of work.
- *
- * We do work in batches because it can be much cheaper to set up a number
- * of flows and fire off their patches all at once. We do multiple batches
- * because in some cases handling a packet can cause another packet to be
- * queued almost immediately as part of the return flow. Both
- * optimizations can make major improvements on some benchmarks and
- * presumably for real traffic as well. */
- work = 0;
- while (work < FLOW_MISS_MAX_BATCH) {
- int retval = handle_upcalls(ofproto, FLOW_MISS_MAX_BATCH - work);
- if (retval <= 0) {
- return -retval;
- }
- work += retval;
- }
return 0;
}
if (!clogged) {
complete_operations(ofproto);
}
- dpif_run(ofproto->dpif);
error = run_fast(ofproto_);
if (error) {
return error;
}
- if (timer_expired(&ofproto->next_expiration)) {
- int delay = expire(ofproto);
- timer_set_duration(&ofproto->next_expiration, delay);
- }
-
if (ofproto->netflow) {
if (netflow_run(ofproto->netflow)) {
send_netflow_active_timeouts(ofproto);
}
stp_run(ofproto);
- mac_learning_run(ofproto->ml, &ofproto->revalidate_set);
-
- /* Now revalidate if there's anything to do. */
- if (ofproto->need_revalidate
- || !tag_set_is_empty(&ofproto->revalidate_set)) {
- struct tag_set revalidate_set = ofproto->revalidate_set;
- bool revalidate_all = ofproto->need_revalidate;
- struct facet *facet;
-
- switch (ofproto->need_revalidate) {
- case REV_RECONFIGURE: COVERAGE_INC(rev_reconfigure); break;
- case REV_STP: COVERAGE_INC(rev_stp); break;
- case REV_PORT_TOGGLED: COVERAGE_INC(rev_port_toggled); break;
- case REV_FLOW_TABLE: COVERAGE_INC(rev_flow_table); break;
- case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
- }
-
- /* Clear the revalidation flags. */
- tag_set_init(&ofproto->revalidate_set);
- ofproto->need_revalidate = 0;
-
- HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
- if (revalidate_all
- || tag_set_intersects(&revalidate_set, facet->tags)) {
- facet_revalidate(facet);
- }
- }
- }
+ mac_learning_run(ofproto->ml, &ofproto->backer->revalidate_set);
/* Check the consistency of a random facet, to aid debugging. */
- if (!hmap_is_empty(&ofproto->facets) && !ofproto->need_revalidate) {
+ if (time_msec() >= ofproto->consistency_rl
+ && !hmap_is_empty(&ofproto->facets)
+ && !ofproto->backer->need_revalidate) {
struct facet *facet;
+ ofproto->consistency_rl = time_msec() + 250;
+
facet = CONTAINER_OF(hmap_random_node(&ofproto->facets),
struct facet, hmap_node);
- if (!tag_set_intersects(&ofproto->revalidate_set, facet->tags)) {
+ if (!tag_set_intersects(&ofproto->backer->revalidate_set,
+ facet->tags)) {
if (!facet_check_consistency(facet)) {
- ofproto->need_revalidate = REV_INCONSISTENCY;
+ ofproto->backer->need_revalidate = REV_INCONSISTENCY;
}
}
}
poll_immediate_wake();
}
- dpif_wait(ofproto->dpif);
- dpif_recv_wait(ofproto->dpif);
+ dpif_wait(ofproto->backer->dpif);
+ dpif_recv_wait(ofproto->backer->dpif);
if (ofproto->sflow) {
dpif_sflow_wait(ofproto->sflow);
}
- if (!tag_set_is_empty(&ofproto->revalidate_set)) {
+ if (!tag_set_is_empty(&ofproto->backer->revalidate_set)) {
poll_immediate_wake();
}
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
}
mac_learning_wait(ofproto->ml);
stp_wait(ofproto);
- if (ofproto->need_revalidate) {
+ if (ofproto->backer->need_revalidate) {
/* Shouldn't happen, but if it does just go around again. */
VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
poll_immediate_wake();
- } else {
- timer_wait(&ofproto->next_expiration);
}
if (ofproto->governor) {
governor_wait(ofproto->governor);
flush(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct facet *facet, *next_facet;
-
- HMAP_FOR_EACH_SAFE (facet, next_facet, hmap_node, &ofproto->facets) {
- /* Mark the facet as not installed so that facet_remove() doesn't
- * bother trying to uninstall it. There is no point in uninstalling it
- * individually since we are about to blow away all the facets with
- * dpif_flow_flush(). */
- struct subfacet *subfacet;
+ struct subfacet *subfacet, *next_subfacet;
+ struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
+ int n_batch;
- LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
- subfacet->path = SF_NOT_INSTALLED;
- subfacet->dp_packet_count = 0;
- subfacet->dp_byte_count = 0;
+ n_batch = 0;
+ HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
+ &ofproto->subfacets) {
+ if (subfacet->path != SF_NOT_INSTALLED) {
+ batch[n_batch++] = subfacet;
+ if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
+ subfacet_destroy_batch(ofproto, batch, n_batch);
+ n_batch = 0;
+ }
+ } else {
+ subfacet_destroy(subfacet);
}
- facet_remove(facet);
}
- dpif_flow_flush(ofproto->dpif);
+
+ if (n_batch > 0) {
+ subfacet_destroy_batch(ofproto, batch, n_batch);
+ }
}
static void
strcpy(ots->name, "classifier");
- dpif_get_dp_stats(ofproto->dpif, &s);
+ dpif_get_dp_stats(ofproto->backer->dpif, &s);
+
ots->lookup_count = htonll(s.n_hit + s.n_missed);
ots->matched_count = htonll(s.n_hit + ofproto->n_matches);
}
{
struct ofport_dpif *port = ofport_dpif_cast(port_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
+ const struct netdev *netdev = port->up.netdev;
+ struct dpif_port dpif_port;
+ int error;
- ofproto->need_revalidate = REV_RECONFIGURE;
- port->odp_port = ofp_port_to_odp_port(port->up.ofp_port);
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
port->bundle = NULL;
port->cfm = NULL;
port->tag = tag_create_random();
port->may_enable = true;
port->stp_port = NULL;
port->stp_state = STP_DISABLED;
+ port->tnl_port = NULL;
hmap_init(&port->priorities);
port->realdev_ofp_port = 0;
port->vlandev_vid = 0;
- port->carrier_seq = netdev_get_carrier_resets(port->up.netdev);
+ port->carrier_seq = netdev_get_carrier_resets(netdev);
+
+ if (netdev_vport_is_patch(netdev)) {
+ /* XXX By bailing out here, we don't do required sFlow work. */
+ port->odp_port = OVSP_NONE;
+ return 0;
+ }
+
+ error = dpif_port_query_by_name(ofproto->backer->dpif,
+ netdev_vport_get_dpif_port(netdev),
+ &dpif_port);
+ if (error) {
+ return error;
+ }
+
+ port->odp_port = dpif_port.port_no;
+
+ if (netdev_get_tunnel_config(netdev)) {
+ port->tnl_port = tnl_port_add(&port->up, port->odp_port);
+ } else {
+ /* Sanity-check that a mapping doesn't already exist. This
+ * shouldn't happen for non-tunnel ports. */
+ if (odp_port_to_ofp_port(ofproto, port->odp_port) != OFPP_NONE) {
+ VLOG_ERR("port %s already has an OpenFlow port number",
+ dpif_port.name);
+ dpif_port_destroy(&dpif_port);
+ return EBUSY;
+ }
+
+ hmap_insert(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node,
+ hash_int(port->odp_port, 0));
+ }
+ dpif_port_destroy(&dpif_port);
if (ofproto->sflow) {
- dpif_sflow_add_port(ofproto->sflow, port_);
+ dpif_sflow_add_port(ofproto->sflow, port_, port->odp_port);
}
return 0;
{
struct ofport_dpif *port = ofport_dpif_cast(port_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
+ const char *dp_port_name = netdev_vport_get_dpif_port(port->up.netdev);
+ const char *devname = netdev_get_name(port->up.netdev);
+
+ if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
+ /* The underlying device is still there, so delete it. This
+ * happens when the ofproto is being destroyed, since the caller
+ * assumes that removal of attached ports will happen as part of
+ * destruction. */
+ if (!port->tnl_port) {
+ dpif_port_del(ofproto->backer->dpif, port->odp_port);
+ }
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ }
- ofproto->need_revalidate = REV_RECONFIGURE;
+ if (port->odp_port != OVSP_NONE && !port->tnl_port) {
+ hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node);
+ }
+
+ tnl_port_del(port->tnl_port);
+ sset_find_and_delete(&ofproto->ports, devname);
+ sset_find_and_delete(&ofproto->ghost_ports, devname);
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
bundle_remove(port_);
set_cfm(port_, NULL);
if (ofproto->sflow) {
if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD |
OFPUTIL_PC_NO_PACKET_IN)) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
bundle_update(port->bundle);
if (!ds) {
struct ofport_dpif *ofport;
- ds = ofproto->sflow = dpif_sflow_create(ofproto->dpif);
+ ds = ofproto->sflow = dpif_sflow_create();
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
- dpif_sflow_add_port(ds, &ofport->up);
+ dpif_sflow_add_port(ds, &ofport->up, ofport->odp_port);
}
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
dpif_sflow_set_options(ds, sflow_options);
} else {
if (ds) {
dpif_sflow_destroy(ds);
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
ofproto->sflow = NULL;
}
}
struct ofproto_dpif *ofproto;
ofproto = ofproto_dpif_cast(ofport->up.ofproto);
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
ofport->cfm = cfm_create(netdev_get_name(ofport->up.netdev));
}
return error;
}
-static int
-get_cfm_fault(const struct ofport *ofport_)
-{
- struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
-
- return ofport->cfm ? cfm_get_fault(ofport->cfm) : -1;
-}
-
-static int
-get_cfm_opup(const struct ofport *ofport_)
-{
- struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
-
- return ofport->cfm ? cfm_get_opup(ofport->cfm) : -1;
-}
-
-static int
-get_cfm_remote_mpids(const struct ofport *ofport_, const uint64_t **rmps,
- size_t *n_rmps)
+static bool
+get_cfm_status(const struct ofport *ofport_,
+ struct ofproto_cfm_status *status)
{
struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
if (ofport->cfm) {
- cfm_get_remote_mpids(ofport->cfm, rmps, n_rmps);
- return 0;
+ status->faults = cfm_get_fault(ofport->cfm);
+ status->remote_opstate = cfm_get_opup(ofport->cfm);
+ status->health = cfm_get_health(ofport->cfm);
+ cfm_get_remote_mpids(ofport->cfm, &status->rmps, &status->n_rmps);
+ return true;
} else {
- return -1;
+ return false;
}
}
-
-static int
-get_cfm_health(const struct ofport *ofport_)
-{
- struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
-
- return ofport->cfm ? cfm_get_health(ofport->cfm) : -1;
-}
\f
/* Spanning Tree. */
/* Only revalidate flows if the configuration changed. */
if (!s != !ofproto->stp) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
if (s) {
if (stp_learn_in_state(ofport->stp_state)
!= stp_learn_in_state(state)) {
/* xxx Learning action flows should also be flushed. */
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml,
+ &ofproto->backer->revalidate_set);
}
fwd_change = stp_forward_in_state(ofport->stp_state)
!= stp_forward_in_state(state);
- ofproto->need_revalidate = REV_STP;
+ ofproto->backer->need_revalidate = REV_STP;
ofport->stp_state = state;
ofport->stp_state_entered = time_msec();
}
if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
}
}
}
uint8_t dscp;
dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
- if (dpif_queue_to_priority(ofproto->dpif, qdscp_list[i].queue,
+ if (dpif_queue_to_priority(ofproto->backer->dpif, qdscp_list[i].queue,
&priority)) {
continue;
}
pdscp = xmalloc(sizeof *pdscp);
pdscp->priority = priority;
pdscp->dscp = dscp;
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
if (pdscp->dscp != dscp) {
pdscp->dscp = dscp;
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0));
if (!hmap_is_empty(&ofport->priorities)) {
ofport_clear_priorities(ofport);
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
hmap_swap(&new, &ofport->priorities);
struct mac_learning *ml = ofproto->ml;
struct mac_entry *mac, *next_mac;
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
if (mac->port.p == bundle) {
if (all_ofprotos) {
e = mac_learning_lookup(o->ml, mac->mac, mac->vlan,
NULL);
if (e) {
- tag_set_add(&o->revalidate_set, e->tag);
mac_learning_expire(o->ml, e);
}
}
{
struct ofbundle *bundle = port->bundle;
- bundle->ofproto->need_revalidate = REV_RECONFIGURE;
+ bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
list_remove(&port->bundle_node);
port->bundle = NULL;
static bool
bundle_add_port(struct ofbundle *bundle, uint32_t ofp_port,
- struct lacp_slave_settings *lacp,
- uint32_t bond_stable_id)
+ struct lacp_slave_settings *lacp)
{
struct ofport_dpif *port;
}
if (port->bundle != bundle) {
- bundle->ofproto->need_revalidate = REV_RECONFIGURE;
+ bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
if (port->bundle) {
bundle_del_port(port);
}
}
}
if (lacp) {
- port->bundle->ofproto->need_revalidate = REV_RECONFIGURE;
+ bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
lacp_slave_register(bundle->lacp, port, lacp);
}
- port->bond_stable_id = bond_stable_id;
-
return true;
}
mirror_destroy(m);
} else if (hmapx_find_and_delete(&m->srcs, bundle)
|| hmapx_find_and_delete(&m->dsts, bundle)) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
}
}
return 0;
}
- assert(s->n_slaves == 1 || s->bond != NULL);
- assert((s->lacp != NULL) == (s->lacp_slaves != NULL));
+ ovs_assert(s->n_slaves == 1 || s->bond != NULL);
+ ovs_assert((s->lacp != NULL) == (s->lacp_slaves != NULL));
bundle = bundle_lookup(ofproto, aux);
if (!bundle) {
/* LACP. */
if (s->lacp) {
if (!bundle->lacp) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
bundle->lacp = lacp_create();
}
lacp_configure(bundle->lacp, s->lacp);
ok = true;
for (i = 0; i < s->n_slaves; i++) {
if (!bundle_add_port(bundle, s->slaves[i],
- s->lacp ? &s->lacp_slaves[i] : NULL,
- s->bond_stable_ids ? s->bond_stable_ids[i] : 0)) {
+ s->lacp ? &s->lacp_slaves[i] : NULL)) {
ok = false;
}
}
found: ;
}
}
- assert(list_size(&bundle->ports) <= s->n_slaves);
+ ovs_assert(list_size(&bundle->ports) <= s->n_slaves);
if (list_is_empty(&bundle->ports)) {
bundle_destroy(bundle);
bundle->ofproto->has_bonded_bundles = true;
if (bundle->bond) {
if (bond_reconfigure(bundle->bond, s->bond)) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
} else {
bundle->bond = bond_create(s->bond);
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
- bond_slave_register(bundle->bond, port, port->bond_stable_id,
- port->up.netdev);
+ bond_slave_register(bundle->bond, port, port->up.netdev);
}
} else {
bond_destroy(bundle->bond);
bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
}
- bond_run(bundle->bond, &bundle->ofproto->revalidate_set,
+ bond_run(bundle->bond, &bundle->ofproto->backer->revalidate_set,
lacp_status(bundle->lacp));
if (bond_should_send_learning_packets(bundle->bond)) {
bundle_send_learning_packets(bundle);
}
}
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
ofproto->has_mirrors = true;
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml,
+ &ofproto->backer->revalidate_set);
mirror_update_dups(ofproto);
return 0;
}
ofproto = mirror->ofproto;
- ofproto->need_revalidate = REV_RECONFIGURE;
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
return 0;
}
+ push_all_stats();
+
*packets = mirror->packet_count;
*bytes = mirror->byte_count;
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
}
return 0;
}
forward_bpdu_changed(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
static void
-set_mac_idle_time(struct ofproto *ofproto_, unsigned int idle_time)
+set_mac_table_config(struct ofproto *ofproto_, unsigned int idle_time,
+ size_t max_entries)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
mac_learning_set_idle_time(ofproto->ml, idle_time);
+ mac_learning_set_max_entries(ofproto->ml, max_entries);
}
\f
/* Ports. */
static struct ofport_dpif *
get_odp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
{
- return get_ofp_port(ofproto, odp_port_to_ofp_port(odp_port));
+ struct ofport_dpif *port = odp_port_to_ofport(ofproto->backer, odp_port);
+ return port && &ofproto->up == port->up.ofproto ? port : NULL;
}
static void
-ofproto_port_from_dpif_port(struct ofproto_port *ofproto_port,
+ofproto_port_from_dpif_port(struct ofproto_dpif *ofproto,
+ struct ofproto_port *ofproto_port,
struct dpif_port *dpif_port)
{
ofproto_port->name = dpif_port->name;
ofproto_port->type = dpif_port->type;
- ofproto_port->ofp_port = odp_port_to_ofp_port(dpif_port->port_no);
+ ofproto_port->ofp_port = odp_port_to_ofp_port(ofproto, dpif_port->port_no);
+}
+
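+/* If 'ofport_dpif' is a patch port, returns the ofport_dpif at the other end
+ * of the patch if it can be found, otherwise NULL. */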
+static struct ofport_dpif *
+ofport_get_peer(const struct ofport_dpif *ofport_dpif)
+{
+ const struct ofproto_dpif *ofproto;
+ const char *peer;
+
+ peer = netdev_vport_patch_peer(ofport_dpif->up.netdev);
+ if (!peer) {
+ return NULL;
+ }
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ struct ofport *ofport;
+
+ ofport = shash_find_data(&ofproto->up.port_by_name, peer);
+ if (ofport && ofport->ofproto->ofproto_class == &ofproto_dpif_class) {
+ return ofport_dpif_cast(ofport);
+ }
+ }
+ return NULL;
}
static void
ofport->carrier_seq = carrier_seq;
port_run_fast(ofport);
+
+ if (ofport->tnl_port
+ && tnl_port_reconfigure(&ofport->up, ofport->odp_port,
+ &ofport->tnl_port)) {
+ ofproto_dpif_cast(ofport->up.ofproto)->backer->need_revalidate = REV_RECONFIGURE;
+ }
+
if (ofport->cfm) {
int cfm_opup = cfm_get_opup(ofport->cfm);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
if (ofproto->has_bundle_action) {
- ofproto->need_revalidate = REV_PORT_TOGGLED;
+ ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
}
}
struct dpif_port dpif_port;
int error;
- error = dpif_port_query_by_name(ofproto->dpif, devname, &dpif_port);
+ if (sset_contains(&ofproto->ghost_ports, devname)) {
+ const char *type = netdev_get_type_from_name(devname);
+
+ /* We may be called before ofproto->up.port_by_name is populated with
+ * the appropriate ofport. For this reason, we must get the name and
+ * type from the netdev layer directly. */
+ if (type) {
+ const struct ofport *ofport;
+
+ ofport = shash_find_data(&ofproto->up.port_by_name, devname);
+ ofproto_port->ofp_port = ofport ? ofport->ofp_port : OFPP_NONE;
+ ofproto_port->name = xstrdup(devname);
+ ofproto_port->type = xstrdup(type);
+ return 0;
+ }
+ return ENODEV;
+ }
+
+ if (!sset_contains(&ofproto->ports, devname)) {
+ return ENODEV;
+ }
+ error = dpif_port_query_by_name(ofproto->backer->dpif,
+ devname, &dpif_port);
if (!error) {
- ofproto_port_from_dpif_port(ofproto_port, &dpif_port);
+ ofproto_port_from_dpif_port(ofproto, ofproto_port, &dpif_port);
}
return error;
}
static int
-port_add(struct ofproto *ofproto_, struct netdev *netdev, uint16_t *ofp_portp)
+port_add(struct ofproto *ofproto_, struct netdev *netdev)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- uint16_t odp_port = UINT16_MAX;
- int error;
+ const char *dp_port_name = netdev_vport_get_dpif_port(netdev);
+ const char *devname = netdev_get_name(netdev);
- error = dpif_port_add(ofproto->dpif, netdev, &odp_port);
- if (!error) {
- *ofp_portp = odp_port_to_ofp_port(odp_port);
+ if (netdev_vport_is_patch(netdev)) {
+ sset_add(&ofproto->ghost_ports, netdev_get_name(netdev));
+ return 0;
}
- return error;
+
+ if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
+ uint32_t port_no = UINT32_MAX;
+ int error;
+
+ error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
+ if (error) {
+ return error;
+ }
+ if (netdev_get_tunnel_config(netdev)) {
+ simap_put(&ofproto->backer->tnl_backers, dp_port_name, port_no);
+ }
+ }
+
+ if (netdev_get_tunnel_config(netdev)) {
+ sset_add(&ofproto->ghost_ports, devname);
+ } else {
+ sset_add(&ofproto->ports, devname);
+ }
+ return 0;
}
static int
port_del(struct ofproto *ofproto_, uint16_t ofp_port)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- int error;
+ struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
+ int error = 0;
- error = dpif_port_del(ofproto->dpif, ofp_port_to_odp_port(ofp_port));
- if (!error) {
- struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
- if (ofport) {
+ if (!ofport) {
+ return 0;
+ }
+
+ sset_find_and_delete(&ofproto->ghost_ports,
+ netdev_get_name(ofport->up.netdev));
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ if (!ofport->tnl_port) {
+ error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port);
+ if (!error) {
/* The caller is going to close ofport->up.netdev. If this is a
* bonded port, then the bond is using that netdev, so remove it
* from the bond. The client will need to reconfigure everything
struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
int error;
+ push_all_stats();
+
error = netdev_get_stats(ofport->up.netdev, stats);
- if (!error && ofport->odp_port == OVSP_LOCAL) {
+ if (!error && ofport_->ofp_port == OFPP_LOCAL) {
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
/* ofproto->stats.tx_packets represents packets that we created
}
struct port_dump_state {
- struct dpif_port_dump dump;
- bool done;
+ uint32_t bucket; /* Iteration state for sset_at_position(). */
+ uint32_t offset; /* Iteration state for sset_at_position(). */
+ bool ghost; /* Dumping 'ghost_ports' rather than 'ports'? */
+
+ struct ofproto_port port; /* Most recently dumped port, if 'has_port'. */
+ bool has_port; /* Whether 'port' must still be destroyed. */
};
static int
-port_dump_start(const struct ofproto *ofproto_, void **statep)
+port_dump_start(const struct ofproto *ofproto_ OVS_UNUSED, void **statep)
{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct port_dump_state *state;
-
- *statep = state = xmalloc(sizeof *state);
- dpif_port_dump_start(&state->dump, ofproto->dpif);
- state->done = false;
+ *statep = xzalloc(sizeof(struct port_dump_state));
return 0;
}
static int
-port_dump_next(const struct ofproto *ofproto_ OVS_UNUSED, void *state_,
+port_dump_next(const struct ofproto *ofproto_, void *state_,
struct ofproto_port *port)
{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct port_dump_state *state = state_;
- struct dpif_port dpif_port;
+ const struct sset *sset;
+ struct sset_node *node;
- if (dpif_port_dump_next(&state->dump, &dpif_port)) {
- ofproto_port_from_dpif_port(port, &dpif_port);
- return 0;
- } else {
- int error = dpif_port_dump_done(&state->dump);
- state->done = true;
- return error ? error : EOF;
+ if (state->has_port) {
+ ofproto_port_destroy(&state->port);
+ state->has_port = false;
}
+ sset = state->ghost ? &ofproto->ghost_ports : &ofproto->ports;
+ while ((node = sset_at_position(sset, &state->bucket, &state->offset))) {
+ int error;
+
+ error = port_query_by_name(ofproto_, node->name, &state->port);
+ if (!error) {
+ *port = state->port;
+ state->has_port = true;
+ return 0;
+ } else if (error != ENODEV) {
+ return error;
+ }
+ }
+
+ if (!state->ghost) {
+ state->ghost = true;
+ state->bucket = 0;
+ state->offset = 0;
+ return port_dump_next(ofproto_, state_, port);
+ }
+
+ return EOF;
}
static int
{
struct port_dump_state *state = state_;
- if (!state->done) {
- dpif_port_dump_done(&state->dump);
+ if (state->has_port) {
+ ofproto_port_destroy(&state->port);
}
free(state);
return 0;
port_poll(const struct ofproto *ofproto_, char **devnamep)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- return dpif_port_poll(ofproto->dpif, devnamep);
+
+ if (ofproto->port_poll_errno) {
+ int error = ofproto->port_poll_errno;
+ ofproto->port_poll_errno = 0;
+ return error;
+ }
+
+ if (sset_is_empty(&ofproto->port_poll_set)) {
+ return EAGAIN;
+ }
+
+ *devnamep = sset_pop(&ofproto->port_poll_set);
+ return 0;
}
static void
port_poll_wait(const struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- dpif_port_poll_wait(ofproto->dpif);
+ dpif_port_poll_wait(ofproto->backer->dpif);
}
static int
* It's possible to batch more than that, but the benefit might be minimal. */
struct flow_miss {
struct hmap_node hmap_node;
+ struct ofproto_dpif *ofproto; /* Bridge in which the flow was received. */
struct flow flow;
enum odp_key_fitness key_fitness;
const struct nlattr *key;
size_t key_len;
- ovs_be16 initial_tci;
+ struct initial_vals initial_vals;
struct list packets;
enum dpif_upcall_type upcall_type;
+ uint32_t odp_in_port; /* Datapath port the packets arrived on. */
};
struct flow_miss_op {
struct dpif_op dpif_op;
- struct subfacet *subfacet; /* Subfacet */
void *garbage; /* Pointer to pass to free(), NULL if none. */
uint64_t stub[1024 / 8]; /* Temporary buffer. */
};
static enum slow_path_reason
process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
- const struct ofpbuf *packet)
+ const struct ofport_dpif *ofport, const struct ofpbuf *packet)
{
- struct ofport_dpif *ofport = get_ofp_port(ofproto, flow->in_port);
-
if (!ofport) {
return 0;
- }
-
- if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
+ } else if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
if (packet) {
cfm_process_heartbeat(ofport->cfm, packet);
}
stp_process_packet(ofport, packet);
}
return SLOW_STP;
+ } else {
+ return 0;
}
- return 0;
}
static struct flow_miss *
-flow_miss_find(struct hmap *todo, const struct flow *flow, uint32_t hash)
+flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto,
+ const struct flow *flow, uint32_t hash)
{
struct flow_miss *miss;
HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
- if (flow_equal(&miss->flow, flow)) {
+ if (miss->ofproto == ofproto && flow_equal(&miss->flow, flow)) {
return miss;
}
}
init_flow_miss_execute_op(struct flow_miss *miss, struct ofpbuf *packet,
struct flow_miss_op *op)
{
- if (miss->flow.vlan_tci != miss->initial_tci) {
+ if (miss->flow.vlan_tci != miss->initial_vals.vlan_tci) {
/* This packet was received on a VLAN splinter port. We
* added a VLAN to the packet to make the packet resemble
* the flow, but the actions were composed assuming that
eth_pop_vlan(packet);
}
- op->subfacet = NULL;
op->garbage = NULL;
op->dpif_op.type = DPIF_OP_EXECUTE;
op->dpif_op.u.execute.key = miss->key;
dpif_flow_stats_extract(&miss->flow, packet, now, &stats);
rule_credit_stats(rule, &stats);
- action_xlate_ctx_init(&ctx, ofproto, &miss->flow, miss->initial_tci,
- rule, 0, packet);
+ action_xlate_ctx_init(&ctx, ofproto, &miss->flow,
+ &miss->initial_vals, rule, 0, packet);
ctx.resubmit_stats = &stats;
xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
&odp_actions);
struct subfacet *subfacet;
struct ofpbuf *packet;
- subfacet = subfacet_create(facet,
- miss->key_fitness, miss->key, miss->key_len,
- miss->initial_tci, now);
+ subfacet = subfacet_create(facet, miss, now);
LIST_FOR_EACH (packet, list_node, &miss->packets) {
struct flow_miss_op *op = &ops[*n_ops];
struct dpif_execute *execute = &op->dpif_op.u.execute;
init_flow_miss_execute_op(miss, packet, op);
- op->subfacet = subfacet;
if (!subfacet->slow) {
execute->actions = subfacet->actions;
execute->actions_len = subfacet->actions_len;
struct flow_miss_op *op = &ops[(*n_ops)++];
struct dpif_flow_put *put = &op->dpif_op.u.flow_put;
- op->subfacet = subfacet;
+ subfacet->path = want_path;
+
op->garbage = NULL;
op->dpif_op.type = DPIF_OP_FLOW_PUT;
put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
}
}
-/* Handles flow miss 'miss' on 'ofproto'. May add any required datapath
- * operations to 'ops', incrementing '*n_ops' for each new op. */
+/* Handles flow miss 'miss'. May add any required datapath operations
+ * to 'ops', incrementing '*n_ops' for each new op. */
static void
-handle_flow_miss(struct ofproto_dpif *ofproto, struct flow_miss *miss,
- struct flow_miss_op *ops, size_t *n_ops)
+handle_flow_miss(struct flow_miss *miss, struct flow_miss_op *ops,
+ size_t *n_ops)
{
+ struct ofproto_dpif *ofproto = miss->ofproto;
struct facet *facet;
long long int now;
uint32_t hash;
handle_flow_miss_with_facet(miss, facet, now, ops, n_ops);
}
-/* Like odp_flow_key_to_flow(), this function converts the 'key_len' bytes of
- * OVS_KEY_ATTR_* attributes in 'key' to a flow structure in 'flow' and returns
- * an ODP_FIT_* value that indicates how well 'key' fits our expectations for
- * what a flow key should contain.
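+/* Returns the drop_key in 'backer' whose key matches 'key'/'key_len', or
+ * NULL if there is none. */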
+static struct drop_key *
+drop_key_lookup(const struct dpif_backer *backer, const struct nlattr *key,
+ size_t key_len)
+{
+ struct drop_key *drop_key;
+
+ HMAP_FOR_EACH_WITH_HASH (drop_key, hmap_node, hash_bytes(key, key_len, 0),
+ &backer->drop_keys) {
+ if (drop_key->key_len == key_len
+ && !memcmp(drop_key->key, key, key_len)) {
+ return drop_key;
+ }
+ }
+ return NULL;
+}
+
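+/* Deletes all of 'backer''s drop keys from the datapath and frees them.
+ * Called periodically so that the set of installed drop flows stays small. */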
+static void
+drop_key_clear(struct dpif_backer *backer)
+{
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);
+ struct drop_key *drop_key, *next;
+
+ HMAP_FOR_EACH_SAFE (drop_key, next, hmap_node, &backer->drop_keys) {
+ int error;
+
+ error = dpif_flow_del(backer->dpif, drop_key->key, drop_key->key_len,
+ NULL);
+ if (error && !VLOG_DROP_WARN(&rl)) {
+ struct ds ds = DS_EMPTY_INITIALIZER;
+ odp_flow_key_format(drop_key->key, drop_key->key_len, &ds);
+ VLOG_WARN("Failed to delete drop key (%s) (%s)", strerror(error),
+ ds_cstr(&ds));
+ ds_destroy(&ds);
+ }
+
+ hmap_remove(&backer->drop_keys, &drop_key->hmap_node);
+ free(drop_key->key);
+ free(drop_key);
+ }
+}
+
+/* Given a datapath, packet, and flow metadata ('backer', 'packet', and 'key'
+ * respectively), populates 'flow' with the result of odp_flow_key_to_flow().
+ * Optionally, if nonnull, populates 'fitnessp' with the fitness of 'flow' as
+ * returned by odp_flow_key_to_flow(). Also, optionally populates 'ofproto'
+ * with the ofproto_dpif, and 'odp_in_port' with the datapath in_port, that
+ * 'packet' ingressed.
*
- * This function also includes some logic to help make VLAN splinters
- * transparent to the rest of the upcall processing logic. In particular, if
- * the extracted in_port is a VLAN splinter port, it replaces flow->in_port by
- * the "real" port, sets flow->vlan_tci correctly for the VLAN of the VLAN
- * splinter port, and pushes a VLAN header onto 'packet' (if it is nonnull).
+ * If 'ofproto' is nonnull, requires 'flow''s in_port to exist. Otherwise sets
+ * 'flow''s in_port to OFPP_NONE.
*
- * Sets '*initial_tci' to the VLAN TCI with which the packet was really
- * received, that is, the actual VLAN TCI extracted by odp_flow_key_to_flow().
- * (This differs from the value returned in flow->vlan_tci only for packets
- * received on VLAN splinters.)
- */
-static enum odp_key_fitness
-ofproto_dpif_extract_flow_key(const struct ofproto_dpif *ofproto,
- const struct nlattr *key, size_t key_len,
- struct flow *flow, ovs_be16 *initial_tci,
- struct ofpbuf *packet)
+ * This function does post-processing on data returned from
+ * odp_flow_key_to_flow() to help make VLAN splinters transparent to the rest
+ * of the upcall processing logic. In particular, if the extracted in_port is
+ * a VLAN splinter port, it replaces flow->in_port by the "real" port, sets
+ * flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes
+ * a VLAN header onto 'packet' (if it is nonnull).
+ *
+ * Optionally, if 'initial_vals' is nonnull, sets 'initial_vals->vlan_tci'
+ * to the VLAN TCI with which the packet was really received, that is, the
+ * actual VLAN TCI extracted by odp_flow_key_to_flow(). (This differs from
+ * the value returned in flow->vlan_tci only for packets received on
+ * VLAN splinters.) Also, if received on an IP tunnel, sets
+ * 'initial_vals->tunnel_ip_tos' to the tunnel's IP TOS.
+ *
+ * Similarly, this function includes some logic to help with tunnels. It
+ * may modify 'flow' as necessary to make the tunneling implementation
+ * transparent to the upcall processing logic.
+ *
+ * Returns 0 if successful, ENODEV if the parsed flow has no associated ofport,
+ * or some other positive errno if there are other problems. */
+static int
+ofproto_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
+ const struct nlattr *key, size_t key_len,
+ struct flow *flow, enum odp_key_fitness *fitnessp,
+ struct ofproto_dpif **ofproto, uint32_t *odp_in_port,
+ struct initial_vals *initial_vals)
{
+ const struct ofport_dpif *port;
enum odp_key_fitness fitness;
+ int error = ENODEV;
fitness = odp_flow_key_to_flow(key, key_len, flow);
if (fitness == ODP_FIT_ERROR) {
- return fitness;
+ error = EINVAL;
+ goto exit;
}
- *initial_tci = flow->vlan_tci;
- if (vsp_adjust_flow(ofproto, flow)) {
- if (packet) {
- /* Make the packet resemble the flow, so that it gets sent to an
- * OpenFlow controller properly, so that it looks correct for
- * sFlow, and so that flow_extract() will get the correct vlan_tci
- * if it is called on 'packet'.
- *
- * The allocated space inside 'packet' probably also contains
- * 'key', that is, both 'packet' and 'key' are probably part of a
- * struct dpif_upcall (see the large comment on that structure
- * definition), so pushing data on 'packet' is in general not a
- * good idea since it could overwrite 'key' or free it as a side
- * effect. However, it's OK in this special case because we know
- * that 'packet' is inside a Netlink attribute: pushing 4 bytes
- * will just overwrite the 4-byte "struct nlattr", which is fine
- * since we don't need that header anymore. */
- eth_push_vlan(packet, flow->vlan_tci);
+ if (initial_vals) {
+ initial_vals->vlan_tci = flow->vlan_tci;
+ initial_vals->tunnel_ip_tos = flow->tunnel.ip_tos;
+ }
+
+ if (odp_in_port) {
+ *odp_in_port = flow->in_port;
+ }
+
+ if (tnl_port_should_receive(flow)) {
+ const struct ofport *ofport = tnl_port_receive(flow);
+ if (!ofport) {
+ flow->in_port = OFPP_NONE;
+ goto exit;
+ }
+ port = ofport_dpif_cast(ofport);
+
+ /* We can't reproduce 'key' from 'flow'. */
+ fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
+
+ /* XXX: Since the tunnel module is not scoped per backer, it's
+ * theoretically possible that we'll receive an ofport belonging to an
+ * entirely different datapath. In practice, this can't happen because
+ * no platform has two separate datapaths that each support
+ * tunneling. */
+ ovs_assert(ofproto_dpif_cast(port->up.ofproto)->backer == backer);
+ } else {
+ port = odp_port_to_ofport(backer, flow->in_port);
+ if (!port) {
+ flow->in_port = OFPP_NONE;
+ goto exit;
}
- /* Let the caller know that we can't reproduce 'key' from 'flow'. */
- if (fitness == ODP_FIT_PERFECT) {
- fitness = ODP_FIT_TOO_MUCH;
+ flow->in_port = port->up.ofp_port;
+ if (vsp_adjust_flow(ofproto_dpif_cast(port->up.ofproto), flow)) {
+ if (packet) {
+ /* Make the packet resemble the flow, so that it gets sent to
+ * an OpenFlow controller properly, so that it looks correct
+ * for sFlow, and so that flow_extract() will get the correct
+ * vlan_tci if it is called on 'packet'.
+ *
+ * The allocated space inside 'packet' probably also contains
+ * 'key', that is, both 'packet' and 'key' are probably part of
+ * a struct dpif_upcall (see the large comment on that
+ * structure definition), so pushing data on 'packet' is in
+ * general not a good idea since it could overwrite 'key' or
+ * free it as a side effect. However, it's OK in this special
+ * case because we know that 'packet' is inside a Netlink
+ * attribute: pushing 4 bytes will just overwrite the 4-byte
+ * "struct nlattr", which is fine since we don't need that
+ * header anymore. */
+ eth_push_vlan(packet, flow->vlan_tci);
+ }
+ /* We can't reproduce 'key' from 'flow'. */
+ fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
}
}
+ error = 0;
+
+ if (ofproto) {
+ *ofproto = ofproto_dpif_cast(port->up.ofproto);
+ }
- return fitness;
+exit:
+ if (fitnessp) {
+ *fitnessp = fitness;
+ }
+ return error;
}
static void
-handle_miss_upcalls(struct ofproto_dpif *ofproto, struct dpif_upcall *upcalls,
+handle_miss_upcalls(struct dpif_backer *backer, struct dpif_upcall *upcalls,
size_t n_upcalls)
{
struct dpif_upcall *upcall;
for (upcall = upcalls; upcall < &upcalls[n_upcalls]; upcall++) {
struct flow_miss *miss = &misses[n_misses];
struct flow_miss *existing_miss;
+ struct ofproto_dpif *ofproto;
+ uint32_t odp_in_port;
struct flow flow;
uint32_t hash;
+ int error;
- /* Obtain metadata and check userspace/kernel agreement on flow match,
- * then set 'flow''s header pointers. */
- miss->key_fitness = ofproto_dpif_extract_flow_key(
- ofproto, upcall->key, upcall->key_len,
- &flow, &miss->initial_tci, upcall->packet);
- if (miss->key_fitness == ODP_FIT_ERROR) {
+ error = ofproto_receive(backer, upcall->packet, upcall->key,
+ upcall->key_len, &flow, &miss->key_fitness,
+ &ofproto, &odp_in_port, &miss->initial_vals);
+ if (error == ENODEV) {
+ struct drop_key *drop_key;
+
+ /* Received packet on port for which we couldn't associate
+ * an ofproto. This can happen if a port is removed while
+ * traffic is being received. Print a rate-limited message
+ * in case it happens frequently. Install a drop flow so
+ * that future packets of the flow are inexpensively dropped
+ * in the kernel. */
+ VLOG_INFO_RL(&rl, "received packet on unassociated port %"PRIu32,
+ flow.in_port);
+
+ drop_key = drop_key_lookup(backer, upcall->key, upcall->key_len);
+ if (!drop_key) {
+ drop_key = xmalloc(sizeof *drop_key);
+ drop_key->key = xmemdup(upcall->key, upcall->key_len);
+ drop_key->key_len = upcall->key_len;
+
+ hmap_insert(&backer->drop_keys, &drop_key->hmap_node,
+ hash_bytes(drop_key->key, drop_key->key_len, 0));
+ dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
+ drop_key->key, drop_key->key_len, NULL, 0, NULL);
+ }
+ continue;
+ }
+ if (error) {
continue;
}
- flow_extract(upcall->packet, flow.skb_priority,
+
+ ofproto->n_missed++;
+ flow_extract(upcall->packet, flow.skb_priority, flow.skb_mark,
&flow.tunnel, flow.in_port, &miss->flow);
/* Add other packets to a to-do list. */
hash = flow_hash(&miss->flow, 0);
- existing_miss = flow_miss_find(&todo, &miss->flow, hash);
+ existing_miss = flow_miss_find(&todo, ofproto, &miss->flow, hash);
if (!existing_miss) {
hmap_insert(&todo, &miss->hmap_node, hash);
+ miss->ofproto = ofproto;
miss->key = upcall->key;
miss->key_len = upcall->key_len;
miss->upcall_type = upcall->type;
+ miss->odp_in_port = odp_in_port;
list_init(&miss->packets);
n_misses++;
* operations to batch. */
n_ops = 0;
HMAP_FOR_EACH (miss, hmap_node, &todo) {
- handle_flow_miss(ofproto, miss, flow_miss_ops, &n_ops);
+ handle_flow_miss(miss, flow_miss_ops, &n_ops);
}
- assert(n_ops <= ARRAY_SIZE(flow_miss_ops));
+ ovs_assert(n_ops <= ARRAY_SIZE(flow_miss_ops));
/* Execute batch. */
for (i = 0; i < n_ops; i++) {
dpif_ops[i] = &flow_miss_ops[i].dpif_op;
}
- dpif_operate(ofproto->dpif, dpif_ops, n_ops);
+ dpif_operate(backer->dpif, dpif_ops, n_ops);
- /* Free memory and update facets. */
+ /* Free memory. */
for (i = 0; i < n_ops; i++) {
- struct flow_miss_op *op = &flow_miss_ops[i];
-
- switch (op->dpif_op.type) {
- case DPIF_OP_EXECUTE:
- break;
-
- case DPIF_OP_FLOW_PUT:
- if (!op->dpif_op.error) {
- op->subfacet->path = subfacet_want_path(op->subfacet->slow);
- }
- break;
-
- case DPIF_OP_FLOW_DEL:
- NOT_REACHED();
- }
-
- free(op->garbage);
+ free(flow_miss_ops[i].garbage);
}
hmap_destroy(&todo);
}
}
/* "action" upcalls need a closer look. */
- memcpy(&cookie, &upcall->userdata, sizeof(cookie));
+ if (!upcall->userdata) {
+ VLOG_WARN_RL(&rl, "action upcall missing cookie");
+ return BAD_UPCALL;
+ }
+ if (nl_attr_get_size(upcall->userdata) != sizeof(cookie)) {
+ VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %zu",
+ nl_attr_get_size(upcall->userdata));
+ return BAD_UPCALL;
+ }
+ memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof(cookie));
switch (cookie.type) {
case USER_ACTION_COOKIE_SFLOW:
return SFLOW_UPCALL;
case USER_ACTION_COOKIE_UNSPEC:
default:
- VLOG_WARN_RL(&rl, "invalid user cookie : 0x%"PRIx64, upcall->userdata);
+ VLOG_WARN_RL(&rl, "invalid user cookie : 0x%"PRIx64,
+ nl_attr_get_u64(upcall->userdata));
return BAD_UPCALL;
}
}
static void
-handle_sflow_upcall(struct ofproto_dpif *ofproto,
+handle_sflow_upcall(struct dpif_backer *backer,
const struct dpif_upcall *upcall)
{
+ struct ofproto_dpif *ofproto;
union user_action_cookie cookie;
- enum odp_key_fitness fitness;
- ovs_be16 initial_tci;
struct flow flow;
+ uint32_t odp_in_port;
- fitness = ofproto_dpif_extract_flow_key(ofproto, upcall->key,
- upcall->key_len, &flow,
- &initial_tci, upcall->packet);
- if (fitness == ODP_FIT_ERROR) {
+ if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
+ &flow, NULL, &ofproto, &odp_in_port, NULL)
+ || !ofproto->sflow) {
return;
}
- memcpy(&cookie, &upcall->userdata, sizeof(cookie));
- dpif_sflow_received(ofproto->sflow, upcall->packet, &flow, &cookie);
+ memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof(cookie));
+ dpif_sflow_received(ofproto->sflow, upcall->packet, &flow,
+ odp_in_port, &cookie);
}
static int
-handle_upcalls(struct ofproto_dpif *ofproto, unsigned int max_batch)
+handle_upcalls(struct dpif_backer *backer, unsigned int max_batch)
{
struct dpif_upcall misses[FLOW_MISS_MAX_BATCH];
struct ofpbuf miss_bufs[FLOW_MISS_MAX_BATCH];
int n_misses;
int i;
- assert(max_batch <= FLOW_MISS_MAX_BATCH);
+ ovs_assert(max_batch <= FLOW_MISS_MAX_BATCH);
n_misses = 0;
for (n_processed = 0; n_processed < max_batch; n_processed++) {
ofpbuf_use_stub(buf, miss_buf_stubs[n_misses],
sizeof miss_buf_stubs[n_misses]);
- error = dpif_recv(ofproto->dpif, upcall, buf);
+ error = dpif_recv(backer->dpif, upcall, buf);
if (error) {
ofpbuf_uninit(buf);
break;
break;
case SFLOW_UPCALL:
- if (ofproto->sflow) {
- handle_sflow_upcall(ofproto, upcall);
- }
+ handle_sflow_upcall(backer, upcall);
ofpbuf_uninit(buf);
break;
}
/* Handle deferred MISS_UPCALL processing. */
- handle_miss_upcalls(ofproto, misses, n_misses);
+ handle_miss_upcalls(backer, misses, n_misses);
for (i = 0; i < n_misses; i++) {
ofpbuf_uninit(&miss_bufs[i]);
}
/* Flow expiration. */
static int subfacet_max_idle(const struct ofproto_dpif *);
-static void update_stats(struct ofproto_dpif *);
+static void update_stats(struct dpif_backer *);
static void rule_expire(struct rule_dpif *);
static void expire_subfacets(struct ofproto_dpif *, int dp_max_idle);
*
* Returns the number of milliseconds after which it should be called again. */
static int
-expire(struct ofproto_dpif *ofproto)
+expire(struct dpif_backer *backer)
{
- struct rule_dpif *rule, *next_rule;
- struct oftable *table;
- int dp_max_idle;
+ struct ofproto_dpif *ofproto;
+ int max_idle = INT32_MAX;
- /* Update stats for each flow in the datapath. */
- update_stats(ofproto);
+ /* Periodically clear out the drop keys in an effort to keep them
+ * relatively few. */
+ drop_key_clear(backer);
- /* Expire subfacets that have been idle too long. */
- dp_max_idle = subfacet_max_idle(ofproto);
- expire_subfacets(ofproto, dp_max_idle);
+ /* Update stats for each flow in the backer. */
+ update_stats(backer);
- /* Expire OpenFlow flows whose idle_timeout or hard_timeout has passed. */
- OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
- struct cls_cursor cursor;
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ struct rule *rule, *next_rule;
+ int dp_max_idle;
- cls_cursor_init(&cursor, &table->cls, NULL);
- CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
- rule_expire(rule);
+ if (ofproto->backer != backer) {
+ continue;
}
- }
- /* All outstanding data in existing flows has been accounted, so it's a
- * good time to do bond rebalancing. */
- if (ofproto->has_bonded_bundles) {
- struct ofbundle *bundle;
+ /* Keep track of the max number of flows per ofproto_dpif. */
+ update_max_subfacet_count(ofproto);
+
+ /* Expire subfacets that have been idle too long. */
+ dp_max_idle = subfacet_max_idle(ofproto);
+ expire_subfacets(ofproto, dp_max_idle);
+
+ max_idle = MIN(max_idle, dp_max_idle);
+
+ /* Expire OpenFlow flows whose idle_timeout or hard_timeout
+ * has passed. */
+ LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
+ &ofproto->up.expirable) {
+ rule_expire(rule_dpif_cast(rule));
+ }
+
+ /* All outstanding data in existing flows has been accounted, so it's a
+ * good time to do bond rebalancing. */
+ if (ofproto->has_bonded_bundles) {
+ struct ofbundle *bundle;
- HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
- if (bundle->bond) {
- bond_rebalance(bundle->bond, &ofproto->revalidate_set);
+ HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
+ if (bundle->bond) {
+ bond_rebalance(bundle->bond, &backer->revalidate_set);
+ }
}
}
}
- return MIN(dp_max_idle, 1000);
+ return MIN(max_idle, 1000);
}
/* Updates flow table statistics given that the datapath just reported 'stats'
facet_account(facet);
facet->accounted_bytes = facet->byte_count;
}
- facet_push_stats(facet);
}
/* 'key' with length 'key_len' bytes is a flow in 'dpif' that we know nothing
* about, or a flow that shouldn't be installed but was anyway. Delete it. */
static void
-delete_unexpected_flow(struct dpif *dpif,
+delete_unexpected_flow(struct ofproto_dpif *ofproto,
const struct nlattr *key, size_t key_len)
{
if (!VLOG_DROP_WARN(&rl)) {
ds_init(&s);
odp_flow_key_format(key, key_len, &s);
- VLOG_WARN("unexpected flow from datapath %s", ds_cstr(&s));
+ VLOG_WARN("unexpected flow on %s: %s", ofproto->up.name, ds_cstr(&s));
ds_destroy(&s);
}
COVERAGE_INC(facet_unexpected);
- dpif_flow_del(dpif, key, key_len, NULL);
+ dpif_flow_del(ofproto->backer->dpif, key, key_len, NULL);
}
/* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
* avoided by calling update_stats() whenever rules are created or
* deleted. However, the performance impact of making so many calls to the
* datapath does not justify the benefit of having perfectly accurate statistics.
+ *
+ * In addition, this function maintains per-ofproto flow hit counts. Patch
+ * ports are not treated specially: for example, a packet that ingresses br0
+ * and is patched into br1 increases the hit count of br0 by one but does
+ * not affect the hit or miss counts of br1.
*/
static void
-update_stats(struct ofproto_dpif *p)
+update_stats(struct dpif_backer *backer)
{
const struct dpif_flow_stats *stats;
struct dpif_flow_dump dump;
const struct nlattr *key;
size_t key_len;
- dpif_flow_dump_start(&dump, p->dpif);
+ dpif_flow_dump_start(&dump, backer->dpif);
while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
+ struct flow flow;
struct subfacet *subfacet;
+ struct ofproto_dpif *ofproto;
+ struct ofport_dpif *ofport;
+ uint32_t key_hash;
+
+ if (ofproto_receive(backer, NULL, key, key_len, &flow, NULL, &ofproto,
+ NULL, NULL)) {
+ continue;
+ }
- subfacet = subfacet_find(p, key, key_len);
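+ /* Each dumped flow contributes one sample to the averaged subfacet
+ * count statistics. */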
+ ofproto->total_subfacet_count += hmap_count(&ofproto->subfacets);
+ ofproto->n_update_stats++;
+ update_moving_averages(ofproto);
+
+ ofport = get_ofp_port(ofproto, flow.in_port);
+ if (ofport && ofport->tnl_port) {
+ netdev_vport_inc_rx(ofport->up.netdev, stats);
+ }
+
+ key_hash = odp_flow_key_hash(key, key_len);
+ subfacet = subfacet_find(ofproto, key, key_len, key_hash);
switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) {
case SF_FAST_PATH:
+ /* Update ofproto_dpif's hit count. */
+ if (stats->n_packets > subfacet->dp_packet_count) {
+ uint64_t delta = stats->n_packets - subfacet->dp_packet_count;
+ dpif_stats_update_hit_count(ofproto, delta);
+ }
+
update_subfacet_stats(subfacet, stats);
break;
case SF_NOT_INSTALLED:
default:
- delete_unexpected_flow(p->dpif, key, key_len);
+ delete_unexpected_flow(ofproto, key, key_len);
break;
}
+ run_fast_rl();
}
dpif_flow_dump_done(&dump);
}
return bucket * BUCKET_WIDTH;
}
-enum { EXPIRE_MAX_BATCH = 50 };
-
-static void
-expire_batch(struct ofproto_dpif *ofproto, struct subfacet **subfacets, int n)
-{
- struct odputil_keybuf keybufs[EXPIRE_MAX_BATCH];
- struct dpif_op ops[EXPIRE_MAX_BATCH];
- struct dpif_op *opsp[EXPIRE_MAX_BATCH];
- struct ofpbuf keys[EXPIRE_MAX_BATCH];
- struct dpif_flow_stats stats[EXPIRE_MAX_BATCH];
- int i;
-
- for (i = 0; i < n; i++) {
- ops[i].type = DPIF_OP_FLOW_DEL;
- subfacet_get_key(subfacets[i], &keybufs[i], &keys[i]);
- ops[i].u.flow_del.key = keys[i].data;
- ops[i].u.flow_del.key_len = keys[i].size;
- ops[i].u.flow_del.stats = &stats[i];
- opsp[i] = &ops[i];
- }
-
- dpif_operate(ofproto->dpif, opsp, n);
- for (i = 0; i < n; i++) {
- subfacet_reset_dp_stats(subfacets[i], &stats[i]);
- subfacets[i]->path = SF_NOT_INSTALLED;
- subfacet_destroy(subfacets[i]);
- }
-}
-
static void
expire_subfacets(struct ofproto_dpif *ofproto, int dp_max_idle)
{
long long int special_cutoff = time_msec() - 10000;
struct subfacet *subfacet, *next_subfacet;
- struct subfacet *batch[EXPIRE_MAX_BATCH];
+ struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
int n_batch;
n_batch = 0;
if (subfacet->used < cutoff) {
if (subfacet->path != SF_NOT_INSTALLED) {
batch[n_batch++] = subfacet;
- if (n_batch >= EXPIRE_MAX_BATCH) {
- expire_batch(ofproto, batch, n_batch);
+ if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
+ subfacet_destroy_batch(ofproto, batch, n_batch);
n_batch = 0;
}
} else {
}
if (n_batch > 0) {
- expire_batch(ofproto, batch, n_batch);
+ subfacet_destroy_batch(ofproto, batch, n_batch);
}
}
netflow_flow_init(&facet->nf_flow);
netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
+ facet->learn_rl = time_msec() + 500;
+
return facet;
}
}
/* Executes, within 'ofproto', the 'n_actions' actions in 'actions' on
- * 'packet', which arrived on 'in_port'.
- *
- * Takes ownership of 'packet'. */
+ * 'packet', which arrived on 'in_port'. */
static bool
execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
const struct nlattr *odp_actions, size_t actions_len,
int error;
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
- odp_flow_key_from_flow(&key, flow);
+ odp_flow_key_from_flow(&key, flow,
+ ofp_port_to_odp_port(ofproto, flow->in_port));
- error = dpif_execute(ofproto->dpif, key.data, key.size,
+ error = dpif_execute(ofproto->backer->dpif, key.data, key.size,
odp_actions, actions_len, packet);
-
- ofpbuf_delete(packet);
return !error;
}
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
struct subfacet *subfacet, *next_subfacet;
- assert(!list_is_empty(&facet->subfacets));
+ ovs_assert(!list_is_empty(&facet->subfacets));
/* First uninstall all of the subfacets to get final statistics. */
LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
facet_learn(struct facet *facet)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
+ struct subfacet *subfacet = CONTAINER_OF(list_front(&facet->subfacets),
+ struct subfacet, list_node);
+ long long int now = time_msec();
struct action_xlate_ctx ctx;
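+ /* Limit how often learn actions are re-translated: at most once every
+ * 500 ms per facet, except for facets with a FIN timeout, which must
+ * take effect promptly. */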
+ if (!facet->has_fin_timeout && now < facet->learn_rl) {
+ return;
+ }
+
+ facet->learn_rl = now + 500;
+
if (!facet->has_learn
&& !facet->has_normal
&& (!facet->has_fin_timeout
}
action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
- facet->flow.vlan_tci,
+ &subfacet->initial_vals,
facet->rule, facet->tcp_flags, NULL);
ctx.may_learn = true;
xlate_actions_for_side_effects(&ctx, facet->rule->up.ofpacts,
facet_account(struct facet *facet)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
- struct subfacet *subfacet;
+ struct subfacet *subfacet = facet_get_subfacet(facet);
const struct nlattr *a;
unsigned int left;
ovs_be16 vlan_tci;
*
* We use the actions from an arbitrary subfacet because they should all
* be equally valid for our purpose. */
- subfacet = CONTAINER_OF(list_front(&facet->subfacets),
- struct subfacet, list_node);
vlan_tci = facet->flow.vlan_tci;
NL_ATTR_FOR_EACH_UNSAFE (a, left,
subfacet->actions, subfacet->actions_len) {
struct subfacet *subfacet;
LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
- assert(!subfacet->dp_byte_count);
- assert(!subfacet->dp_packet_count);
+ ovs_assert(!subfacet->dp_byte_count);
+ ovs_assert(!subfacet->dp_packet_count);
}
facet_push_stats(facet);
facet = facet_find(ofproto, flow, hash);
if (facet
- && (ofproto->need_revalidate
- || tag_set_intersects(&ofproto->revalidate_set, facet->tags))) {
+ && (ofproto->backer->need_revalidate
+ || tag_set_intersects(&ofproto->backer->revalidate_set,
+ facet->tags))) {
facet_revalidate(facet);
+
+ /* facet_revalidate() may have destroyed 'facet'. */
+ facet = facet_find(ofproto, flow, hash);
}
return facet;
}
+/* Returns an arbitrary subfacet from 'facet'. A facet consists of one or
+ * more subfacets, and this function returns one of them. */
+static struct subfacet *
+facet_get_subfacet(struct facet *facet)
+{
+ return CONTAINER_OF(list_front(&facet->subfacets), struct subfacet,
+ list_node);
+}
+
static const char *
subfacet_path_to_string(enum subfacet_path path)
{
ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
enum subfacet_path want_path;
- struct odputil_keybuf keybuf;
struct action_xlate_ctx ctx;
- struct ofpbuf key;
struct ds s;
action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
- subfacet->initial_tci, rule, 0, NULL);
+ &subfacet->initial_vals, rule, 0, NULL);
xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
&odp_actions);
}
ds_init(&s);
- subfacet_get_key(subfacet, &keybuf, &key);
- odp_flow_key_format(key.data, key.size, &s);
+ odp_flow_key_format(subfacet->key, subfacet->key_len, &s);
ds_put_cstr(&s, ": inconsistency in subfacet");
if (want_path != subfacet->path) {
* 'facet' to the new rule and recompiles its actions.
*
* - If the rule found is the same as 'facet''s current rule, leaves 'facet'
- * where it is and recompiles its actions anyway. */
+ * where it is and recompiles its actions anyway.
+ *
+ * - If any of 'facet''s subfacets correspond to a new flow according to
+ * ofproto_receive(), 'facet' is removed. */
static void
facet_revalidate(struct facet *facet)
{
COVERAGE_INC(facet_revalidate);
+ /* Check that child subfacets still correspond to this facet. Tunnel
+ * configuration changes could cause a subfacet's OpenFlow in_port to
+ * change. */
+ LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
+ struct ofproto_dpif *recv_ofproto;
+ struct flow recv_flow;
+ int error;
+
+ error = ofproto_receive(ofproto->backer, NULL, subfacet->key,
+ subfacet->key_len, &recv_flow, NULL,
+ &recv_ofproto, NULL, NULL);
+ if (error
+ || recv_ofproto != ofproto
+ || memcmp(&recv_flow, &facet->flow, sizeof recv_flow)) {
+ facet_remove(facet);
+ return;
+ }
+ }
+
new_rule = rule_dpif_lookup(ofproto, &facet->flow);
/* Calculate new datapath actions.
enum slow_path_reason slow;
action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
- subfacet->initial_tci, new_rule, 0, NULL);
+ &subfacet->initial_vals, new_rule, 0, NULL);
xlate_actions(&ctx, new_rule->up.ofpacts, new_rule->up.ofpacts_len,
&odp_actions);
{
struct dpif_flow_stats stats;
- assert(facet->packet_count >= facet->prev_packet_count);
- assert(facet->byte_count >= facet->prev_byte_count);
- assert(facet->used >= facet->prev_used);
+ ovs_assert(facet->packet_count >= facet->prev_packet_count);
+ ovs_assert(facet->byte_count >= facet->prev_byte_count);
+ ovs_assert(facet->used >= facet->prev_used);
stats.n_packets = facet->packet_count - facet->prev_packet_count;
stats.n_bytes = facet->byte_count - facet->prev_byte_count;
facet->prev_byte_count = facet->byte_count;
facet->prev_used = facet->used;
- flow_push_stats(facet->rule, &facet->flow, &stats);
+ flow_push_stats(facet, &stats);
update_mirror_stats(ofproto_dpif_cast(facet->rule->up.ofproto),
facet->mirrors, stats.n_packets, stats.n_bytes);
}
}
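+/* Pushes the statistics of every facet in every ofproto to the rules and
+ * mirrors that the facet resubmits into, at most once every 100 ms. */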
+static void
+push_all_stats(void)
+{
+ static long long int rl = LLONG_MIN;
+ struct ofproto_dpif *ofproto;
+
+ if (time_msec() < rl) {
+ return;
+ }
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ struct facet *facet;
+
+ HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
+ facet_push_stats(facet);
+ run_fast_rl();
+ }
+ }
+
+ rl = time_msec() + 100;
+}
+
static void
rule_credit_stats(struct rule_dpif *rule, const struct dpif_flow_stats *stats)
{
ofproto_rule_update_used(&rule->up, stats->used);
}
-/* Pushes flow statistics to the rules which 'flow' resubmits into given
- * 'rule''s actions and mirrors. */
+/* Pushes flow statistics to the rules that 'facet->flow' resubmits into,
+ * as determined by 'facet->rule''s actions and mirrors. */
static void
-flow_push_stats(struct rule_dpif *rule,
- const struct flow *flow, const struct dpif_flow_stats *stats)
+flow_push_stats(struct facet *facet, const struct dpif_flow_stats *stats)
{
+ struct rule_dpif *rule = facet->rule;
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
+ struct subfacet *subfacet = facet_get_subfacet(facet);
struct action_xlate_ctx ctx;
ofproto_rule_update_used(&rule->up, stats->used);
- action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, rule,
- 0, NULL);
+ action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
+ &subfacet->initial_vals, rule, 0, NULL);
ctx.resubmit_stats = stats;
xlate_actions_for_side_effects(&ctx, rule->up.ofpacts,
rule->up.ofpacts_len);
/* Subfacets. */
static struct subfacet *
-subfacet_find__(struct ofproto_dpif *ofproto,
- const struct nlattr *key, size_t key_len, uint32_t key_hash,
- const struct flow *flow)
+subfacet_find(struct ofproto_dpif *ofproto,
+ const struct nlattr *key, size_t key_len, uint32_t key_hash)
{
struct subfacet *subfacet;
HMAP_FOR_EACH_WITH_HASH (subfacet, hmap_node, key_hash,
&ofproto->subfacets) {
- if (subfacet->key
- ? (subfacet->key_len == key_len
- && !memcmp(key, subfacet->key, key_len))
- : flow_equal(flow, &subfacet->facet->flow)) {
+ if (subfacet->key_len == key_len
+ && !memcmp(key, subfacet->key, key_len)) {
return subfacet;
}
}
}
/* Searches 'facet' (within 'ofproto') for a subfacet with the specified
- * 'key_fitness', 'key', and 'key_len'. Returns the existing subfacet if
- * there is one, otherwise creates and returns a new subfacet.
+ * 'key_fitness', 'key', and 'key_len' members of 'miss'. Returns the
+ * existing subfacet if there is one, otherwise creates and returns a
+ * new subfacet.
*
* If the returned subfacet is new, then subfacet->actions will be NULL, in
* which case the caller must populate the actions with
* subfacet_make_actions(). */
static struct subfacet *
-subfacet_create(struct facet *facet, enum odp_key_fitness key_fitness,
- const struct nlattr *key, size_t key_len,
- ovs_be16 initial_tci, long long int now)
+subfacet_create(struct facet *facet, struct flow_miss *miss,
+ long long int now)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
- uint32_t key_hash = odp_flow_key_hash(key, key_len);
+ enum odp_key_fitness key_fitness = miss->key_fitness;
+ const struct nlattr *key = miss->key;
+ size_t key_len = miss->key_len;
+ uint32_t key_hash;
struct subfacet *subfacet;
+ key_hash = odp_flow_key_hash(key, key_len);
+
if (list_is_empty(&facet->subfacets)) {
subfacet = &facet->one_subfacet;
} else {
- subfacet = subfacet_find__(ofproto, key, key_len, key_hash,
- &facet->flow);
+ subfacet = subfacet_find(ofproto, key, key_len, key_hash);
if (subfacet) {
if (subfacet->facet == facet) {
return subfacet;
list_push_back(&facet->subfacets, &subfacet->list_node);
subfacet->facet = facet;
subfacet->key_fitness = key_fitness;
- if (key_fitness != ODP_FIT_PERFECT) {
- subfacet->key = xmemdup(key, key_len);
- subfacet->key_len = key_len;
- } else {
- subfacet->key = NULL;
- subfacet->key_len = 0;
- }
+ subfacet->key = xmemdup(key, key_len);
+ subfacet->key_len = key_len;
subfacet->used = now;
+ subfacet->created = now;
subfacet->dp_packet_count = 0;
subfacet->dp_byte_count = 0;
subfacet->actions_len = 0;
? SLOW_MATCH
: 0);
subfacet->path = SF_NOT_INSTALLED;
- subfacet->initial_tci = initial_tci;
+ subfacet->initial_vals = miss->initial_vals;
+ subfacet->odp_in_port = miss->odp_in_port;
+ ofproto->subfacet_add_count++;
return subfacet;
}
-/* Searches 'ofproto' for a subfacet with the given 'key', 'key_len', and
- * 'flow'. Returns the subfacet if one exists, otherwise NULL. */
-static struct subfacet *
-subfacet_find(struct ofproto_dpif *ofproto,
- const struct nlattr *key, size_t key_len)
-{
- uint32_t key_hash = odp_flow_key_hash(key, key_len);
- enum odp_key_fitness fitness;
- struct flow flow;
-
- fitness = odp_flow_key_to_flow(key, key_len, &flow);
- if (fitness == ODP_FIT_ERROR) {
- return NULL;
- }
-
- return subfacet_find__(ofproto, key, key_len, key_hash, &flow);
-}
-
/* Uninstalls 'subfacet' from the datapath, if it is installed, removes it from
* its facet within 'ofproto', and frees it. */
static void
struct facet *facet = subfacet->facet;
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
+ /* Update ofproto stats before uninstalling the subfacet. */
+ ofproto->subfacet_del_count++;
+ ofproto->total_subfacet_life_span += (time_msec() - subfacet->created);
+
subfacet_uninstall(subfacet);
hmap_remove(&ofproto->subfacets, &subfacet->hmap_node);
list_remove(&subfacet->list_node);
}
}
-/* Initializes 'key' with the sequence of OVS_KEY_ATTR_* Netlink attributes
- * that can be used to refer to 'subfacet'. The caller must provide 'keybuf'
- * for use as temporary storage. */
static void
-subfacet_get_key(struct subfacet *subfacet, struct odputil_keybuf *keybuf,
- struct ofpbuf *key)
+subfacet_destroy_batch(struct ofproto_dpif *ofproto,
+ struct subfacet **subfacets, int n)
{
- if (!subfacet->key) {
- ofpbuf_use_stack(key, keybuf, sizeof *keybuf);
- odp_flow_key_from_flow(key, &subfacet->facet->flow);
- } else {
- ofpbuf_use_const(key, subfacet->key, subfacet->key_len);
+ struct dpif_op ops[SUBFACET_DESTROY_MAX_BATCH];
+ struct dpif_op *opsp[SUBFACET_DESTROY_MAX_BATCH];
+ struct dpif_flow_stats stats[SUBFACET_DESTROY_MAX_BATCH];
+ int i;
+
+ for (i = 0; i < n; i++) {
+ ops[i].type = DPIF_OP_FLOW_DEL;
+ ops[i].u.flow_del.key = subfacets[i]->key;
+ ops[i].u.flow_del.key_len = subfacets[i]->key_len;
+ ops[i].u.flow_del.stats = &stats[i];
+ opsp[i] = &ops[i];
+ }
+
+ dpif_operate(ofproto->backer->dpif, opsp, n);
+ for (i = 0; i < n; i++) {
+ subfacet_reset_dp_stats(subfacets[i], &stats[i]);
+ subfacets[i]->path = SF_NOT_INSTALLED;
+ subfacet_destroy(subfacets[i]);
+ run_fast_rl();
}
}
struct action_xlate_ctx ctx;
- action_xlate_ctx_init(&ctx, ofproto, &facet->flow, subfacet->initial_tci,
- rule, 0, packet);
+ action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
+ &subfacet->initial_vals, rule, 0, packet);
xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, odp_actions);
facet->tags = ctx.tags;
facet->has_learn = ctx.has_learn;
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
enum subfacet_path path = subfacet_want_path(slow);
uint64_t slow_path_stub[128 / 8];
- struct odputil_keybuf keybuf;
enum dpif_flow_put_flags flags;
- struct ofpbuf key;
int ret;
flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
&actions, &actions_len);
}
- subfacet_get_key(subfacet, &keybuf, &key);
- ret = dpif_flow_put(ofproto->dpif, flags, key.data, key.size,
- actions, actions_len, stats);
+ ret = dpif_flow_put(ofproto->backer->dpif, flags, subfacet->key,
+ subfacet->key_len, actions, actions_len, stats);
if (stats) {
subfacet_reset_dp_stats(subfacet, stats);
if (subfacet->path != SF_NOT_INSTALLED) {
struct rule_dpif *rule = subfacet->facet->rule;
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
- struct odputil_keybuf keybuf;
struct dpif_flow_stats stats;
- struct ofpbuf key;
int error;
- subfacet_get_key(subfacet, &keybuf, &key);
- error = dpif_flow_del(ofproto->dpif, key.data, key.size, &stats);
+ error = dpif_flow_del(ofproto->backer->dpif, subfacet->key,
+ subfacet->key_len, &stats);
subfacet_reset_dp_stats(subfacet, &stats);
if (!error) {
subfacet_update_stats(subfacet, &stats);
}
subfacet->path = SF_NOT_INSTALLED;
} else {
- assert(subfacet->dp_packet_count == 0);
- assert(subfacet->dp_byte_count == 0);
+ ovs_assert(subfacet->dp_packet_count == 0);
+ ovs_assert(subfacet->dp_byte_count == 0);
}
}
facet->packet_count += stats->n_packets;
facet->byte_count += stats->n_bytes;
facet->tcp_flags |= stats->tcp_flags;
- facet_push_stats(facet);
netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
}
}
struct rule_dpif *rule = rule_dpif_cast(rule_);
struct facet *facet;
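+ /* Flush accumulated facet statistics into the rules so that the counts
+ * reported below are current. */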
+ push_all_stats();
+
/* Start from historical data for 'rule' itself that are no longer tracked
* in facets. This counts, for example, facets that have expired. */
*packets = rule->packet_count;
}
}
-static enum ofperr
-rule_execute(struct rule *rule_, const struct flow *flow,
- struct ofpbuf *packet)
+static void
+rule_dpif_execute(struct rule_dpif *rule, const struct flow *flow,
+ struct ofpbuf *packet)
{
- struct rule_dpif *rule = rule_dpif_cast(rule_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
-
+ struct initial_vals initial_vals;
struct dpif_flow_stats stats;
-
struct action_xlate_ctx ctx;
uint64_t odp_actions_stub[1024 / 8];
struct ofpbuf odp_actions;
dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
rule_credit_stats(rule, &stats);
+ initial_vals.vlan_tci = flow->vlan_tci;
+ initial_vals.tunnel_ip_tos = flow->tunnel.ip_tos;
ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
- action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci,
+ action_xlate_ctx_init(&ctx, ofproto, flow, &initial_vals,
rule, stats.tcp_flags, packet);
ctx.resubmit_stats = &stats;
xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, &odp_actions);
odp_actions.size, packet);
ofpbuf_uninit(&odp_actions);
+}
+static enum ofperr
+rule_execute(struct rule *rule, const struct flow *flow,
+ struct ofpbuf *packet)
+{
+ rule_dpif_execute(rule_dpif_cast(rule), flow, packet);
+ ofpbuf_delete(packet);
return 0;
}
send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
{
const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+ uint64_t odp_actions_stub[1024 / 8];
struct ofpbuf key, odp_actions;
struct odputil_keybuf keybuf;
- uint16_t odp_port;
+ uint32_t odp_port;
struct flow flow;
int error;
- flow_extract(packet, 0, NULL, 0, &flow);
- odp_port = vsp_realdev_to_vlandev(ofproto, ofport->odp_port,
- flow.vlan_tci);
- if (odp_port != ofport->odp_port) {
- eth_pop_vlan(packet);
- flow.vlan_tci = htons(0);
+ flow_extract(packet, 0, 0, NULL, OFPP_LOCAL, &flow);
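+
+ /* Patch ports are not backed by the datapath, so a packet sent to one
+ * is accounted on both netdevs and run directly through the peer
+ * bridge's flow table. */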
+ if (netdev_vport_is_patch(ofport->up.netdev)) {
+ struct ofproto_dpif *peer_ofproto;
+ struct dpif_flow_stats stats;
+ struct ofport_dpif *peer;
+ struct rule_dpif *rule;
+
+ peer = ofport_get_peer(ofport);
+ if (!peer) {
+ return ENODEV;
+ }
+
+ dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);
+ netdev_vport_inc_tx(ofport->up.netdev, &stats);
+ netdev_vport_inc_rx(peer->up.netdev, &stats);
+
+ flow.in_port = peer->up.ofp_port;
+ peer_ofproto = ofproto_dpif_cast(peer->up.ofproto);
+ rule = rule_dpif_lookup(peer_ofproto, &flow);
+ rule_dpif_execute(rule, &flow, packet);
+
+ return 0;
+ }
+
+ ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
+
+ if (ofport->tnl_port) {
+ struct dpif_flow_stats stats;
+
+ odp_port = tnl_port_send(ofport->tnl_port, &flow);
+ if (odp_port == OVSP_NONE) {
+ return ENODEV;
+ }
+
+ dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);
+ netdev_vport_inc_tx(ofport->up.netdev, &stats);
+ odp_put_tunnel_action(&flow.tunnel, &odp_actions);
+ odp_put_skb_mark_action(flow.skb_mark, &odp_actions);
+ } else {
+ odp_port = vsp_realdev_to_vlandev(ofproto, ofport->odp_port,
+ flow.vlan_tci);
+ if (odp_port != ofport->odp_port) {
+ eth_pop_vlan(packet);
+ flow.vlan_tci = htons(0);
+ }
}
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
- odp_flow_key_from_flow(&key, &flow);
+ odp_flow_key_from_flow(&key, &flow,
+ ofp_port_to_odp_port(ofproto, flow.in_port));
- ofpbuf_init(&odp_actions, 32);
compose_sflow_action(ofproto, &odp_actions, &flow, odp_port);
nl_msg_put_u32(&odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
- error = dpif_execute(ofproto->dpif,
+ error = dpif_execute(ofproto->backer->dpif,
key.data, key.size,
odp_actions.data, odp_actions.size,
packet);
\f
/* OpenFlow to datapath action translation. */
+static bool may_receive(const struct ofport_dpif *, struct action_xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
struct action_xlate_ctx *);
static void xlate_normal(struct action_xlate_ctx *);
ofpbuf_use_stack(&buf, stub, stub_size);
if (slow & (SLOW_CFM | SLOW_LACP | SLOW_STP)) {
- uint32_t pid = dpif_port_get_pid(ofproto->dpif, UINT16_MAX);
- odp_put_userspace_action(pid, &cookie, &buf);
+ uint32_t pid = dpif_port_get_pid(ofproto->backer->dpif, UINT32_MAX);
+ odp_put_userspace_action(pid, &cookie, sizeof cookie, &buf);
} else {
put_userspace_action(ofproto, &buf, flow, &cookie);
}
{
uint32_t pid;
- pid = dpif_port_get_pid(ofproto->dpif,
- ofp_port_to_odp_port(flow->in_port));
+ pid = dpif_port_get_pid(ofproto->backer->dpif,
+ ofp_port_to_odp_port(ofproto, flow->in_port));
- return odp_put_userspace_action(pid, cookie, odp_actions);
+ return odp_put_userspace_action(pid, cookie, sizeof *cookie, odp_actions);
}
static void
cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
sizeof(*cookie));
- assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
+ ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
compose_sflow_cookie(ctx->ofproto, base->vlan_tci,
ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
bool check_stp)
{
const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
- uint16_t odp_port = ofp_port_to_odp_port(ofp_port);
ovs_be16 flow_vlan_tci = ctx->flow.vlan_tci;
+ ovs_be64 flow_tun_id = ctx->flow.tunnel.tun_id;
uint8_t flow_nw_tos = ctx->flow.nw_tos;
- uint16_t out_port;
+ struct priority_to_dscp *pdscp;
+ uint32_t out_port, odp_port;
- if (ofport) {
- struct priority_to_dscp *pdscp;
+ /* If 'struct flow' gets additional metadata, we'll need to zero it out
+ * before traversing a patch port. */
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 20);
+
+ if (!ofport) {
+ xlate_report(ctx, "Nonexistent output port");
+ return;
+ } else if (ofport->up.pp.config & OFPUTIL_PC_NO_FWD) {
+ xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
+ return;
+ } else if (check_stp && !stp_forward_in_state(ofport->stp_state)) {
+ xlate_report(ctx, "STP not in forwarding state, skipping output");
+ return;
+ }
+
+ if (netdev_vport_is_patch(ofport->up.netdev)) {
+ struct ofport_dpif *peer = ofport_get_peer(ofport);
+ struct flow old_flow = ctx->flow;
+ const struct ofproto_dpif *peer_ofproto;
+ enum slow_path_reason special;
+ struct ofport_dpif *in_port;
- if (ofport->up.pp.config & OFPUTIL_PC_NO_FWD) {
- xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
+ if (!peer) {
+ xlate_report(ctx, "Nonexistent patch port peer");
return;
- } else if (check_stp && !stp_forward_in_state(ofport->stp_state)) {
- xlate_report(ctx, "STP not in forwarding state, skipping output");
+ }
+
+ peer_ofproto = ofproto_dpif_cast(peer->up.ofproto);
+ if (peer_ofproto->backer != ctx->ofproto->backer) {
+ xlate_report(ctx, "Patch port peer on a different datapath");
return;
}
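+
+ /* Process the packet as though it had arrived on the peer's OpenFlow
+ * port: reset flow metadata, run special processing and table 0, then
+ * restore the original translation context. */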
- pdscp = get_priority(ofport, ctx->flow.skb_priority);
- if (pdscp) {
- ctx->flow.nw_tos &= ~IP_DSCP_MASK;
- ctx->flow.nw_tos |= pdscp->dscp;
+ ctx->ofproto = ofproto_dpif_cast(peer->up.ofproto);
+ ctx->flow.in_port = peer->up.ofp_port;
+ ctx->flow.metadata = htonll(0);
+ memset(&ctx->flow.tunnel, 0, sizeof ctx->flow.tunnel);
+ memset(ctx->flow.regs, 0, sizeof ctx->flow.regs);
+
+ in_port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
+ special = process_special(ctx->ofproto, &ctx->flow, in_port,
+ ctx->packet);
+ if (special) {
+ ctx->slow |= special;
+ } else if (!in_port || may_receive(in_port, ctx)) {
+ if (!in_port || stp_forward_in_state(in_port->stp_state)) {
+ xlate_table_action(ctx, ctx->flow.in_port, 0, true);
+ } else {
+ /* Forwarding is disabled by STP. Let OFPP_NORMAL and the
+ * learning action look at the packet, then drop it. */
+ struct flow old_base_flow = ctx->base_flow;
+ size_t old_size = ctx->odp_actions->size;
+ xlate_table_action(ctx, ctx->flow.in_port, 0, true);
+ ctx->base_flow = old_base_flow;
+ ctx->odp_actions->size = old_size;
+ }
}
- } else {
- /* We may not have an ofport record for this port, but it doesn't hurt
- * to allow forwarding to it anyhow. Maybe such a port will appear
- * later and we're pre-populating the flow table. */
+
+ ctx->flow = old_flow;
+ ctx->ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+
+ if (ctx->resubmit_stats) {
+ netdev_vport_inc_tx(ofport->up.netdev, ctx->resubmit_stats);
+ netdev_vport_inc_rx(peer->up.netdev, ctx->resubmit_stats);
+ }
+
+ return;
+ }
+
+ pdscp = get_priority(ofport, ctx->flow.skb_priority);
+ if (pdscp) {
+ ctx->flow.nw_tos &= ~IP_DSCP_MASK;
+ ctx->flow.nw_tos |= pdscp->dscp;
}
- out_port = vsp_realdev_to_vlandev(ctx->ofproto, odp_port,
- ctx->flow.vlan_tci);
- if (out_port != odp_port) {
- ctx->flow.vlan_tci = htons(0);
+ if (ofport->tnl_port) {
+ odp_port = tnl_port_send(ofport->tnl_port, &ctx->flow);
+ if (odp_port == OVSP_NONE) {
+ xlate_report(ctx, "Tunneling decided against output");
+ return;
+ }
+
+ if (ctx->resubmit_stats) {
+ netdev_vport_inc_tx(ofport->up.netdev, ctx->resubmit_stats);
+ }
+ out_port = odp_port;
+ commit_odp_tunnel_action(&ctx->flow, &ctx->base_flow,
+ ctx->odp_actions);
+ } else {
+ odp_port = ofport->odp_port;
+ out_port = vsp_realdev_to_vlandev(ctx->ofproto, odp_port,
+ ctx->flow.vlan_tci);
+ if (out_port != odp_port) {
+ ctx->flow.vlan_tci = htons(0);
+ }
+ ctx->flow.skb_mark &= ~IPSEC_MARK;
}
commit_odp_actions(&ctx->flow, &ctx->base_flow, ctx->odp_actions);
nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);
ctx->sflow_odp_port = odp_port;
ctx->sflow_n_outputs++;
ctx->nf_output_iface = ofp_port;
+ ctx->flow.tunnel.tun_id = flow_tun_id;
ctx->flow.vlan_tci = flow_vlan_tci;
ctx->flow.nw_tos = flow_nw_tos;
}
compose_output_action__(ctx, ofp_port, true);
}
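+/* Tags the flow, based on the lookup just performed in 'ctx''s current
+ * table, so that cached translations of it can be invalidated when that
+ * table changes. */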
+static void
+tag_the_flow(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
+{
+ struct ofproto_dpif *ofproto = ctx->ofproto;
+ uint8_t table_id = ctx->table_id;
+
+ if (table_id > 0 && table_id < N_TABLES) {
+ struct table_dpif *table = &ofproto->tables[table_id];
+ if (table->other_table) {
+ ctx->tags |= (rule && rule->tag
+ ? rule->tag
+ : rule_calculate_tag(&ctx->flow,
+ &table->other_table->mask,
+ table->basis));
+ }
+ }
+}
+
+/* Common rule processing in one place to avoid duplicating code. */
+static struct rule_dpif *
+ctx_rule_hooks(struct action_xlate_ctx *ctx, struct rule_dpif *rule,
+ bool may_packet_in)
+{
+ if (ctx->resubmit_hook) {
+ ctx->resubmit_hook(ctx, rule);
+ }
+ if (rule == NULL && may_packet_in) {
+ /* XXX
+ * check if table configuration flags
+ * OFPTC_TABLE_MISS_CONTROLLER, default.
+ * OFPTC_TABLE_MISS_CONTINUE,
+ * OFPTC_TABLE_MISS_DROP
+ * When OF1.0, OFPTC_TABLE_MISS_CONTINUE is used. What to do?
+ */
+ rule = rule_dpif_miss_rule(ctx->ofproto, &ctx->flow);
+ }
+ if (rule && ctx->resubmit_stats) {
+ rule_credit_stats(rule, ctx->resubmit_stats);
+ }
+ return rule;
+}
+
static void
xlate_table_action(struct action_xlate_ctx *ctx,
uint16_t in_port, uint8_t table_id, bool may_packet_in)
{
if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
- struct ofproto_dpif *ofproto = ctx->ofproto;
struct rule_dpif *rule;
- uint16_t old_in_port;
- uint8_t old_table_id;
+ uint16_t old_in_port = ctx->flow.in_port;
+ uint8_t old_table_id = ctx->table_id;
- old_table_id = ctx->table_id;
ctx->table_id = table_id;
/* Look up a flow with 'in_port' as the input port. */
- old_in_port = ctx->flow.in_port;
ctx->flow.in_port = in_port;
- rule = rule_dpif_lookup__(ofproto, &ctx->flow, table_id);
-
- /* Tag the flow. */
- if (table_id > 0 && table_id < N_TABLES) {
- struct table_dpif *table = &ofproto->tables[table_id];
- if (table->other_table) {
- ctx->tags |= (rule && rule->tag
- ? rule->tag
- : rule_calculate_tag(&ctx->flow,
- &table->other_table->mask,
- table->basis));
- }
- }
+ rule = rule_dpif_lookup__(ctx->ofproto, &ctx->flow, table_id);
+
+ tag_the_flow(ctx, rule);
/* Restore the original input port. Otherwise OFPP_NORMAL and
* OFPP_IN_PORT will have surprising behavior. */
ctx->flow.in_port = old_in_port;
- if (ctx->resubmit_hook) {
- ctx->resubmit_hook(ctx, rule);
- }
-
- if (rule == NULL && may_packet_in) {
- /* TODO:XXX
- * check if table configuration flags
- * OFPTC_TABLE_MISS_CONTROLLER, default.
- * OFPTC_TABLE_MISS_CONTINUE,
- * OFPTC_TABLE_MISS_DROP
- * When OF1.0, OFPTC_TABLE_MISS_CONTINUE is used. What to do?
- */
- rule = rule_dpif_miss_rule(ofproto, &ctx->flow);
- }
+ rule = ctx_rule_hooks(ctx, rule, may_packet_in);
if (rule) {
struct rule_dpif *old_rule = ctx->rule;
- if (ctx->resubmit_stats) {
- rule_credit_stats(rule, ctx->resubmit_stats);
- }
-
ctx->recurse++;
ctx->rule = rule;
do_xlate_actions(rule->up.ofpacts, rule->up.ofpacts_len, ctx);
if (packet->l2 && packet->l3) {
struct eth_header *eh;
+ uint16_t mpls_depth;
eth_pop_vlan(packet);
eh = packet->l2;
- /* If the Ethernet type is less than ETH_TYPE_MIN, it's likely an 802.2
- * LLC frame. Calculating the Ethernet type of these frames is more
- * trouble than seems appropriate for a simple assertion. */
- assert(ntohs(eh->eth_type) < ETH_TYPE_MIN
- || eh->eth_type == ctx->flow.dl_type);
-
memcpy(eh->eth_src, ctx->flow.dl_src, sizeof eh->eth_src);
memcpy(eh->eth_dst, ctx->flow.dl_dst, sizeof eh->eth_dst);
eth_push_vlan(packet, ctx->flow.vlan_tci);
}
+ mpls_depth = eth_mpls_depth(packet);
+
+ if (mpls_depth < ctx->flow.mpls_depth) {
+ push_mpls(packet, ctx->flow.dl_type, ctx->flow.mpls_lse);
+ } else if (mpls_depth > ctx->flow.mpls_depth) {
+ pop_mpls(packet, ctx->flow.dl_type);
+ } else if (mpls_depth) {
+ set_mpls_lse(packet, ctx->flow.mpls_lse);
+ }
+
if (packet->l4) {
if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
packet_set_ipv4(packet, ctx->flow.nw_src, ctx->flow.nw_dst,
pin.send_len = len;
flow_get_metadata(&ctx->flow, &pin.fmd);
- connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin);
- ofpbuf_delete(packet);
+ connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin);
+ ofpbuf_delete(packet);
+}
+
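+/* Composes an MPLS push in 'ctx'. If a label stack is already present, the
+ * new label inherits the existing LSE with the bottom-of-stack bit cleared;
+ * otherwise an explicit-null label is synthesized from the IP header's DSCP
+ * and TTL. */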
+static void
+execute_mpls_push_action(struct action_xlate_ctx *ctx, ovs_be16 eth_type)
+{
+ ovs_assert(eth_type_mpls(eth_type));
+
+ if (ctx->base_flow.mpls_depth) {
+ ctx->flow.mpls_lse &= ~htonl(MPLS_BOS_MASK);
+ ctx->flow.mpls_depth++;
+ } else {
+ ovs_be32 label;
+ uint8_t tc, ttl;
+
+ if (ctx->flow.dl_type == htons(ETH_TYPE_IPV6)) {
+ label = htonl(0x2); /* IPV6 Explicit Null. */
+ } else {
+ label = htonl(0x0); /* IPV4 Explicit Null. */
+ }
+ tc = (ctx->flow.nw_tos & IP_DSCP_MASK) >> 2;
+ ttl = ctx->flow.nw_ttl ? ctx->flow.nw_ttl : 0x40;
+ ctx->flow.mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
+ ctx->flow.mpls_depth = 1;
+ }
+ ctx->flow.dl_type = eth_type;
+}
+
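+/* Composes an MPLS pop in 'ctx', restoring 'eth_type' as the packet's
+ * Ethernet type once the last label has been removed. */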
+static void
+execute_mpls_pop_action(struct action_xlate_ctx *ctx, ovs_be16 eth_type)
+{
+ ovs_assert(eth_type_mpls(ctx->flow.dl_type));
+ ovs_assert(!eth_type_mpls(eth_type));
+
+ if (ctx->flow.mpls_depth) {
+ ctx->flow.mpls_depth--;
+ ctx->flow.mpls_lse = htonl(0);
+ if (!ctx->flow.mpls_depth) {
+ ctx->flow.dl_type = eth_type;
+ }
+ }
}
static bool
}
}
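+/* Sets the MPLS TTL to 'ttl'. Returns true if translation of the current
+ * flow should stop, which happens when the flow carries no MPLS label
+ * stack. */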
+static bool
+execute_set_mpls_ttl_action(struct action_xlate_ctx *ctx, uint8_t ttl)
+{
+ if (!eth_type_mpls(ctx->flow.dl_type)) {
+ return true;
+ }
+
+ set_mpls_lse_ttl(&ctx->flow.mpls_lse, ttl);
+ return false;
+}
+
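+/* Decrements the MPLS TTL. Returns true if translation of the current flow
+ * should stop, which happens when the TTL expires and the packet is sent to
+ * the controller. */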
+static bool
+execute_dec_mpls_ttl_action(struct action_xlate_ctx *ctx)
+{
+ uint8_t ttl = mpls_lse_to_ttl(ctx->flow.mpls_lse);
+
+ if (!eth_type_mpls(ctx->flow.dl_type)) {
+ return false;
+ }
+
+ if (ttl > 1) {
+ ttl--;
+ set_mpls_lse_ttl(&ctx->flow.mpls_lse, ttl);
+ return false;
+ } else {
+ execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
+
+ /* Stop processing for current table. */
+ return true;
+ }
+}
+
static void
xlate_output_action(struct action_xlate_ctx *ctx,
uint16_t port, uint16_t max_len, bool may_packet_in)
int error;
/* Translate queue to priority. */
- error = dpif_queue_to_priority(ctx->ofproto->dpif, queue_id, &priority);
+ error = dpif_queue_to_priority(ctx->ofproto->backer->dpif,
+ queue_id, &priority);
if (error) {
/* Fall back to ordinary output action. */
xlate_output_action(ctx, enqueue->port, 0, false);
{
uint32_t skb_priority;
- if (!dpif_queue_to_priority(ctx->ofproto->dpif, queue_id, &skb_priority)) {
+ if (!dpif_queue_to_priority(ctx->ofproto->backer->dpif,
+ queue_id, &skb_priority)) {
ctx->flow.skb_priority = skb_priority;
} else {
/* Couldn't translate queue to a priority. Nothing to do. A warning
ovs_be64 tun_id;
};
-static void
-xlate_autopath(struct action_xlate_ctx *ctx,
- const struct ofpact_autopath *ap)
-{
- uint16_t ofp_port = ap->port;
- struct ofport_dpif *port = get_ofp_port(ctx->ofproto, ofp_port);
-
- if (!port || !port->bundle) {
- ofp_port = OFPP_NONE;
- } else if (port->bundle->bond) {
- /* Autopath does not support VLAN hashing. */
- struct ofport_dpif *slave = bond_choose_output_slave(
- port->bundle->bond, &ctx->flow, 0, &ctx->tags);
- if (slave) {
- ofp_port = slave->up.ofp_port;
- }
- }
- nxm_reg_load(&ap->dst, ofp_port, &ctx->flow);
-}
-
static bool
slave_enabled_cb(uint16_t ofp_port, void *ofproto_)
{
return true;
}
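+/* Returns false if a packet that arrived on a tunnel marked ECN CE is not
+ * itself ECN-capable and must be dropped. Otherwise returns true, copying
+ * the CE mark to the inner header when appropriate. */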
+static bool
+tunnel_ecn_ok(struct action_xlate_ctx *ctx)
+{
+ if (is_ip_any(&ctx->base_flow)
+ && (ctx->base_flow.tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
+ if ((ctx->base_flow.nw_tos & IP_ECN_MASK) == IP_ECN_NOT_ECT) {
+ VLOG_WARN_RL(&rl, "dropping tunnel packet marked ECN CE"
+ " but is not ECN capable");
+ return false;
+ } else {
+ /* Set the ECN CE value in the tunneled packet. */
+ ctx->flow.nw_tos |= IP_ECN_CE;
+ }
+ }
+
+ return true;
+}
+
static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
struct action_xlate_ctx *ctx)
{
- const struct ofport_dpif *port;
bool was_evictable = true;
const struct ofpact *a;
- port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
- if (port && !may_receive(port, ctx)) {
- /* Drop this flow. */
- return;
- }
-
if (ctx->rule) {
/* Don't let the rule we're working on get evicted underneath us. */
was_evictable = ctx->rule->up.evictable;
ctx->rule->up.evictable = false;
}
+
+ do_xlate_actions_again:
OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
struct ofpact_controller *controller;
const struct ofpact_metadata *metadata;
break;
case OFPACT_PUSH_VLAN:
- /* TODO:XXX 802.1AD(QinQ) */
+ /* XXX 802.1AD(QinQ) */
ctx->flow.vlan_tci = htons(VLAN_CFI);
break;
break;
case OFPACT_SET_IPV4_SRC:
- ctx->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
+ if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
+ ctx->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
+ }
break;
case OFPACT_SET_IPV4_DST:
- ctx->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
+ if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
+ ctx->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
+ }
break;
case OFPACT_SET_IPV4_DSCP:
break;
case OFPACT_SET_L4_SRC_PORT:
- ctx->flow.tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
+ if (is_ip_any(&ctx->flow)) {
+ ctx->flow.tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
+ }
break;
case OFPACT_SET_L4_DST_PORT:
- ctx->flow.tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
+ if (is_ip_any(&ctx->flow)) {
+ ctx->flow.tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
+ }
break;
case OFPACT_RESUBMIT:
nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->flow);
break;
+ case OFPACT_STACK_PUSH:
+ nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), &ctx->flow,
+ &ctx->stack);
+ break;
+
+ case OFPACT_STACK_POP:
+ nxm_execute_stack_pop(ofpact_get_STACK_POP(a), &ctx->flow,
+ &ctx->stack);
+ break;
+
+ case OFPACT_PUSH_MPLS:
+ execute_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);
+ break;
+
+ case OFPACT_POP_MPLS:
+ execute_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
+ break;
+
+ case OFPACT_SET_MPLS_TTL:
+ if (execute_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl)) {
+ goto out;
+ }
+ break;
+
+ case OFPACT_DEC_MPLS_TTL:
+ if (execute_dec_mpls_ttl_action(ctx)) {
+ goto out;
+ }
+ break;
+
case OFPACT_DEC_TTL:
if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
goto out;
multipath_execute(ofpact_get_MULTIPATH(a), &ctx->flow);
break;
- case OFPACT_AUTOPATH:
- xlate_autopath(ctx, ofpact_get_AUTOPATH(a));
- break;
-
case OFPACT_BUNDLE:
ctx->ofproto->has_bundle_action = true;
xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
break;
case OFPACT_CLEAR_ACTIONS:
- /* TODO:XXX
+ /* XXX
              * Nothing to do because write-actions is not supported for now.
              * When write-actions is supported, clear-actions must also
              * be supported at the same time.
break;
case OFPACT_GOTO_TABLE: {
- /* TODO:XXX remove recursion */
- /* It is assumed that goto-table is last action */
+ /* It is assumed that goto-table is the last action. */
struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
- assert(ctx->table_id < ogt->table_id);
- xlate_table_action(ctx, ctx->flow.in_port, ogt->table_id, true);
+ struct rule_dpif *rule;
+
+ ovs_assert(ctx->table_id < ogt->table_id);
+
+ ctx->table_id = ogt->table_id;
+
+ /* Look up a flow from the new table. */
+ rule = rule_dpif_lookup__(ctx->ofproto, &ctx->flow, ctx->table_id);
+
+ tag_the_flow(ctx, rule);
+
+ rule = ctx_rule_hooks(ctx, rule, true);
+
+ if (rule) {
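+                /* Pin the rule looked up from the new table in place of
+                 * the old one: restore the old rule's evictability and
+                 * prevent the new rule from being evicted while its
+                 * actions are translated. */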
+ if (ctx->rule) {
+ ctx->rule->up.evictable = was_evictable;
+ }
+ ctx->rule = rule;
+ was_evictable = rule->up.evictable;
+ rule->up.evictable = false;
+
+ /* Tail recursion removal. */
+ ofpacts = rule->up.ofpacts;
+ ofpacts_len = rule->up.ofpacts_len;
+ goto do_xlate_actions_again;
+ }
break;
}
}
}
out:
- /* We've let OFPP_NORMAL and the learning action look at the packet,
- * so drop it now if forwarding is disabled. */
- if (port && !stp_forward_in_state(port->stp_state)) {
- ofpbuf_clear(ctx->odp_actions);
- add_sflow_action(ctx);
- }
if (ctx->rule) {
ctx->rule->up.evictable = was_evictable;
}
static void
action_xlate_ctx_init(struct action_xlate_ctx *ctx,
struct ofproto_dpif *ofproto, const struct flow *flow,
- ovs_be16 initial_tci, struct rule_dpif *rule,
+ const struct initial_vals *initial_vals,
+ struct rule_dpif *rule,
uint8_t tcp_flags, const struct ofpbuf *packet)
{
+ ovs_be64 initial_tun_id = flow->tunnel.tun_id;
+
+ /* Flow initialization rules:
+ * - 'base_flow' must match the kernel's view of the packet at the
+ * time that action processing starts. 'flow' represents any
+ * transformations we wish to make through actions.
+ * - By default 'base_flow' and 'flow' are the same since the input
+ * packet matches the output before any actions are applied.
+ * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
+ * of the received packet as seen by the kernel. If we later output
+ * to another device without any modifications this will cause us to
+ * insert a new tag since the original one was stripped off by the
+ * VLAN device.
+ * - Tunnel 'flow' is largely cleared when transitioning between
+ * the input and output stages since it does not make sense to output
+ * a packet with the exact headers that it was received with (i.e.
+ * the destination IP is us). The one exception is the tun_id, which
+ * is preserved to allow use in later resubmit lookups and loads into
+ * registers.
+ * - Tunnel 'base_flow' is completely cleared since that is what the
+ * kernel does. If we wish to maintain the original values an action
+ * needs to be generated. */
+
ctx->ofproto = ofproto;
ctx->flow = *flow;
+ memset(&ctx->flow.tunnel, 0, sizeof ctx->flow.tunnel);
ctx->base_flow = ctx->flow;
- memset(&ctx->base_flow.tunnel, 0, sizeof ctx->base_flow.tunnel);
- ctx->base_flow.vlan_tci = initial_tci;
+ ctx->base_flow.vlan_tci = initial_vals->vlan_tci;
+ ctx->base_flow.tunnel.ip_tos = initial_vals->tunnel_ip_tos;
+ ctx->flow.tunnel.tun_id = initial_tun_id;
ctx->rule = rule;
ctx->packet = packet;
ctx->may_learn = packet != NULL;
static bool hit_resubmit_limit;
enum slow_path_reason special;
+ struct ofport_dpif *in_port;
+ struct flow orig_flow;
COVERAGE_INC(ofproto_dpif_xlate);
ctx->table_id = 0;
ctx->exit = false;
+ ofpbuf_use_stub(&ctx->stack, ctx->init_stack, sizeof ctx->init_stack);
+
if (ctx->ofproto->has_mirrors || hit_resubmit_limit) {
/* Do this conditionally because the copy is expensive enough that it
- * shows up in profiles.
- *
- * We keep orig_flow in 'ctx' only because I couldn't make GCC 4.4
- * believe that I wasn't using it without initializing it if I kept it
- * in a local variable. */
- ctx->orig_flow = ctx->flow;
+ * shows up in profiles. */
+ orig_flow = ctx->flow;
}
if (ctx->flow.nw_frag & FLOW_NW_FRAG_ANY) {
}
}
- special = process_special(ctx->ofproto, &ctx->flow, ctx->packet);
+ in_port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
+ special = process_special(ctx->ofproto, &ctx->flow, in_port, ctx->packet);
if (special) {
ctx->slow |= special;
} else {
static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
- ovs_be16 initial_tci = ctx->base_flow.vlan_tci;
+ struct initial_vals initial_vals;
+ uint32_t local_odp_port;
+
+ initial_vals.vlan_tci = ctx->base_flow.vlan_tci;
+ initial_vals.tunnel_ip_tos = ctx->base_flow.tunnel.ip_tos;
add_sflow_action(ctx);
- do_xlate_actions(ofpacts, ofpacts_len, ctx);
+
+ if (tunnel_ecn_ok(ctx) && (!in_port || may_receive(in_port, ctx))) {
+ do_xlate_actions(ofpacts, ofpacts_len, ctx);
+
+ /* We've let OFPP_NORMAL and the learning action look at the
+ * packet, so drop it now if forwarding is disabled. */
+ if (in_port && !stp_forward_in_state(in_port->stp_state)) {
+ ofpbuf_clear(ctx->odp_actions);
+ add_sflow_action(ctx);
+ }
+ }
if (ctx->max_resubmit_trigger && !ctx->resubmit_hook) {
if (!hit_resubmit_limit) {
} else if (!VLOG_DROP_ERR(&trace_rl)) {
struct ds ds = DS_EMPTY_INITIALIZER;
- ofproto_trace(ctx->ofproto, &ctx->orig_flow, ctx->packet,
- initial_tci, &ds);
+ ofproto_trace(ctx->ofproto, &orig_flow, ctx->packet,
+ &initial_vals, &ds);
VLOG_ERR("Trace triggered by excessive resubmit "
"recursion:\n%s", ds_cstr(&ds));
ds_destroy(&ds);
}
}
+ local_odp_port = ofp_port_to_odp_port(ctx->ofproto, OFPP_LOCAL);
if (!connmgr_may_set_up_flow(ctx->ofproto->up.connmgr, &ctx->flow,
+ local_odp_port,
ctx->odp_actions->data,
ctx->odp_actions->size)) {
ctx->slow |= SLOW_IN_BAND;
}
}
if (ctx->ofproto->has_mirrors) {
- add_mirror_actions(ctx, &ctx->orig_flow);
+ add_mirror_actions(ctx, &orig_flow);
}
fix_sflow_action(ctx);
}
+
+ ofpbuf_uninit(&ctx->stack);
}
/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
in_bundle->name, vlan);
mac->port.p = in_bundle;
- tag_set_add(&ofproto->revalidate_set,
+ tag_set_add(&ofproto->backer->revalidate_set,
mac_learning_changed(ofproto->ml, mac));
}
}
if (table->catchall_table != catchall || table->other_table != other) {
table->catchall_table = catchall;
table->other_table = other;
- ofproto->need_revalidate = REV_FLOW_TABLE;
+ ofproto->backer->need_revalidate = REV_FLOW_TABLE;
}
}
table_update_taggable(ofproto, rule->up.table_id);
- if (!ofproto->need_revalidate) {
+ if (!ofproto->backer->need_revalidate) {
struct table_dpif *table = &ofproto->tables[rule->up.table_id];
if (table->other_table && rule->tag) {
- tag_set_add(&ofproto->revalidate_set, rule->tag);
+ tag_set_add(&ofproto->backer->revalidate_set, rule->tag);
} else {
- ofproto->need_revalidate = REV_FLOW_TABLE;
+ ofproto->backer->need_revalidate = REV_FLOW_TABLE;
}
}
}
enum ofp_config_flags frag_handling)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
-
if (frag_handling != OFPC_FRAG_REASM) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
return true;
} else {
return false;
const struct ofpact *ofpacts, size_t ofpacts_len)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ struct initial_vals initial_vals;
struct odputil_keybuf keybuf;
struct dpif_flow_stats stats;
struct ofpbuf odp_actions;
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
- odp_flow_key_from_flow(&key, flow);
+ odp_flow_key_from_flow(&key, flow,
+ ofp_port_to_odp_port(ofproto, flow->in_port));
dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
- action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, NULL,
+ initial_vals.vlan_tci = flow->vlan_tci;
+ initial_vals.tunnel_ip_tos = 0;
+ action_xlate_ctx_init(&ctx, ofproto, flow, &initial_vals, NULL,
packet_get_tcp_flags(packet, flow), packet);
ctx.resubmit_stats = &stats;
ofpbuf_use_stub(&odp_actions,
odp_actions_stub, sizeof odp_actions_stub);
xlate_actions(&ctx, ofpacts, ofpacts_len, &odp_actions);
- dpif_execute(ofproto->dpif, key.data, key.size,
+ dpif_execute(ofproto->backer->dpif, key.data, key.size,
odp_actions.data, odp_actions.size, packet);
ofpbuf_uninit(&odp_actions);
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- dpif_get_netflow_ids(ofproto->dpif, engine_type, engine_id);
+ dpif_get_netflow_ids(ofproto->backer->dpif, engine_type, engine_id);
}
static void
unixctl_command_reply_error(conn, "no such bridge");
return;
}
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
} else {
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
}
}
struct ofproto_dpif *ofproto;
struct ofpbuf odp_key;
struct ofpbuf *packet;
- ovs_be16 initial_tci;
+ struct initial_vals initial_vals;
struct ds result;
struct flow flow;
char *s;
goto exit;
}
- /* Convert odp_key to flow. */
- error = ofproto_dpif_extract_flow_key(ofproto, odp_key.data,
- odp_key.size, &flow,
- &initial_tci, NULL);
- if (error == ODP_FIT_ERROR) {
+ /* The user might have specified the wrong ofproto but within the
+ * same backer. That's OK, ofproto_receive() can find the right
+ * one for us. */
+ if (ofproto_receive(ofproto->backer, NULL, odp_key.data,
+ odp_key.size, &flow, NULL, &ofproto, NULL,
+ &initial_vals)) {
unixctl_command_reply_error(conn, "Invalid flow");
goto exit;
}
+ ds_put_format(&result, "Bridge: %s\n", ofproto->up.name);
} else {
char *error_s;
goto exit;
}
- initial_tci = flow.vlan_tci;
- vsp_adjust_flow(ofproto, &flow);
+ initial_vals.vlan_tci = flow.vlan_tci;
+ initial_vals.tunnel_ip_tos = flow.tunnel.ip_tos;
}
/* Generate a packet, if requested. */
packet = ofpbuf_new(0);
flow_compose(packet, &flow);
}
- } else if (argc == 6) {
- /* ofproto/trace dpname priority tun_id in_port packet */
+ } else if (argc == 7) {
+ /* ofproto/trace dpname priority tun_id in_port mark packet */
const char *priority_s = argv[2];
const char *tun_id_s = argv[3];
const char *in_port_s = argv[4];
- const char *packet_s = argv[5];
- uint16_t in_port = atoi(in_port_s);
+ const char *mark_s = argv[5];
+ const char *packet_s = argv[6];
+ uint32_t in_port = atoi(in_port_s);
ovs_be64 tun_id = htonll(strtoull(tun_id_s, NULL, 0));
uint32_t priority = atoi(priority_s);
+ uint32_t mark = atoi(mark_s);
const char *msg;
msg = eth_from_hex(packet_s, &packet);
ds_put_cstr(&result, s);
free(s);
- flow_extract(packet, priority, NULL, in_port, &flow);
+ flow_extract(packet, priority, mark, NULL, in_port, &flow);
flow.tunnel.tun_id = tun_id;
- initial_tci = flow.vlan_tci;
+ initial_vals.vlan_tci = flow.vlan_tci;
+ initial_vals.tunnel_ip_tos = flow.tunnel.ip_tos;
} else {
unixctl_command_reply_error(conn, "Bad command syntax");
goto exit;
}
- ofproto_trace(ofproto, &flow, packet, initial_tci, &result);
+ ofproto_trace(ofproto, &flow, packet, &initial_vals, &result);
unixctl_command_reply(conn, ds_cstr(&result));
exit:
static void
ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
- const struct ofpbuf *packet, ovs_be16 initial_tci,
- struct ds *ds)
+ const struct ofpbuf *packet,
+ const struct initial_vals *initial_vals, struct ds *ds)
{
struct rule_dpif *rule;
trace.flow = *flow;
ofpbuf_use_stub(&odp_actions,
odp_actions_stub, sizeof odp_actions_stub);
- action_xlate_ctx_init(&trace.ctx, ofproto, flow, initial_tci,
+ action_xlate_ctx_init(&trace.ctx, ofproto, flow, initial_vals,
rule, tcp_flags, packet);
trace.ctx.resubmit_hook = trace_resubmit;
trace.ctx.report_hook = trace_report;
}
}
if (errors) {
- ofproto->need_revalidate = REV_INCONSISTENCY;
+ ofproto->backer->need_revalidate = REV_INCONSISTENCY;
}
if (errors) {
ds_destroy(&reply);
}
+/* Stores the current ofprotos in 'ofproto_shash'.  Returns a sorted list
+ * of the nodes in 'ofproto_shash'.  The caller is responsible for
+ * destroying 'ofproto_shash' and freeing the returned value. */
+static const struct shash_node **
+get_ofprotos(struct shash *ofproto_shash)
+{
+ const struct ofproto_dpif *ofproto;
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ char *name = xasprintf("%s@%s", ofproto->up.type, ofproto->up.name);
+ shash_add_nocopy(ofproto_shash, name, ofproto);
+ }
+
+ return shash_sort(ofproto_shash);
+}
+
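+/* Implements the "dpif/dump-dps" unixctl command, which lists every
+ * configured ofproto as "type@name", one per line. */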
+static void
+ofproto_unixctl_dpif_dump_dps(struct unixctl_conn *conn, int argc OVS_UNUSED,
+ const char *argv[] OVS_UNUSED,
+ void *aux OVS_UNUSED)
+{
+ struct ds ds = DS_EMPTY_INITIALIZER;
+ struct shash ofproto_shash;
+ const struct shash_node **sorted_ofprotos;
+ int i;
+
+ shash_init(&ofproto_shash);
+ sorted_ofprotos = get_ofprotos(&ofproto_shash);
+ for (i = 0; i < shash_count(&ofproto_shash); i++) {
+ const struct shash_node *node = sorted_ofprotos[i];
+ ds_put_format(&ds, "%s\n", node->name);
+ }
+
+ shash_destroy(&ofproto_shash);
+ free(sorted_ofprotos);
+
+ unixctl_command_reply(conn, ds_cstr(&ds));
+ ds_destroy(&ds);
+}
+
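+/* Appends to 'ds' a summary of 'ofproto': its datapath name, lookup and
+ * subfacet statistics, and one line per port, in the format used by the
+ * "dpif/show" unixctl command. */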
+static void
+show_dp_format(const struct ofproto_dpif *ofproto, struct ds *ds)
+{
+ const struct shash_node **ports;
+ int i;
+ struct avg_subfacet_rates lifetime;
+ unsigned long long int minutes;
+ const int min_ms = 60 * 1000; /* milliseconds in one minute. */
+
+ minutes = (time_msec() - ofproto->created) / min_ms;
+
+ if (minutes > 0) {
+ lifetime.add_rate = (double)ofproto->total_subfacet_add_count
+ / minutes;
+ lifetime.del_rate = (double)ofproto->total_subfacet_del_count
+ / minutes;
+    } else {
+ lifetime.add_rate = 0.0;
+ lifetime.del_rate = 0.0;
+ }
+
+ ds_put_format(ds, "%s (%s):\n", ofproto->up.name,
+ dpif_name(ofproto->backer->dpif));
+ ds_put_format(ds,
+ "\tlookups: hit:%"PRIu64" missed:%"PRIu64"\n",
+ ofproto->n_hit, ofproto->n_missed);
+ ds_put_format(ds, "\tflows: cur: %zu, avg: %5.3f, max: %d,"
+ " life span: %llu(ms)\n",
+ hmap_count(&ofproto->subfacets),
+ avg_subfacet_count(ofproto),
+ ofproto->max_n_subfacet,
+ avg_subfacet_life_span(ofproto));
+ if (minutes >= 60) {
+ show_dp_rates(ds, "\t\thourly avg:", &ofproto->hourly);
+ }
+ if (minutes >= 60 * 24) {
+ show_dp_rates(ds, "\t\tdaily avg:", &ofproto->daily);
+ }
+ show_dp_rates(ds, "\t\toverall avg:", &lifetime);
+
+ ports = shash_sort(&ofproto->up.port_by_name);
+ for (i = 0; i < shash_count(&ofproto->up.port_by_name); i++) {
+ const struct shash_node *node = ports[i];
+ struct ofport *ofport = node->data;
+ const char *name = netdev_get_name(ofport->netdev);
+ const char *type = netdev_get_type(ofport->netdev);
+ uint32_t odp_port;
+
+ ds_put_format(ds, "\t%s %u/", name, ofport->ofp_port);
+
+ odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
+ if (odp_port != OVSP_NONE) {
+ ds_put_format(ds, "%"PRIu32":", odp_port);
+ } else {
+ ds_put_cstr(ds, "none:");
+ }
+
+ if (strcmp(type, "system")) {
+ struct netdev *netdev;
+ int error;
+
+ ds_put_format(ds, " (%s", type);
+
+ error = netdev_open(name, type, &netdev);
+ if (!error) {
+ struct smap config;
+
+ smap_init(&config);
+ error = netdev_get_config(netdev, &config);
+ if (!error) {
+ const struct smap_node **nodes;
+ size_t i;
+
+ nodes = smap_sort(&config);
+ for (i = 0; i < smap_count(&config); i++) {
+ const struct smap_node *node = nodes[i];
+ ds_put_format(ds, "%c %s=%s", i ? ',' : ':',
+ node->key, node->value);
+ }
+ free(nodes);
+ }
+ smap_destroy(&config);
+
+ netdev_close(netdev);
+ }
+ ds_put_char(ds, ')');
+ }
+ ds_put_char(ds, '\n');
+ }
+ free(ports);
+}
+
+static void
+ofproto_unixctl_dpif_show(struct unixctl_conn *conn, int argc,
+ const char *argv[], void *aux OVS_UNUSED)
+{
+ struct ds ds = DS_EMPTY_INITIALIZER;
+ const struct ofproto_dpif *ofproto;
+
+ if (argc > 1) {
+ int i;
+ for (i = 1; i < argc; i++) {
+ ofproto = ofproto_dpif_lookup(argv[i]);
+ if (!ofproto) {
+ ds_put_format(&ds, "Unknown bridge %s (use dpif/dump-dps "
+ "for help)", argv[i]);
+ unixctl_command_reply_error(conn, ds_cstr(&ds));
+ return;
+ }
+ show_dp_format(ofproto, &ds);
+ }
+ } else {
+ struct shash ofproto_shash;
+ const struct shash_node **sorted_ofprotos;
+ int i;
+
+ shash_init(&ofproto_shash);
+ sorted_ofprotos = get_ofprotos(&ofproto_shash);
+ for (i = 0; i < shash_count(&ofproto_shash); i++) {
+ const struct shash_node *node = sorted_ofprotos[i];
+ show_dp_format(node->data, &ds);
+ }
+
+ shash_destroy(&ofproto_shash);
+ free(sorted_ofprotos);
+ }
+
+ unixctl_command_reply(conn, ds_cstr(&ds));
+ ds_destroy(&ds);
+}
+
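+/* Implements the "dpif/dump-flows" unixctl command, which prints each
+ * datapath flow (subfacet) installed for a bridge along with its
+ * statistics and actions. */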
+static void
+ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn,
+ int argc OVS_UNUSED, const char *argv[],
+ void *aux OVS_UNUSED)
+{
+ struct ds ds = DS_EMPTY_INITIALIZER;
+ const struct ofproto_dpif *ofproto;
+ struct subfacet *subfacet;
+
+ ofproto = ofproto_dpif_lookup(argv[1]);
+ if (!ofproto) {
+ unixctl_command_reply_error(conn, "no such bridge");
+ return;
+ }
+
+ update_stats(ofproto->backer);
+
+ HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
+ odp_flow_key_format(subfacet->key, subfacet->key_len, &ds);
+
+ ds_put_format(&ds, ", packets:%"PRIu64", bytes:%"PRIu64", used:",
+ subfacet->dp_packet_count, subfacet->dp_byte_count);
+ if (subfacet->used) {
+ ds_put_format(&ds, "%.3fs",
+ (time_msec() - subfacet->used) / 1000.0);
+ } else {
+ ds_put_format(&ds, "never");
+ }
+ if (subfacet->facet->tcp_flags) {
+ ds_put_cstr(&ds, ", flags:");
+ packet_format_tcp_flags(&ds, subfacet->facet->tcp_flags);
+ }
+
+ ds_put_cstr(&ds, ", actions:");
+ if (subfacet->slow) {
+ uint64_t slow_path_stub[128 / 8];
+ const struct nlattr *actions;
+ size_t actions_len;
+
+ compose_slow_path(ofproto, &subfacet->facet->flow, subfacet->slow,
+ slow_path_stub, sizeof slow_path_stub,
+ &actions, &actions_len);
+ format_odp_actions(&ds, actions, actions_len);
+ } else {
+ format_odp_actions(&ds, subfacet->actions, subfacet->actions_len);
+ }
+ ds_put_char(&ds, '\n');
+ }
+
+ unixctl_command_reply(conn, ds_cstr(&ds));
+ ds_destroy(&ds);
+}
+
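+/* Implements the "dpif/del-flows" unixctl command, which deletes all of
+ * the flows that a bridge has set up in the datapath. */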
+static void
+ofproto_unixctl_dpif_del_flows(struct unixctl_conn *conn,
+ int argc OVS_UNUSED, const char *argv[],
+ void *aux OVS_UNUSED)
+{
+ struct ds ds = DS_EMPTY_INITIALIZER;
+ struct ofproto_dpif *ofproto;
+
+ ofproto = ofproto_dpif_lookup(argv[1]);
+ if (!ofproto) {
+ unixctl_command_reply_error(conn, "no such bridge");
+ return;
+ }
+
+ flush(&ofproto->up);
+
+ unixctl_command_reply(conn, ds_cstr(&ds));
+ ds_destroy(&ds);
+}
+
static void
ofproto_dpif_unixctl_init(void)
{
unixctl_command_register(
"ofproto/trace",
- "bridge {tun_id in_port packet | odp_flow [-generate]}",
- 2, 5, ofproto_unixctl_trace, NULL);
+ "bridge {priority tun_id in_port mark packet | odp_flow [-generate]}",
+ 2, 6, ofproto_unixctl_trace, NULL);
unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
ofproto_unixctl_fdb_flush, NULL);
unixctl_command_register("fdb/show", "bridge", 1, 1,
ofproto_dpif_unclog, NULL);
unixctl_command_register("ofproto/self-check", "[bridge]", 0, 1,
ofproto_dpif_self_check, NULL);
+ unixctl_command_register("dpif/dump-dps", "", 0, 0,
+ ofproto_unixctl_dpif_dump_dps, NULL);
+ unixctl_command_register("dpif/show", "[bridge]", 0, INT_MAX,
+ ofproto_unixctl_dpif_show, NULL);
+ unixctl_command_register("dpif/dump-flows", "bridge", 1, 1,
+ ofproto_unixctl_dpif_dump_flows, NULL);
+ unixctl_command_register("dpif/del-flows", "bridge", 1, 1,
+ ofproto_unixctl_dpif_del_flows, NULL);
}
\f
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
return 0;
}
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
if (ofport->realdev_ofp_port) {
vsp_remove(ofport);
uint32_t realdev_odp_port, ovs_be16 vlan_tci)
{
if (!hmap_is_empty(&ofproto->realdev_vid_map)) {
- uint16_t realdev_ofp_port = odp_port_to_ofp_port(realdev_odp_port);
+ uint16_t realdev_ofp_port;
int vid = vlan_tci_to_vid(vlan_tci);
const struct vlan_splinter *vsp;
+ realdev_ofp_port = odp_port_to_ofp_port(ofproto, realdev_odp_port);
HMAP_FOR_EACH_WITH_HASH (vsp, realdev_vid_node,
hash_realdev_vid(realdev_ofp_port, vid),
&ofproto->realdev_vid_map) {
if (vsp->realdev_ofp_port == realdev_ofp_port
&& vsp->vid == vid) {
- return ofp_port_to_odp_port(vsp->vlandev_ofp_port);
+ return ofp_port_to_odp_port(ofproto, vsp->vlandev_ofp_port);
}
}
}
VLOG_ERR("duplicate vlan device record");
}
}
-\f
+
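+/* Returns the datapath port number corresponding to OpenFlow port number
+ * 'ofp_port' within 'ofproto', or OVSP_NONE if there is no such port. */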
+static uint32_t
+ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
+{
+ const struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
+ return ofport ? ofport->odp_port : OVSP_NONE;
+}
+
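+/* Returns the ofport that 'backer' maps datapath port number 'odp_port'
+ * to, or NULL if there is none. */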
+static struct ofport_dpif *
+odp_port_to_ofport(const struct dpif_backer *backer, uint32_t odp_port)
+{
+ struct ofport_dpif *port;
+
+ HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node,
+ hash_int(odp_port, 0),
+ &backer->odp_to_ofport_map) {
+ if (port->odp_port == odp_port) {
+ return port;
+ }
+ }
+
+ return NULL;
+}
+
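+/* Returns the OpenFlow port number corresponding to datapath port number
+ * 'odp_port' within 'ofproto', or OFPP_NONE if the datapath port does not
+ * belong to 'ofproto'. */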
+static uint16_t
+odp_port_to_ofp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
+{
+ struct ofport_dpif *port;
+
+ port = odp_port_to_ofport(ofproto->backer, odp_port);
+ if (port && &ofproto->up == port->up.ofproto) {
+ return port->up.ofp_port;
+ } else {
+ return OFPP_NONE;
+ }
+}
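+
+/* Returns the average life span, in milliseconds, of the subfacets that
+ * 'ofproto' has deleted, or 0 if it has not deleted any yet. */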
+static unsigned long long int
+avg_subfacet_life_span(const struct ofproto_dpif *ofproto)
+{
+ unsigned long long int dc;
+ unsigned long long int avg;
+
+ dc = ofproto->total_subfacet_del_count + ofproto->subfacet_del_count;
+ avg = dc ? ofproto->total_subfacet_life_span / dc : 0;
+
+ return avg;
+}
+
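+/* Returns the average number of subfacets in the datapath, averaged over
+ * the samples taken at each update_stats() call. */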
+static double
+avg_subfacet_count(const struct ofproto_dpif *ofproto)
+{
+ double avg_c = 0.0;
+
+ if (ofproto->n_update_stats) {
+ avg_c = (double)ofproto->total_subfacet_count
+ / ofproto->n_update_stats;
+ }
+
+ return avg_c;
+}
+
+static void
+show_dp_rates(struct ds *ds, const char *heading,
+ const struct avg_subfacet_rates *rates)
+{
+ ds_put_format(ds, "%s add rate: %5.3f/min, del rate: %5.3f/min\n",
+ heading, rates->add_rate, rates->del_rate);
+}
+
+static void
+update_max_subfacet_count(struct ofproto_dpif *ofproto)
+{
+ ofproto->max_n_subfacet = MAX(ofproto->max_n_subfacet,
+ hmap_count(&ofproto->subfacets));
+}
+
+/* Compute exponentially weighted moving average, adding 'new' as the newest,
+ * most heavily weighted element. 'base' designates the rate of decay: after
+ * 'base' further updates, 'new''s weight in the EWMA decays to about 1/e
+ * (about .37). */
+static void
+exp_mavg(double *avg, int base, double new)
+{
+ *avg = (*avg * (base - 1) + new) / base;
+}
+
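+/* Folds the subfacet add and del counts for the past minute into the
+ * hourly moving averages, and the hourly averages into the daily ones on
+ * hour boundaries.  Given exp_mavg()'s decay, a sample's weight after n
+ * further updates is ((base - 1) / base)^n; for the hourly rates, with
+ * base 60 and one update per minute, that works out to (59/60)^60, or
+ * about 0.364, after an hour. */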
+static void
+update_moving_averages(struct ofproto_dpif *ofproto)
+{
+ const int min_ms = 60 * 1000; /* milliseconds in one minute. */
+
+ /* Update hourly averages on the minute boundaries. */
+ if (time_msec() - ofproto->last_minute >= min_ms) {
+ exp_mavg(&ofproto->hourly.add_rate, 60, ofproto->subfacet_add_count);
+ exp_mavg(&ofproto->hourly.del_rate, 60, ofproto->subfacet_del_count);
+
+ /* Update daily averages on the hour boundaries. */
+ if ((ofproto->last_minute - ofproto->created) / min_ms % 60 == 59) {
+ exp_mavg(&ofproto->daily.add_rate, 24, ofproto->hourly.add_rate);
+ exp_mavg(&ofproto->daily.del_rate, 24, ofproto->hourly.del_rate);
+ }
+
+ ofproto->total_subfacet_add_count += ofproto->subfacet_add_count;
+ ofproto->total_subfacet_del_count += ofproto->subfacet_del_count;
+ ofproto->subfacet_add_count = 0;
+ ofproto->subfacet_del_count = 0;
+ ofproto->last_minute += min_ms;
+ }
+}
+
+static void
+dpif_stats_update_hit_count(struct ofproto_dpif *ofproto, uint64_t delta)
+{
+ ofproto->n_hit += delta;
+}
+
const struct ofproto_class ofproto_dpif_class = {
+ init,
enumerate_types,
enumerate_names,
del,
+ port_open_type,
+ type_run,
+ type_run_fast,
+ type_wait,
alloc,
construct,
destruct,
get_netflow_ids,
set_sflow,
set_cfm,
- get_cfm_fault,
- get_cfm_opup,
- get_cfm_remote_mpids,
- get_cfm_health,
+ get_cfm_status,
set_stp,
get_stp_status,
set_stp_port,
set_flood_vlans,
is_mirror_output_bundle,
forward_bpdu_changed,
- set_mac_idle_time,
+ set_mac_table_config,
set_realdev,
};