static bool facet_is_controller_flow(struct facet *);
+/* Node in 'ofport_dpif''s 'priorities' map. Used to maintain a map from
+ * 'priority' (the datapath's term for QoS queue) to the DSCP bits which all
+ * traffic egressing the 'ofport' with that priority should be marked with. */
+struct priority_to_dscp {
+ struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'priorities' map. */
+ uint32_t priority; /* Priority of this queue (see struct flow). */
+
+ uint8_t dscp; /* DSCP bits to mark outgoing traffic with. */
+};
+
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
*
* This is deprecated. It is only for compatibility with broken device drivers
static int set_bfd(struct ofport *, const struct smap *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_clear_priorities(struct ofport_dpif *);
+static void ofport_update_peer(struct ofport_dpif *);
static void run_fast_rl(void);
/* NOTE(review): appears to pair a queued 'ofoperation' with its eventual
 * dpif completion; the enqueue/complete sites are outside this view --
 * confirm against where 'struct dpif_completion' is allocated and freed. */
struct dpif_completion {
    struct ofoperation *op;
};
+/* Reasons that we might need to revalidate every facet, and corresponding
+ * coverage counters.
+ *
+ * A value of 0 means that there is no need to revalidate.
+ *
+ * It would be nice to have some cleaner way to integrate with coverage
+ * counters, but with only a few reasons I guess this is good enough for
+ * now. */
+enum revalidate_reason {
+ REV_RECONFIGURE = 1, /* Switch configuration changed. */
+ REV_STP, /* Spanning tree protocol port status change. */
+ REV_PORT_TOGGLED, /* Port enabled or disabled by CFM, LACP, ... */
+ REV_FLOW_TABLE, /* Flow table changed. */
+ REV_INCONSISTENCY /* Facet self-check failed. */
+};
COVERAGE_DEFINE(rev_reconfigure);
COVERAGE_DEFINE(rev_stp);
COVERAGE_DEFINE(rev_port_toggled);
size_t key_len;
};
+/* Moving averages of subfacet (datapath flow) creation and deletion rates,
+ * used for the flow-rate statistics exposed via "ovs-appctl dpif/show"
+ * (see the subfacet statistics in 'struct dpif_backer'). */
+struct avg_subfacet_rates {
+ double add_rate; /* Moving average of new flows created per minute. */
+ double del_rate; /* Moving average of flows deleted per minute. */
+};
+
+/* All datapaths of a given type share a single dpif backer instance. */
+struct dpif_backer {
+ char *type; /* Datapath type; see 'all_dpif_backers',
+ * which is indexed by ofproto->up.type. */
+ int refcount; /* Presumably the number of ofprotos sharing
+ * this backer -- confirm at open/close. */
+ struct dpif *dpif; /* The shared datapath interface. */
+ struct timer next_expiration; /* NOTE(review): looks like the next flow
+ * expiration wakeup -- confirm. */
+ struct hmap odp_to_ofport_map; /* ODP port to ofport mapping. */
+
+ struct simap tnl_backers; /* Set of dpif ports backing tunnels. */
+
+ /* Facet revalidation flags applying to facets which use this backer. */
+ enum revalidate_reason need_revalidate; /* Revalidate every facet. */
+ struct tag_set revalidate_set; /* Revalidate only matching facets. */
+
+ struct hmap drop_keys; /* Set of dropped odp keys. */
+ bool recv_set_enable; /* Enables or disables receiving packets. */
+
+ struct hmap subfacets; /* Contains the backer's subfacets. */
+ struct governor *governor; /* NOTE(review): semantics not visible in
+ * this view -- confirm. */
+
+ /* Subfacet statistics.
+ *
+ * These keep track of the total number of subfacets added and deleted and
+ * flow life span. They are useful for computing the flow rates stats
+ * exposed via "ovs-appctl dpif/show". The goal is to learn about
+ * traffic patterns in ways that we can use later to improve Open vSwitch
+ * performance in new situations. */
+ long long int created; /* Time when this backer was created. */
+ unsigned max_n_subfacet; /* Maximum number of flows. */
+ unsigned avg_n_subfacet; /* Average number of flows. */
+ long long int avg_subfacet_life; /* Average life span of subfacets. */
+
+ /* The average number of subfacets... */
+ struct avg_subfacet_rates hourly; /* ...over the last hour. */
+ struct avg_subfacet_rates daily; /* ...over the last day. */
+ struct avg_subfacet_rates lifetime; /* ...over the switch lifetime. */
+ long long int last_minute; /* Last time 'hourly' was updated. */
+
+ /* Number of subfacets added or deleted since 'last_minute'. */
+ unsigned subfacet_add_count;
+ unsigned subfacet_del_count;
+
+ /* Number of subfacets added or deleted from 'created' to 'last_minute'. */
+ unsigned long long int total_subfacet_add_count;
+ unsigned long long int total_subfacet_del_count;
+};
+
/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
static struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);
struct oftable *table;
int i;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
complete_operations(ofproto);
port->stp_port = NULL;
port->stp_state = STP_DISABLED;
port->tnl_port = NULL;
+ port->peer = NULL;
hmap_init(&port->priorities);
port->realdev_ofp_port = 0;
port->vlandev_vid = 0;
* to be "internal" to the switch as a whole, and therefore not an
* candidate for counter polling. */
port->odp_port = OVSP_NONE;
+ ofport_update_peer(port);
return 0;
}
ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
+ if (port->peer) {
+ port->peer->peer = NULL;
+ port->peer = NULL;
+ }
+
if (port->odp_port != OVSP_NONE && !port->tnl_port) {
hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node);
}
if (port->cfm) {
cfm_set_netdev(port->cfm, port->up.netdev);
}
+
+ if (port->tnl_port && tnl_port_reconfigure(&port->up, port->odp_port,
+ &port->tnl_port)) {
+ /* Tunnel configuration changed: revalidate every facet.
+ * 'need_revalidate' is an enum revalidate_reason, so use
+ * REV_RECONFIGURE (as elsewhere in this file) rather than the
+ * bool 'true', which only coincidentally equals REV_RECONFIGURE
+ * and bypasses the REV_* coverage-counter semantics. */
+ ofproto_dpif_cast(port->up.ofproto)->backer->need_revalidate
+ = REV_RECONFIGURE;
+ }
+
+ ofport_update_peer(port);
}
static void
}
}
-/* Returns true if STP should process 'flow'. */
-static bool
-stp_should_process_flow(const struct flow *flow)
+/* Returns true if STP should process 'flow'. Sets fields in 'wc' that
+ * were used to make the determination. */
+bool
+stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
{
+ memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
return eth_addr_equals(flow->dl_dst, eth_addr_stp);
}
-static void
+void
stp_process_packet(const struct ofport_dpif *ofport,
const struct ofpbuf *packet)
{
}
}
\f
-struct priority_to_dscp *
+/* Translates OpenFlow queue id 'queue_id' into the priority value the
+ * datapath uses to identify that queue, storing it in '*priority'.
+ * Thin wrapper around dpif_queue_to_priority() on 'ofproto''s backer;
+ * returns whatever that function returns (presumably 0 on success and a
+ * positive errno value on failure -- confirm in the dpif API). */
+int
+ofproto_dpif_queue_to_priority(const struct ofproto_dpif *ofproto,
+ uint32_t queue_id, uint32_t *priority)
+{
+ return dpif_queue_to_priority(ofproto->backer->dpif, queue_id, priority);
+}
+
+static struct priority_to_dscp *
get_priority(const struct ofport_dpif *ofport, uint32_t priority)
{
struct priority_to_dscp *pdscp;
return NULL;
}
+/* Looks up the DSCP marking configured for traffic egressing 'ofport' at
+ * datapath 'priority'. If a mapping exists, stores its DSCP bits in
+ * '*dscp' and returns true; otherwise stores 0 in '*dscp' and returns
+ * false. */
+bool
+ofproto_dpif_dscp_from_priority(const struct ofport_dpif *ofport,
+ uint32_t priority, uint8_t *dscp)
+{
+ struct priority_to_dscp *pdscp = get_priority(ofport, priority);
+ *dscp = pdscp ? pdscp->dscp : 0;
+ return pdscp != NULL;
+}
+
static void
ofport_clear_priorities(struct ofport_dpif *ofport)
{
ofproto_port->ofp_port = odp_port_to_ofp_port(ofproto, dpif_port->port_no);
}
-struct ofport_dpif *
-ofport_get_peer(const struct ofport_dpif *ofport_dpif)
+static void
+ofport_update_peer(struct ofport_dpif *ofport)
{
const struct ofproto_dpif *ofproto;
- const char *peer;
+ struct dpif_backer *backer;
+ const char *peer_name;
- peer = netdev_vport_patch_peer(ofport_dpif->up.netdev);
- if (!peer) {
- return NULL;
+ if (!netdev_vport_is_patch(ofport->up.netdev)) {
+ return;
+ }
+
+ backer = ofproto_dpif_cast(ofport->up.ofproto)->backer;
+ /* Patch-port peering changed: revalidate every facet. 'need_revalidate'
+ * is an enum revalidate_reason, so use REV_RECONFIGURE (as elsewhere in
+ * this file) rather than the bool 'true', which only coincidentally
+ * equals REV_RECONFIGURE and bypasses the REV_* coverage counters. */
+ backer->need_revalidate = REV_RECONFIGURE;
+
+ if (ofport->peer) {
+ ofport->peer->peer = NULL;
+ ofport->peer = NULL;
+ }
+
+ peer_name = netdev_vport_patch_peer(ofport->up.netdev);
+ if (!peer_name) {
+ return;
}
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
- struct ofport *ofport;
+ struct ofport *peer_ofport;
+ struct ofport_dpif *peer;
+ const char *peer_peer;
+
+ if (ofproto->backer != backer) {
+ continue;
+ }
+
+ peer_ofport = shash_find_data(&ofproto->up.port_by_name, peer_name);
+ if (!peer_ofport) {
+ continue;
+ }
- ofport = shash_find_data(&ofproto->up.port_by_name, peer);
- if (ofport && ofport->ofproto->ofproto_class == &ofproto_dpif_class) {
- return ofport_dpif_cast(ofport);
+ peer = ofport_dpif_cast(peer_ofport);
+ peer_peer = netdev_vport_patch_peer(peer->up.netdev);
+ if (peer_peer && !strcmp(netdev_get_name(ofport->up.netdev),
+ peer_peer)) {
+ ofport->peer = peer;
+ ofport->peer->peer = ofport;
}
+
+ return;
}
- return NULL;
}
static void
port_run_fast(ofport);
- if (ofport->tnl_port
- && tnl_port_reconfigure(&ofport->up, ofport->odp_port,
- &ofport->tnl_port)) {
- ofproto_dpif_cast(ofport->up.ofproto)->backer->need_revalidate = true;
- }
-
if (ofport->cfm) {
int cfm_opup = cfm_get_opup(ofport->cfm);
connmgr_send_packet_in(ofproto->up.connmgr, &pin);
}
-enum slow_path_reason
-process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
- const struct ofport_dpif *ofport, const struct ofpbuf *packet)
-{
- if (!ofport) {
- return 0;
- } else if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
- if (packet) {
- cfm_process_heartbeat(ofport->cfm, packet);
- }
- return SLOW_CFM;
- } else if (ofport->bfd && bfd_should_process_flow(flow)) {
- if (packet) {
- bfd_process_packet(ofport->bfd, flow, packet);
- }
- return SLOW_BFD;
- } else if (ofport->bundle && ofport->bundle->lacp
- && flow->dl_type == htons(ETH_TYPE_LACP)) {
- if (packet) {
- lacp_process_packet(ofport->bundle->lacp, ofport, packet);
- }
- return SLOW_LACP;
- } else if (ofproto->stp && stp_should_process_flow(flow)) {
- if (packet) {
- stp_process_packet(ofport, packet);
- }
- return SLOW_STP;
- } else {
- return 0;
- }
-}
-
static struct flow_miss *
flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto,
const struct flow *flow, uint32_t hash)
put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
put->key = miss->key;
put->key_len = miss->key_len;
+ put->mask = NULL;
+ put->mask_len = 0;
if (want_path == SF_FAST_PATH) {
put->actions = facet->xout.odp_actions.data;
put->actions_len = facet->xout.odp_actions.size;
hmap_insert(&backer->drop_keys, &drop_key->hmap_node,
hash_bytes(drop_key->key, drop_key->key_len, 0));
dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
- drop_key->key, drop_key->key_len, NULL, 0, NULL);
+ drop_key->key, drop_key->key_len,
+ NULL, 0, NULL, 0, NULL);
}
continue;
}
size_t key_len;
dpif_flow_dump_start(&dump, backer->dpif);
- while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
+ while (dpif_flow_dump_next(&dump, &key, &key_len,
+ NULL, NULL, NULL, NULL, &stats)) {
struct subfacet *subfacet;
uint32_t key_hash;
&actions, &actions_len);
}
- ret = dpif_flow_put(subfacet->backer->dpif, flags, subfacet->key,
- subfacet->key_len, actions, actions_len, stats);
+ ret = dpif_flow_put(ofproto->backer->dpif, flags, subfacet->key,
+ subfacet->key_len, NULL, 0,
+ actions, actions_len, stats);
if (stats) {
subfacet_reset_dp_stats(subfacet, stats);
return NULL;
}
+ if (wc) {
+ wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
+ }
+
cls = &ofproto->up.tables[table_id].cls;
frag = (flow->nw_frag & FLOW_NW_FRAG_ANY) != 0;
if (frag && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
if (netflow_options) {
if (!ofproto->netflow) {
ofproto->netflow = netflow_create();
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
return netflow_set_options(ofproto->netflow, netflow_options);
- } else {
+ } else if (ofproto->netflow) {
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
netflow_destroy(ofproto->netflow);
ofproto->netflow = NULL;
- return 0;
}
+
+ return 0;
}
static void
* bridge is specified. If function odp_flow_key_from_string()
* returns 0, the flow is a odp_flow. If function
* parse_ofp_exact_flow() returns 0, the flow is a br_flow. */
- if (!odp_flow_key_from_string(argv[argc - 1], NULL, &odp_key)) {
+ if (!odp_flow_from_string(argv[argc - 1], NULL, &odp_key, NULL)) {
/* If the odp_flow is the second argument,
* the datapath name is the first argument. */
if (argc == 3) {