/*
- * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "netdev-vport.h"
#include "netdev.h"
#include "netlink.h"
-#include "netlink-socket.h"
#include "nx-match.h"
#include "odp-util.h"
#include "odp-execute.h"
#include "ofproto-dpif-ipfix.h"
#include "ofproto-dpif-mirror.h"
#include "ofproto-dpif-monitor.h"
+#include "ofproto-dpif-rid.h"
#include "ofproto-dpif-sflow.h"
#include "ofproto-dpif-upcall.h"
#include "ofproto-dpif-xlate.h"
* - Do include packets and bytes from datapath flows which have not
* recently been processed by a revalidator. */
struct ovs_mutex stats_mutex;
- uint64_t packet_count OVS_GUARDED; /* Number of packets received. */
- uint64_t byte_count OVS_GUARDED; /* Number of bytes received. */
+ struct dpif_flow_stats stats OVS_GUARDED;
};
-static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes);
+static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes,
+ long long int *used);
static struct rule_dpif *rule_dpif_cast(const struct rule *);
static void rule_expire(struct rule_dpif *);
bool recv_set_enable; /* Enables or disables receiving packets. */
+ struct recirc_id_pool *rid_pool; /* Recirculation ID pool. */
+
/* True if the datapath supports variable-length
* OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions.
* False if the datapath supports only 8-byte (or shorter) userdata. */
bool variable_length_userdata;
+
+ /* Maximum number of MPLS label stack entries that the datapath supports
+ * in a match */
+ size_t max_mpls_depth;
};
/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}
+/* Returns the maximum number of MPLS label stack entries that the datapath
+ * backing 'ofproto' supports in a match, as probed by check_max_mpls_depth()
+ * when the backer was opened. */
+size_t
+ofproto_dpif_get_max_mpls_depth(const struct ofproto_dpif *ofproto)
+{
+ return ofproto->backer->max_mpls_depth;
+}
+
static struct ofport_dpif *get_ofp_port(const struct ofproto_dpif *ofproto,
ofp_port_t ofp_port);
static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
ofproto->netflow, ofproto->up.frag_handling,
ofproto->up.forward_bpdu,
connmgr_has_in_band(ofproto->up.connmgr),
- ofproto->backer->variable_length_userdata);
+ ofproto->backer->variable_length_userdata,
+ ofproto->backer->max_mpls_depth);
HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
xlate_bundle_set(ofproto, bundle, bundle->name,
ovs_rwlock_destroy(&backer->odp_to_ofport_lock);
hmap_destroy(&backer->odp_to_ofport_map);
shash_find_and_delete(&all_dpif_backers, backer->type);
+ recirc_id_pool_destroy(backer->rid_pool);
free(backer->type);
dpif_close(backer->dpif);
-
free(backer);
}
};
static bool check_variable_length_userdata(struct dpif_backer *backer);
+static size_t check_max_mpls_depth(struct dpif_backer *backer);
static int
open_dpif_backer(const char *type, struct dpif_backer **backerp)
struct shash_node *node;
struct list garbage_list;
struct odp_garbage *garbage, *next;
+
struct sset names;
char *backer_name;
const char *name;
return error;
}
backer->variable_length_userdata = check_variable_length_userdata(backer);
+ backer->max_mpls_depth = check_max_mpls_depth(backer);
+ backer->rid_pool = recirc_id_pool_create();
if (backer->recv_set_enable) {
udpif_set_threads(backer->udpif, n_handlers, n_revalidators);
{
struct eth_header *eth;
struct ofpbuf actions;
- struct ofpbuf key;
+ struct dpif_execute execute;
struct ofpbuf packet;
size_t start;
int error;
ofpbuf_init(&actions, 64);
start = nl_msg_start_nested(&actions, OVS_ACTION_ATTR_USERSPACE);
nl_msg_put_u32(&actions, OVS_USERSPACE_ATTR_PID,
- dpif_port_get_pid(backer->dpif, ODPP_NONE));
+ dpif_port_get_pid(backer->dpif, ODPP_NONE, 0));
nl_msg_put_unspec_zero(&actions, OVS_USERSPACE_ATTR_USERDATA, 4);
nl_msg_end_nested(&actions, start);
- /* Compose an ODP flow key. The key is arbitrary but it must match the
- * packet that we compose later. */
- ofpbuf_init(&key, 64);
- nl_msg_put_u32(&key, OVS_KEY_ATTR_IN_PORT, 0);
- nl_msg_put_unspec_zero(&key, OVS_KEY_ATTR_ETHERNET,
- sizeof(struct ovs_key_ethernet));
- nl_msg_put_be16(&key, OVS_KEY_ATTR_ETHERTYPE, htons(0x1234));
-
- /* Compose a packet that matches the key. */
+ /* Compose a dummy ethernet packet. */
ofpbuf_init(&packet, ETH_HEADER_LEN);
eth = ofpbuf_put_zeros(&packet, ETH_HEADER_LEN);
eth->eth_type = htons(0x1234);
- /* Execute the actions. On older datapaths this fails with -ERANGE, on
+ /* Execute the actions. On older datapaths this fails with ERANGE, on
* newer datapaths it succeeds. */
- error = dpif_execute(backer->dpif, key.data, key.size,
- actions.data, actions.size, &packet, false);
+ execute.actions = ofpbuf_data(&actions);
+ execute.actions_len = ofpbuf_size(&actions);
+ execute.packet = &packet;
+ execute.md = PKT_METADATA_INITIALIZER(0);
+ execute.needs_help = false;
+
+ error = dpif_execute(backer->dpif, &execute);
ofpbuf_uninit(&packet);
- ofpbuf_uninit(&key);
ofpbuf_uninit(&actions);
switch (error) {
}
}
+/* Tests the MPLS label stack depth supported by 'backer''s datapath.
+ *
+ * Returns the number of elements in a struct flow's mpls_lse field
+ * if the datapath supports at least that many entries in an
+ * MPLS label stack.
+ * Otherwise returns the number of MPLS push actions supported by
+ * the datapath. */
+static size_t
+check_max_mpls_depth(struct dpif_backer *backer)
+{
+ struct flow flow;
+ int n;
+
+ for (n = 0; n < FLOW_MAX_MPLS_LABELS; n++) {
+ struct odputil_keybuf keybuf;
+ struct ofpbuf key;
+ int error;
+
+ /* Build a probe flow whose MPLS entry 'n' has the bottom-of-stack
+ * bit set (presumably yielding a stack of depth n + 1 -- confirm
+ * flow_set_mpls_bos() semantics in lib/flow.h). */
+ memset(&flow, 0, sizeof flow);
+ flow.dl_type = htons(ETH_TYPE_MPLS);
+ flow_set_mpls_bos(&flow, n, 1);
+
+ ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
+ odp_flow_key_from_flow(&key, &flow, 0);
+
+ /* Try to install a flow matching this stack depth. EINVAL is the
+ * expected "unsupported depth" answer and ends the probe quietly;
+ * any other error (except EEXIST) ends it with a warning. */
+ error = dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
+ ofpbuf_data(&key), ofpbuf_size(&key), NULL, 0, NULL, 0, NULL);
+ if (error && error != EEXIST) {
+ if (error != EINVAL) {
+ VLOG_WARN("%s: MPLS stack length feature probe failed (%s)",
+ dpif_name(backer->dpif), ovs_strerror(error));
+ }
+ break;
+ }
+
+ /* Remove the probe flow so it cannot affect real traffic. */
+ error = dpif_flow_del(backer->dpif, ofpbuf_data(&key), ofpbuf_size(&key), NULL);
+ if (error) {
+ VLOG_WARN("%s: failed to delete MPLS feature probe flow",
+ dpif_name(backer->dpif));
+ }
+ }
+
+ VLOG_INFO("%s: MPLS label stack length probed as %d",
+ dpif_name(backer->dpif), n);
+ return n;
+}
+
static int
construct(struct ofproto *ofproto_)
{
ofproto->mbridge = mbridge_create();
ofproto->has_bonded_bundles = false;
ofproto->lacp_enabled = false;
- ovs_mutex_init(&ofproto->stats_mutex);
+ ovs_mutex_init_adaptive(&ofproto->stats_mutex);
ovs_mutex_init(&ofproto->vsp_mutex);
guarded_list_init(&ofproto->pins);
const struct ofpbuf *ofpacts, struct rule_dpif **rulep)
{
struct ofputil_flow_mod fm;
+ struct classifier *cls;
int error;
match_init_catchall(&fm.match);
fm.buffer_id = 0;
fm.out_port = 0;
fm.flags = 0;
- fm.ofpacts = ofpacts->data;
- fm.ofpacts_len = ofpacts->size;
+ fm.ofpacts = ofpbuf_data(ofpacts);
+ fm.ofpacts_len = ofpbuf_size(ofpacts);
error = ofproto_flow_mod(&ofproto->up, &fm);
if (error) {
return error;
}
- if (rule_dpif_lookup_in_table(ofproto, &fm.match.flow, NULL, TBL_INTERNAL,
- rulep)) {
- rule_dpif_unref(*rulep);
- } else {
- OVS_NOT_REACHED();
- }
+ cls = &ofproto->up.tables[TBL_INTERNAL].cls;
+ fat_rwlock_rdlock(&cls->rwlock);
+ *rulep = rule_dpif_cast(rule_from_cls_rule(
+ classifier_lookup(cls, &fm.match.flow, NULL)));
+ ovs_assert(*rulep != NULL);
+ fat_rwlock_unlock(&cls->rwlock);
return 0;
}
xlate_remove_ofproto(ofproto);
ovs_rwlock_unlock(&xlate_rwlock);
- /* Discard any flow_miss_batches queued up for 'ofproto', avoiding a
- * use-after-free error. */
- udpif_revalidate(ofproto->backer->udpif);
+ /* Ensure that the upcall processing threads have no remaining references
+ * to the ofproto or anything in it. */
+ udpif_synchronize(ofproto->backer->udpif);
hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
struct cls_cursor cursor;
- ovs_rwlock_rdlock(&table->cls.rwlock);
+ fat_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, NULL);
- ovs_rwlock_unlock(&table->cls.rwlock);
+ fat_rwlock_unlock(&table->cls.rwlock);
CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
ofproto_rule_delete(&ofproto->up, &rule->up);
}
}
static void
-flush(struct ofproto *ofproto OVS_UNUSED)
+flush(struct ofproto *ofproto_)
{
- udpif_flush();
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ struct dpif_backer *backer = ofproto->backer;
+
+ /* 'backer' may be null (e.g. if construction failed); nothing to flush
+ * then. NOTE(review): udpif_flush() presumably discards the backer's
+ * datapath flows -- confirm against ofproto-dpif-upcall.h. */
+ if (backer) {
+ udpif_flush(backer->udpif);
+ }
}
static void
struct dpif_dp_stats s;
uint64_t n_miss, n_no_pkt_in, n_bytes, n_dropped_frags;
uint64_t n_lookup;
+ long long int used;
strcpy(ots->name, "classifier");
dpif_get_dp_stats(ofproto->backer->dpif, &s);
- rule_get_stats(&ofproto->miss_rule->up, &n_miss, &n_bytes);
- rule_get_stats(&ofproto->no_packet_in_rule->up, &n_no_pkt_in, &n_bytes);
- rule_get_stats(&ofproto->drop_frags_rule->up, &n_dropped_frags, &n_bytes);
-
+ rule_get_stats(&ofproto->miss_rule->up, &n_miss, &n_bytes, &used);
+ rule_get_stats(&ofproto->no_packet_in_rule->up, &n_no_pkt_in, &n_bytes,
+ &used);
+ rule_get_stats(&ofproto->drop_frags_rule->up, &n_dropped_frags, &n_bytes,
+ &used);
n_lookup = s.n_hit + s.n_missed - n_dropped_frags;
ots->lookup_count = htonll(n_lookup);
ots->matched_count = htonll(n_lookup - n_miss - n_no_pkt_in);
bundle_remove(port_);
set_cfm(port_, NULL);
set_bfd(port_, NULL);
+ if (port->stp_port) {
+ stp_port_disable(port->stp_port);
+ }
if (ofproto->sflow) {
dpif_sflow_del_port(ofproto->sflow, port->odp_port);
}
learning_packet = bond_compose_learning_packet(bundle->bond,
e->mac, e->vlan,
&port_void);
- learning_packet->private_p = port_void;
+ /* Temporarily use l2 as a private pointer (see below). */
+ ovs_assert(learning_packet->l2 == ofpbuf_data(learning_packet));
+ learning_packet->l2 = port_void;
list_push_back(&packets, &learning_packet->list_node);
}
}
error = n_packets = n_errors = 0;
LIST_FOR_EACH (learning_packet, list_node, &packets) {
int ret;
+ void *port_void = learning_packet->l2;
- ret = ofproto_dpif_send_packet(learning_packet->private_p, learning_packet);
+ /* Restore l2. */
+ learning_packet->l2 = ofpbuf_data(learning_packet);
+ ret = ofproto_dpif_send_packet(port_void, learning_packet);
if (ret) {
error = ret;
n_errors++;
rule_expire(struct rule_dpif *rule)
OVS_REQUIRES(ofproto_mutex)
{
- uint16_t idle_timeout, hard_timeout;
+ uint16_t hard_timeout, idle_timeout;
long long int now = time_msec();
- int reason;
+ int reason = -1;
ovs_assert(!rule->up.pending);
- /* Has 'rule' expired? */
- ovs_mutex_lock(&rule->up.mutex);
hard_timeout = rule->up.hard_timeout;
idle_timeout = rule->up.idle_timeout;
- if (hard_timeout && now > rule->up.modified + hard_timeout * 1000) {
- reason = OFPRR_HARD_TIMEOUT;
- } else if (idle_timeout && now > rule->up.used + idle_timeout * 1000) {
- reason = OFPRR_IDLE_TIMEOUT;
- } else {
- reason = -1;
+
+ /* Has 'rule' expired? */
+ if (hard_timeout) {
+ long long int modified;
+
+ ovs_mutex_lock(&rule->up.mutex);
+ modified = rule->up.modified;
+ ovs_mutex_unlock(&rule->up.mutex);
+
+ if (now > modified + hard_timeout * 1000) {
+ reason = OFPRR_HARD_TIMEOUT;
+ }
+ }
+
+ if (reason < 0 && idle_timeout) {
+ long long int used;
+
+ ovs_mutex_lock(&rule->stats_mutex);
+ used = rule->stats.used;
+ ovs_mutex_unlock(&rule->stats_mutex);
+
+ if (now > used + idle_timeout * 1000) {
+ reason = OFPRR_IDLE_TIMEOUT;
+ }
}
- ovs_mutex_unlock(&rule->up.mutex);
if (reason >= 0) {
COVERAGE_INC(ofproto_dpif_expired);
const struct ofpact *ofpacts, size_t ofpacts_len,
struct ofpbuf *packet)
{
- struct odputil_keybuf keybuf;
struct dpif_flow_stats stats;
struct xlate_out xout;
struct xlate_in xin;
ofp_port_t in_port;
- struct ofpbuf key;
+ struct dpif_execute execute;
int error;
ovs_assert((rule != NULL) != (ofpacts != NULL));
xin.resubmit_stats = &stats;
xlate_actions(&xin, &xout);
- ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
in_port = flow->in_port.ofp_port;
if (in_port == OFPP_NONE) {
in_port = OFPP_LOCAL;
}
- odp_flow_key_from_flow(&key, flow, ofp_port_to_odp_port(ofproto, in_port));
+ execute.actions = ofpbuf_data(&xout.odp_actions);
+ execute.actions_len = ofpbuf_size(&xout.odp_actions);
+ execute.packet = packet;
+ execute.md.tunnel = flow->tunnel;
+ execute.md.skb_priority = flow->skb_priority;
+ execute.md.pkt_mark = flow->pkt_mark;
+ execute.md.in_port.odp_port = ofp_port_to_odp_port(ofproto, in_port);
+ execute.needs_help = (xout.slow & SLOW_ACTION) != 0;
+
+ error = dpif_execute(ofproto->backer->dpif, &execute);
- error = dpif_execute(ofproto->backer->dpif, key.data, key.size,
- xout.odp_actions.data, xout.odp_actions.size, packet,
- (xout.slow & SLOW_ACTION) != 0);
xlate_out_uninit(&xout);
return error;
const struct dpif_flow_stats *stats)
{
ovs_mutex_lock(&rule->stats_mutex);
- rule->packet_count += stats->n_packets;
- rule->byte_count += stats->n_bytes;
- rule->up.used = MAX(rule->up.used, stats->used);
+ rule->stats.n_packets += stats->n_packets;
+ rule->stats.n_bytes += stats->n_bytes;
+ rule->stats.used = MAX(rule->stats.used, stats->used);
ovs_mutex_unlock(&rule->stats_mutex);
}
return rule_is_table_miss(&rule->up);
}
+/* Returns true if 'rule' is an internal rule, that is, one installed in the
+ * hidden TBL_INTERNAL table, false otherwise. */
+bool
+rule_dpif_is_internal(const struct rule_dpif *rule)
+{
+ return rule_is_internal(&rule->up);
+}
+
ovs_be64
rule_dpif_get_flow_cookie(const struct rule_dpif *rule)
OVS_REQUIRES(rule->up.mutex)
return rule_get_actions(&rule->up);
}
-/* Lookup 'flow' in 'ofproto''s classifier. If 'wc' is non-null, sets
- * the fields that were relevant as part of the lookup. */
-void
+/* Lookup 'flow' in table 0 of 'ofproto''s classifier.
+ * If 'wc' is non-null, sets the fields that were relevant as part of
+ * the lookup. Returns the table_id where a match or miss occurred.
+ *
+ * The return value will be zero unless there was a miss and
+ * OFPTC_TABLE_MISS_CONTINUE is in effect for the sequence of tables
+ * where misses occur. */
+uint8_t
rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow,
struct flow_wildcards *wc, struct rule_dpif **rule)
{
- struct ofport_dpif *port;
+ enum rule_dpif_lookup_verdict verdict;
+ enum ofputil_port_config config = 0;
+ uint8_t table_id = 0;
- if (rule_dpif_lookup_in_table(ofproto, flow, wc, 0, rule)) {
- return;
+ verdict = rule_dpif_lookup_from_table(ofproto, flow, wc, true,
+ &table_id, rule);
+
+ /* On a miss, translate the lookup verdict into a port configuration so
+ * that choose_miss_rule() below can pick between the miss rule and the
+ * no-packet-in rule. */
+ switch (verdict) {
+ case RULE_DPIF_LOOKUP_VERDICT_MATCH:
+ return table_id;
+ case RULE_DPIF_LOOKUP_VERDICT_CONTROLLER: {
+ struct ofport_dpif *port;
+
+ port = get_ofp_port(ofproto, flow->in_port.ofp_port);
+ if (!port) {
+ VLOG_WARN_RL(&rl, "packet-in on unknown OpenFlow port %"PRIu16,
+ flow->in_port.ofp_port);
+ }
+ config = port ? port->up.pp.config : 0;
+ break;
}
- port = get_ofp_port(ofproto, flow->in_port.ofp_port);
- if (!port) {
- VLOG_WARN_RL(&rl, "packet-in on unknown OpenFlow port %"PRIu16,
- flow->in_port.ofp_port);
+ case RULE_DPIF_LOOKUP_VERDICT_DROP:
+ config = OFPUTIL_PC_NO_PACKET_IN;
+ break;
+ default:
+ OVS_NOT_REACHED();
}
- choose_miss_rule(port ? port->up.pp.config : 0, ofproto->miss_rule,
+ choose_miss_rule(config, ofproto->miss_rule,
ofproto->no_packet_in_rule, rule);
+ return table_id;
}
-bool
-rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto,
- const struct flow *flow, struct flow_wildcards *wc,
- uint8_t table_id, struct rule_dpif **rule)
+/* Looks up 'flow' in the classifier of table 'table_id' in 'ofproto',
+ * applying the configured OpenFlow fragment-handling policy. If 'wc' is
+ * nonnull, updates it with the fields relevant to the lookup. Returns the
+ * matching rule with a reference taken (the caller must release it), or
+ * NULL if no rule matches. */
+static struct rule_dpif *
+rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, uint8_t table_id,
+ const struct flow *flow, struct flow_wildcards *wc)
{
+ struct classifier *cls = &ofproto->up.tables[table_id].cls;
const struct cls_rule *cls_rule;
- struct classifier *cls;
- bool frag;
-
- *rule = NULL;
- if (table_id >= N_TABLES) {
- return false;
- }
+ struct rule_dpif *rule;
- if (wc) {
- memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
- wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
- }
+ fat_rwlock_rdlock(&cls->rwlock);
+ /* Unless NXM-style fragment matching is configured, the result depends
+ * on dl_type and (for IP) nw_frag, so those must be unwildcarded. */
+ if (ofproto->up.frag_handling != OFPC_FRAG_NX_MATCH) {
+ if (wc) {
+ memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
+ if (is_ip_any(flow)) {
+ wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
+ }
+ }
- cls = &ofproto->up.tables[table_id].cls;
- ovs_rwlock_rdlock(&cls->rwlock);
- frag = (flow->nw_frag & FLOW_NW_FRAG_ANY) != 0;
- if (frag && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
- /* We must pretend that transport ports are unavailable. */
- struct flow ofpc_normal_flow = *flow;
- ofpc_normal_flow.tp_src = htons(0);
- ofpc_normal_flow.tp_dst = htons(0);
- cls_rule = classifier_lookup(cls, &ofpc_normal_flow, wc);
- } else if (frag && ofproto->up.frag_handling == OFPC_FRAG_DROP) {
- cls_rule = &ofproto->drop_frags_rule->up.cr;
- /* Frag mask in wc already set above. */
+ if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
+ if (ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
+ /* We must pretend that transport ports are unavailable. */
+ struct flow ofpc_normal_flow = *flow;
+ ofpc_normal_flow.tp_src = htons(0);
+ ofpc_normal_flow.tp_dst = htons(0);
+ cls_rule = classifier_lookup(cls, &ofpc_normal_flow, wc);
+ } else {
+ /* Must be OFPC_FRAG_DROP (we don't have OFPC_FRAG_REASM). */
+ cls_rule = &ofproto->drop_frags_rule->up.cr;
+ }
+ } else {
+ cls_rule = classifier_lookup(cls, flow, wc);
+ }
} else {
cls_rule = classifier_lookup(cls, flow, wc);
}
- *rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
- rule_dpif_ref(*rule);
- ovs_rwlock_unlock(&cls->rwlock);
+ rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
+ rule_dpif_ref(rule);
+ fat_rwlock_unlock(&cls->rwlock);
- return *rule != NULL;
+ return rule;
+}
+
+/* Look up 'flow' in 'ofproto''s classifier starting from table '*table_id'.
+ * Stores the rule that was found in '*rule', or NULL if none was found.
+ * Updates 'wc', if nonnull, to reflect the fields that were used during the
+ * lookup.
+ *
+ * If 'honor_table_miss' is true, the first lookup occurs in '*table_id', but
+ * if none is found then the table miss configuration for that table is
+ * honored, which can result in additional lookups in other OpenFlow tables.
+ * In this case the function updates '*table_id' to reflect the final OpenFlow
+ * table that was searched.
+ *
+ * If 'honor_table_miss' is false, then only one table lookup occurs, in
+ * '*table_id'.
+ *
+ * Returns:
+ *
+ * - RULE_DPIF_LOOKUP_VERDICT_MATCH if a rule (in '*rule') was found.
+ *
+ * - RULE_DPIF_LOOKUP_VERDICT_DROP if no rule was found and a table miss
+ * configuration specified that the packet should be dropped in this
+ * case. (This occurs only if 'honor_table_miss' is true, because only in
+ * this case does the table miss configuration matter.)
+ *
+ * - RULE_DPIF_LOOKUP_VERDICT_CONTROLLER if no rule was found otherwise. */
+enum rule_dpif_lookup_verdict
+rule_dpif_lookup_from_table(struct ofproto_dpif *ofproto,
+ const struct flow *flow,
+ struct flow_wildcards *wc,
+ bool honor_table_miss,
+ uint8_t *table_id, struct rule_dpif **rule)
+{
+ uint8_t next_id;
+
+ /* The extra increment when next_id == TBL_INTERNAL skips the hidden
+ * internal table, which is never part of the OpenFlow pipeline. */
+ for (next_id = *table_id;
+ next_id < ofproto->up.n_tables;
+ next_id++, next_id += (next_id == TBL_INTERNAL))
+ {
+ *table_id = next_id;
+ *rule = rule_dpif_lookup_in_table(ofproto, *table_id, flow, wc);
+ if (*rule) {
+ return RULE_DPIF_LOOKUP_VERDICT_MATCH;
+ } else if (!honor_table_miss) {
+ return RULE_DPIF_LOOKUP_VERDICT_CONTROLLER;
+ } else {
+ switch (table_get_config(&ofproto->up, *table_id)
+ & OFPTC11_TABLE_MISS_MASK) {
+ case OFPTC11_TABLE_MISS_CONTINUE:
+ break;
+
+ case OFPTC11_TABLE_MISS_CONTROLLER:
+ return RULE_DPIF_LOOKUP_VERDICT_CONTROLLER;
+
+ case OFPTC11_TABLE_MISS_DROP:
+ return RULE_DPIF_LOOKUP_VERDICT_DROP;
+ }
+ }
+ }
+
+ return RULE_DPIF_LOOKUP_VERDICT_CONTROLLER;
}
/* Given a port configuration (specified as zero if there's no port), chooses
static enum ofperr
rule_construct(struct rule *rule_)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
{
struct rule_dpif *rule = rule_dpif_cast(rule_);
- ovs_mutex_init(&rule->stats_mutex);
- ovs_mutex_lock(&rule->stats_mutex);
- rule->packet_count = 0;
- rule->byte_count = 0;
- ovs_mutex_unlock(&rule->stats_mutex);
+ ovs_mutex_init_adaptive(&rule->stats_mutex);
+ /* The OVS_GUARDED stats fields are written without holding stats_mutex,
+ * hence OVS_NO_THREAD_SAFETY_ANALYSIS above. NOTE(review): presumably
+ * safe because the rule is not yet visible to other threads during
+ * construction -- confirm against the ofproto rule lifecycle. */
+ rule->stats.n_packets = 0;
+ rule->stats.n_bytes = 0;
+ rule->stats.used = rule->up.modified;
return 0;
}
}
+/* Reports 'rule_''s packet count, byte count, and last-used time through
+ * 'packets', 'bytes', and 'used', reading them consistently under
+ * stats_mutex. */
static void
-rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
+rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes,
+ long long int *used)
{
struct rule_dpif *rule = rule_dpif_cast(rule_);
ovs_mutex_lock(&rule->stats_mutex);
- *packets = rule->packet_count;
- *bytes = rule->byte_count;
+ *packets = rule->stats.n_packets;
+ *bytes = rule->stats.n_bytes;
+ *used = rule->stats.used;
ovs_mutex_unlock(&rule->stats_mutex);
}
if (reset_counters) {
ovs_mutex_lock(&rule->stats_mutex);
- rule->packet_count = 0;
- rule->byte_count = 0;
+ rule->stats.n_packets = 0;
+ rule->stats.n_bytes = 0;
ovs_mutex_unlock(&rule->stats_mutex);
}
group_construct(struct ofgroup *group_)
{
struct group_dpif *group = group_dpif_cast(group_);
- ovs_mutex_init(&group->stats_mutex);
+ const struct ofputil_bucket *bucket;
+
+ /* Prevent group chaining because our locking structure makes it hard to
+ * implement deadlock-free. (See xlate_group_resource_check().) */
+ LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
+ const struct ofpact *a;
+
+ OFPACT_FOR_EACH (a, bucket->ofpacts, bucket->ofpacts_len) {
+ if (a->type == OFPACT_GROUP) {
+ return OFPERR_OFPGMFC_CHAINING_UNSUPPORTED;
+ }
+ }
+ }
+
+ ovs_mutex_init_adaptive(&group->stats_mutex);
ovs_mutex_lock(&group->stats_mutex);
group_construct_stats(group);
ovs_mutex_unlock(&group->stats_mutex);
static enum ofperr
group_modify(struct ofgroup *group_, struct ofgroup *victim_)
{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(group_->ofproto);
struct group_dpif *group = group_dpif_cast(group_);
struct group_dpif *victim = group_dpif_cast(victim_);
group_construct_stats(group);
ovs_mutex_unlock(&group->stats_mutex);
+ ofproto->backer->need_revalidate = REV_FLOW_TABLE;
+
return 0;
}
ovs_mutex_lock(&ofproto->stats_mutex);
ofproto->stats.tx_packets++;
- ofproto->stats.tx_bytes += packet->size;
+ ofproto->stats.tx_bytes += ofpbuf_size(packet);
ovs_mutex_unlock(&ofproto->stats_mutex);
return error;
}
struct xlate_out xout;
struct xlate_in xin;
struct flow flow;
+ struct flow_wildcards wc;
struct ds *result;
};
ds_put_cstr(result, "OpenFlow actions=");
ofpacts_format(actions->ofpacts, actions->ofpacts_len, result);
ds_put_char(result, '\n');
-
- rule_actions_unref(actions);
}
static void
ds_put_char_multiple(result, '\t', level);
ds_put_format(result, "%s: ", title);
- format_odp_actions(result, odp_actions->data, odp_actions->size);
+ format_odp_actions(result, ofpbuf_data(odp_actions),
+ ofpbuf_size(odp_actions));
+ ds_put_char(result, '\n');
+}
+
+/* Appends to 'result' a line, indented 'level' tab stops, containing 'title'
+ * followed by the megaflow (wildcarded match) accumulated so far in 'trace'. */
+static void
+trace_format_megaflow(struct ds *result, int level, const char *title,
+ struct trace_ctx *trace)
+{
+ struct match match;
+
+ ds_put_char_multiple(result, '\t', level);
+ ds_put_format(result, "%s: ", title);
+ /* Folds the wildcards produced by translation into trace->wc, so the
+ * trace's running wildcard set is updated as a side effect. */
+ flow_wildcards_or(&trace->wc, &trace->xout.wc, &trace->wc);
+ match_init(&match, &trace->flow, &trace->wc);
+ match_format(&match, result, OFP_DEFAULT_PRIORITY);
+ ds_put_char(result, '\n');
+}
trace_format_flow(result, recurse + 1, "Resubmitted flow", trace);
trace_format_regs(result, recurse + 1, "Resubmitted regs", trace);
trace_format_odp(result, recurse + 1, "Resubmitted odp", trace);
+ trace_format_megaflow(result, recurse + 1, "Resubmitted megaflow", trace);
trace_format_rule(result, recurse + 1, rule);
}
goto exit;
}
- if (xlate_receive(backer, NULL, odp_key.data, odp_key.size, flow,
- NULL, ofprotop, NULL, NULL, NULL, NULL)) {
+ if (xlate_receive(backer, NULL, ofpbuf_data(&odp_key),
+ ofpbuf_size(&odp_key), flow,
+ ofprotop, NULL, NULL, NULL, NULL)) {
error = "Invalid datapath flow";
goto exit;
}
/* Generate a packet, if requested. */
if (packet) {
- if (!packet->size) {
+ if (!ofpbuf_size(packet)) {
flow_compose(packet, flow);
} else {
- union flow_in_port in_port = flow->in_port;
+ struct pkt_metadata md = pkt_metadata_from_flow(flow);
/* Use the metadata from the flow and the packet argument
* to reconstruct the flow. */
- flow_extract(packet, flow->skb_priority, flow->pkt_mark, NULL,
- &in_port, flow);
+ flow_extract(packet, &md, flow);
}
}
goto exit;
}
if (enforce_consistency) {
- retval = ofpacts_check_consistency(ofpacts.data, ofpacts.size, &flow,
- u16_to_ofp(ofproto->up.max_ports),
+ retval = ofpacts_check_consistency(ofpbuf_data(&ofpacts), ofpbuf_size(&ofpacts),
+ &flow, u16_to_ofp(ofproto->up.max_ports),
0, 0, usable_protocols);
} else {
- retval = ofpacts_check(ofpacts.data, ofpacts.size, &flow,
+ retval = ofpacts_check(ofpbuf_data(&ofpacts), ofpbuf_size(&ofpacts), &flow,
u16_to_ofp(ofproto->up.max_ports), 0, 0,
&usable_protocols);
}
goto exit;
}
- ofproto_trace(ofproto, &flow, packet, ofpacts.data, ofpacts.size, &result);
+ ofproto_trace(ofproto, &flow, packet,
+ ofpbuf_data(&ofpacts), ofpbuf_size(&ofpacts), &result);
unixctl_command_reply(conn, ds_cstr(&result));
exit:
struct ds *ds)
{
struct rule_dpif *rule;
- struct flow_wildcards wc;
+ struct trace_ctx trace;
ds_put_format(ds, "Bridge: %s\n", ofproto->up.name);
ds_put_cstr(ds, "Flow: ");
flow_format(ds, flow);
ds_put_char(ds, '\n');
- flow_wildcards_init_catchall(&wc);
+ flow_wildcards_init_catchall(&trace.wc);
if (ofpacts) {
rule = NULL;
} else {
- rule_dpif_lookup(ofproto, flow, &wc, &rule);
+ rule_dpif_lookup(ofproto, flow, &trace.wc, &rule);
trace_format_rule(ds, 0, rule);
if (rule == ofproto->miss_rule) {
}
if (rule || ofpacts) {
- uint64_t odp_actions_stub[1024 / 8];
- struct ofpbuf odp_actions;
- struct trace_ctx trace;
- struct match match;
- uint16_t tcp_flags;
-
- tcp_flags = packet ? packet_get_tcp_flags(packet, flow) : 0;
trace.result = ds;
trace.flow = *flow;
- ofpbuf_use_stub(&odp_actions,
- odp_actions_stub, sizeof odp_actions_stub);
- xlate_in_init(&trace.xin, ofproto, flow, rule, tcp_flags, packet);
+ xlate_in_init(&trace.xin, ofproto, flow, rule, ntohs(flow->tcp_flags),
+ packet);
if (ofpacts) {
trace.xin.ofpacts = ofpacts;
trace.xin.ofpacts_len = ofpacts_len;
trace.xin.report_hook = trace_report;
xlate_actions(&trace.xin, &trace.xout);
- flow_wildcards_or(&trace.xout.wc, &trace.xout.wc, &wc);
ds_put_char(ds, '\n');
trace_format_flow(ds, 0, "Final flow", &trace);
-
- match_init(&match, flow, &trace.xout.wc);
- ds_put_cstr(ds, "Relevant fields: ");
- match_format(&match, ds, OFP_DEFAULT_PRIORITY);
- ds_put_char(ds, '\n');
+ trace_format_megaflow(ds, 0, "Megaflow", &trace);
ds_put_cstr(ds, "Datapath actions: ");
- format_odp_actions(ds, trace.xout.odp_actions.data,
- trace.xout.odp_actions.size);
+ format_odp_actions(ds, ofpbuf_data(&trace.xout.odp_actions),
+ ofpbuf_size(&trace.xout.odp_actions));
if (trace.xout.slow) {
enum slow_path_reason slow;
ofproto_dpif_contains_flow(const struct ofproto_dpif *ofproto,
const struct nlattr *key, size_t key_len)
{
- enum odp_key_fitness fitness;
struct ofproto_dpif *ofp;
struct flow flow;
- xlate_receive(ofproto->backer, NULL, key, key_len, &flow, &fitness, &ofp,
+ xlate_receive(ofproto->backer, NULL, key, key_len, &flow, &ofp,
NULL, NULL, NULL, NULL);
return ofp == ofproto;
}
struct dpif_port dpif_port;
struct dpif_port_dump port_dump;
struct hmap portno_names;
+ void *state = NULL;
+ int error;
ofproto = ofproto_dpif_lookup(argv[argc - 1]);
if (!ofproto) {
}
ds_init(&ds);
- dpif_flow_dump_start(&flow_dump, ofproto->backer->dpif);
- while (dpif_flow_dump_next(&flow_dump, &key, &key_len, &mask, &mask_len,
- &actions, &actions_len, &stats)) {
+ error = dpif_flow_dump_start(&flow_dump, ofproto->backer->dpif);
+ if (error) {
+ goto exit;
+ }
+ dpif_flow_dump_state_init(ofproto->backer->dpif, &state);
+ while (dpif_flow_dump_next(&flow_dump, state, &key, &key_len,
+ &mask, &mask_len, &actions, &actions_len,
+ &stats)) {
if (!ofproto_dpif_contains_flow(ofproto, key, key_len)) {
continue;
}
format_odp_actions(&ds, actions, actions_len);
ds_put_char(&ds, '\n');
}
+ dpif_flow_dump_state_uninit(ofproto->backer->dpif, state);
+ error = dpif_flow_dump_done(&flow_dump);
- if (dpif_flow_dump_done(&flow_dump)) {
+exit:
+ if (error) {
ds_clear(&ds);
ds_put_format(&ds, "dpif/dump_flows failed: %s", ovs_strerror(errno));
unixctl_command_reply_error(conn, ds_cstr(&ds));
unixctl_command_register("dpif/dump-flows", "[-m] bridge", 1, 2,
ofproto_unixctl_dpif_dump_flows, NULL);
}
+
+
+/* Returns true if 'rule' is an internal rule, i.e. one installed in the
+ * hidden TBL_INTERNAL table, false otherwise. */
+bool
+rule_is_internal(const struct rule *rule)
+{
+ return rule->table_id == TBL_INTERNAL;
+}
\f
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
*
ofproto_has_vlan_splinters(const struct ofproto_dpif *ofproto)
OVS_EXCLUDED(ofproto->vsp_mutex)
{
- bool ret;
-
- ovs_mutex_lock(&ofproto->vsp_mutex);
- ret = !hmap_is_empty(&ofproto->realdev_vid_map);
- ovs_mutex_unlock(&ofproto->vsp_mutex);
- return ret;
+ /* hmap_is_empty is thread safe. */
+ return !hmap_is_empty(&ofproto->realdev_vid_map);
}
+
static ofp_port_t
vsp_realdev_to_vlandev__(const struct ofproto_dpif *ofproto,
ofp_port_t realdev_ofp_port, ovs_be16 vlan_tci)
{
ofp_port_t ret;
+ /* hmap_is_empty is thread safe, see if we can return immediately. */
+ if (hmap_is_empty(&ofproto->realdev_vid_map)) {
+ return realdev_ofp_port;
+ }
ovs_mutex_lock(&ofproto->vsp_mutex);
ret = vsp_realdev_to_vlandev__(ofproto, realdev_ofp_port, vlan_tci);
ovs_mutex_unlock(&ofproto->vsp_mutex);
ofp_port_t realdev;
int vid;
+ /* hmap_is_empty is thread safe. */
+ if (hmap_is_empty(&ofproto->vlandev_map)) {
+ return false;
+ }
+
ovs_mutex_lock(&ofproto->vsp_mutex);
realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port.ofp_port, &vid);
ovs_mutex_unlock(&ofproto->vsp_mutex);
}
}
+/* Allocates a recirculation ID from the pool shared by all bridges on
+ * 'ofproto''s backer. */
+uint32_t
+ofproto_dpif_alloc_recirc_id(struct ofproto_dpif *ofproto)
+{
+ struct dpif_backer *backer = ofproto->backer;
+
+ return recirc_id_alloc(backer->rid_pool);
+}
+
+/* Returns 'recirc_id', previously obtained from
+ * ofproto_dpif_alloc_recirc_id(), back to the backer's pool. */
+void
+ofproto_dpif_free_recirc_id(struct ofproto_dpif *ofproto, uint32_t recirc_id)
+{
+ struct dpif_backer *backer = ofproto->backer;
+
+ recirc_id_free(backer->rid_pool, recirc_id);
+}
+
const struct ofproto_class ofproto_dpif_class = {
init,
enumerate_types,