#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
+#include "connectivity.h"
#include "connmgr.h"
#include "coverage.h"
#include "cfm.h"
#include "ofproto-dpif-upcall.h"
#include "ofproto-dpif-xlate.h"
#include "poll-loop.h"
+#include "seq.h"
#include "simap.h"
#include "smap.h"
#include "timer.h"
#define SUBFACET_DESTROY_MAX_BATCH 50
-static struct subfacet *subfacet_create(struct facet *, struct flow_miss *);
+static struct subfacet *subfacet_create(struct facet *, struct flow_miss *,
+ uint32_t key_hash);
static struct subfacet *subfacet_find(struct dpif_backer *,
const struct nlattr *key, size_t key_len,
uint32_t key_hash);
long long int prev_used; /* Used time from last stats push. */
/* Accounting. */
- uint64_t accounted_bytes; /* Bytes processed by facet_account(). */
- struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
uint16_t tcp_flags; /* TCP flags seen for this 'rule'. */
struct xlate_out xout;
struct dpif_flow_stats *, bool may_learn);
static void facet_push_stats(struct facet *, bool may_learn);
static void facet_learn(struct facet *);
-static void facet_account(struct facet *);
static void push_all_stats(void);
static bool facet_is_controller_flow(struct facet *);
struct bfd *bfd; /* BFD, if any. */
bool may_enable; /* May be enabled in bonds. */
bool is_tunnel; /* This port is a tunnel. */
+ bool is_layer3; /* This is a layer 3 port. */
long long int carrier_seq; /* Carrier status changes. */
struct ofport_dpif *peer; /* Peer if patch port. */
static int set_bfd(struct ofport *, const struct smap *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_update_peer(struct ofport_dpif *);
-static void run_fast_rl(void);
-static int run_fast(struct ofproto *);
struct dpif_completion {
struct list list_node;
COVERAGE_DEFINE(rev_mac_learning);
COVERAGE_DEFINE(rev_inconsistency);
-struct avg_subfacet_rates {
- double add_rate; /* Moving average of new flows created per minute. */
- double del_rate; /* Moving average of flows deleted per minute. */
-};
-
/* All datapaths of a given type share a single dpif backer instance. */
struct dpif_backer {
char *type;
struct timer next_expiration;
struct ovs_rwlock odp_to_ofport_lock;
- struct hmap odp_to_ofport_map OVS_GUARDED; /* ODP port to ofport map. */
+ struct hmap odp_to_ofport_map OVS_GUARDED; /* Contains "struct ofport"s. */
struct simap tnl_backers; /* Set of dpif ports backing tunnels. */
* exposed via "ovs-appctl dpif/show". The goal is to learn about
* traffic patterns in ways that we can use later to improve Open vSwitch
* performance in new situations. */
- long long int created; /* Time when it is created. */
unsigned max_n_subfacet; /* Maximum number of flows */
unsigned avg_n_subfacet; /* Average number of flows. */
long long int avg_subfacet_life; /* Average life span of subfacets. */
struct hmap bundles; /* Contains "struct ofbundle"s. */
struct mac_learning *ml;
bool has_bonded_bundles;
+ bool lacp_enabled;
struct mbridge *mbridge;
/* Facets. */
struct sset ghost_ports; /* Ports with no datapath port. */
struct sset port_poll_set; /* Queued names for port_poll() reply. */
int port_poll_errno; /* Last errno for port_poll() reply. */
+ uint64_t change_seq; /* Connectivity status changes. */
/* Per ofproto's dpif stats. */
uint64_t n_hit;
static struct ofport_dpif *get_ofp_port(const struct ofproto_dpif *ofproto,
ofp_port_t ofp_port);
static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
- const struct ofpbuf *packet, struct ds *);
+ const struct ofpbuf *packet,
+ const struct ofpact[], size_t ofpacts_len,
+ struct ds *);
/* Upcalls. */
static void handle_upcalls(struct dpif_backer *);
/* Flow expiration. */
static int expire(struct dpif_backer *);
-/* NetFlow. */
-static void send_netflow_active_timeouts(struct ofproto_dpif *);
-
/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
dpif_run(backer->dpif);
+ handle_upcalls(backer);
+
/* The most natural place to push facet statistics is when they're pulled
* from the datapath. However, when there are many flows in the datapath,
* this expensive operation can occur so frequently, that it reduces our
ofproto->no_packet_in_rule, ofproto->ml,
ofproto->stp, ofproto->mbridge,
ofproto->sflow, ofproto->ipfix,
- ofproto->up.frag_handling,
+ ofproto->netflow, ofproto->up.frag_handling,
ofproto->up.forward_bpdu,
- connmgr_has_in_band(ofproto->up.connmgr),
- ofproto->netflow != NULL);
+ connmgr_has_in_band(ofproto->up.connmgr));
HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
xlate_bundle_set(ofproto, bundle, bundle->name,
ovs_rwlock_unlock(&ofproto->facets.rwlock);
CLS_CURSOR_FOR_EACH_SAFE (facet, next, cr, &cursor) {
facet_revalidate(facet);
- run_fast_rl();
}
}
}
}
-static int
-dpif_backer_run_fast(struct dpif_backer *backer)
-{
- handle_upcalls(backer);
-
- return 0;
-}
-
-static int
-type_run_fast(const char *type)
-{
- struct dpif_backer *backer;
-
- backer = shash_find_data(&all_dpif_backers, type);
- if (!backer) {
- /* This is not necessarily a problem, since backers are only
- * created on demand. */
- return 0;
- }
-
- return dpif_backer_run_fast(backer);
-}
-
-static void
-run_fast_rl(void)
-{
- static long long int port_rl = LLONG_MIN;
-
- if (time_msec() >= port_rl) {
- struct ofproto_dpif *ofproto;
-
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
- run_fast(&ofproto->up);
- }
- port_rl = time_msec() + 200;
- }
-}
-
static void
type_wait(const char *type)
{
backer->n_handler_threads = n_handler_threads;
backer->max_n_subfacet = 0;
- backer->created = time_msec();
backer->avg_n_subfacet = 0;
backer->avg_subfacet_life = 0;
ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
ofproto->mbridge = mbridge_create();
ofproto->has_bonded_bundles = false;
+ ofproto->lacp_enabled = false;
ovs_mutex_init(&ofproto->stats_mutex);
ovs_mutex_init(&ofproto->vsp_mutex);
- classifier_init(&ofproto->facets);
+ classifier_init(&ofproto->facets, NULL);
ofproto->consistency_rl = LLONG_MIN;
guarded_list_init(&ofproto->pins);
sset_init(&ofproto->ghost_ports);
sset_init(&ofproto->port_poll_set);
ofproto->port_poll_errno = 0;
+ ofproto->change_seq = 0;
SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
struct iface_hint *iface_hint = node->data;
mbridge_unref(ofproto->mbridge);
- netflow_destroy(ofproto->netflow);
+ netflow_unref(ofproto->netflow);
dpif_sflow_unref(ofproto->sflow);
hmap_destroy(&ofproto->bundles);
mac_learning_unref(ofproto->ml);
close_dpif_backer(ofproto->backer);
}
-static int
-run_fast(struct ofproto *ofproto_)
-{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct ofproto_packet_in *pin, *next_pin;
- struct list pins;
-
- /* Do not perform any periodic activity required by 'ofproto' while
- * waiting for flow restore to complete. */
- if (ofproto_get_flow_restore_wait()) {
- return 0;
- }
-
- guarded_list_pop_all(&ofproto->pins, &pins);
- LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
- connmgr_send_packet_in(ofproto->up.connmgr, pin);
- list_remove(&pin->list_node);
- free(CONST_CAST(void *, pin->up.packet));
- free(pin);
- }
-
- return 0;
-}
-
static int
run(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct ofport_dpif *ofport;
- struct ofbundle *bundle;
- int error;
+ uint64_t new_seq;
if (mbridge_need_revalidate(ofproto->mbridge)) {
ofproto->backer->need_revalidate = REV_RECONFIGURE;
ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
- /* Do not perform any periodic activity below required by 'ofproto' while
+ /* Do not perform any periodic activity required by 'ofproto' while
* waiting for flow restore to complete. */
- if (ofproto_get_flow_restore_wait()) {
- return 0;
- }
+ if (!ofproto_get_flow_restore_wait()) {
+ struct ofproto_packet_in *pin, *next_pin;
+ struct list pins;
- error = run_fast(ofproto_);
- if (error) {
- return error;
+ guarded_list_pop_all(&ofproto->pins, &pins);
+ LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
+ connmgr_send_packet_in(ofproto->up.connmgr, pin);
+ list_remove(&pin->list_node);
+ free(CONST_CAST(void *, pin->up.packet));
+ free(pin);
+ }
}
if (ofproto->netflow) {
- if (netflow_run(ofproto->netflow)) {
- send_netflow_active_timeouts(ofproto);
- }
+ netflow_run(ofproto->netflow);
}
if (ofproto->sflow) {
dpif_sflow_run(ofproto->sflow);
dpif_ipfix_run(ofproto->ipfix);
}
- HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
- port_run(ofport);
+ new_seq = seq_read(connectivity_seq_get());
+ if (ofproto->change_seq != new_seq) {
+ struct ofport_dpif *ofport;
+
+ HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
+ port_run(ofport);
+ }
+
+ ofproto->change_seq = new_seq;
}
- HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
- bundle_run(bundle);
+ if (ofproto->lacp_enabled || ofproto->has_bonded_bundles) {
+ struct ofbundle *bundle;
+
+ HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
+ bundle_run(bundle);
+ }
}
stp_run(ofproto);
wait(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct ofbundle *bundle;
if (ofproto_get_flow_restore_wait()) {
return;
if (ofproto->ipfix) {
dpif_ipfix_wait(ofproto->ipfix);
}
- HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
- bundle_wait(bundle);
+ if (ofproto->lacp_enabled || ofproto->has_bonded_bundles) {
+ struct ofbundle *bundle;
+
+ HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
+ bundle_wait(bundle);
+ }
}
if (ofproto->netflow) {
netflow_wait(ofproto->netflow);
port->realdev_ofp_port = 0;
port->vlandev_vid = 0;
port->carrier_seq = netdev_get_carrier_resets(netdev);
+ port->is_layer3 = netdev_vport_is_layer3(netdev);
if (netdev_vport_is_patch(netdev)) {
/* By bailing out here, we don't submit the port to the sFlow module
s->state = stp_port_get_state(sp);
s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000;
s->role = stp_port_get_role(sp);
+
+ return 0;
+}
+
+static int
+get_stp_port_stats(struct ofport *ofport_,
+ struct ofproto_port_stp_stats *s)
+{
+ struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+ struct stp_port *sp = ofport->stp_port;
+
+ if (!ofproto->stp || !sp) {
+ s->enabled = false;
+ return 0;
+ }
+
+ s->enabled = true;
stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count);
return 0;
bundle->floodable = true;
LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
+ || port->is_layer3
|| !stp_forward_in_state(port->stp_state)) {
bundle->floodable = false;
break;
port->bundle = bundle;
list_push_back(&bundle->ports, &port->bundle_node);
if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
+ || port->is_layer3
|| !stp_forward_in_state(port->stp_state)) {
bundle->floodable = false;
}
/* LACP. */
if (s->lacp) {
+ ofproto->lacp_enabled = true;
if (!bundle->lacp) {
ofproto->backer->need_revalidate = REV_RECONFIGURE;
bundle->lacp = lacp_create();
return ofport ? ofport_dpif_cast(ofport) : NULL;
}
-static struct ofport_dpif *
-get_odp_port(const struct ofproto_dpif *ofproto, odp_port_t odp_port)
-{
- struct ofport_dpif *port = odp_port_to_ofport(ofproto->backer, odp_port);
- return port && &ofproto->up == port->up.ofproto ? port : NULL;
-}
-
static void
ofproto_port_from_dpif_port(struct ofproto_dpif *ofproto,
struct ofproto_port *ofproto_port,
{
enum subfacet_path want_path;
struct subfacet *subfacet;
+ uint32_t key_hash;
+ /* Update facet stats. */
facet->packet_count += miss->stats.n_packets;
facet->prev_packet_count += miss->stats.n_packets;
facet->byte_count += miss->stats.n_bytes;
facet->prev_byte_count += miss->stats.n_bytes;
- want_path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
+ /* Look for an existing subfacet. If we find one, update its used time. */
+ key_hash = odp_flow_key_hash(miss->key, miss->key_len);
+ if (!list_is_empty(&facet->subfacets)) {
+ subfacet = subfacet_find(miss->ofproto->backer,
+ miss->key, miss->key_len, key_hash);
+ if (subfacet) {
+ if (subfacet->facet == facet) {
+ subfacet->used = MAX(subfacet->used, miss->stats.used);
+ } else {
+ /* This shouldn't happen. */
+ VLOG_ERR_RL(&rl, "subfacet with wrong facet");
+ subfacet_destroy(subfacet);
+ subfacet = NULL;
+ }
+ }
+ } else {
+ subfacet = NULL;
+ }
/* Don't install the flow if it's the result of the "userspace"
* action for an already installed facet. This can occur when a
return;
}
- subfacet = subfacet_create(facet, miss);
+ /* Create a subfacet, if we don't already have one. */
+ if (!subfacet) {
+ subfacet = subfacet_create(facet, miss, key_hash);
+ }
+
+ /* Install the subfacet, if it's not already installed. */
+ want_path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
if (subfacet->path != want_path) {
struct flow_miss_op *op = &ops[(*n_ops)++];
struct dpif_flow_put *put = &op->dpif_op.u.flow_put;
subfacet->dp_byte_count = stats->n_bytes;
subfacet_update_stats(subfacet, &diff);
- if (facet->accounted_bytes < facet->byte_count) {
+ if (diff.n_packets) {
facet_learn(facet);
- facet_account(facet);
- facet->accounted_bytes = facet->byte_count;
}
}
delete_unexpected_flow(backer, key, key_len);
break;
}
- run_fast_rl();
}
dpif_flow_dump_done(&dump);
}
facet->learn_rl = time_msec() + 500;
list_init(&facet->subfacets);
- netflow_flow_init(&facet->nf_flow);
- netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
xlate_out_copy(&facet->xout, &miss->xout);
classifier_insert(&ofproto->facets, &facet->cr);
ovs_rwlock_unlock(&ofproto->facets.rwlock);
- facet->nf_flow.output_iface = facet->xout.nf_output_iface;
return facet;
}
facet_push_stats(facet, true);
}
-static void
-facet_account(struct facet *facet)
-{
- const struct nlattr *a;
- unsigned int left;
- ovs_be16 vlan_tci;
- uint64_t n_bytes;
-
- if (!facet->xout.has_normal || !facet->ofproto->has_bonded_bundles) {
- return;
- }
- n_bytes = facet->byte_count - facet->accounted_bytes;
-
- /* This loop feeds byte counters to bond_account() for rebalancing to use
- * as a basis. We also need to track the actual VLAN on which the packet
- * is going to be sent to ensure that it matches the one passed to
- * bond_choose_output_slave(). (Otherwise, we will account to the wrong
- * hash bucket.)
- *
- * We use the actions from an arbitrary subfacet because they should all
- * be equally valid for our purpose. */
- vlan_tci = facet->flow.vlan_tci;
- NL_ATTR_FOR_EACH_UNSAFE (a, left, facet->xout.odp_actions.data,
- facet->xout.odp_actions.size) {
- const struct ovs_action_push_vlan *vlan;
- struct ofport_dpif *port;
-
- switch (nl_attr_type(a)) {
- case OVS_ACTION_ATTR_OUTPUT:
- port = get_odp_port(facet->ofproto, nl_attr_get_odp_port(a));
- if (port && port->bundle && port->bundle->bond) {
- bond_account(port->bundle->bond, &facet->flow,
- vlan_tci_to_vid(vlan_tci), n_bytes);
- }
- break;
-
- case OVS_ACTION_ATTR_POP_VLAN:
- vlan_tci = htons(0);
- break;
-
- case OVS_ACTION_ATTR_PUSH_VLAN:
- vlan = nl_attr_get(a);
- vlan_tci = vlan->vlan_tci;
- break;
- }
- }
-}
-
/* Returns true if the only action for 'facet' is to send to the controller.
* (We don't report NetFlow expiration messages for such facets because they
* are just part of the control logic for the network, not real traffic). */
}
facet_push_stats(facet, false);
- if (facet->accounted_bytes < facet->byte_count) {
- facet_account(facet);
- facet->accounted_bytes = facet->byte_count;
- }
if (ofproto->netflow && !facet_is_controller_flow(facet)) {
- struct ofexpired expired;
- expired.flow = facet->flow;
- expired.packet_count = facet->packet_count;
- expired.byte_count = facet->byte_count;
- expired.used = facet->used;
- netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
+ netflow_expire(ofproto->netflow, &facet->flow);
+ netflow_flow_clear(ofproto->netflow, &facet->flow);
}
/* Reset counters to prevent double counting if 'facet' ever gets
* reinstalled. */
facet_reset_counters(facet);
-
- netflow_flow_clear(&facet->nf_flow);
facet->tcp_flags = 0;
}
error = xlate_receive(ofproto->backer, NULL, subfacet->key,
subfacet->key_len, &recv_flow, NULL,
- &recv_ofproto, NULL);
+ &recv_ofproto, NULL, NULL, NULL, NULL);
if (error
|| recv_ofproto != ofproto
|| facet != facet_find(ofproto, &recv_flow)) {
xlate_in_init(&xin, ofproto, &facet->flow, new_rule, 0, NULL);
xlate_actions(&xin, &xout);
flow_wildcards_or(&xout.wc, &xout.wc, &wc);
+ /* Make sure non-packet fields are not masked. If not cleared,
+ * the memcmp() below may fail, causing an otherwise valid facet
+ * to be removed. */
+ flow_wildcards_clear_non_packet_fields(&xout.wc);
/* A facet's slow path reason should only change under dramatic
* circumstances. Rather than try to update everything, it's simpler to
facet->xout.has_fin_timeout = xout.has_fin_timeout;
facet->xout.nf_output_iface = xout.nf_output_iface;
facet->xout.mirrors = xout.mirrors;
- facet->nf_flow.output_iface = facet->xout.nf_output_iface;
ovs_mutex_lock(&new_rule->up.mutex);
facet->used = MAX(facet->used, new_rule->up.created);
facet->byte_count = 0;
facet->prev_packet_count = 0;
facet->prev_byte_count = 0;
- facet->accounted_bytes = 0;
}
static void
flow_push_stats(struct ofproto_dpif *ofproto, struct flow *flow,
struct dpif_flow_stats *stats, bool may_learn)
{
- struct ofport_dpif *in_port;
struct xlate_in xin;
- in_port = get_ofp_port(ofproto, flow->in_port.ofp_port);
- if (in_port && in_port->is_tunnel) {
- netdev_vport_inc_rx(in_port->up.netdev, stats);
- }
-
xlate_in_init(&xin, ofproto, flow, NULL, stats->tcp_flags, NULL);
xin.resubmit_stats = stats;
xin.may_learn = may_learn;
facet->prev_packet_count = facet->packet_count;
facet->prev_byte_count = facet->byte_count;
facet->prev_used = facet->used;
-
- netflow_flow_update_time(facet->ofproto->netflow, &facet->nf_flow,
- facet->used);
- netflow_flow_update_flags(&facet->nf_flow, facet->tcp_flags);
- mirror_update_stats(facet->ofproto->mbridge, facet->xout.mirrors,
- stats.n_packets, stats.n_bytes);
flow_push_stats(facet->ofproto, &facet->flow, &stats, may_learn);
}
}
static void
-push_all_stats__(bool run_fast)
+push_all_stats(void)
{
static long long int rl = LLONG_MIN;
struct ofproto_dpif *ofproto;
cls_cursor_init(&cursor, &ofproto->facets, NULL);
CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
facet_push_stats(facet, false);
- if (run_fast) {
- run_fast_rl();
- }
}
ovs_rwlock_unlock(&ofproto->facets.rwlock);
}
rl = time_msec() + 100;
}
-static void
-push_all_stats(void)
-{
- push_all_stats__(true);
-}
-
void
rule_dpif_credit_stats(struct rule_dpif *rule,
const struct dpif_flow_stats *stats)
return NULL;
}
-/* Searches 'facet' (within 'ofproto') for a subfacet with the specified
- * 'key_fitness', 'key', and 'key_len' members in 'miss'. Returns the
- * existing subfacet if there is one, otherwise creates and returns a
- * new subfacet. */
+/* Creates and returns a new subfacet within 'facet' for the flow in 'miss'.
+ * 'key_hash' must be a hash over miss->key. The caller must have already
+ * ensured that no such subfacet already exists. */
static struct subfacet *
-subfacet_create(struct facet *facet, struct flow_miss *miss)
+subfacet_create(struct facet *facet, struct flow_miss *miss, uint32_t key_hash)
{
struct dpif_backer *backer = miss->ofproto->backer;
const struct nlattr *key = miss->key;
size_t key_len = miss->key_len;
- uint32_t key_hash;
struct subfacet *subfacet;
- key_hash = odp_flow_key_hash(key, key_len);
-
- if (list_is_empty(&facet->subfacets)) {
- subfacet = &facet->one_subfacet;
- } else {
- subfacet = subfacet_find(backer, key, key_len, key_hash);
- if (subfacet) {
- if (subfacet->facet == facet) {
- return subfacet;
- }
-
- /* This shouldn't happen. */
- VLOG_ERR_RL(&rl, "subfacet with wrong facet");
- subfacet_destroy(subfacet);
- }
-
- subfacet = xmalloc(sizeof *subfacet);
- }
+ subfacet = (list_is_empty(&facet->subfacets)
+ ? &facet->one_subfacet
+ : xmalloc(sizeof *subfacet));
COVERAGE_INC(subfacet_create);
hmap_insert(&backer->subfacets, &subfacet->hmap_node, key_hash);
subfacet_reset_dp_stats(subfacets[i], &stats[i]);
subfacets[i]->path = SF_NOT_INSTALLED;
subfacet_destroy(subfacets[i]);
- run_fast_rl();
}
}
{
struct rule_dpif *rule = rule_dpif_cast(rule_);
- /* push_all_stats() can handle flow misses which, when using the learn
- * action, can cause rules to be added and deleted. This can corrupt our
- * caller's datastructures which assume that rule_get_stats() doesn't have
- * an impact on the flow table. To be safe, we disable miss handling. */
- push_all_stats__(false);
+ push_all_stats();
/* Start from historical data for 'rule' itself that are no longer tracked
* in facets. This counts, for example, facets that have expired. */
return netflow_set_options(ofproto->netflow, netflow_options);
} else if (ofproto->netflow) {
ofproto->backer->need_revalidate = REV_RECONFIGURE;
- netflow_destroy(ofproto->netflow);
+ netflow_unref(ofproto->netflow);
ofproto->netflow = NULL;
}
dpif_get_netflow_ids(ofproto->backer->dpif, engine_type, engine_id);
}
-
-static void
-send_active_timeout(struct ofproto_dpif *ofproto, struct facet *facet)
-{
- if (!facet_is_controller_flow(facet) &&
- netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) {
- struct subfacet *subfacet;
- struct ofexpired expired;
-
- LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
- if (subfacet->path == SF_FAST_PATH) {
- struct dpif_flow_stats stats;
-
- subfacet_install(subfacet, &facet->xout.odp_actions,
- &stats);
- subfacet_update_stats(subfacet, &stats);
- }
- }
-
- expired.flow = facet->flow;
- expired.packet_count = facet->packet_count;
- expired.byte_count = facet->byte_count;
- expired.used = facet->used;
- netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
- }
-}
-
-static void
-send_netflow_active_timeouts(struct ofproto_dpif *ofproto)
-{
- struct cls_cursor cursor;
- struct facet *facet;
-
- ovs_rwlock_rdlock(&ofproto->facets.rwlock);
- cls_cursor_init(&cursor, &ofproto->facets, NULL);
- CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
- send_active_timeout(ofproto, facet);
- }
- ovs_rwlock_unlock(&ofproto->facets.rwlock);
-}
\f
static struct ofproto_dpif *
ofproto_dpif_lookup(const char *name)
ds_put_char_multiple(result, '\t', level);
ds_put_format(result, "%s:", title);
for (i = 0; i < FLOW_N_REGS; i++) {
- ds_put_format(result, " reg%zu=0x%"PRIx32, i, trace->flow.regs[i]);
+ ds_put_format(result, " reg%"PRIuSIZE"=0x%"PRIx32, i, trace->flow.regs[i]);
}
ds_put_char(result, '\n');
}
}
if (xlate_receive(backer, NULL, odp_key.data, odp_key.size, flow,
- NULL, ofprotop, NULL)) {
+ NULL, ofprotop, NULL, NULL, NULL, NULL)) {
error = "Invalid datapath flow";
goto exit;
}
struct ds result;
ds_init(&result);
- ofproto_trace(ofproto, &flow, packet, &result);
+ ofproto_trace(ofproto, &flow, packet, NULL, 0, &result);
unixctl_command_reply(conn, ds_cstr(&result));
ds_destroy(&result);
ofpbuf_delete(packet);
}
}
+static void
+ofproto_unixctl_trace_actions(struct unixctl_conn *conn, int argc,
+ const char *argv[], void *aux OVS_UNUSED)
+{
+ enum ofputil_protocol usable_protocols;
+ struct ofproto_dpif *ofproto;
+ bool enforce_consistency;
+ struct ofpbuf ofpacts;
+ struct ofpbuf *packet;
+ struct ds result;
+ struct flow flow;
+ uint16_t in_port;
+
+ /* Three kinds of error return values! */
+ enum ofperr retval;
+ const char *error;
+ char *rw_error;
+
+ packet = NULL;
+ ds_init(&result);
+ ofpbuf_init(&ofpacts, 0);
+
+ /* Parse actions. */
+ rw_error = parse_ofpacts(argv[--argc], &ofpacts, &usable_protocols);
+ if (rw_error) {
+ unixctl_command_reply_error(conn, rw_error);
+ free(rw_error);
+ goto exit;
+ }
+
+ /* OpenFlow 1.1 and later suggest that the switch enforces certain forms of
+ * consistency between the flow and the actions. With -consistent, we
+ * enforce consistency even for a flow supported in OpenFlow 1.0. */
+ if (!strcmp(argv[1], "-consistent")) {
+ enforce_consistency = true;
+ argv++;
+ argc--;
+ } else {
+ enforce_consistency = false;
+ }
+
+ error = parse_flow_and_packet(argc, argv, &ofproto, &flow, &packet);
+ if (error) {
+ unixctl_command_reply_error(conn, error);
+ goto exit;
+ }
+
+ /* Do the same checks as handle_packet_out() in ofproto.c.
+ *
+ * We pass a 'table_id' of 0 to ofproto_check_ofpacts(), which isn't
+ * strictly correct because these actions aren't in any table, but it's OK
+ * because 'table_id' is used only to check goto_table instructions, but
+ * packet-outs take a list of actions and therefore it can't include
+ * instructions.
+ *
+ * We skip the "meter" check here because meter is an instruction, not an
+ * action, and thus cannot appear in ofpacts. */
+ in_port = ofp_to_u16(flow.in_port.ofp_port);
+ if (in_port >= ofproto->up.max_ports && in_port < ofp_to_u16(OFPP_MAX)) {
+ unixctl_command_reply_error(conn, "invalid in_port");
+ goto exit;
+ }
+ if (enforce_consistency) {
+ retval = ofpacts_check_consistency(ofpacts.data, ofpacts.size, &flow,
+ u16_to_ofp(ofproto->up.max_ports),
+ 0, 0, usable_protocols);
+ } else {
+ retval = ofpacts_check(ofpacts.data, ofpacts.size, &flow,
+ u16_to_ofp(ofproto->up.max_ports), 0, 0,
+ &usable_protocols);
+ }
+
+ if (retval) {
+ ds_clear(&result);
+ ds_put_format(&result, "Bad actions: %s", ofperr_to_string(retval));
+ unixctl_command_reply_error(conn, ds_cstr(&result));
+ goto exit;
+ }
+
+ ofproto_trace(ofproto, &flow, packet, ofpacts.data, ofpacts.size, &result);
+ unixctl_command_reply(conn, ds_cstr(&result));
+
+exit:
+ ds_destroy(&result);
+ ofpbuf_delete(packet);
+ ofpbuf_uninit(&ofpacts);
+}
+
+/* Implements a "trace" through 'ofproto''s flow table, appending a textual
+ * description of the results to 'ds'.
+ *
+ * The trace follows a packet with the specified 'flow' through the flow
+ * table. 'packet' may be nonnull to trace an actual packet, with consequent
+ * side effects (if it is nonnull then its flow must be 'flow').
+ *
+ * If 'ofpacts' is nonnull then its 'ofpacts_len' bytes specify the actions to
+ * trace, otherwise the actions are determined by a flow table lookup. */
static void
ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
- const struct ofpbuf *packet, struct ds *ds)
+ const struct ofpbuf *packet,
+ const struct ofpact ofpacts[], size_t ofpacts_len,
+ struct ds *ds)
{
struct rule_dpif *rule;
struct flow_wildcards wc;
+ ds_put_format(ds, "Bridge: %s\n", ofproto->up.name);
ds_put_cstr(ds, "Flow: ");
flow_format(ds, flow);
ds_put_char(ds, '\n');
flow_wildcards_init_catchall(&wc);
- rule_dpif_lookup(ofproto, flow, &wc, &rule);
+ if (ofpacts) {
+ rule = NULL;
+ } else {
+ rule_dpif_lookup(ofproto, flow, &wc, &rule);
- trace_format_rule(ds, 0, rule);
- if (rule == ofproto->miss_rule) {
- ds_put_cstr(ds, "\nNo match, flow generates \"packet in\"s.\n");
- } else if (rule == ofproto->no_packet_in_rule) {
- ds_put_cstr(ds, "\nNo match, packets dropped because "
- "OFPPC_NO_PACKET_IN is set on in_port.\n");
- } else if (rule == ofproto->drop_frags_rule) {
- ds_put_cstr(ds, "\nPackets dropped because they are IP fragments "
- "and the fragment handling mode is \"drop\".\n");
+ trace_format_rule(ds, 0, rule);
+ if (rule == ofproto->miss_rule) {
+ ds_put_cstr(ds, "\nNo match, flow generates \"packet in\"s.\n");
+ } else if (rule == ofproto->no_packet_in_rule) {
+ ds_put_cstr(ds, "\nNo match, packets dropped because "
+ "OFPPC_NO_PACKET_IN is set on in_port.\n");
+ } else if (rule == ofproto->drop_frags_rule) {
+ ds_put_cstr(ds, "\nPackets dropped because they are IP fragments "
+ "and the fragment handling mode is \"drop\".\n");
+ }
}
- if (rule) {
+ if (rule || ofpacts) {
uint64_t odp_actions_stub[1024 / 8];
struct ofpbuf odp_actions;
struct trace_ctx trace;
ofpbuf_use_stub(&odp_actions,
odp_actions_stub, sizeof odp_actions_stub);
xlate_in_init(&trace.xin, ofproto, flow, rule, tcp_flags, packet);
+ if (ofpacts) {
+ trace.xin.ofpacts = ofpacts;
+ trace.xin.ofpacts_len = ofpacts_len;
+ }
trace.xin.resubmit_hook = trace_resubmit;
trace.xin.report_hook = trace_report;
ds_put_format(ds, "%s: hit:%"PRIu64" missed:%"PRIu64"\n",
dpif_name(backer->dpif), n_hit, n_missed);
- ds_put_format(ds, "\tflows: cur: %zu, avg: %u, max: %u,"
+ ds_put_format(ds, "\tflows: cur: %"PRIuSIZE", avg: %u, max: %u,"
" life span: %lldms\n", hmap_count(&backer->subfacets),
backer->avg_n_subfacet, backer->max_n_subfacet,
backer->avg_subfacet_life);
CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
cls_rule_format(&facet->cr, &ds);
ds_put_cstr(&ds, ", ");
- ds_put_format(&ds, "n_subfacets:%zu, ", list_size(&facet->subfacets));
+ ds_put_format(&ds, "n_subfacets:%"PRIuSIZE", ", list_size(&facet->subfacets));
ds_put_format(&ds, "used:%.3fs, ", (now - facet->used) / 1000.0);
ds_put_cstr(&ds, "Datapath actions: ");
if (facet->xout.slow) {
"ofproto/trace",
"{[dp_name] odp_flow | bridge br_flow} [-generate|packet]",
1, 3, ofproto_unixctl_trace, NULL);
+ unixctl_command_register(
+ "ofproto/trace-packet-out",
+ "[-consistent] {[dp_name] odp_flow | bridge br_flow} [-generate|packet] actions",
+ 2, 6, ofproto_unixctl_trace_actions, NULL);
unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
ofproto_unixctl_fdb_flush, NULL);
unixctl_command_register("fdb/show", "bridge", 1, 1,
del,
port_open_type,
type_run,
- type_run_fast,
type_wait,
alloc,
construct,
destruct,
dealloc,
run,
- run_fast,
wait,
get_memory_usage,
flush,
get_stp_status,
set_stp_port,
get_stp_port_status,
+ get_stp_port_stats,
set_queues,
bundle_set,
bundle_remove,