long long int prev_used; /* Used time from last stats push. */
/* Accounting. */
- uint64_t accounted_bytes; /* Bytes processed by facet_account(). */
- struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
uint16_t tcp_flags; /* TCP flags seen for this 'rule'. */
struct xlate_out xout;
struct dpif_flow_stats *, bool may_learn);
static void facet_push_stats(struct facet *, bool may_learn);
static void facet_learn(struct facet *);
-static void facet_account(struct facet *);
static void push_all_stats(void);
static bool facet_is_controller_flow(struct facet *);
static int set_bfd(struct ofport *, const struct smap *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_update_peer(struct ofport_dpif *);
-static void run_fast_rl(void);
-static int run_fast(struct ofproto *);
struct dpif_completion {
struct list list_node;
struct hmap bundles; /* Contains "struct ofbundle"s. */
struct mac_learning *ml;
bool has_bonded_bundles;
+ bool lacp_enabled;
struct mbridge *mbridge;
/* Facets. */
/* Flow expiration. */
static int expire(struct dpif_backer *);
-/* NetFlow. */
-static void send_netflow_active_timeouts(struct ofproto_dpif *);
-
/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
dpif_run(backer->dpif);
+ handle_upcalls(backer);
+
/* The most natural place to push facet statistics is when they're pulled
 * from the datapath.  However, when there are many flows in the datapath,
 * this expensive operation can occur so frequently that it reduces our
 * ability to quickly set up flows, so push_all_stats() rate limits the
 * push instead. */
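
A minimal sketch of that rate-limiting idiom (a static timestamp guard, as
used by push_all_stats() and the removed run_fast_rl()), assuming OVS's
time_msec() helper from lib/timeval.h; the function names and the 100 ms
interval here are illustrative only:

    #include <limits.h>      /* LLONG_MIN */
    #include "timeval.h"     /* time_msec(): monotonic milliseconds. */

    static void
    expensive_work(void)
    {
        /* The expensive operation (e.g. a stats push) would go here. */
    }

    /* Runs expensive_work() at most once per 100 ms, however often the
     * caller invokes this. */
    static void
    expensive_work_rate_limited(void)
    {
        static long long int rl = LLONG_MIN;  /* Earliest permitted next run. */

        if (time_msec() >= rl) {
            expensive_work();
            rl = time_msec() + 100;
        }
    }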
ofproto->no_packet_in_rule, ofproto->ml,
ofproto->stp, ofproto->mbridge,
ofproto->sflow, ofproto->ipfix,
- ofproto->up.frag_handling,
+ ofproto->netflow, ofproto->up.frag_handling,
ofproto->up.forward_bpdu,
- connmgr_has_in_band(ofproto->up.connmgr),
- ofproto->netflow != NULL);
+ connmgr_has_in_band(ofproto->up.connmgr));
HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
xlate_bundle_set(ofproto, bundle, bundle->name,
ovs_rwlock_unlock(&ofproto->facets.rwlock);
CLS_CURSOR_FOR_EACH_SAFE (facet, next, cr, &cursor) {
facet_revalidate(facet);
- run_fast_rl();
}
}
}
}
-static int
-dpif_backer_run_fast(struct dpif_backer *backer)
-{
- handle_upcalls(backer);
-
- return 0;
-}
-
-static int
-type_run_fast(const char *type)
-{
- struct dpif_backer *backer;
-
- backer = shash_find_data(&all_dpif_backers, type);
- if (!backer) {
- /* This is not necessarily a problem, since backers are only
- * created on demand. */
- return 0;
- }
-
- return dpif_backer_run_fast(backer);
-}
-
-static void
-run_fast_rl(void)
-{
- static long long int port_rl = LLONG_MIN;
-
- if (time_msec() >= port_rl) {
- struct ofproto_dpif *ofproto;
-
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
- run_fast(&ofproto->up);
- }
- port_rl = time_msec() + 200;
- }
-}
-
static void
type_wait(const char *type)
{
ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
ofproto->mbridge = mbridge_create();
ofproto->has_bonded_bundles = false;
+ ofproto->lacp_enabled = false;
ovs_mutex_init(&ofproto->stats_mutex);
ovs_mutex_init(&ofproto->vsp_mutex);
mbridge_unref(ofproto->mbridge);
- netflow_destroy(ofproto->netflow);
+ netflow_unref(ofproto->netflow);
dpif_sflow_unref(ofproto->sflow);
hmap_destroy(&ofproto->bundles);
mac_learning_unref(ofproto->ml);
close_dpif_backer(ofproto->backer);
}
-static int
-run_fast(struct ofproto *ofproto_)
-{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct ofproto_packet_in *pin, *next_pin;
- struct list pins;
-
- /* Do not perform any periodic activity required by 'ofproto' while
- * waiting for flow restore to complete. */
- if (ofproto_get_flow_restore_wait()) {
- return 0;
- }
-
- guarded_list_pop_all(&ofproto->pins, &pins);
- LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
- connmgr_send_packet_in(ofproto->up.connmgr, pin);
- list_remove(&pin->list_node);
- free(CONST_CAST(void *, pin->up.packet));
- free(pin);
- }
-
- return 0;
-}
-
static int
run(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct ofbundle *bundle;
uint64_t new_seq;
- int error;
if (mbridge_need_revalidate(ofproto->mbridge)) {
ofproto->backer->need_revalidate = REV_RECONFIGURE;
ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
- /* Do not perform any periodic activity below required by 'ofproto' while
+ /* Do not perform any periodic activity required by 'ofproto' while
* waiting for flow restore to complete. */
- if (ofproto_get_flow_restore_wait()) {
- return 0;
- }
+ if (!ofproto_get_flow_restore_wait()) {
+ struct ofproto_packet_in *pin, *next_pin;
+ struct list pins;
- error = run_fast(ofproto_);
- if (error) {
- return error;
+ guarded_list_pop_all(&ofproto->pins, &pins);
+ LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
+ connmgr_send_packet_in(ofproto->up.connmgr, pin);
+ list_remove(&pin->list_node);
+ free(CONST_CAST(void *, pin->up.packet));
+ free(pin);
+ }
}
if (ofproto->netflow) {
- if (netflow_run(ofproto->netflow)) {
- send_netflow_active_timeouts(ofproto);
- }
+ netflow_run(ofproto->netflow);
}
if (ofproto->sflow) {
dpif_sflow_run(ofproto->sflow);
ofproto->change_seq = new_seq;
}
- HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
- bundle_run(bundle);
+ if (ofproto->lacp_enabled || ofproto->has_bonded_bundles) {
+ struct ofbundle *bundle;
+
+ HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
+ bundle_run(bundle);
+ }
}
stp_run(ofproto);
wait(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct ofbundle *bundle;
if (ofproto_get_flow_restore_wait()) {
return;
if (ofproto->ipfix) {
dpif_ipfix_wait(ofproto->ipfix);
}
- HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
- bundle_wait(bundle);
+ if (ofproto->lacp_enabled || ofproto->has_bonded_bundles) {
+ struct ofbundle *bundle;
+
+ HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
+ bundle_wait(bundle);
+ }
}
if (ofproto->netflow) {
netflow_wait(ofproto->netflow);
/* LACP. */
if (s->lacp) {
+ ofproto->lacp_enabled = true;
if (!bundle->lacp) {
ofproto->backer->need_revalidate = REV_RECONFIGURE;
bundle->lacp = lacp_create();
return ofport ? ofport_dpif_cast(ofport) : NULL;
}
-static struct ofport_dpif *
-get_odp_port(const struct ofproto_dpif *ofproto, odp_port_t odp_port)
-{
- struct ofport_dpif *port = odp_port_to_ofport(ofproto->backer, odp_port);
- return port && &ofproto->up == port->up.ofproto ? port : NULL;
-}
-
static void
ofproto_port_from_dpif_port(struct ofproto_dpif *ofproto,
struct ofproto_port *ofproto_port,
subfacet->dp_byte_count = stats->n_bytes;
subfacet_update_stats(subfacet, &diff);
- if (facet->accounted_bytes < facet->byte_count) {
+ if (diff.n_packets) {
facet_learn(facet);
- facet_account(facet);
- facet->accounted_bytes = facet->byte_count;
}
}
delete_unexpected_flow(backer, key, key_len);
break;
}
- run_fast_rl();
}
dpif_flow_dump_done(&dump);
}
facet->learn_rl = time_msec() + 500;
list_init(&facet->subfacets);
- netflow_flow_init(&facet->nf_flow);
- netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
xlate_out_copy(&facet->xout, &miss->xout);
classifier_insert(&ofproto->facets, &facet->cr);
ovs_rwlock_unlock(&ofproto->facets.rwlock);
- facet->nf_flow.output_iface = facet->xout.nf_output_iface;
return facet;
}
facet_push_stats(facet, true);
}
-static void
-facet_account(struct facet *facet)
-{
- const struct nlattr *a;
- unsigned int left;
- ovs_be16 vlan_tci;
- uint64_t n_bytes;
-
- if (!facet->xout.has_normal || !facet->ofproto->has_bonded_bundles) {
- return;
- }
- n_bytes = facet->byte_count - facet->accounted_bytes;
-
- /* This loop feeds byte counters to bond_account() for rebalancing to use
- * as a basis. We also need to track the actual VLAN on which the packet
- * is going to be sent to ensure that it matches the one passed to
- * bond_choose_output_slave(). (Otherwise, we will account to the wrong
- * hash bucket.)
- *
- * We use the actions from an arbitrary subfacet because they should all
- * be equally valid for our purpose. */
- vlan_tci = facet->flow.vlan_tci;
- NL_ATTR_FOR_EACH_UNSAFE (a, left, facet->xout.odp_actions.data,
- facet->xout.odp_actions.size) {
- const struct ovs_action_push_vlan *vlan;
- struct ofport_dpif *port;
-
- switch (nl_attr_type(a)) {
- case OVS_ACTION_ATTR_OUTPUT:
- port = get_odp_port(facet->ofproto, nl_attr_get_odp_port(a));
- if (port && port->bundle && port->bundle->bond) {
- bond_account(port->bundle->bond, &facet->flow,
- vlan_tci_to_vid(vlan_tci), n_bytes);
- }
- break;
-
- case OVS_ACTION_ATTR_POP_VLAN:
- vlan_tci = htons(0);
- break;
-
- case OVS_ACTION_ATTR_PUSH_VLAN:
- vlan = nl_attr_get(a);
- vlan_tci = vlan->vlan_tci;
- break;
- }
- }
-}
-
/* Returns true if the only action for 'facet' is to send to the controller.
* (We don't report NetFlow expiration messages for such facets because they
* are just part of the control logic for the network, not real traffic). */
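
The body of facet_is_controller_flow() is elided from this hunk.  A sketch of
the check the comment describes, assuming the facet can reach its rule's
ofpact array as shown (the facet->rule->up path is a hypothetical layout for
this revision of the struct; OFPACT_CONTROLLER, ofpact_next(), and
ofpact_end() are the stock helpers from lib/ofp-actions.h):

    static bool
    facet_is_controller_flow(struct facet *facet)
    {
        if (facet) {
            /* Hypothetical field layout; the real code may instead look the
             * rule up from facet->flow. */
            const struct ofpact *ofpacts = facet->rule->up.ofpacts;
            size_t ofpacts_len = facet->rule->up.ofpacts_len;

            /* True only when the action list is exactly one "send to
             * controller" action. */
            return ofpacts_len > 0
                   && ofpacts->type == OFPACT_CONTROLLER
                   && ofpact_next(ofpacts) >= ofpact_end(ofpacts, ofpacts_len);
        }
        return false;
    }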
}
facet_push_stats(facet, false);
- if (facet->accounted_bytes < facet->byte_count) {
- facet_account(facet);
- facet->accounted_bytes = facet->byte_count;
- }
if (ofproto->netflow && !facet_is_controller_flow(facet)) {
- struct ofexpired expired;
- expired.flow = facet->flow;
- expired.packet_count = facet->packet_count;
- expired.byte_count = facet->byte_count;
- expired.used = facet->used;
- netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
+ netflow_expire(ofproto->netflow, &facet->flow);
+ netflow_flow_clear(ofproto->netflow, &facet->flow);
}
/* Reset counters to prevent double counting if 'facet' ever gets
* reinstalled. */
facet_reset_counters(facet);
-
- netflow_flow_clear(&facet->nf_flow);
facet->tcp_flags = 0;
}
error = xlate_receive(ofproto->backer, NULL, subfacet->key,
subfacet->key_len, &recv_flow, NULL,
- &recv_ofproto, NULL);
+ &recv_ofproto, NULL, NULL, NULL, NULL);
if (error
|| recv_ofproto != ofproto
|| facet != facet_find(ofproto, &recv_flow)) {
facet->xout.has_fin_timeout = xout.has_fin_timeout;
facet->xout.nf_output_iface = xout.nf_output_iface;
facet->xout.mirrors = xout.mirrors;
- facet->nf_flow.output_iface = facet->xout.nf_output_iface;
ovs_mutex_lock(&new_rule->up.mutex);
facet->used = MAX(facet->used, new_rule->up.created);
facet->byte_count = 0;
facet->prev_packet_count = 0;
facet->prev_byte_count = 0;
- facet->accounted_bytes = 0;
}
static void
flow_push_stats(struct ofproto_dpif *ofproto, struct flow *flow,
struct dpif_flow_stats *stats, bool may_learn)
{
- struct ofport_dpif *in_port;
struct xlate_in xin;
- in_port = get_ofp_port(ofproto, flow->in_port.ofp_port);
- if (in_port && in_port->is_tunnel) {
- netdev_vport_inc_rx(in_port->up.netdev, stats);
- if (in_port->bfd) {
- bfd_account_rx(in_port->bfd, stats);
- }
- }
-
xlate_in_init(&xin, ofproto, flow, NULL, stats->tcp_flags, NULL);
xin.resubmit_stats = stats;
xin.may_learn = may_learn;
facet->prev_packet_count = facet->packet_count;
facet->prev_byte_count = facet->byte_count;
facet->prev_used = facet->used;
-
- netflow_flow_update_time(facet->ofproto->netflow, &facet->nf_flow,
- facet->used);
- netflow_flow_update_flags(&facet->nf_flow, facet->tcp_flags);
- mirror_update_stats(facet->ofproto->mbridge, facet->xout.mirrors,
- stats.n_packets, stats.n_bytes);
flow_push_stats(facet->ofproto, &facet->flow, &stats, may_learn);
}
}
static void
-push_all_stats__(bool run_fast)
+push_all_stats(void)
{
static long long int rl = LLONG_MIN;
struct ofproto_dpif *ofproto;
cls_cursor_init(&cursor, &ofproto->facets, NULL);
CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
facet_push_stats(facet, false);
- if (run_fast) {
- run_fast_rl();
- }
}
ovs_rwlock_unlock(&ofproto->facets.rwlock);
}
rl = time_msec() + 100;
}
-static void
-push_all_stats(void)
-{
- push_all_stats__(true);
-}
-
void
rule_dpif_credit_stats(struct rule_dpif *rule,
const struct dpif_flow_stats *stats)
subfacet_reset_dp_stats(subfacets[i], &stats[i]);
subfacets[i]->path = SF_NOT_INSTALLED;
subfacet_destroy(subfacets[i]);
- run_fast_rl();
}
}
{
struct rule_dpif *rule = rule_dpif_cast(rule_);
- /* push_all_stats() can handle flow misses which, when using the learn
- * action, can cause rules to be added and deleted. This can corrupt our
- * caller's datastructures which assume that rule_get_stats() doesn't have
- * an impact on the flow table. To be safe, we disable miss handling. */
- push_all_stats__(false);
+ push_all_stats();
/* Start from historical data for 'rule' itself that are no longer tracked
* in facets. This counts, for example, facets that have expired. */
return netflow_set_options(ofproto->netflow, netflow_options);
} else if (ofproto->netflow) {
ofproto->backer->need_revalidate = REV_RECONFIGURE;
- netflow_destroy(ofproto->netflow);
+ netflow_unref(ofproto->netflow);
ofproto->netflow = NULL;
}
dpif_get_netflow_ids(ofproto->backer->dpif, engine_type, engine_id);
}
-
-static void
-send_active_timeout(struct ofproto_dpif *ofproto, struct facet *facet)
-{
- if (!facet_is_controller_flow(facet) &&
- netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) {
- struct subfacet *subfacet;
- struct ofexpired expired;
-
- LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
- if (subfacet->path == SF_FAST_PATH) {
- struct dpif_flow_stats stats;
-
- subfacet_install(subfacet, &facet->xout.odp_actions,
- &stats);
- subfacet_update_stats(subfacet, &stats);
- }
- }
-
- expired.flow = facet->flow;
- expired.packet_count = facet->packet_count;
- expired.byte_count = facet->byte_count;
- expired.used = facet->used;
- netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
- }
-}
-
-static void
-send_netflow_active_timeouts(struct ofproto_dpif *ofproto)
-{
- struct cls_cursor cursor;
- struct facet *facet;
-
- ovs_rwlock_rdlock(&ofproto->facets.rwlock);
- cls_cursor_init(&cursor, &ofproto->facets, NULL);
- CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
- send_active_timeout(ofproto, facet);
- }
- ovs_rwlock_unlock(&ofproto->facets.rwlock);
-}
\f
static struct ofproto_dpif *
ofproto_dpif_lookup(const char *name)
}
if (xlate_receive(backer, NULL, odp_key.data, odp_key.size, flow,
- NULL, ofprotop, NULL)) {
+ NULL, ofprotop, NULL, NULL, NULL, NULL)) {
error = "Invalid datapath flow";
goto exit;
}
del,
port_open_type,
type_run,
- type_run_fast,
type_wait,
alloc,
construct,
destruct,
dealloc,
run,
- run_fast,
wait,
get_memory_usage,
flush,