static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes);
static void rule_invalidate(const struct rule_dpif *);
+static tag_type rule_calculate_tag(const struct flow *,
+ const struct minimask *, uint32_t secret);
struct ofbundle {
struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
struct facet {
/* Owners. */
struct hmap_node hmap_node; /* In owning ofproto's 'facets' hmap. */
- struct list list_node; /* In owning rule's 'facets' list. */
- struct rule_dpif *rule; /* Owning rule. */
+ struct ofproto_dpif *ofproto; /* Owning ofproto. */
/* Owned data. */
struct list subfacets;
uint8_t tcp_flags; /* TCP flags seen for this 'facet'. */
struct xlate_out xout;
+ bool fail_open; /* Facet matched the fail open rule. */
/* Storage for a single subfacet, to reduce malloc() time and space
* overhead. (A facet always has at least one subfacet and in the common
enum revalidate_reason {
REV_RECONFIGURE = 1, /* Switch configuration changed. */
REV_STP, /* Spanning tree protocol port status change. */
+ REV_BOND, /* Bonding changed. */
REV_PORT_TOGGLED, /* Port enabled or disabled by CFM, LACP, ...*/
REV_FLOW_TABLE, /* Flow table changed. */
+ REV_MAC_LEARNING, /* MAC learning changed. */
REV_INCONSISTENCY /* Facet self-check failed. */
};
COVERAGE_DEFINE(rev_reconfigure);
COVERAGE_DEFINE(rev_stp);
+COVERAGE_DEFINE(rev_bond);
COVERAGE_DEFINE(rev_port_toggled);
COVERAGE_DEFINE(rev_flow_table);
+COVERAGE_DEFINE(rev_mac_learning);
COVERAGE_DEFINE(rev_inconsistency);
/* Drop keys are odp flow keys for which drop flows are installed in the kernel.
/* Type functions. */
+static void process_dpif_port_changes(struct dpif_backer *);
+static void process_dpif_all_ports_changed(struct dpif_backer *);
+static void process_dpif_port_change(struct dpif_backer *,
+ const char *devname);
+static void process_dpif_port_error(struct dpif_backer *, int error);
+
static struct ofproto_dpif *
lookup_ofproto_dpif_by_port_name(const char *name)
{
{
static long long int push_timer = LLONG_MIN;
struct dpif_backer *backer;
- char *devname;
- int error;
backer = shash_find_data(&all_dpif_backers, type);
if (!backer) {
* and the configuration has now changed to "false", enable receiving
* packets from the datapath. */
if (!backer->recv_set_enable && !ofproto_get_flow_restore_wait()) {
+ int error;
+
backer->recv_set_enable = true;
error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
switch (backer->need_revalidate) {
case REV_RECONFIGURE: COVERAGE_INC(rev_reconfigure); break;
case REV_STP: COVERAGE_INC(rev_stp); break;
+ case REV_BOND: COVERAGE_INC(rev_bond); break;
case REV_PORT_TOGGLED: COVERAGE_INC(rev_port_toggled); break;
case REV_FLOW_TABLE: COVERAGE_INC(rev_flow_table); break;
+ case REV_MAC_LEARNING: COVERAGE_INC(rev_mac_learning); break;
case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
}
timer_set_duration(&backer->next_expiration, delay);
}
- /* Check for port changes in the dpif. */
- while ((error = dpif_port_poll(backer->dpif, &devname)) == 0) {
- struct ofproto_dpif *ofproto;
- struct dpif_port port;
-
- /* Don't report on the datapath's device. */
- if (!strcmp(devname, dpif_base_name(backer->dpif))) {
- goto next;
- }
-
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
- &all_ofproto_dpifs) {
- if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
- goto next;
- }
- }
-
- ofproto = lookup_ofproto_dpif_by_port_name(devname);
- if (dpif_port_query_by_name(backer->dpif, devname, &port)) {
- /* The port was removed. If we know the datapath,
- * report it through poll_set(). If we don't, it may be
- * notifying us of a removal we initiated, so ignore it.
- * If there's a pending ENOBUFS, let it stand, since
- * everything will be reevaluated. */
- if (ofproto && ofproto->port_poll_errno != ENOBUFS) {
- sset_add(&ofproto->port_poll_set, devname);
- ofproto->port_poll_errno = 0;
- }
- } else if (!ofproto) {
- /* The port was added, but we don't know with which
- * ofproto we should associate it. Delete it. */
- dpif_port_del(backer->dpif, port.port_no);
- }
- dpif_port_destroy(&port);
-
- next:
- free(devname);
- }
-
- if (error != EAGAIN) {
- struct ofproto_dpif *ofproto;
-
- /* There was some sort of error, so propagate it to all
- * ofprotos that use this backer. */
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
- &all_ofproto_dpifs) {
- if (ofproto->backer == backer) {
- sset_clear(&ofproto->port_poll_set);
- ofproto->port_poll_errno = error;
- }
- }
- }
+ process_dpif_port_changes(backer);
if (backer->governor) {
size_t n_subfacets;
return 0;
}
+/* Check for and handle port changes in 'backer''s dpif. */
+static void
+process_dpif_port_changes(struct dpif_backer *backer)
+{
+ for (;;) {
+ char *devname;
+ int error;
+
+ error = dpif_port_poll(backer->dpif, &devname);
+ switch (error) {
+ case EAGAIN:
+ return;
+
+ case ENOBUFS:
+ process_dpif_all_ports_changed(backer);
+ break;
+
+ case 0:
+ process_dpif_port_change(backer, devname);
+ free(devname);
+ break;
+
+ default:
+ process_dpif_port_error(backer, error);
+ break;
+ }
+ }
+}
+
+static void
+process_dpif_all_ports_changed(struct dpif_backer *backer)
+{
+ struct ofproto_dpif *ofproto;
+ struct dpif_port dpif_port;
+ struct dpif_port_dump dump;
+ struct sset devnames;
+ const char *devname;
+
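+ /* Collect the union of the device names known to the ofprotos on this
+ * backer and the names present in the datapath, then recheck each name
+ * exactly once. */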
+ sset_init(&devnames);
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ if (ofproto->backer == backer) {
+ struct ofport *ofport;
+
+ HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
+ sset_add(&devnames, netdev_get_name(ofport->netdev));
+ }
+ }
+ }
+ DPIF_PORT_FOR_EACH (&dpif_port, &dump, backer->dpif) {
+ sset_add(&devnames, dpif_port.name);
+ }
+
+ SSET_FOR_EACH (devname, &devnames) {
+ process_dpif_port_change(backer, devname);
+ }
+ sset_destroy(&devnames);
+}
+
+static void
+process_dpif_port_change(struct dpif_backer *backer, const char *devname)
+{
+ struct ofproto_dpif *ofproto;
+ struct dpif_port port;
+
+ /* Don't report on the datapath's device. */
+ if (!strcmp(devname, dpif_base_name(backer->dpif))) {
+ return;
+ }
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
+ &all_ofproto_dpifs) {
+ if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
+ return;
+ }
+ }
+
+ ofproto = lookup_ofproto_dpif_by_port_name(devname);
+ if (dpif_port_query_by_name(backer->dpif, devname, &port)) {
+ /* The port was removed. If we know which ofproto owns it, report
+ * the removal through that ofproto's poll_set(). If we don't, the
+ * datapath may be notifying us of a removal we initiated, so ignore
+ * it. If there's a pending ENOBUFS, let it stand, since everything
+ * will be reevaluated. */
+ if (ofproto && ofproto->port_poll_errno != ENOBUFS) {
+ sset_add(&ofproto->port_poll_set, devname);
+ ofproto->port_poll_errno = 0;
+ }
+ } else if (!ofproto) {
+ /* The port was added, but we don't know with which
+ * ofproto we should associate it. Delete it. */
+ dpif_port_del(backer->dpif, port.port_no);
+ } else {
+ struct ofport_dpif *ofport;
+
+ ofport = ofport_dpif_cast(shash_find_data(
+ &ofproto->up.port_by_name, devname));
+ if (ofport
+ && ofport->odp_port != port.port_no
+ && !odp_port_to_ofport(backer, port.port_no))
+ {
+ /* 'ofport''s datapath port number has changed from
+ * 'ofport->odp_port' to 'port.port_no'. Update our internal data
+ * structures to match. */
+ hmap_remove(&backer->odp_to_ofport_map, &ofport->odp_port_node);
+ ofport->odp_port = port.port_no;
+ hmap_insert(&backer->odp_to_ofport_map, &ofport->odp_port_node,
+ hash_odp_port(port.port_no));
+ backer->need_revalidate = REV_RECONFIGURE;
+ }
+ }
+ dpif_port_destroy(&port);
+}
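
/* A reduced illustration of the re-keying step above: when an entry's key
 * (here, the datapath port number) changes, the entry must be removed from
 * the hash map and re-inserted under the new hash, since a hash map cannot
 * see in-place key mutations.  The toy map below is a hypothetical stand-in
 * for OVS's hmap. */
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define N_BUCKETS 16

struct port_node {
    uint32_t odp_port;          /* Key. */
    struct port_node *next;
};

static struct port_node *buckets[N_BUCKETS];

static size_t
hash_port(uint32_t odp_port)
{
    return odp_port % N_BUCKETS;
}

static void
map_insert(struct port_node *node)
{
    struct port_node **b = &buckets[hash_port(node->odp_port)];
    node->next = *b;
    *b = node;
}

static void
map_remove(struct port_node *node)
{
    struct port_node **p = &buckets[hash_port(node->odp_port)];
    while (*p != node) {
        p = &(*p)->next;
    }
    *p = node->next;
}

/* Mirrors the hmap_remove()/update/hmap_insert() dance in
 * process_dpif_port_change(). */
static void
map_rekey(struct port_node *node, uint32_t new_odp_port)
{
    map_remove(node);
    node->odp_port = new_odp_port;
    map_insert(node);
}

int
main(void)
{
    struct port_node node = { .odp_port = 5 };

    map_insert(&node);
    map_rekey(&node, 21);       /* Safe: hash recomputed on reinsert. */
    printf("port now %"PRIu32"\n", node.odp_port);
    return 0;
}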
+
+/* Propagate 'error' to all ofprotos based on 'backer'. */
+static void
+process_dpif_port_error(struct dpif_backer *backer, int error)
+{
+ struct ofproto_dpif *ofproto;
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ if (ofproto->backer == backer) {
+ sset_clear(&ofproto->port_poll_set);
+ ofproto->port_poll_errno = error;
+ }
+ }
+}
+
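/* A self-contained reduction of the dispatch loop in
 * process_dpif_port_changes(): drain an event source until EAGAIN, treat
 * ENOBUFS (events were dropped) as "rescan everything", and route hard
 * errors to an error path without abandoning the drain.  poll_next() and
 * the handler bodies are hypothetical stand-ins for the dpif calls above. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
poll_next(char **namep)
{
    static int calls;

    /* Report one event, then claim the queue is empty. */
    if (calls++ == 0) {
        *namep = strdup("eth0");
        return 0;
    }
    return EAGAIN;
}

static void
drain_port_events(void)
{
    for (;;) {
        char *name;
        int error = poll_next(&name);

        switch (error) {
        case EAGAIN:            /* Queue drained: done for this round. */
            return;

        case ENOBUFS:           /* Events were lost: recheck every port. */
            puts("rescan all ports");
            break;

        case 0:                 /* One named event; name is malloc()'d. */
            printf("port %s changed\n", name);
            free(name);
            break;

        default:                /* Hard error: record it, keep polling. */
            fprintf(stderr, "poll error: %s\n", strerror(error));
            break;
        }
    }
}

int
main(void)
{
    drain_port_events();
    return 0;
}
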
static int
dpif_backer_run_fast(struct dpif_backer *backer, int max_batch)
{
if (mbridge_need_revalidate(ofproto->mbridge)) {
ofproto->backer->need_revalidate = REV_RECONFIGURE;
- mac_learning_flush(ofproto->ml, NULL);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+ mac_learning_flush(ofproto->ml);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
/* Do not perform any periodic activity below required by 'ofproto' while
}
stp_run(ofproto);
- mac_learning_run(ofproto->ml, &ofproto->backer->revalidate_set);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+ if (mac_learning_run(ofproto->ml)) {
+ ofproto->backer->need_revalidate = REV_MAC_LEARNING;
+ }
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
/* Check the consistency of a random facet, to aid debugging. */
if (time_msec() >= ofproto->consistency_rl
if (ofproto->netflow) {
netflow_wait(ofproto->netflow);
}
+ ovs_rwlock_rdlock(&ofproto->ml->rwlock);
mac_learning_wait(ofproto->ml);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
stp_wait(ofproto);
if (ofproto->backer->need_revalidate) {
/* Shouldn't happen, but if it does just go around again. */
n_batch = 0;
HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
&ofproto->backer->subfacets) {
- if (ofproto_dpif_cast(subfacet->facet->rule->up.ofproto) != ofproto) {
+ if (subfacet->facet->ofproto != ofproto) {
continue;
}
set_stp_port(ofport, NULL);
}
- stp_destroy(ofproto->stp);
+ stp_unref(ofproto->stp);
ofproto->stp = NULL;
}
if (stp_learn_in_state(ofport->stp_state)
!= stp_learn_in_state(state)) {
/* xxx Learning action flows should also be flushed. */
- mac_learning_flush(ofproto->ml,
- &ofproto->backer->revalidate_set);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+ mac_learning_flush(ofproto->ml);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
fwd_change = stp_forward_in_state(ofport->stp_state)
!= stp_forward_in_state(state);
}
if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
- mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+ mac_learning_flush(ofproto->ml);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
}
}
struct mac_entry *mac, *next_mac;
ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ ovs_rwlock_wrlock(&ml->rwlock);
LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
if (mac->port.p == bundle) {
if (all_ofprotos) {
if (o != ofproto) {
struct mac_entry *e;
- e = mac_learning_lookup(o->ml, mac->mac, mac->vlan,
- NULL);
+ ovs_rwlock_wrlock(&o->ml->rwlock);
+ e = mac_learning_lookup(o->ml, mac->mac, mac->vlan);
if (e) {
mac_learning_expire(o->ml, e);
}
+ ovs_rwlock_unlock(&o->ml->rwlock);
}
}
}
mac_learning_expire(ml, mac);
}
}
+ ovs_rwlock_unlock(&ml->rwlock);
}
static struct ofbundle *
struct mac_entry *e;
error = n_packets = n_errors = 0;
+ ovs_rwlock_rdlock(&ofproto->ml->rwlock);
LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
if (e->port.p != bundle) {
struct ofpbuf *learning_packet;
n_packets++;
}
}
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
if (n_errors) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
}
- bond_run(bundle->bond, &bundle->ofproto->backer->revalidate_set,
- lacp_status(bundle->lacp));
+ if (bond_run(bundle->bond, lacp_status(bundle->lacp))) {
+ bundle->ofproto->backer->need_revalidate = REV_BOND;
+ }
+
if (bond_should_send_learning_packets(bundle->bond)) {
bundle_send_learning_packets(bundle);
}
set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
- mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
+ mac_learning_flush(ofproto->ml);
}
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
return 0;
}
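
/* A minimal pthread sketch of the locking discipline this patch adopts for
 * the MAC-learning table: writers (flush, run, expire) take the rwlock
 * exclusively and flag revalidation themselves, instead of passing a
 * revalidate set in; readers (wait, dump) take it shared.  The table type
 * and flag below are illustrative, not the OVS ones. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mac_table {
    pthread_rwlock_t rwlock;    /* Guards everything below. */
    int n_entries;              /* Stands in for the real entry LRU. */
};

static struct mac_table ml = { PTHREAD_RWLOCK_INITIALIZER, 0 };
static bool need_revalidate;    /* Stands in for backer->need_revalidate. */

/* Returns true if the sweep changed anything, like mac_learning_run(). */
static bool
mac_table_sweep(struct mac_table *t)
{
    if (t->n_entries > 0) {
        t->n_entries--;
        return true;
    }
    return false;
}

static void
mac_table_run(struct mac_table *t)
{
    pthread_rwlock_wrlock(&t->rwlock);      /* Exclusive: may mutate. */
    if (mac_table_sweep(t)) {
        need_revalidate = true;             /* Writer flags revalidation. */
    }
    pthread_rwlock_unlock(&t->rwlock);
}

static int
mac_table_count(struct mac_table *t)
{
    int n;

    pthread_rwlock_rdlock(&t->rwlock);      /* Shared: read-only. */
    n = t->n_entries;
    pthread_rwlock_unlock(&t->rwlock);
    return n;
}

int
main(void)
{
    ml.n_entries = 1;
    mac_table_run(&ml);
    printf("%d entries, revalidate=%d\n", mac_table_count(&ml),
           need_revalidate);
    return 0;
}
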
size_t max_entries)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
mac_learning_set_idle_time(ofproto->ml, idle_time);
mac_learning_set_max_entries(ofproto->ml, max_entries);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
\f
/* Ports. */
/* Helper for handle_flow_miss_without_facet() and
* handle_flow_miss_with_facet(). */
static void
-handle_flow_miss_common(struct rule_dpif *rule,
- struct ofpbuf *packet, const struct flow *flow)
+handle_flow_miss_common(struct ofproto_dpif *ofproto, struct ofpbuf *packet,
+ const struct flow *flow, bool fail_open)
{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
-
- if (rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
+ if (fail_open) {
/*
* Extra-special case for fail-open mode.
*
COVERAGE_INC(facet_suppress);
- handle_flow_miss_common(rule, packet, &miss->flow);
+ handle_flow_miss_common(miss->ofproto, packet, &miss->flow,
+ rule->up.cr.priority == FAIL_OPEN_PRIORITY);
if (xout->slow) {
struct xlate_in xin;
long long int now, struct dpif_flow_stats *stats,
struct flow_miss_op *ops, size_t *n_ops)
{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
enum subfacet_path want_path;
struct subfacet *subfacet;
struct ofpbuf *packet;
- subfacet = subfacet_create(facet, miss, now);
want_path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
- if (stats) {
- subfacet_update_stats(subfacet, stats);
- }
LIST_FOR_EACH (packet, list_node, &miss->packets) {
struct flow_miss_op *op = &ops[*n_ops];
- handle_flow_miss_common(facet->rule, packet, &miss->flow);
+ handle_flow_miss_common(miss->ofproto, packet, &miss->flow,
+ facet->fail_open);
if (want_path != SF_FAST_PATH) {
+ struct rule_dpif *rule;
struct xlate_in xin;
- xlate_in_init(&xin, ofproto, &miss->flow, facet->rule, 0, packet);
+ rule = rule_dpif_lookup(facet->ofproto, &facet->flow, NULL);
+ xlate_in_init(&xin, facet->ofproto, &miss->flow, rule, 0, packet);
xlate_actions_for_side_effects(&xin);
}
}
}
+ /* Don't install the flow if it's the result of the "userspace"
+ * action for an already installed facet. This can occur when a
+ * datapath flow with wildcards has a "userspace" action and flows
+ * sent to userspace result in a different subfacet, which will then
+ * be rejected as overlapping by the datapath. */
+ if (miss->upcall_type == DPIF_UC_ACTION
+ && !list_is_empty(&facet->subfacets)) {
+ if (stats) {
+ facet->used = MAX(facet->used, stats->used);
+ facet->packet_count += stats->n_packets;
+ facet->byte_count += stats->n_bytes;
+ facet->tcp_flags |= stats->tcp_flags;
+ }
+ return;
+ }
+
+ subfacet = subfacet_create(facet, miss, now);
+ if (stats) {
+ subfacet_update_stats(subfacet, stats);
+ }
+
if (miss->upcall_type == DPIF_UC_MISS || subfacet->path != want_path) {
struct flow_miss_op *op = &ops[(*n_ops)++];
struct dpif_flow_put *put = &op->dpif_op.u.flow_put;
op->xout_garbage = false;
op->dpif_op.type = DPIF_OP_FLOW_PUT;
op->subfacet = subfacet;
- put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
+ put->flags = DPIF_FP_CREATE;
put->key = miss->key;
put->key_len = miss->key_len;
put->mask = op->mask.data;
put->actions = facet->xout.odp_actions.data;
put->actions_len = facet->xout.odp_actions.size;
} else {
- compose_slow_path(ofproto, &miss->flow, facet->xout.slow,
+ compose_slow_path(facet->ofproto, &miss->flow, facet->xout.slow,
op->slow_stub, sizeof op->slow_stub,
&put->actions, &put->actions_len);
}
drop_key = drop_key_lookup(backer, upcall->key, upcall->key_len);
if (!drop_key) {
- drop_key = xmalloc(sizeof *drop_key);
- drop_key->key = xmemdup(upcall->key, upcall->key_len);
- drop_key->key_len = upcall->key_len;
-
- hmap_insert(&backer->drop_keys, &drop_key->hmap_node,
- hash_bytes(drop_key->key, drop_key->key_len, 0));
- dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
- drop_key->key, drop_key->key_len,
- NULL, 0, NULL, 0, NULL);
+ int ret;
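+ /* Install the drop flow first and remember the key only if the
+ * datapath accepts it, so 'drop_keys' tracks what is actually
+ * installed. */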
+ ret = dpif_flow_put(backer->dpif,
+ DPIF_FP_CREATE | DPIF_FP_MODIFY,
+ upcall->key, upcall->key_len,
+ NULL, 0, NULL, 0, NULL);
+
+ if (!ret) {
+ drop_key = xmalloc(sizeof *drop_key);
+ drop_key->key = xmemdup(upcall->key, upcall->key_len);
+ drop_key->key_len = upcall->key_len;
+
+ hmap_insert(&backer->drop_keys, &drop_key->hmap_node,
+ hash_bytes(drop_key->key, drop_key->key_len, 0));
+ }
}
continue;
}
COVERAGE_INC(subfacet_install_fail);
+ /* Zero out the subfacet counters when installation failed but the
+ * datapath reported hits. This should never happen: if the datapath
+ * flow existed, we should not have attempted to create a new
+ * subfacet. A buggy datapath could trigger it anyway, so just reset
+ * the counters and log an error. */
+ if (subfacet->dp_packet_count || subfacet->dp_byte_count) {
+ VLOG_ERR_RL(&rl, "failed to install subfacet for which "
+ "datapath reported hits");
+ subfacet->dp_packet_count = subfacet->dp_byte_count = 0;
+ }
+
subfacet->path = SF_NOT_INSTALLED;
}
HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
if (bundle->bond) {
- bond_rebalance(bundle->bond, &backer->revalidate_set);
+ bond_rebalance(bundle->bond);
}
}
}
const struct dpif_flow_stats *stats)
{
struct facet *facet = subfacet->facet;
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
struct dpif_flow_stats diff;
diff.tcp_flags = stats->tcp_flags;
diff.n_bytes = 0;
}
- ofproto->n_hit += diff.n_packets;
+ facet->ofproto->n_hit += diff.n_packets;
subfacet->dp_packet_count = stats->n_packets;
subfacet->dp_byte_count = stats->n_bytes;
subfacet_update_stats(subfacet, &diff);
static void
rule_expire(struct rule_dpif *rule)
{
- struct facet *facet, *next_facet;
long long int now;
uint8_t reason;
COVERAGE_INC(ofproto_dpif_expired);
- /* Update stats. (This is a no-op if the rule expired due to an idle
- * timeout, because that only happens when the rule has no facets left.) */
- LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
- facet_remove(facet);
- }
-
/* Get rid of the rule. */
ofproto_rule_expire(&rule->up, reason);
}
struct match match;
facet = xzalloc(sizeof *facet);
+ facet->ofproto = miss->ofproto;
facet->packet_count = facet->prev_packet_count = stats->n_packets;
facet->byte_count = facet->prev_byte_count = stats->n_bytes;
facet->tcp_flags = stats->tcp_flags;
facet->used = stats->used;
facet->flow = miss->flow;
facet->learn_rl = time_msec() + 500;
- facet->rule = rule;
- list_push_back(&facet->rule->facets, &facet->list_node);
list_init(&facet->subfacets);
netflow_flow_init(&facet->nf_flow);
netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
classifier_insert(&ofproto->facets, &facet->cr);
facet->nf_flow.output_iface = facet->xout.nf_output_iface;
+ facet->fail_open = rule->up.cr.priority == FAIL_OPEN_PRIORITY;
return facet;
}
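
/* The pointer-ownership change this patch makes, in miniature: a facet no
 * longer caches 'struct rule_dpif *rule' (which could be freed behind its
 * back and required per-rule facet lists); it keeps only the stable
 * 'ofproto' backpointer and re-resolves the rule at each use.  The types
 * and lookup below are hypothetical reductions of the OVS ones. */
#include <stddef.h>

struct flow { int dl_type; };
struct rule { int priority; };
struct ofproto { struct rule *catchall; };

/* Stand-in for rule_dpif_lookup(): the real one walks a classifier. */
static struct rule *
rule_lookup(struct ofproto *ofproto, const struct flow *flow)
{
    (void) flow;
    return ofproto->catchall;
}

struct facet {
    struct ofproto *ofproto;    /* Stable for the facet's lifetime. */
    struct flow flow;
    /* No cached 'rule': it is re-resolved on demand instead. */
};

static struct rule *
facet_rule(const struct facet *facet)
{
    return rule_lookup(facet->ofproto, &facet->flow);
}

int
main(void)
{
    struct rule r = { 0 };
    struct ofproto br = { &r };
    struct facet f = { &br, { 0x0800 } };

    return facet_rule(&f) != &r;    /* 0 on success. */
}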
static void
facet_remove(struct facet *facet)
{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
struct subfacet *subfacet, *next_subfacet;
ovs_assert(!list_is_empty(&facet->subfacets));
&facet->subfacets) {
subfacet_destroy__(subfacet);
}
- classifier_remove(&ofproto->facets, &facet->cr);
+ classifier_remove(&facet->ofproto->facets, &facet->cr);
cls_rule_destroy(&facet->cr);
- list_remove(&facet->list_node);
facet_free(facet);
}
static void
facet_account(struct facet *facet)
{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
const struct nlattr *a;
unsigned int left;
ovs_be16 vlan_tci;
uint64_t n_bytes;
- if (!facet->xout.has_normal || !ofproto->has_bonded_bundles) {
+ if (!facet->xout.has_normal || !facet->ofproto->has_bonded_bundles) {
return;
}
n_bytes = facet->byte_count - facet->accounted_bytes;
switch (nl_attr_type(a)) {
case OVS_ACTION_ATTR_OUTPUT:
- port = get_odp_port(ofproto, nl_attr_get_odp_port(a));
+ port = get_odp_port(facet->ofproto, nl_attr_get_odp_port(a));
if (port && port->bundle && port->bundle->bond) {
bond_account(port->bundle->bond, &facet->flow,
vlan_tci_to_vid(vlan_tci), n_bytes);
facet_is_controller_flow(struct facet *facet)
{
if (facet) {
- const struct rule *rule = &facet->rule->up;
- const struct ofpact *ofpacts = rule->ofpacts;
- size_t ofpacts_len = rule->ofpacts_len;
+ struct ofproto_dpif *ofproto = facet->ofproto;
+ const struct rule_dpif *rule = rule_dpif_lookup(ofproto, &facet->flow,
+ NULL);
+ const struct ofpact *ofpacts = rule->up.ofpacts;
+ size_t ofpacts_len = rule->up.ofpacts_len;
if (ofpacts_len > 0 &&
ofpacts->type == OFPACT_CONTROLLER &&
static void
facet_flush_stats(struct facet *facet)
{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
+ struct ofproto_dpif *ofproto = facet->ofproto;
struct subfacet *subfacet;
LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
-
struct xlate_out xout;
struct xlate_in xin;
struct rule_dpif *rule;
- bool ok;
-
- /* Check the rule for consistency. */
- rule = rule_dpif_lookup(ofproto, &facet->flow, NULL);
- if (rule != facet->rule) {
- if (!VLOG_DROP_WARN(&rl)) {
- struct ds s = DS_EMPTY_INITIALIZER;
-
- flow_format(&s, &facet->flow);
- ds_put_format(&s, ": facet associated with wrong rule (was "
- "table=%"PRIu8",", facet->rule->up.table_id);
- cls_rule_format(&facet->rule->up.cr, &s);
- ds_put_format(&s, ") (should have been table=%"PRIu8",",
- rule->up.table_id);
- cls_rule_format(&rule->up.cr, &s);
- ds_put_char(&s, ')');
-
- VLOG_WARN("%s", ds_cstr(&s));
- ds_destroy(&s);
- }
- return false;
- }
+ bool ok, fail_open;
/* Check the datapath actions for consistency. */
- xlate_in_init(&xin, ofproto, &facet->flow, rule, 0, NULL);
+ rule = rule_dpif_lookup(facet->ofproto, &facet->flow, NULL);
+ xlate_in_init(&xin, facet->ofproto, &facet->flow, rule, 0, NULL);
xlate_actions(&xin, &xout);
+ fail_open = rule->up.cr.priority == FAIL_OPEN_PRIORITY;
ok = ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)
- && facet->xout.slow == xout.slow;
+ && facet->xout.slow == xout.slow
+ && facet->fail_open == fail_open;
if (!ok && !VLOG_DROP_WARN(&rl)) {
struct ds s = DS_EMPTY_INITIALIZER;
ds_put_format(&s, " slow path incorrect. should be %d", xout.slow);
}
- VLOG_WARN("%s", ds_cstr(&s));
+ if (facet->fail_open != fail_open) {
+ ds_put_format(&s, " fail open incorrect. should be %s",
+ fail_open ? "true" : "false");
+ }
ds_destroy(&s);
}
xlate_out_uninit(&xout);
static bool
facet_revalidate(struct facet *facet)
{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
+ struct ofproto_dpif *ofproto = facet->ofproto;
struct rule_dpif *new_rule;
struct subfacet *subfacet;
struct flow_wildcards wc;
facet->xout.nf_output_iface = xout.nf_output_iface;
facet->xout.mirrors = xout.mirrors;
facet->nf_flow.output_iface = facet->xout.nf_output_iface;
-
- if (facet->rule != new_rule) {
- COVERAGE_INC(facet_changed_rule);
- list_remove(&facet->list_node);
- list_push_back(&new_rule->facets, &facet->list_node);
- facet->rule = new_rule;
- facet->used = new_rule->up.created;
- facet->prev_used = facet->used;
- }
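+ /* The rule governing this facet may have changed; never move 'used'
+ * backward, but advance it to the new rule's creation time if that is
+ * more recent. */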
+ facet->used = MAX(facet->used, new_rule->up.created);
+ facet->fail_open = new_rule->up.cr.priority == FAIL_OPEN_PRIORITY;
xlate_out_uninit(&xout);
return true;
stats.tcp_flags = facet->tcp_flags;
if (may_learn || stats.n_packets || facet->used > facet->prev_used) {
- struct ofproto_dpif *ofproto =
- ofproto_dpif_cast(facet->rule->up.ofproto);
-
+ struct ofproto_dpif *ofproto = facet->ofproto;
struct ofport_dpif *in_port;
+ struct rule_dpif *rule;
struct xlate_in xin;
facet->prev_packet_count = facet->packet_count;
netdev_vport_inc_rx(in_port->up.netdev, &stats);
}
- rule_credit_stats(facet->rule, &stats);
+ rule = rule_dpif_lookup(ofproto, &facet->flow, NULL);
+ rule_credit_stats(rule, &stats);
netflow_flow_update_time(ofproto->netflow, &facet->nf_flow,
facet->used);
netflow_flow_update_flags(&facet->nf_flow, facet->tcp_flags);
mirror_update_stats(ofproto->mbridge, facet->xout.mirrors,
stats.n_packets, stats.n_bytes);
- xlate_in_init(&xin, ofproto, &facet->flow, facet->rule,
- stats.tcp_flags, NULL);
+ xlate_in_init(&xin, ofproto, &facet->flow, rule, stats.tcp_flags,
+ NULL);
xin.resubmit_stats = &stats;
xin.may_learn = may_learn;
xlate_actions_for_side_effects(&xin);
subfacet_destroy__(struct subfacet *subfacet)
{
struct facet *facet = subfacet->facet;
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
+ struct ofproto_dpif *ofproto = facet->ofproto;
/* Update ofproto stats before uninstalling the subfacet. */
ofproto->backer->subfacet_del_count++;
struct dpif_flow_stats *stats)
{
struct facet *facet = subfacet->facet;
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
enum subfacet_path path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
const struct nlattr *actions = odp_actions->data;
size_t actions_len = odp_actions->size;
enum dpif_flow_put_flags flags;
int ret;
- flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
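+ /* Ask the datapath to create the flow only if this subfacet has never
+ * been installed; otherwise modify the existing flow in place. */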
+ flags = subfacet->path == SF_NOT_INSTALLED ? DPIF_FP_CREATE
+ : DPIF_FP_MODIFY;
if (stats) {
flags |= DPIF_FP_ZERO_STATS;
}
if (path == SF_SLOW_PATH) {
- compose_slow_path(ofproto, &facet->flow, facet->xout.slow,
+ compose_slow_path(facet->ofproto, &facet->flow, facet->xout.slow,
slow_path_stub, sizeof slow_path_stub,
&actions, &actions_len);
}
subfacet_uninstall(struct subfacet *subfacet)
{
if (subfacet->path != SF_NOT_INSTALLED) {
- struct rule_dpif *rule = subfacet->facet->rule;
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
+ struct ofproto_dpif *ofproto = subfacet->facet->ofproto;
struct dpif_flow_stats stats;
int error;
rule->packet_count = 0;
rule->byte_count = 0;
- victim = rule_dpif_cast(ofoperation_get_victim(rule->up.pending));
- if (victim && !list_is_empty(&victim->facets)) {
- struct facet *facet;
-
- rule->facets = victim->facets;
- list_moved(&rule->facets);
- LIST_FOR_EACH (facet, list_node, &rule->facets) {
- /* XXX: We're only clearing our local counters here. It's possible
- * that quite a few packets are unaccounted for in the datapath
- * statistics. These will be accounted to the new rule instead of
- * cleared as required. This could be fixed by clearing out the
- * datapath statistics for this facet, but currently it doesn't
- * seem worth it. */
- facet_reset_counters(facet);
- facet->rule = rule;
- }
- } else {
- /* Must avoid list_moved() in this case. */
- list_init(&rule->facets);
- }
-
table_id = rule->up.table_id;
+ victim = rule_dpif_cast(ofoperation_get_victim(rule->up.pending));
if (victim) {
rule->tag = victim->tag;
} else if (table_id == 0) {
}
static void
-rule_destruct(struct rule *rule_)
+rule_destruct(struct rule *rule)
{
- struct rule_dpif *rule = rule_dpif_cast(rule_);
- struct facet *facet, *next_facet;
-
- LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
- facet_revalidate(facet);
- }
-
- complete_operation(rule);
+ complete_operation(rule_dpif_cast(rule));
}
static void
/* Calculates the tag to use for 'flow' and 'mask' when it is inserted
 * into an OpenFlow table with the given 'secret'. */
-tag_type
+static tag_type
rule_calculate_tag(const struct flow *flow, const struct minimask *mask,
uint32_t secret)
{
unixctl_command_reply_error(conn, "no such bridge");
return;
}
- mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+ mac_learning_flush(ofproto->ml);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
} else {
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
- mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+ mac_learning_flush(ofproto->ml);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
}
}
ds_put_cstr(&ds, " port VLAN MAC Age\n");
+ ovs_rwlock_rdlock(&ofproto->ml->rwlock);
LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
struct ofbundle *bundle = e->port.p;
char name[OFP_MAX_PORT_NAME_LEN];
name, e->vlan, ETH_ADDR_ARGS(e->mac),
mac_entry_age(ofproto->ml, e));
}
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
unixctl_command_reply(conn, ds_cstr(&ds));
ds_destroy(&ds);
}
HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->backer->subfacets) {
struct facet *facet = subfacet->facet;
+ struct odputil_keybuf maskbuf;
+ struct ofpbuf mask;
- if (ofproto_dpif_cast(facet->rule->up.ofproto) != ofproto) {
+ if (facet->ofproto != ofproto) {
continue;
}
- odp_flow_key_format(subfacet->key, subfacet->key_len, &ds);
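+ /* Format the flow key together with the facet's wildcard mask so the
+ * dump shows the effective megaflow (the mask is left empty when
+ * megaflows are disabled). */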
+ ofpbuf_use_stack(&mask, &maskbuf, sizeof maskbuf);
+ if (enable_megaflows) {
+ odp_flow_key_from_mask(&mask, &facet->xout.wc.masks,
+ &facet->flow, UINT32_MAX);
+ }
+
+ odp_flow_format(subfacet->key, subfacet->key_len,
+ mask.data, mask.size, &ds);
ds_put_format(&ds, ", packets:%"PRIu64", bytes:%"PRIu64", used:",
subfacet->dp_packet_count, subfacet->dp_byte_count);