struct flow_wildcards *wc);
static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes);
-static void rule_invalidate(const struct rule_dpif *);
struct ofbundle {
struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
struct list bundle_node; /* In struct ofbundle's "ports" list. */
struct cfm *cfm; /* Connectivity Fault Management, if any. */
struct bfd *bfd; /* BFD, if any. */
- tag_type tag; /* Tag associated with this port. */
bool may_enable; /* May be enabled in bonds. */
bool is_tunnel; /* This port is a tunnel. */
long long int carrier_seq; /* Carrier status changes. */
enum stp_state stp_state; /* Always STP_DISABLED if STP not in use. */
long long int stp_state_entered;
- struct hmap priorities; /* Map of attached 'priority_to_dscp's. */
+ /* Queue to DSCP mapping. */
+ struct ofproto_port_queue *qdscp;
+ size_t n_qdscp;
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
*
int vlandev_vid;
};
-/* Node in 'ofport_dpif''s 'priorities' map. Used to maintain a map from
- * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
- * traffic egressing the 'ofport' with that priority should be marked with. */
-struct priority_to_dscp {
- struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'priorities' map. */
- uint32_t priority; /* Priority of this queue (see struct flow). */
-
- uint8_t dscp; /* DSCP bits to mark outgoing traffic with. */
-};
-
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
*
* This is deprecated. It is only for compatibility with broken device drivers
static void port_wait(struct ofport_dpif *);
static int set_bfd(struct ofport *, const struct smap *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
-static void ofport_clear_priorities(struct ofport_dpif *);
static void ofport_update_peer(struct ofport_dpif *);
static void run_fast_rl(void);
struct ofoperation *op;
};
-/* Extra information about a classifier table.
- * Currently used just for optimized flow revalidation. */
-struct table_dpif {
- /* If either of these is nonnull, then this table has a form that allows
- * flows to be tagged to avoid revalidating most flows for the most common
- * kinds of flow table changes. */
- struct cls_table *catchall_table; /* Table that wildcards all fields. */
- struct cls_table *other_table; /* Table with any other wildcard set. */
- uint32_t basis; /* Keeps each table's tags separate. */
-};
-
/* Reasons that we might need to revalidate every facet, and corresponding
* coverage counters.
*
enum revalidate_reason {
REV_RECONFIGURE = 1, /* Switch configuration changed. */
REV_STP, /* Spanning tree protocol port status change. */
+ REV_BOND, /* Bonding changed. */
REV_PORT_TOGGLED, /* Port enabled or disabled by CFM, LACP, ...*/
REV_FLOW_TABLE, /* Flow table changed. */
+ REV_MAC_LEARNING, /* Mac learning changed. */
REV_INCONSISTENCY /* Facet self-check failed. */
};
COVERAGE_DEFINE(rev_reconfigure);
COVERAGE_DEFINE(rev_stp);
+COVERAGE_DEFINE(rev_bond);
COVERAGE_DEFINE(rev_port_toggled);
COVERAGE_DEFINE(rev_flow_table);
+COVERAGE_DEFINE(rev_mac_learning);
COVERAGE_DEFINE(rev_inconsistency);
/* Drop keys are odp flow keys which have drop flows installed in the kernel.
/* Facet revalidation flags applying to facets which use this backer. */
enum revalidate_reason need_revalidate; /* Revalidate every facet. */
- struct tag_set revalidate_set; /* Revalidate only matching facets. */
struct hmap drop_keys; /* Set of dropped odp keys. */
bool recv_set_enable; /* Enables or disables receiving packets. */
struct classifier facets; /* Contains 'struct facet's. */
long long int consistency_rl;
- /* Revalidation. */
- struct table_dpif tables[N_TABLES];
-
/* Support for debugging async flow mods. */
struct list completions;
/* Type functions. */
+static void process_dpif_port_changes(struct dpif_backer *);
+static void process_dpif_all_ports_changed(struct dpif_backer *);
+static void process_dpif_port_change(struct dpif_backer *,
+ const char *devname);
+static void process_dpif_port_error(struct dpif_backer *, int error);
+
static struct ofproto_dpif *
lookup_ofproto_dpif_by_port_name(const char *name)
{
{
static long long int push_timer = LLONG_MIN;
struct dpif_backer *backer;
- char *devname;
- int error;
backer = shash_find_data(&all_dpif_backers, type);
if (!backer) {
* and the configuration has now changed to "false", enable receiving
* packets from the datapath. */
if (!backer->recv_set_enable && !ofproto_get_flow_restore_wait()) {
+ int error;
+
backer->recv_set_enable = true;
error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
backer->need_revalidate = REV_RECONFIGURE;
}
- if (backer->need_revalidate
- || !tag_set_is_empty(&backer->revalidate_set)) {
- struct tag_set revalidate_set = backer->revalidate_set;
- bool need_revalidate = backer->need_revalidate;
+ if (backer->need_revalidate) {
struct ofproto_dpif *ofproto;
struct simap_node *node;
struct simap tmp_backers;
switch (backer->need_revalidate) {
case REV_RECONFIGURE: COVERAGE_INC(rev_reconfigure); break;
case REV_STP: COVERAGE_INC(rev_stp); break;
+ case REV_BOND: COVERAGE_INC(rev_bond); break;
case REV_PORT_TOGGLED: COVERAGE_INC(rev_port_toggled); break;
case REV_FLOW_TABLE: COVERAGE_INC(rev_flow_table); break;
+ case REV_MAC_LEARNING: COVERAGE_INC(rev_mac_learning); break;
case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
}
-
- if (backer->need_revalidate) {
- /* Clear the drop_keys in case we should now be accepting some
- * formerly dropped flows. */
- drop_key_clear(backer);
- }
-
- /* Clear the revalidation flags. */
- tag_set_init(&backer->revalidate_set);
backer->need_revalidate = 0;
+ /* Clear the drop_keys in case we should now be accepting some
+ * formerly dropped flows. */
+ drop_key_clear(backer);
+
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
struct facet *facet, *next;
+ struct ofport_dpif *ofport;
struct cls_cursor cursor;
+ struct ofbundle *bundle;
if (ofproto->backer != backer) {
continue;
}
- if (need_revalidate) {
- struct ofport_dpif *ofport;
- struct ofbundle *bundle;
-
- xlate_ofproto_set(ofproto, ofproto->up.name, ofproto->ml,
- ofproto->mbridge, ofproto->sflow,
- ofproto->ipfix, ofproto->up.frag_handling,
- ofproto->up.forward_bpdu,
- connmgr_has_in_band(ofproto->up.connmgr),
- ofproto->netflow != NULL,
- ofproto->stp != NULL);
-
- HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
- xlate_bundle_set(ofproto, bundle, bundle->name,
- bundle->vlan_mode, bundle->vlan,
- bundle->trunks, bundle->use_priority_tags,
- bundle->bond, bundle->lacp,
- bundle->floodable);
- }
+ xlate_ofproto_set(ofproto, ofproto->up.name, ofproto->ml,
+ ofproto->stp, ofproto->mbridge,
+ ofproto->sflow, ofproto->ipfix,
+ ofproto->up.frag_handling,
+ ofproto->up.forward_bpdu,
+ connmgr_has_in_band(ofproto->up.connmgr),
+ ofproto->netflow != NULL);
- HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
- xlate_ofport_set(ofproto, ofport->bundle, ofport,
- ofport->up.ofp_port, ofport->odp_port,
- ofport->up.netdev, ofport->cfm,
- ofport->bfd, ofport->peer,
- ofport->up.pp.config, ofport->stp_state,
- ofport->is_tunnel, ofport->may_enable);
- }
+ HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
+ xlate_bundle_set(ofproto, bundle, bundle->name,
+ bundle->vlan_mode, bundle->vlan,
+ bundle->trunks, bundle->use_priority_tags,
+ bundle->bond, bundle->lacp,
+ bundle->floodable);
+ }
+
+ HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
+ int stp_port = ofport->stp_port
+ ? stp_port_no(ofport->stp_port)
+ : 0;
+ xlate_ofport_set(ofproto, ofport->bundle, ofport,
+ ofport->up.ofp_port, ofport->odp_port,
+ ofport->up.netdev, ofport->cfm,
+ ofport->bfd, ofport->peer, stp_port,
+ ofport->qdscp, ofport->n_qdscp,
+ ofport->up.pp.config, ofport->is_tunnel,
+ ofport->may_enable);
}
cls_cursor_init(&cursor, &ofproto->facets, NULL);
CLS_CURSOR_FOR_EACH_SAFE (facet, next, cr, &cursor) {
- if (need_revalidate
- || tag_set_intersects(&revalidate_set, facet->xout.tags)) {
- facet_revalidate(facet);
- run_fast_rl();
- }
+ facet_revalidate(facet);
+ run_fast_rl();
}
}
}
timer_set_duration(&backer->next_expiration, delay);
}
- /* Check for port changes in the dpif. */
- while ((error = dpif_port_poll(backer->dpif, &devname)) == 0) {
- struct ofproto_dpif *ofproto;
- struct dpif_port port;
-
- /* Don't report on the datapath's device. */
- if (!strcmp(devname, dpif_base_name(backer->dpif))) {
- goto next;
- }
-
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
- &all_ofproto_dpifs) {
- if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
- goto next;
- }
- }
-
- ofproto = lookup_ofproto_dpif_by_port_name(devname);
- if (dpif_port_query_by_name(backer->dpif, devname, &port)) {
- /* The port was removed. If we know the datapath,
- * report it through poll_set(). If we don't, it may be
- * notifying us of a removal we initiated, so ignore it.
- * If there's a pending ENOBUFS, let it stand, since
- * everything will be reevaluated. */
- if (ofproto && ofproto->port_poll_errno != ENOBUFS) {
- sset_add(&ofproto->port_poll_set, devname);
- ofproto->port_poll_errno = 0;
- }
- } else if (!ofproto) {
- /* The port was added, but we don't know with which
- * ofproto we should associate it. Delete it. */
- dpif_port_del(backer->dpif, port.port_no);
- }
- dpif_port_destroy(&port);
-
- next:
- free(devname);
- }
-
- if (error != EAGAIN) {
- struct ofproto_dpif *ofproto;
-
- /* There was some sort of error, so propagate it to all
- * ofprotos that use this backer. */
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
- &all_ofproto_dpifs) {
- if (ofproto->backer == backer) {
- sset_clear(&ofproto->port_poll_set);
- ofproto->port_poll_errno = error;
- }
- }
- }
+ process_dpif_port_changes(backer);
if (backer->governor) {
size_t n_subfacets;
return 0;
}
+/* Check for and handle port changes in 'backer''s dpif. */
+static void
+process_dpif_port_changes(struct dpif_backer *backer)
+{
+    for (;;) {
+        char *devname;
+        int error;
+
+        error = dpif_port_poll(backer->dpif, &devname);
+        switch (error) {
+        case EAGAIN:            /* No more queued port changes. */
+            return;
+
+        case ENOBUFS:           /* Notifications were dropped: recheck all. */
+            process_dpif_all_ports_changed(backer);
+            break;
+
+        case 0:                 /* One port, named 'devname', changed. */
+            process_dpif_port_change(backer, devname);
+            free(devname);      /* dpif_port_poll() transfers ownership. */
+            break;
+
+        default:                /* Hard error: propagate to all ofprotos. */
+            process_dpif_port_error(backer, error);
+            break;
+        }
+    }
+}
+
+/* Handles the dpif's report (ENOBUFS from dpif_port_poll()) that port change
+ * notifications were dropped: since any port may have changed, rechecks every
+ * device name known either to an ofproto on 'backer' or to the dpif itself. */
+static void
+process_dpif_all_ports_changed(struct dpif_backer *backer)
+{
+    struct ofproto_dpif *ofproto;
+    struct dpif_port dpif_port;
+    struct dpif_port_dump dump;
+    struct sset devnames;
+    const char *devname;
+
+    /* Collect the union of device names: every ofport of every ofproto that
+     * uses 'backer', plus every port currently in the dpif.  The sset
+     * de-duplicates, so each name is processed once below. */
+    sset_init(&devnames);
+    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+        if (ofproto->backer == backer) {
+            struct ofport *ofport;
+
+            HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
+                sset_add(&devnames, netdev_get_name(ofport->netdev));
+            }
+        }
+    }
+    DPIF_PORT_FOR_EACH (&dpif_port, &dump, backer->dpif) {
+        sset_add(&devnames, dpif_port.name);
+    }
+
+    SSET_FOR_EACH (devname, &devnames) {
+        process_dpif_port_change(backer, devname);
+    }
+    sset_destroy(&devnames);
+}
+
+/* Handles a change (addition, removal, or datapath port number change) to the
+ * dpif port named 'devname' on 'backer', updating the owning ofproto's port
+ * poll state or internal maps as appropriate. */
+static void
+process_dpif_port_change(struct dpif_backer *backer, const char *devname)
+{
+    struct ofproto_dpif *ofproto;
+    struct dpif_port port;
+
+    /* Don't report on the datapath's device. */
+    if (!strcmp(devname, dpif_base_name(backer->dpif))) {
+        return;
+    }
+
+    /* Devices backing tunnels are managed internally; ignore their changes. */
+    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
+                   &all_ofproto_dpifs) {
+        if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
+            return;
+        }
+    }
+
+    ofproto = lookup_ofproto_dpif_by_port_name(devname);
+    if (dpif_port_query_by_name(backer->dpif, devname, &port)) {
+        /* The port was removed. If we know the datapath,
+         * report it through poll_set(). If we don't, it may be
+         * notifying us of a removal we initiated, so ignore it.
+         * If there's a pending ENOBUFS, let it stand, since
+         * everything will be reevaluated. */
+        if (ofproto && ofproto->port_poll_errno != ENOBUFS) {
+            sset_add(&ofproto->port_poll_set, devname);
+            ofproto->port_poll_errno = 0;
+        }
+    } else if (!ofproto) {
+        /* The port was added, but we don't know with which
+         * ofproto we should associate it. Delete it. */
+        dpif_port_del(backer->dpif, port.port_no);
+    } else {
+        struct ofport_dpif *ofport;
+
+        ofport = ofport_dpif_cast(shash_find_data(
+                                      &ofproto->up.port_by_name, devname));
+        if (ofport
+            && ofport->odp_port != port.port_no
+            && !odp_port_to_ofport(backer, port.port_no))
+        {
+            /* 'ofport''s datapath port number has changed from
+             * 'ofport->odp_port' to 'port.port_no'. Update our internal data
+             * structures to match. */
+            hmap_remove(&backer->odp_to_ofport_map, &ofport->odp_port_node);
+            ofport->odp_port = port.port_no;
+            hmap_insert(&backer->odp_to_ofport_map, &ofport->odp_port_node,
+                        hash_odp_port(port.port_no));
+            backer->need_revalidate = REV_RECONFIGURE;
+        }
+    }
+    dpif_port_destroy(&port);
+}
+
+/* Propagate 'error' to all ofprotos based on 'backer'. */
+static void
+process_dpif_port_error(struct dpif_backer *backer, int error)
+{
+    struct ofproto_dpif *ofproto;
+
+    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+        if (ofproto->backer == backer) {
+            /* The error supersedes any queued per-port updates: drop them
+             * and latch 'error' for the ofproto to observe. */
+            sset_clear(&ofproto->port_poll_set);
+            ofproto->port_poll_errno = error;
+        }
+    }
+}
+
static int
dpif_backer_run_fast(struct dpif_backer *backer, int max_batch)
{
timer_set_duration(&backer->next_expiration, 1000);
backer->need_revalidate = 0;
simap_init(&backer->tnl_backers);
- tag_set_init(&backer->revalidate_set);
backer->recv_set_enable = !ofproto_get_flow_restore_wait();
*backerp = backer;
struct shash_node *node, *next;
odp_port_t max_ports;
int error;
- int i;
error = open_dpif_backer(ofproto->up.type, &ofproto->backer);
if (error) {
classifier_init(&ofproto->facets);
ofproto->consistency_rl = LLONG_MIN;
- for (i = 0; i < N_TABLES; i++) {
- struct table_dpif *table = &ofproto->tables[i];
-
- table->catchall_table = NULL;
- table->other_table = NULL;
- table->basis = random_uint32();
- }
-
list_init(&ofproto->completions);
ofproto_dpif_unixctl_init();
if (mbridge_need_revalidate(ofproto->mbridge)) {
ofproto->backer->need_revalidate = REV_RECONFIGURE;
- mac_learning_flush(ofproto->ml, NULL);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+ mac_learning_flush(ofproto->ml);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
/* Do not perform any periodic activity below required by 'ofproto' while
}
stp_run(ofproto);
- mac_learning_run(ofproto->ml, &ofproto->backer->revalidate_set);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+ if (mac_learning_run(ofproto->ml)) {
+ ofproto->backer->need_revalidate = REV_MAC_LEARNING;
+ }
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
/* Check the consistency of a random facet, to aid debugging. */
if (time_msec() >= ofproto->consistency_rl
hmap_node);
facet = CONTAINER_OF(cr, struct facet, cr);
- if (!tag_set_intersects(&ofproto->backer->revalidate_set,
- facet->xout.tags)) {
- if (!facet_check_consistency(facet)) {
- ofproto->backer->need_revalidate = REV_INCONSISTENCY;
- }
+ if (!facet_check_consistency(facet)) {
+ ofproto->backer->need_revalidate = REV_INCONSISTENCY;
}
}
if (ofproto->sflow) {
dpif_sflow_wait(ofproto->sflow);
}
- if (!tag_set_is_empty(&ofproto->backer->revalidate_set)) {
- poll_immediate_wake();
- }
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
port_wait(ofport);
}
if (ofproto->netflow) {
netflow_wait(ofproto->netflow);
}
+ ovs_rwlock_rdlock(&ofproto->ml->rwlock);
mac_learning_wait(ofproto->ml);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
stp_wait(ofproto);
if (ofproto->backer->need_revalidate) {
/* Shouldn't happen, but if it does just go around again. */
port->bundle = NULL;
port->cfm = NULL;
port->bfd = NULL;
- port->tag = tag_create_random();
port->may_enable = true;
port->stp_port = NULL;
port->stp_state = STP_DISABLED;
port->is_tunnel = false;
port->peer = NULL;
- hmap_init(&port->priorities);
+ port->qdscp = NULL;
+ port->n_qdscp = 0;
port->realdev_ofp_port = 0;
port->vlandev_vid = 0;
port->carrier_seq = netdev_get_carrier_resets(netdev);
dpif_sflow_del_port(ofproto->sflow, port->odp_port);
}
- ofport_clear_priorities(port);
- hmap_destroy(&port->priorities);
+ free(port->qdscp);
}
static void
set_stp_port(ofport, NULL);
}
- stp_destroy(ofproto->stp);
+ stp_unref(ofproto->stp);
ofproto->stp = NULL;
}
if (stp_learn_in_state(ofport->stp_state)
!= stp_learn_in_state(state)) {
/* xxx Learning action flows should also be flushed. */
- mac_learning_flush(ofproto->ml,
- &ofproto->backer->revalidate_set);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+ mac_learning_flush(ofproto->ml);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
fwd_change = stp_forward_in_state(ofport->stp_state)
!= stp_forward_in_state(state);
}
if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
- mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+ mac_learning_flush(ofproto->ml);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
}
}
poll_timer_wait(1000);
}
}
-
-/* Returns true if STP should process 'flow'. Sets fields in 'wc' that
- * were used to make the determination.*/
-bool
-stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
-{
- memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
- return eth_addr_equals(flow->dl_dst, eth_addr_stp);
-}
-
-void
-stp_process_packet(const struct ofport_dpif *ofport,
- const struct ofpbuf *packet)
-{
- struct ofpbuf payload = *packet;
- struct eth_header *eth = payload.data;
- struct stp_port *sp = ofport->stp_port;
-
- /* Sink packets on ports that have STP disabled when the bridge has
- * STP enabled. */
- if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
- return;
- }
-
- /* Trim off padding on payload. */
- if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
- payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
- }
-
- if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
- stp_received_bpdu(sp, payload.data, payload.size);
- }
-}
\f
int
ofproto_dpif_queue_to_priority(const struct ofproto_dpif *ofproto,
return dpif_queue_to_priority(ofproto->backer->dpif, queue_id, priority);
}
-static struct priority_to_dscp *
-get_priority(const struct ofport_dpif *ofport, uint32_t priority)
-{
- struct priority_to_dscp *pdscp;
- uint32_t hash;
-
- hash = hash_int(priority, 0);
- HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &ofport->priorities) {
- if (pdscp->priority == priority) {
- return pdscp;
- }
- }
- return NULL;
-}
-
-bool
-ofproto_dpif_dscp_from_priority(const struct ofport_dpif *ofport,
- uint32_t priority, uint8_t *dscp)
-{
- struct priority_to_dscp *pdscp = get_priority(ofport, priority);
- *dscp = pdscp ? pdscp->dscp : 0;
- return pdscp != NULL;
-}
-
-static void
-ofport_clear_priorities(struct ofport_dpif *ofport)
-{
- struct priority_to_dscp *pdscp, *next;
-
- HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &ofport->priorities) {
- hmap_remove(&ofport->priorities, &pdscp->hmap_node);
- free(pdscp);
- }
-}
-
static int
-set_queues(struct ofport *ofport_,
- const struct ofproto_port_queue *qdscp_list,
+set_queues(struct ofport *ofport_, const struct ofproto_port_queue *qdscp,
size_t n_qdscp)
{
struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
- struct hmap new = HMAP_INITIALIZER(&new);
- size_t i;
- for (i = 0; i < n_qdscp; i++) {
- struct priority_to_dscp *pdscp;
- uint32_t priority;
- uint8_t dscp;
-
- dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
- if (dpif_queue_to_priority(ofproto->backer->dpif, qdscp_list[i].queue,
- &priority)) {
- continue;
- }
-
- pdscp = get_priority(ofport, priority);
- if (pdscp) {
- hmap_remove(&ofport->priorities, &pdscp->hmap_node);
- } else {
- pdscp = xmalloc(sizeof *pdscp);
- pdscp->priority = priority;
- pdscp->dscp = dscp;
- ofproto->backer->need_revalidate = REV_RECONFIGURE;
- }
-
- if (pdscp->dscp != dscp) {
- pdscp->dscp = dscp;
- ofproto->backer->need_revalidate = REV_RECONFIGURE;
- }
-
- hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0));
- }
-
- if (!hmap_is_empty(&ofport->priorities)) {
- ofport_clear_priorities(ofport);
+ if (ofport->n_qdscp != n_qdscp
+ || (n_qdscp && memcmp(ofport->qdscp, qdscp,
+ n_qdscp * sizeof *qdscp))) {
ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ free(ofport->qdscp);
+ ofport->qdscp = n_qdscp
+ ? xmemdup(qdscp, n_qdscp * sizeof *qdscp)
+ : NULL;
+ ofport->n_qdscp = n_qdscp;
}
- hmap_swap(&new, &ofport->priorities);
- hmap_destroy(&new);
-
return 0;
}
\f
struct mac_entry *mac, *next_mac;
ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ ovs_rwlock_wrlock(&ml->rwlock);
LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
if (mac->port.p == bundle) {
if (all_ofprotos) {
if (o != ofproto) {
struct mac_entry *e;
- e = mac_learning_lookup(o->ml, mac->mac, mac->vlan,
- NULL);
+ ovs_rwlock_wrlock(&o->ml->rwlock);
+ e = mac_learning_lookup(o->ml, mac->mac, mac->vlan);
if (e) {
mac_learning_expire(o->ml, e);
}
+ ovs_rwlock_unlock(&o->ml->rwlock);
}
}
}
mac_learning_expire(ml, mac);
}
}
+ ovs_rwlock_unlock(&ml->rwlock);
}
static struct ofbundle *
struct mac_entry *e;
error = n_packets = n_errors = 0;
+ ovs_rwlock_rdlock(&ofproto->ml->rwlock);
LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
if (e->port.p != bundle) {
struct ofpbuf *learning_packet;
n_packets++;
}
}
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
if (n_errors) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
}
- bond_run(bundle->bond, &bundle->ofproto->backer->revalidate_set,
- lacp_status(bundle->lacp));
+ if (bond_run(bundle->bond, lacp_status(bundle->lacp))) {
+ bundle->ofproto->backer->need_revalidate = REV_BOND;
+ }
+
if (bond_should_send_learning_packets(bundle->bond)) {
bundle_send_learning_packets(bundle);
}
set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
- mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
+ mac_learning_flush(ofproto->ml);
}
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
return 0;
}
size_t max_entries)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
mac_learning_set_idle_time(ofproto->ml, idle_time);
mac_learning_set_max_entries(ofproto->ml, max_entries);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
\f
/* Ports. */
op->xout_garbage = false;
op->dpif_op.type = DPIF_OP_FLOW_PUT;
op->subfacet = subfacet;
- put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
+ put->flags = DPIF_FP_CREATE;
put->key = miss->key;
put->key_len = miss->key_len;
put->mask = op->mask.data;
drop_key = drop_key_lookup(backer, upcall->key, upcall->key_len);
if (!drop_key) {
- drop_key = xmalloc(sizeof *drop_key);
- drop_key->key = xmemdup(upcall->key, upcall->key_len);
- drop_key->key_len = upcall->key_len;
-
- hmap_insert(&backer->drop_keys, &drop_key->hmap_node,
- hash_bytes(drop_key->key, drop_key->key_len, 0));
- dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
- drop_key->key, drop_key->key_len,
- NULL, 0, NULL, 0, NULL);
+ int ret;
+ ret = dpif_flow_put(backer->dpif,
+ DPIF_FP_CREATE | DPIF_FP_MODIFY,
+ upcall->key, upcall->key_len,
+ NULL, 0, NULL, 0, NULL);
+
+ if (!ret) {
+ drop_key = xmalloc(sizeof *drop_key);
+ drop_key->key = xmemdup(upcall->key, upcall->key_len);
+ drop_key->key_len = upcall->key_len;
+
+ hmap_insert(&backer->drop_keys, &drop_key->hmap_node,
+ hash_bytes(drop_key->key, drop_key->key_len, 0));
+ }
}
continue;
}
HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
if (bundle->bond) {
- bond_rebalance(bundle->bond, &backer->revalidate_set);
+ bond_rebalance(bundle->bond);
}
}
}
facet = facet_find(ofproto, flow);
if (facet
- && (ofproto->backer->need_revalidate
- || tag_set_intersects(&ofproto->backer->revalidate_set,
- facet->xout.tags))
+ && ofproto->backer->need_revalidate
&& !facet_revalidate(facet)) {
return NULL;
}
}
/* Update 'facet' now that we've taken care of all the old state. */
- facet->xout.tags = xout.tags;
facet->xout.slow = xout.slow;
facet->xout.has_learn = xout.has_learn;
facet->xout.has_normal = xout.has_normal;
enum dpif_flow_put_flags flags;
int ret;
- flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
+ flags = subfacet->path == SF_NOT_INSTALLED ? DPIF_FP_CREATE
+ : DPIF_FP_MODIFY;
if (stats) {
flags |= DPIF_FP_ZERO_STATS;
}
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
- rule_invalidate(rule);
+ ofproto->backer->need_revalidate = REV_FLOW_TABLE;
if (clogged) {
struct dpif_completion *c = xmalloc(sizeof *c);
c->op = rule->up.pending;
rule_construct(struct rule *rule_)
{
struct rule_dpif *rule = rule_dpif_cast(rule_);
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
- struct rule_dpif *victim;
- uint8_t table_id;
-
rule->packet_count = 0;
rule->byte_count = 0;
-
- table_id = rule->up.table_id;
- victim = rule_dpif_cast(ofoperation_get_victim(rule->up.pending));
- if (victim) {
- rule->tag = victim->tag;
- } else if (table_id == 0) {
- rule->tag = 0;
- } else {
- struct flow flow;
-
- miniflow_expand(&rule->up.cr.match.flow, &flow);
- rule->tag = rule_calculate_tag(&flow, &rule->up.cr.match.mask,
- ofproto->tables[table_id].basis);
- }
-
complete_operation(rule);
return 0;
}
return odp_put_userspace_action(pid, cookie, cookie_size, odp_actions);
}
-
-tag_type
-calculate_flow_tag(struct ofproto_dpif *ofproto, const struct flow *flow,
- uint8_t table_id, struct rule_dpif *rule)
-{
- if (table_id > 0 && table_id < N_TABLES) {
- struct table_dpif *table = &ofproto->tables[table_id];
- if (table->other_table) {
- return (rule && rule->tag
- ? rule->tag
- : rule_calculate_tag(flow, &table->other_table->mask,
- table->basis));
- }
- }
-
- return 0;
-}
-\f
-/* Optimized flow revalidation.
- *
- * It's a difficult problem, in general, to tell which facets need to have
- * their actions recalculated whenever the OpenFlow flow table changes. We
- * don't try to solve that general problem: for most kinds of OpenFlow flow
- * table changes, we recalculate the actions for every facet. This is
- * relatively expensive, but it's good enough if the OpenFlow flow table
- * doesn't change very often.
- *
- * However, we can expect one particular kind of OpenFlow flow table change to
- * happen frequently: changes caused by MAC learning. To avoid wasting a lot
- * of CPU on revalidating every facet whenever MAC learning modifies the flow
- * table, we add a special case that applies to flow tables in which every rule
- * has the same form (that is, the same wildcards), except that the table is
- * also allowed to have a single "catch-all" flow that matches all packets. We
- * optimize this case by tagging all of the facets that resubmit into the table
- * and invalidating the same tag whenever a flow changes in that table. The
- * end result is that we revalidate just the facets that need it (and sometimes
- * a few more, but not all of the facets or even all of the facets that
- * resubmit to the table modified by MAC learning). */
-
-/* Calculates the tag to use for 'flow' and mask 'mask' when it is inserted
- * into an OpenFlow table with the given 'basis'. */
-tag_type
-rule_calculate_tag(const struct flow *flow, const struct minimask *mask,
- uint32_t secret)
-{
- if (minimask_is_catchall(mask)) {
- return 0;
- } else {
- uint32_t hash = flow_hash_in_minimask(flow, mask, secret);
- return tag_create_deterministic(hash);
- }
-}
-
-/* Following a change to OpenFlow table 'table_id' in 'ofproto', update the
- * taggability of that table.
- *
- * This function must be called after *each* change to a flow table. If you
- * skip calling it on some changes then the pointer comparisons at the end can
- * be invalid if you get unlucky. For example, if a flow removal causes a
- * cls_table to be destroyed and then a flow insertion causes a cls_table with
- * different wildcards to be created with the same address, then this function
- * will incorrectly skip revalidation. */
-static void
-table_update_taggable(struct ofproto_dpif *ofproto, uint8_t table_id)
-{
- struct table_dpif *table = &ofproto->tables[table_id];
- const struct oftable *oftable = &ofproto->up.tables[table_id];
- struct cls_table *catchall, *other;
- struct cls_table *t;
-
- catchall = other = NULL;
-
- switch (hmap_count(&oftable->cls.tables)) {
- case 0:
- /* We could tag this OpenFlow table but it would make the logic a
- * little harder and it's a corner case that doesn't seem worth it
- * yet. */
- break;
-
- case 1:
- case 2:
- HMAP_FOR_EACH (t, hmap_node, &oftable->cls.tables) {
- if (cls_table_is_catchall(t)) {
- catchall = t;
- } else if (!other) {
- other = t;
- } else {
- /* Indicate that we can't tag this by setting both tables to
- * NULL. (We know that 'catchall' is already NULL.) */
- other = NULL;
- }
- }
- break;
-
- default:
- /* Can't tag this table. */
- break;
- }
-
- if (table->catchall_table != catchall || table->other_table != other) {
- table->catchall_table = catchall;
- table->other_table = other;
- ofproto->backer->need_revalidate = REV_FLOW_TABLE;
- }
-}
-
-/* Given 'rule' that has changed in some way (either it is a rule being
- * inserted, a rule being deleted, or a rule whose actions are being
- * modified), marks facets for revalidation to ensure that packets will be
- * forwarded correctly according to the new state of the flow table.
- *
- * This function must be called after *each* change to a flow table. See
- * the comment on table_update_taggable() for more information. */
-static void
-rule_invalidate(const struct rule_dpif *rule)
-{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
-
- table_update_taggable(ofproto, rule->up.table_id);
-
- if (!ofproto->backer->need_revalidate) {
- struct table_dpif *table = &ofproto->tables[rule->up.table_id];
-
- if (table->other_table && rule->tag) {
- tag_set_add(&ofproto->backer->revalidate_set, rule->tag);
- } else {
- ofproto->backer->need_revalidate = REV_FLOW_TABLE;
- }
- }
-}
\f
static bool
set_frag_handling(struct ofproto *ofproto_,
unixctl_command_reply_error(conn, "no such bridge");
return;
}
- mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+ mac_learning_flush(ofproto->ml);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
} else {
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
- mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+ mac_learning_flush(ofproto->ml);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
}
}
ds_put_cstr(&ds, " port VLAN MAC Age\n");
+ ovs_rwlock_rdlock(&ofproto->ml->rwlock);
LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
struct ofbundle *bundle = e->port.p;
char name[OFP_MAX_PORT_NAME_LEN];
name, e->vlan, ETH_ADDR_ARGS(e->mac),
mac_entry_age(ofproto->ml, e));
}
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
unixctl_command_reply(conn, ds_cstr(&ds));
ds_destroy(&ds);
}