int i;
if (sw->ml) {
+ ovs_rwlock_wrlock(&sw->ml->rwlock);
mac_learning_run(sw->ml, NULL);
+ ovs_rwlock_unlock(&sw->ml->rwlock);
}
rconn_run(sw->rconn);
lswitch_wait(struct lswitch *sw)
{
if (sw->ml) {
+ ovs_rwlock_rdlock(&sw->ml->rwlock);
mac_learning_wait(sw->ml);
+ ovs_rwlock_unlock(&sw->ml->rwlock);
}
rconn_run_wait(sw->rconn);
rconn_recv_wait(sw->rconn);
ofp_port_t out_port;
/* Learn the source MAC. */
+ ovs_rwlock_wrlock(&sw->ml->rwlock);
if (mac_learning_may_learn(sw->ml, flow->dl_src, 0)) {
struct mac_entry *mac = mac_learning_insert(sw->ml, flow->dl_src, 0);
        if (mac_entry_is_new(mac)) {
mac_learning_changed(sw->ml, mac);
}
}
+ ovs_rwlock_unlock(&sw->ml->rwlock);
/* Drop frames for reserved multicast addresses. */
if (eth_addr_is_reserved(flow->dl_dst)) {
if (sw->ml) {
struct mac_entry *mac;
+ ovs_rwlock_rdlock(&sw->ml->rwlock);
mac = mac_learning_lookup(sw->ml, flow->dl_dst, 0, NULL);
if (mac) {
out_port = mac->port.ofp_port;
if (out_port == flow->in_port.ofp_port) {
/* Don't send a packet back out its input port. */
+ ovs_rwlock_unlock(&sw->ml->rwlock);
return OFPP_NONE;
}
}
+ ovs_rwlock_unlock(&sw->ml->rwlock);
}
/* Check if we need to use "NORMAL" action. */
* and return false. */
static bool
get_lru(struct mac_learning *ml, struct mac_entry **e)
+ OVS_REQ_RDLOCK(ml->rwlock)
{
if (!list_is_empty(&ml->lrus)) {
*e = mac_entry_from_lru_node(ml->lrus.next);
ml->idle_time = normalize_idle_time(idle_time);
ml->max_entries = MAC_DEFAULT_MAX;
tag_set_init(&ml->tags);
- ml->ref_cnt = 1;
+ atomic_init(&ml->ref_cnt, 1);
+ ovs_rwlock_init(&ml->rwlock);
return ml;
}
{
struct mac_learning *ml = CONST_CAST(struct mac_learning *, ml_);
if (ml) {
- ovs_assert(ml->ref_cnt > 0);
- ml->ref_cnt++;
+ int orig;
+ atomic_add(&ml->ref_cnt, 1, &orig);
+ ovs_assert(orig > 0);
}
return ml;
}
void
mac_learning_unref(struct mac_learning *ml)
{
+ int orig;
+
if (!ml) {
return;
}
- ovs_assert(ml->ref_cnt > 0);
- if (!--ml->ref_cnt) {
+ atomic_sub(&ml->ref_cnt, 1, &orig);
+ ovs_assert(orig > 0);
+ if (orig == 1) {
struct mac_entry *e, *next;
HMAP_FOR_EACH_SAFE (e, next, hmap_node, &ml->table) {
hmap_destroy(&ml->table);
bitmap_free(ml->flood_vlans);
+ ovs_rwlock_destroy(&ml->rwlock);
free(ml);
}
}
#include <time.h>
#include "hmap.h"
#include "list.h"
+#include "ovs-atomic.h"
+#include "ovs-thread.h"
#include "packets.h"
#include "tag.h"
#include "timeval.h"
* relearning based on a reflection from a bond slave. */
#define MAC_GRAT_ARP_LOCK_TIME 5
-/* A MAC learning table entry. */
+/* A MAC learning table entry.
+ * Guarded by the owning 'mac_learning's rwlock. */
struct mac_entry {
struct hmap_node hmap_node; /* Node in a mac_learning hmap. */
- struct list lru_node; /* Element in 'lrus' list. */
time_t expires; /* Expiration time. */
time_t grat_arp_lock; /* Gratuitous ARP lock expiration time. */
uint8_t mac[ETH_ADDR_LEN]; /* Known MAC address. */
uint16_t vlan; /* VLAN tag. */
tag_type tag; /* Tag for this learning entry. */
+ /* The following are marked guarded to prevent users from iterating over or
+ * accessing a mac_entry without holding the parent mac_learning rwlock. */
+ struct list lru_node OVS_GUARDED; /* Element in 'lrus' list. */
+
/* Learned port. */
union {
void *p;
ofp_port_t ofp_port;
- } port;
+ } port OVS_GUARDED;
};
-int mac_entry_age(const struct mac_learning *, const struct mac_entry *);
-
/* Returns true if mac_learning_insert() just created 'mac' and the caller has
* not yet properly initialized it. */
static inline bool mac_entry_is_new(const struct mac_entry *mac)
/* MAC learning table. */
struct mac_learning {
struct hmap table; /* Learning table. */
- struct list lrus; /* In-use entries, least recently used at the
- front, most recently used at the back. */
+ struct list lrus OVS_GUARDED; /* In-use entries, least recently used at the
+ front, most recently used at the back. */
uint32_t secret; /* Secret for randomizing hash table. */
unsigned long *flood_vlans; /* Bitmap of learning disabled VLANs. */
unsigned int idle_time; /* Max age before deleting an entry. */
size_t max_entries; /* Max number of learned MACs. */
struct tag_set tags; /* Tags which have changed. */
- int ref_cnt;
+ atomic_int ref_cnt;
+ struct ovs_rwlock rwlock;
};
+int mac_entry_age(const struct mac_learning *ml, const struct mac_entry *e)
+ OVS_REQ_RDLOCK(ml->rwlock);
+
/* Basics. */
struct mac_learning *mac_learning_create(unsigned int idle_time);
struct mac_learning *mac_learning_ref(const struct mac_learning *);
void mac_learning_unref(struct mac_learning *);
-void mac_learning_run(struct mac_learning *, struct tag_set *);
-void mac_learning_wait(struct mac_learning *);
+void mac_learning_run(struct mac_learning *ml, struct tag_set *)
+ OVS_REQ_WRLOCK(ml->rwlock);
+void mac_learning_wait(struct mac_learning *ml)
+ OVS_REQ_RDLOCK(ml->rwlock);
/* Configuration. */
-bool mac_learning_set_flood_vlans(struct mac_learning *,
- const unsigned long *bitmap);
-void mac_learning_set_idle_time(struct mac_learning *, unsigned int idle_time);
-void mac_learning_set_max_entries(struct mac_learning *, size_t max_entries);
+bool mac_learning_set_flood_vlans(struct mac_learning *ml,
+ const unsigned long *bitmap)
+ OVS_REQ_WRLOCK(ml->rwlock);
+void mac_learning_set_idle_time(struct mac_learning *ml,
+ unsigned int idle_time)
+ OVS_REQ_WRLOCK(ml->rwlock);
+void mac_learning_set_max_entries(struct mac_learning *ml, size_t max_entries)
+ OVS_REQ_WRLOCK(ml->rwlock);
/* Learning. */
-bool mac_learning_may_learn(const struct mac_learning *,
+bool mac_learning_may_learn(const struct mac_learning *ml,
const uint8_t src_mac[ETH_ADDR_LEN],
- uint16_t vlan);
-struct mac_entry *mac_learning_insert(struct mac_learning *,
+ uint16_t vlan)
+ OVS_REQ_RDLOCK(ml->rwlock);
+struct mac_entry *mac_learning_insert(struct mac_learning *ml,
const uint8_t src[ETH_ADDR_LEN],
- uint16_t vlan);
-void mac_learning_changed(struct mac_learning *, struct mac_entry *);
+ uint16_t vlan)
+ OVS_REQ_WRLOCK(ml->rwlock);
+void mac_learning_changed(struct mac_learning *ml, struct mac_entry *e)
+ OVS_REQ_WRLOCK(ml->rwlock);
/* Lookup. */
-struct mac_entry *mac_learning_lookup(const struct mac_learning *,
+struct mac_entry *mac_learning_lookup(const struct mac_learning *ml,
const uint8_t dst[ETH_ADDR_LEN],
- uint16_t vlan, tag_type *);
+ uint16_t vlan, tag_type *)
+ OVS_REQ_RDLOCK(ml->rwlock);
/* Flushing. */
-void mac_learning_expire(struct mac_learning *, struct mac_entry *);
-void mac_learning_flush(struct mac_learning *, struct tag_set *);
+void mac_learning_expire(struct mac_learning *ml, struct mac_entry *e)
+ OVS_REQ_WRLOCK(ml->rwlock);
+void mac_learning_flush(struct mac_learning *ml, struct tag_set *)
+ OVS_REQ_WRLOCK(ml->rwlock);
#endif /* mac-learning.h */
return;
}
+ ovs_rwlock_wrlock(&xbridge->ml->rwlock);
if (!mac_learning_may_learn(xbridge->ml, flow->dl_src, vlan)) {
- return;
+ goto out;
}
mac = mac_learning_insert(xbridge->ml, flow->dl_src, vlan);
if (!in_xbundle->bond) {
mac_entry_set_grat_arp_lock(mac);
} else if (mac_entry_is_grat_arp_locked(mac)) {
- return;
+ goto out;
}
}
mac->port.p = in_xbundle->ofbundle;
mac_learning_changed(xbridge->ml, mac);
}
+out:
+ ovs_rwlock_unlock(&xbridge->ml->rwlock);
}
/* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
return false;
case BV_DROP_IF_MOVED:
+ ovs_rwlock_rdlock(&xbridge->ml->rwlock);
mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan, NULL);
if (mac && mac->port.p != in_xbundle->ofbundle &&
(!is_gratuitous_arp(flow, &ctx->xout->wc)
|| mac_entry_is_grat_arp_locked(mac))) {
+ ovs_rwlock_unlock(&xbridge->ml->rwlock);
xlate_report(ctx, "SLB bond thinks this packet looped back, "
"dropping");
return false;
}
+ ovs_rwlock_unlock(&xbridge->ml->rwlock);
break;
}
}
}
/* Determine output bundle. */
+ ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan,
&ctx->xout->tags);
if (mac) {
}
ctx->xout->nf_output_iface = NF_OUT_FLOOD;
}
+ ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);
}
/* Compose SAMPLE action for sFlow or IPFIX. The given probability is
if (mbridge_need_revalidate(ofproto->mbridge)) {
ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
mac_learning_flush(ofproto->ml, NULL);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
/* Do not perform any periodic activity below required by 'ofproto' while
}
stp_run(ofproto);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
mac_learning_run(ofproto->ml, &ofproto->backer->revalidate_set);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
/* Check the consistency of a random facet, to aid debugging. */
if (time_msec() >= ofproto->consistency_rl
if (ofproto->netflow) {
netflow_wait(ofproto->netflow);
}
+ ovs_rwlock_rdlock(&ofproto->ml->rwlock);
mac_learning_wait(ofproto->ml);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
stp_wait(ofproto);
if (ofproto->backer->need_revalidate) {
/* Shouldn't happen, but if it does just go around again. */
if (stp_learn_in_state(ofport->stp_state)
!= stp_learn_in_state(state)) {
/* xxx Learning action flows should also be flushed. */
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
mac_learning_flush(ofproto->ml,
&ofproto->backer->revalidate_set);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
fwd_change = stp_forward_in_state(ofport->stp_state)
!= stp_forward_in_state(state);
}
if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
}
}
struct mac_entry *mac, *next_mac;
ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ ovs_rwlock_wrlock(&ml->rwlock);
LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
if (mac->port.p == bundle) {
if (all_ofprotos) {
if (o != ofproto) {
struct mac_entry *e;
+ ovs_rwlock_wrlock(&o->ml->rwlock);
e = mac_learning_lookup(o->ml, mac->mac, mac->vlan,
NULL);
if (e) {
mac_learning_expire(o->ml, e);
}
+ ovs_rwlock_unlock(&o->ml->rwlock);
}
}
}
mac_learning_expire(ml, mac);
}
}
+ ovs_rwlock_unlock(&ml->rwlock);
}
static struct ofbundle *
struct mac_entry *e;
error = n_packets = n_errors = 0;
+ ovs_rwlock_rdlock(&ofproto->ml->rwlock);
LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
if (e->port.p != bundle) {
struct ofpbuf *learning_packet;
n_packets++;
}
}
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
if (n_errors) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
}
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
return 0;
}
size_t max_entries)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
mac_learning_set_idle_time(ofproto->ml, idle_time);
mac_learning_set_max_entries(ofproto->ml, max_entries);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
\f
/* Ports. */
unixctl_command_reply_error(conn, "no such bridge");
return;
}
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
} else {
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
}
}
ds_put_cstr(&ds, " port VLAN MAC Age\n");
+ ovs_rwlock_rdlock(&ofproto->ml->rwlock);
LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
struct ofbundle *bundle = e->port.p;
char name[OFP_MAX_PORT_NAME_LEN];
name, e->vlan, ETH_ADDR_ARGS(e->mac),
mac_entry_age(ofproto->ml, e));
}
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
unixctl_command_reply(conn, ds_cstr(&ds));
ds_destroy(&ds);
}