int forwarding_override; /* Manual override of 'forwarding' status. */
atomic_bool check_tnl_key; /* Verify tunnel key of inbound packets? */
- atomic_int ref_cnt;
+ struct ovs_refcount ref_cnt;
/* When forward_if_rx is true, bfd_forwarding() will return
* true as long as there are incoming packets received.
bfd->diag = DIAG_NONE;
bfd->min_tx = 1000;
bfd->mult = 3;
- atomic_init(&bfd->ref_cnt, 1);
+ ovs_refcount_init(&bfd->ref_cnt);
bfd->netdev = netdev_ref(netdev);
bfd->rx_packets = bfd_rx_packets(bfd);
bfd->in_decay = false;
{
struct bfd *bfd = CONST_CAST(struct bfd *, bfd_);
if (bfd) {
- int orig;
- atomic_add(&bfd->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
+ ovs_refcount_ref(&bfd->ref_cnt);
}
return bfd;
}
void
bfd_unref(struct bfd *bfd) OVS_EXCLUDED(mutex)
{
- if (bfd) {
- int orig;
-
- atomic_sub(&bfd->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
- if (orig == 1) {
- ovs_mutex_lock(&mutex);
- hmap_remove(all_bfds, &bfd->node);
- netdev_close(bfd->netdev);
- free(bfd->name);
- atomic_destroy(&bfd->ref_cnt);
- free(bfd);
- ovs_mutex_unlock(&mutex);
- }
+ if (bfd && ovs_refcount_unref(&bfd->ref_cnt) == 1) {
+ ovs_mutex_lock(&mutex);
+ hmap_remove(all_bfds, &bfd->node);
+ netdev_close(bfd->netdev);
+ ovs_refcount_destroy(&bfd->ref_cnt);
+ free(bfd->name);
+ free(bfd);
+ ovs_mutex_unlock(&mutex);
}
}
atomic_bool check_tnl_key; /* Verify the tunnel key of inbound packets? */
atomic_bool extended; /* Extended mode. */
- atomic_int ref_cnt;
+ struct ovs_refcount ref_cnt;
uint64_t flap_count; /* Count the flaps since boot. */
};
cfm->flap_count = 0;
atomic_init(&cfm->extended, false);
atomic_init(&cfm->check_tnl_key, false);
- atomic_init(&cfm->ref_cnt, 1);
+ ovs_refcount_init(&cfm->ref_cnt);
ovs_mutex_lock(&mutex);
cfm_generate_maid(cfm);
cfm_unref(struct cfm *cfm) OVS_EXCLUDED(mutex)
{
struct remote_mp *rmp, *rmp_next;
- int orig;
if (!cfm) {
return;
}
- atomic_sub(&cfm->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
- if (orig != 1) {
+ if (ovs_refcount_unref(&cfm->ref_cnt) != 1) {
return;
}
atomic_destroy(&cfm->extended);
atomic_destroy(&cfm->check_tnl_key);
- atomic_destroy(&cfm->ref_cnt);
+ ovs_refcount_destroy(&cfm->ref_cnt);
free(cfm);
}
{
struct cfm *cfm = CONST_CAST(struct cfm *, cfm_);
if (cfm) {
- int orig;
- atomic_add(&cfm->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
+ ovs_refcount_ref(&cfm->ref_cnt);
}
return cfm;
}
bool update; /* True if lacp_update() needs to be called. */
bool fallback_ab; /* True if fallback to active-backup on LACP failure. */
- atomic_int ref_cnt;
+ struct ovs_refcount ref_cnt;
};
struct slave {
lacp = xzalloc(sizeof *lacp);
hmap_init(&lacp->slaves);
- atomic_init(&lacp->ref_cnt, 1);
+ ovs_refcount_init(&lacp->ref_cnt);
ovs_mutex_lock(&mutex);
list_push_back(all_lacps, &lacp->node);
{
struct lacp *lacp = CONST_CAST(struct lacp *, lacp_);
if (lacp) {
- int orig;
- atomic_add(&lacp->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
+ ovs_refcount_ref(&lacp->ref_cnt);
}
return lacp;
}
void
lacp_unref(struct lacp *lacp) OVS_EXCLUDED(mutex)
{
- int orig;
-
- if (!lacp) {
- return;
- }
-
- atomic_sub(&lacp->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
- if (orig == 1) {
+ if (lacp && ovs_refcount_unref(&lacp->ref_cnt) == 1) {
struct slave *slave, *next;
ovs_mutex_lock(&mutex);
hmap_destroy(&lacp->slaves);
list_remove(&lacp->node);
free(lacp->name);
- atomic_destroy(&lacp->ref_cnt);
+ ovs_refcount_destroy(&lacp->ref_cnt);
free(lacp);
ovs_mutex_unlock(&mutex);
}
ml->idle_time = normalize_idle_time(idle_time);
ml->max_entries = MAC_DEFAULT_MAX;
ml->need_revalidate = false;
- atomic_init(&ml->ref_cnt, 1);
+ ovs_refcount_init(&ml->ref_cnt);
ovs_rwlock_init(&ml->rwlock);
return ml;
}
{
struct mac_learning *ml = CONST_CAST(struct mac_learning *, ml_);
if (ml) {
- int orig;
- atomic_add(&ml->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
+ ovs_refcount_ref(&ml->ref_cnt);
}
return ml;
}
void
mac_learning_unref(struct mac_learning *ml)
{
- int orig;
-
- if (!ml) {
- return;
- }
-
- atomic_sub(&ml->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
- if (orig == 1) {
+ if (ml && ovs_refcount_unref(&ml->ref_cnt) == 1) {
struct mac_entry *e, *next;
HMAP_FOR_EACH_SAFE (e, next, hmap_node, &ml->table) {
bitmap_free(ml->flood_vlans);
ovs_rwlock_destroy(&ml->rwlock);
- atomic_destroy(&ml->ref_cnt);
+ ovs_refcount_destroy(&ml->ref_cnt);
free(ml);
}
}
/*
- * Copyright (c) 2008, 2009, 2010, 2011, 2012 Nicira, Inc.
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
unsigned long *flood_vlans; /* Bitmap of learning disabled VLANs. */
unsigned int idle_time; /* Max age before deleting an entry. */
size_t max_entries; /* Max number of learned MACs. */
- atomic_int ref_cnt;
+ struct ovs_refcount ref_cnt;
struct ovs_rwlock rwlock;
bool need_revalidate;
};
#endif
#undef IN_OVS_ATOMIC_H
+/* Reference count.  Updates to the count are atomic, so a refcount may be
+ * shared between threads. */
+struct ovs_refcount {
+ atomic_uint count;
+};
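+
+/* Typical usage (a minimal sketch; "struct foo", foo_create(), foo_ref(),
+ * and foo_unref() are hypothetical, not part of OVS):
+ *
+ *     struct foo {
+ *         struct ovs_refcount ref_cnt;
+ *         char *name;
+ *     };
+ *
+ *     struct foo *
+ *     foo_create(const char *name)
+ *     {
+ *         struct foo *foo = xmalloc(sizeof *foo);
+ *         ovs_refcount_init(&foo->ref_cnt);
+ *         foo->name = xstrdup(name);
+ *         return foo;
+ *     }
+ *
+ *     struct foo *
+ *     foo_ref(const struct foo *foo_)
+ *     {
+ *         struct foo *foo = CONST_CAST(struct foo *, foo_);
+ *         if (foo) {
+ *             ovs_refcount_ref(&foo->ref_cnt);
+ *         }
+ *         return foo;
+ *     }
+ *
+ *     void
+ *     foo_unref(struct foo *foo)
+ *     {
+ *         if (foo && ovs_refcount_unref(&foo->ref_cnt) == 1) {
+ *             ovs_refcount_destroy(&foo->ref_cnt);
+ *             free(foo->name);
+ *             free(foo);
+ *         }
+ *     }
+ */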
+
+/* Initializes 'refcount'. The reference count is initially 1. */
+static inline void
+ovs_refcount_init(struct ovs_refcount *refcount)
+{
+ atomic_init(&refcount->count, 1);
+}
+
+/* Destroys 'refcount'. */
+static inline void
+ovs_refcount_destroy(struct ovs_refcount *refcount)
+{
+ atomic_destroy(&refcount->count);
+}
+
+/* Increments 'refcount'.  The caller must already hold a reference: taking a
+ * new reference on an object whose count has dropped to zero is an error
+ * (enforced by the assertion below). */
+static inline void
+ovs_refcount_ref(struct ovs_refcount *refcount)
+{
+ unsigned int old_refcount;
+
+ atomic_add(&refcount->count, 1, &old_refcount);
+ ovs_assert(old_refcount > 0);
+}
+
+/* Decrements 'refcount' and returns the previous reference count. Often used
+ * in this form:
+ *
+ * if (ovs_refcount_unref(&object->ref_cnt) == 1) {
+ * // ...uninitialize object...
+ * free(object);
+ * }
+ */
+static inline unsigned int
+ovs_refcount_unref(struct ovs_refcount *refcount)
+{
+ unsigned int old_refcount;
+
+ atomic_sub(&refcount->count, 1, &old_refcount);
+ ovs_assert(old_refcount > 0);
+ return old_refcount;
+}
+
+/* Reads and returns 'refcount_''s current reference count.
+ *
+ * Rarely useful. */
+static inline unsigned int
+ovs_refcount_read(const struct ovs_refcount *refcount_)
+{
+ struct ovs_refcount *refcount
+ = CONST_CAST(struct ovs_refcount *, refcount_);
+ unsigned int count;
+
+ atomic_read(&refcount->count, &count);
+ return count;
+}
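+
+/* A sketch of one of the rare uses of ovs_refcount_read(): a caller that
+ * already holds a reference and reads a count of 1 knows that it is the sole
+ * owner, because ovs_refcount_ref() may only be called by a thread that
+ * already holds a reference.  ('object' and modify_in_place() are
+ * hypothetical.)
+ *
+ *     if (ovs_refcount_read(&object->ref_cnt) == 1) {
+ *         modify_in_place(object);
+ *     }
+ */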
+
#endif /* ovs-atomic.h */
void (*send_bpdu)(struct ofpbuf *bpdu, int port_no, void *aux);
void *aux;
- atomic_int ref_cnt;
+ struct ovs_refcount ref_cnt;
};
static struct ovs_mutex mutex;
p->path_cost = 19; /* Recommended default for 100 Mb/s link. */
stp_initialize_port(p, STP_DISABLED);
}
- atomic_init(&stp->ref_cnt, 1);
+ ovs_refcount_init(&stp->ref_cnt);
list_push_back(all_stps, &stp->node);
ovs_mutex_unlock(&mutex);
{
struct stp *stp = CONST_CAST(struct stp *, stp_);
if (stp) {
- int orig;
- atomic_add(&stp->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
+ ovs_refcount_ref(&stp->ref_cnt);
}
return stp;
}
void
stp_unref(struct stp *stp)
{
- int orig;
-
- if (!stp) {
- return;
- }
-
- atomic_sub(&stp->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
- if (orig == 1) {
+ if (stp && ovs_refcount_unref(&stp->ref_cnt) == 1) {
ovs_mutex_lock(&mutex);
list_remove(&stp->node);
ovs_mutex_unlock(&mutex);
free(stp->name);
- atomic_destroy(&stp->ref_cnt);
+ ovs_refcount_destroy(&stp->ref_cnt);
free(stp);
}
}
long long int next_fake_iface_update; /* LLONG_MAX if disabled. */
bool lacp_fallback_ab; /* Fallback to active-backup on LACP failure. */
- atomic_int ref_cnt;
+ struct ovs_refcount ref_cnt;
};
static struct ovs_rwlock rwlock = OVS_RWLOCK_INITIALIZER;
bond = xzalloc(sizeof *bond);
hmap_init(&bond->slaves);
bond->next_fake_iface_update = LLONG_MAX;
- atomic_init(&bond->ref_cnt, 1);
+ ovs_refcount_init(&bond->ref_cnt);
bond_reconfigure(bond, s);
return bond;
struct bond *bond = CONST_CAST(struct bond *, bond_);
if (bond) {
- int orig;
- atomic_add(&bond->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
+ ovs_refcount_ref(&bond->ref_cnt);
}
return bond;
}
bond_unref(struct bond *bond)
{
struct bond_slave *slave, *next_slave;
- int orig;
- if (!bond) {
- return;
- }
-
- atomic_sub(&bond->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
- if (orig != 1) {
+ if (!bond || ovs_refcount_unref(&bond->ref_cnt) != 1) {
return;
}
free(bond->hash);
free(bond->name);
- atomic_destroy(&bond->ref_cnt);
+ ovs_refcount_destroy(&bond->ref_cnt);
free(bond);
}
struct hmap flows; /* Contains 'netflow_flows'. */
- atomic_int ref_cnt;
+ struct ovs_refcount ref_cnt;
};
struct netflow_flow {
nf->add_id_to_iface = false;
nf->netflow_cnt = 0;
hmap_init(&nf->flows);
- atomic_init(&nf->ref_cnt, 1);
+ ovs_refcount_init(&nf->ref_cnt);
ofpbuf_init(&nf->packet, 1500);
atomic_add(&netflow_count, 1, &junk);
return nf;
{
struct netflow *nf = CONST_CAST(struct netflow *, nf_);
if (nf) {
- int orig;
- atomic_add(&nf->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
+ ovs_refcount_ref(&nf->ref_cnt);
}
return nf;
}
void
netflow_unref(struct netflow *nf)
{
- int orig;
-
- if (!nf) {
- return;
- }
+ if (nf && ovs_refcount_unref(&nf->ref_cnt) == 1) {
+ int orig;
+
- atomic_sub(&nf->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
- if (orig == 1) {
atomic_sub(&netflow_count, 1, &orig);
collectors_destroy(nf->collectors);
ofpbuf_uninit(&nf->packet);
- atomic_destroy(&nf->ref_cnt);
+ ovs_refcount_destroy(&nf->ref_cnt);
free(nf);
}
}
struct dpif_ipfix {
struct dpif_ipfix_bridge_exporter bridge_exporter;
struct hmap flow_exporter_map; /* dpif_ipfix_flow_exporter_map_node. */
- atomic_int ref_cnt;
+ struct ovs_refcount ref_cnt;
};
#define IPFIX_VERSION 0x000a
di = xzalloc(sizeof *di);
dpif_ipfix_bridge_exporter_init(&di->bridge_exporter);
hmap_init(&di->flow_exporter_map);
- atomic_init(&di->ref_cnt, 1);
+ ovs_refcount_init(&di->ref_cnt);
return di;
}
{
struct dpif_ipfix *di = CONST_CAST(struct dpif_ipfix *, di_);
if (di) {
- int orig;
- atomic_add(&di->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
+ ovs_refcount_ref(&di->ref_cnt);
}
return di;
}
void
dpif_ipfix_unref(struct dpif_ipfix *di) OVS_EXCLUDED(mutex)
{
- int orig;
-
- if (!di) {
- return;
- }
-
- atomic_sub(&di->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
- if (orig == 1) {
+ if (di && ovs_refcount_unref(&di->ref_cnt) == 1) {
ovs_mutex_lock(&mutex);
dpif_ipfix_clear(di);
dpif_ipfix_bridge_exporter_destroy(&di->bridge_exporter);
hmap_destroy(&di->flow_exporter_map);
- atomic_destroy(&di->ref_cnt);
+ ovs_refcount_destroy(&di->ref_cnt);
free(di);
ovs_mutex_unlock(&mutex);
}
size_t n_flood, n_all;
struct hmap ports; /* Contains "struct dpif_sflow_port"s. */
uint32_t probability;
- atomic_int ref_cnt;
+ struct ovs_refcount ref_cnt;
};
static void dpif_sflow_del_port__(struct dpif_sflow *,
hmap_init(&ds->ports);
ds->probability = 0;
route_table_register();
- atomic_init(&ds->ref_cnt, 1);
+ ovs_refcount_init(&ds->ref_cnt);
return ds;
}
{
struct dpif_sflow *ds = CONST_CAST(struct dpif_sflow *, ds_);
if (ds) {
- int orig;
- atomic_add(&ds->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
+ ovs_refcount_ref(&ds->ref_cnt);
}
return ds;
}
void
dpif_sflow_unref(struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
{
- int orig;
-
- if (!ds) {
- return;
- }
-
- atomic_sub(&ds->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
- if (orig == 1) {
+ if (ds && ovs_refcount_unref(&ds->ref_cnt) == 1) {
struct dpif_sflow_port *dsp, *next;
route_table_unregister();
dpif_sflow_del_port__(ds, dsp);
}
hmap_destroy(&ds->ports);
- atomic_destroy(&ds->ref_cnt);
+ ovs_refcount_destroy(&ds->ref_cnt);
free(ds);
}
}
* The classifier owns one reference.
* Any thread trying to keep a rule from being freed should hold its own
* reference. */
- atomic_uint ref_count;
+ struct ovs_refcount ref_count;
/* Operation now in progress, if nonnull. */
struct ofoperation *pending OVS_GUARDED_BY(ofproto_mutex);
* 'rule' is the rule for which 'rule->actions == actions') or that owns a
* reference to 'actions->ref_count' (or both). */
struct rule_actions {
- atomic_uint ref_count;
+ struct ovs_refcount ref_count;
/* These members are immutable: they do not change during the struct's
* lifetime. */
ofproto_rule_ref(struct rule *rule)
{
if (rule) {
- unsigned int orig;
-
- atomic_add(&rule->ref_count, 1, &orig);
- ovs_assert(orig != 0);
+ ovs_refcount_ref(&rule->ref_count);
}
}
void
ofproto_rule_unref(struct rule *rule)
{
- if (rule) {
- unsigned int orig;
-
- atomic_sub(&rule->ref_count, 1, &orig);
- if (orig == 1) {
- rule->ofproto->ofproto_class->rule_destruct(rule);
- ofproto_rule_destroy__(rule);
- } else {
- ovs_assert(orig != 0);
- }
+ if (rule && ovs_refcount_unref(&rule->ref_count) == 1) {
+ rule->ofproto->ofproto_class->rule_destruct(rule);
+ ofproto_rule_destroy__(rule);
}
}
cls_rule_destroy(CONST_CAST(struct cls_rule *, &rule->cr));
rule_actions_unref(rule->actions);
ovs_mutex_destroy(&rule->mutex);
- atomic_destroy(&rule->ref_count);
+ ovs_refcount_destroy(&rule->ref_count);
rule->ofproto->ofproto_class->rule_dealloc(rule);
}
struct rule_actions *actions;
actions = xmalloc(sizeof *actions);
- atomic_init(&actions->ref_count, 1);
+ ovs_refcount_init(&actions->ref_count);
actions->ofpacts = xmemdup(ofpacts, ofpacts_len);
actions->ofpacts_len = ofpacts_len;
actions->provider_meter_id
rule_actions_ref(struct rule_actions *actions)
{
if (actions) {
- unsigned int orig;
-
- atomic_add(&actions->ref_count, 1, &orig);
- ovs_assert(orig != 0);
+ ovs_refcount_ref(&actions->ref_count);
}
}
void
rule_actions_unref(struct rule_actions *actions)
{
- if (actions) {
- unsigned int orig;
-
- atomic_sub(&actions->ref_count, 1, &orig);
- if (orig == 1) {
- atomic_destroy(&actions->ref_count);
- free(actions->ofpacts);
- free(actions);
- } else {
- ovs_assert(orig != 0);
- }
+ if (actions && ovs_refcount_unref(&actions->ref_count) == 1) {
+ ovs_refcount_destroy(&actions->ref_count);
+ free(actions->ofpacts);
+ free(actions);
}
}
/* Initialize base state. */
*CONST_CAST(struct ofproto **, &rule->ofproto) = ofproto;
cls_rule_move(CONST_CAST(struct cls_rule *, &rule->cr), &cr);
- atomic_init(&rule->ref_count, 1);
+ ovs_refcount_init(&rule->ref_count);
rule->pending = NULL;
rule->flow_cookie = fm->new_cookie;
rule->created = rule->modified = rule->used = time_msec();