#include "packets.h"
#include "poll-loop.h"
#include "random.h"
+ #include "seq.h"
#include "shash.h"
#include "sset.h"
#include "timeval.h"
struct dp_netdev_queue queues[N_QUEUES];
struct hmap flow_table; /* Flow table. */
+ struct seq *queue_seq; /* Incremented whenever a packet is queued. */
/* Statistics. */
long long int n_hit; /* Number of flow table matches. */
/* Ports. */
struct dp_netdev_port *ports[MAX_PORTS];
struct list port_list;
- unsigned int serial;
+ struct seq *port_seq; /* Incremented whenever a port changes. */
};
/* Interface to netdev-based datapath. */
struct dpif_netdev {
struct dpif dpif;
struct dp_netdev *dp;
- unsigned int dp_serial;
+ uint64_t last_port_seq;
};
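/* Editor's sketch (not part of the patch): the seq idiom that the new
 * 'queue_seq' and 'port_seq' members rely on.  A waiter snapshots the
 * sequence number with seq_read() and registers a poll_block() wakeup with
 * seq_wait(); a producer calls seq_change() after every change.
 * 'example_seq', 'last_seen', and the functions below are hypothetical. */
static struct seq *example_seq;   /* Created with seq_create() at startup. */
static uint64_t last_seen;

static void
example_waiter_run(void)
{
    uint64_t new_seq = seq_read(example_seq);
    if (new_seq != last_seen) {
        last_seen = new_seq;
        /* ... act on whatever changed ... */
    }
}

static void
example_waiter_wait(void)
{
    seq_wait(example_seq, last_seen);  /* Wakes poll_block() on change. */
}

static void
example_producer(void)
{
    /* ... make a change ... */
    seq_change(example_seq);           /* Wake all current waiters. */
}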
static void dp_netdev_port_input(struct dp_netdev *dp,
struct dp_netdev_port *port,
struct ofpbuf *packet, uint32_t skb_priority,
- uint32_t skb_mark, const struct flow_tnl *tnl);
+ uint32_t pkt_mark, const struct flow_tnl *tnl);
static bool
dpif_netdev_class_is_dummy(const struct dpif_class *class)
{
return class != &dpif_netdev_class;
}
+static bool
+dpif_netdev_class_is_planetlab(const struct dpif_class *class)
+{
+ return class == &dpif_planetlab_class;
+}
+
static const char *
dpif_netdev_port_open_type(const struct dpif_class *class, const char *type)
{
return strcmp(type, "internal") ? type
+ : dpif_netdev_class_is_planetlab(class) ? "pltap"
: dpif_netdev_class_is_dummy(class) ? "dummy"
: "tap";
}
dpif = xmalloc(sizeof *dpif);
dpif_init(&dpif->dpif, dp->class, dp->name, netflow_id >> 8, netflow_id);
dpif->dp = dp;
- dpif->dp_serial = dp->serial;
+ dpif->last_port_seq = seq_read(dp->port_seq);
return &dpif->dpif;
}
{
uint32_t port_no;
- if (dp->class != &dpif_netdev_class) {
+ if (dp->class != &dpif_netdev_class &&
+ dp->class != &dpif_planetlab_class) {
const char *p;
int start_no = 0;
for (i = 0; i < N_QUEUES; i++) {
dp->queues[i].head = dp->queues[i].tail = 0;
}
+ dp->queue_seq = seq_create();
hmap_init(&dp->flow_table);
list_init(&dp->port_list);
+ dp->port_seq = seq_create();
error = do_add_port(dp, name, "internal", ODPP_LOCAL);
if (error) {
do_del_port(dp, port->port_no);
}
dp_netdev_purge_queues(dp);
+ seq_destroy(dp->queue_seq);
hmap_destroy(&dp->flow_table);
+ seq_destroy(dp->port_seq);
free(dp->name);
free(dp);
}
list_push_back(&dp->port_list, &port->node);
dp->ports[odp_to_u32(port_no)] = port;
- dp->serial++;
+ seq_change(dp->port_seq);
return 0;
}
list_remove(&port->node);
dp->ports[odp_to_u32(port_no)] = NULL;
- dp->serial++;
+ seq_change(dp->port_seq);
netdev_close(port->netdev);
netdev_restore_flags(port->sf);
dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED)
{
struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
+ uint64_t new_port_seq;
int error;
ovs_mutex_lock(&dp_netdev_mutex);
- if (dpif->dp_serial != dpif->dp->serial) {
- dpif->dp_serial = dpif->dp->serial;
+ new_port_seq = seq_read(dpif->dp->port_seq);
+ if (dpif->last_port_seq != new_port_seq) {
+ dpif->last_port_seq = new_port_seq;
error = ENOBUFS;
} else {
error = EAGAIN;
{
struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
- /* XXX In a multithreaded process, there is a race window between this
- * function and the poll_block() in one thread and a change in
- * dpif->dp->serial in another thread. */
-
ovs_mutex_lock(&dp_netdev_mutex);
- if (dpif->dp_serial != dpif->dp->serial) {
- poll_immediate_wake();
- }
+ seq_wait(dpif->dp->port_seq, dpif->last_port_seq);
ovs_mutex_unlock(&dp_netdev_mutex);
}
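/* Editor's sketch (not part of the patch): how a client consumes these
 * notifications through the public dpif layer, assuming the standard
 * dpif_port_poll()/dpif_port_poll_wait() wrappers. */
static void
example_port_poll_loop(struct dpif *dpif)
{
    for (;;) {
        char *devname;
        int error;

        while ((error = dpif_port_poll(dpif, &devname)) != EAGAIN) {
            if (!error) {
                /* Port 'devname' was added, removed, or changed. */
                free(devname);
            } else if (error == ENOBUFS) {
                /* Too many changes to track; re-query all ports. */
            }
        }
        dpif_port_poll_wait(dpif);
        poll_block();
    }
}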
static void
dpif_netdev_recv_wait(struct dpif *dpif)
{
- /* XXX In a multithreaded process, there is a race window between this
- * function and the poll_block() in one thread and a packet being queued in
- * another thread. */
+ struct dp_netdev *dp = get_dp_netdev(dpif);
+ uint64_t seq;
ovs_mutex_lock(&dp_netdev_mutex);
+ seq = seq_read(dp->queue_seq);
if (find_nonempty_queue(dpif)) {
poll_immediate_wake();
+ } else {
+ seq_wait(dp->queue_seq, seq);
}
ovs_mutex_unlock(&dp_netdev_mutex);
}
static void
dp_netdev_port_input(struct dp_netdev *dp, struct dp_netdev_port *port,
struct ofpbuf *packet, uint32_t skb_priority,
- uint32_t skb_mark, const struct flow_tnl *tnl)
+ uint32_t pkt_mark, const struct flow_tnl *tnl)
{
struct dp_netdev_flow *flow;
struct flow key;
return;
}
in_port_.odp_port = port->port_no;
- flow_extract(packet, skb_priority, skb_mark, tnl, &in_port_, &key);
+ flow_extract(packet, skb_priority, pkt_mark, tnl, &in_port_, &key);
flow = dp_netdev_lookup_flow(dp, &key);
if (flow) {
dp_netdev_flow_used(flow, packet);
buf->size = packet->size;
upcall->packet = buf;
+ seq_change(dp->queue_seq);
+
return 0;
} else {
dp->n_lost++;
dp_netdev_output_port, dp_netdev_action_userspace);
}
+#define DPIF_NETDEV_CLASS_FUNCTIONS \
+ dpif_netdev_enumerate, \
+ dpif_netdev_port_open_type, \
+ dpif_netdev_open, \
+ dpif_netdev_close, \
+ dpif_netdev_destroy, \
+ dpif_netdev_run, \
+ dpif_netdev_wait, \
+ dpif_netdev_get_stats, \
+ dpif_netdev_port_add, \
+ dpif_netdev_port_del, \
+ dpif_netdev_port_query_by_number, \
+ dpif_netdev_port_query_by_name, \
+ dpif_netdev_get_max_ports, \
+ NULL, /* port_get_pid */ \
+ dpif_netdev_port_dump_start, \
+ dpif_netdev_port_dump_next, \
+ dpif_netdev_port_dump_done, \
+ dpif_netdev_port_poll, \
+ dpif_netdev_port_poll_wait, \
+ dpif_netdev_flow_get, \
+ dpif_netdev_flow_put, \
+ dpif_netdev_flow_del, \
+ dpif_netdev_flow_flush, \
+ dpif_netdev_flow_dump_start, \
+ dpif_netdev_flow_dump_next, \
+ dpif_netdev_flow_dump_done, \
+ dpif_netdev_execute, \
+ NULL, /* operate */ \
+ dpif_netdev_recv_set, \
+ dpif_netdev_queue_to_priority, \
+ dpif_netdev_recv, \
+ dpif_netdev_recv_wait, \
+ dpif_netdev_recv_purge, \
+
const struct dpif_class dpif_netdev_class = {
"netdev",
- dpif_netdev_enumerate,
- dpif_netdev_port_open_type,
- dpif_netdev_open,
- dpif_netdev_close,
- dpif_netdev_destroy,
- dpif_netdev_run,
- dpif_netdev_wait,
- dpif_netdev_get_stats,
- dpif_netdev_port_add,
- dpif_netdev_port_del,
- dpif_netdev_port_query_by_number,
- dpif_netdev_port_query_by_name,
- dpif_netdev_get_max_ports,
- NULL, /* port_get_pid */
- dpif_netdev_port_dump_start,
- dpif_netdev_port_dump_next,
- dpif_netdev_port_dump_done,
- dpif_netdev_port_poll,
- dpif_netdev_port_poll_wait,
- dpif_netdev_flow_get,
- dpif_netdev_flow_put,
- dpif_netdev_flow_del,
- dpif_netdev_flow_flush,
- dpif_netdev_flow_dump_start,
- dpif_netdev_flow_dump_next,
- dpif_netdev_flow_dump_done,
- dpif_netdev_execute,
- NULL, /* operate */
- dpif_netdev_recv_set,
- dpif_netdev_queue_to_priority,
- dpif_netdev_recv,
- dpif_netdev_recv_wait,
- dpif_netdev_recv_purge,
+ DPIF_NETDEV_CLASS_FUNCTIONS
+};
+
+const struct dpif_class dpif_planetlab_class = {
+ "planetlab",
+ DPIF_NETDEV_CLASS_FUNCTIONS
};
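/* Editor's sketch (not part of the patch): the macro-as-initializer trick
 * above in miniature.  The 'foo' names are hypothetical; the point is that
 * two class structs differing only in their leading member can share one
 * function table without duplicating it. */
struct foo_class {
    const char *name;
    int (*open)(void);
    void (*close)(void);
};

static int foo_open(void) { return 0; }
static void foo_close(void) {}

#define FOO_CLASS_FUNCTIONS foo_open, foo_close

static const struct foo_class foo_a = { "a", FOO_CLASS_FUNCTIONS };
static const struct foo_class foo_b = { "b", FOO_CLASS_FUNCTIONS };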
static void
dp->ports[odp_to_u32(port->port_no)] = NULL;
dp->ports[port_no] = port;
port->port_no = u32_to_odp(port_no);
- dp->serial++;
+ seq_change(dp->port_seq);
unixctl_command_reply(conn, NULL);
}
"DP PORT NEW-NUMBER",
3, 3, dpif_dummy_change_port_number, NULL);
}
+
enum netdev_flags saved_values;
};
- static struct shash netdev_classes = SHASH_INITIALIZER(&netdev_classes);
+ /* Protects 'netdev_shash' and the mutable members of struct netdev. */
+ static struct ovs_mutex netdev_mutex = OVS_MUTEX_INITIALIZER;
/* All created network devices. */
- static struct shash netdev_shash = SHASH_INITIALIZER(&netdev_shash);
+ static struct shash netdev_shash OVS_GUARDED_BY(netdev_mutex)
+ = SHASH_INITIALIZER(&netdev_shash);
+
+ /* Protects 'netdev_classes' against insertions or deletions.
+ *
+ * This is an rwlock not for performance but to allow recursive
+ * acquisition when calling into providers. For example, netdev_run() calls
+ * into provider 'run' functions, which might reasonably want to call one of
+ * the netdev functions that takes netdev_class_rwlock read-only. */
+ static struct ovs_rwlock netdev_class_rwlock OVS_ACQ_BEFORE(netdev_mutex)
+ = OVS_RWLOCK_INITIALIZER;
+
+ /* Contains 'struct netdev_registered_class'es. */
+ static struct hmap netdev_classes OVS_GUARDED_BY(netdev_class_rwlock)
+ = HMAP_INITIALIZER(&netdev_classes);
+
+ struct netdev_registered_class {
+ struct hmap_node hmap_node; /* In 'netdev_classes', by class->type. */
+ const struct netdev_class *class;
+ atomic_int ref_cnt; /* Number of 'struct netdev's of this class. */
+ };
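/* Editor's sketch (not part of the patch): the lock order promised by the
 * OVS_ACQ_BEFORE annotation above -- netdev_class_rwlock always before
 * netdev_mutex, as netdev_open() does below.  Hypothetical helper. */
static void
example_locked_walk(void)
    OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
{
    ovs_rwlock_rdlock(&netdev_class_rwlock);  /* Class table first... */
    ovs_mutex_lock(&netdev_mutex);            /* ...device table second. */
    /* ... consult netdev_classes and netdev_shash together ... */
    ovs_mutex_unlock(&netdev_mutex);
    ovs_rwlock_unlock(&netdev_class_rwlock);
}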
/* This is set pretty low because we probably won't learn anything from the
* additional log messages. */
static void
netdev_initialize(void)
+ OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
{
- static bool inited;
-
- if (!inited) {
- inited = true;
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
+ if (ovsthread_once_start(&once)) {
fatal_signal_add_hook(restore_all_flags, NULL, NULL, true);
netdev_vport_patch_register();
netdev_register_provider(&netdev_tap_class);
netdev_register_provider(&netdev_bsd_class);
#endif
- netdev_register_provider(&netdev_tunnel_class);
- netdev_register_provider(&netdev_pltap_class);
+ netdev_register_provider(&netdev_tunnel_class);
+ netdev_register_provider(&netdev_pltap_class);
+
+ ovsthread_once_done(&once);
}
}
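/* Editor's sketch (not part of the patch): the ovsthread_once idiom used
 * above, in general form.  Unlike the removed 'static bool inited' flag it
 * is safe under concurrent callers: latecomers block inside
 * ovsthread_once_start() until the winner calls ovsthread_once_done(). */
static void
example_init_once(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (ovsthread_once_start(&once)) {
        /* One-time initialization happens exactly once, here. */
        ovsthread_once_done(&once);
    }
}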
* main poll loop. */
void
netdev_run(void)
+ OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
{
- struct shash_node *node;
- SHASH_FOR_EACH(node, &netdev_classes) {
- const struct netdev_class *netdev_class = node->data;
- if (netdev_class->run) {
- netdev_class->run();
- }
+ struct netdev_registered_class *rc;
+
+ ovs_rwlock_rdlock(&netdev_class_rwlock);
+ HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
+ if (rc->class->run) {
+ rc->class->run();
+ }
}
+ ovs_rwlock_unlock(&netdev_class_rwlock);
}
/* Arranges for poll_block() to wake up when netdev_run() needs to be called.
* main poll loop. */
void
netdev_wait(void)
+ OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
{
- struct shash_node *node;
- SHASH_FOR_EACH(node, &netdev_classes) {
- const struct netdev_class *netdev_class = node->data;
- if (netdev_class->wait) {
- netdev_class->wait();
+ struct netdev_registered_class *rc;
+
+ ovs_rwlock_rdlock(&netdev_class_rwlock);
+ HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
+ if (rc->class->wait) {
+ rc->class->wait();
+ }
+ }
+ ovs_rwlock_unlock(&netdev_class_rwlock);
+ }
+
+ static struct netdev_registered_class *
+ netdev_lookup_class(const char *type)
+ OVS_REQ_RDLOCK(netdev_class_rwlock)
+ {
+ struct netdev_registered_class *rc;
+
+ HMAP_FOR_EACH_WITH_HASH (rc, hmap_node, hash_string(type, 0),
+ &netdev_classes) {
+ if (!strcmp(type, rc->class->type)) {
+ return rc;
}
}
+ return NULL;
}
/* Initializes and registers a new netdev provider. After successful
* registration, new netdevs of that type can be opened using netdev_open(). */
int
netdev_register_provider(const struct netdev_class *new_class)
+ OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
{
- if (shash_find(&netdev_classes, new_class->type)) {
+ int error;
+
+ ovs_rwlock_wrlock(&netdev_class_rwlock);
+ if (netdev_lookup_class(new_class->type)) {
VLOG_WARN("attempted to register duplicate netdev provider: %s",
new_class->type);
- return EEXIST;
- }
-
- if (new_class->init) {
- int error = new_class->init();
- if (error) {
+ error = EEXIST;
+ } else {
+ error = new_class->init ? new_class->init() : 0;
+ if (!error) {
+ struct netdev_registered_class *rc;
+
+ rc = xmalloc(sizeof *rc);
+ hmap_insert(&netdev_classes, &rc->hmap_node,
+ hash_string(new_class->type, 0));
+ rc->class = new_class;
+ atomic_init(&rc->ref_cnt, 0);
+ } else {
VLOG_ERR("failed to initialize %s network device class: %s",
new_class->type, ovs_strerror(error));
- return error;
}
}
+ ovs_rwlock_unlock(&netdev_class_rwlock);
- shash_add(&netdev_classes, new_class->type, new_class);
-
- return 0;
+ return error;
}
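/* Editor's sketch (not part of the patch): registering a hypothetical
 * provider.  Only netdev_class members used elsewhere in this file
 * (.type, .alloc, .construct, .dealloc) are assumed; designated
 * initializers leave the rest null. */
static struct netdev *example_alloc(void);
static int example_construct(struct netdev *);
static void example_dealloc(struct netdev *);

static const struct netdev_class example_class = {
    .type = "example",
    .alloc = example_alloc,
    .construct = example_construct,
    .dealloc = example_dealloc,
};

static void
example_register(void)
{
    int error = netdev_register_provider(&example_class);
    if (error) {
        VLOG_WARN("could not register example netdev class (%s)",
                  ovs_strerror(error));
    }
}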
/* Unregisters a netdev provider. 'type' must have been previously
* new netdevs of that type cannot be opened using netdev_open(). */
int
netdev_unregister_provider(const char *type)
+ OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
{
- struct shash_node *del_node, *netdev_node;
+ struct netdev_registered_class *rc;
+ int error;
- del_node = shash_find(&netdev_classes, type);
- if (!del_node) {
+ ovs_rwlock_wrlock(&netdev_class_rwlock);
+ rc = netdev_lookup_class(type);
+ if (!rc) {
VLOG_WARN("attempted to unregister a netdev provider that is not "
"registered: %s", type);
- return EAFNOSUPPORT;
- }
+ error = EAFNOSUPPORT;
+ } else {
+ int ref_cnt;
- SHASH_FOR_EACH (netdev_node, &netdev_shash) {
- struct netdev *netdev = netdev_node->data;
- if (!strcmp(netdev->netdev_class->type, type)) {
+ atomic_read(&rc->ref_cnt, &ref_cnt);
+ if (!ref_cnt) {
+ hmap_remove(&netdev_classes, &rc->hmap_node);
+ free(rc);
+ error = 0;
+ } else {
VLOG_WARN("attempted to unregister in use netdev provider: %s",
type);
- return EBUSY;
+ error = EBUSY;
}
}
+ ovs_rwlock_unlock(&netdev_class_rwlock);
- shash_delete(&netdev_classes, del_node);
-
- return 0;
- }
-
- const struct netdev_class *
- netdev_lookup_provider(const char *type)
- {
- netdev_initialize();
- return shash_find_data(&netdev_classes, type && type[0] ? type : "system");
+ return error;
}
/* Clears 'types' and enumerates the types of all currently registered netdev
* providers into it. The caller must first initialize the sset. */
void
netdev_enumerate_types(struct sset *types)
+ OVS_EXCLUDED(netdev_mutex)
{
- struct shash_node *node;
+ struct netdev_registered_class *rc;
netdev_initialize();
sset_clear(types);
- SHASH_FOR_EACH(node, &netdev_classes) {
- const struct netdev_class *netdev_class = node->data;
- sset_add(types, netdev_class->type);
+ ovs_rwlock_rdlock(&netdev_class_rwlock);
+ HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
+ sset_add(types, rc->class->type);
}
+ ovs_rwlock_unlock(&netdev_class_rwlock);
}
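/* Editor's sketch (not part of the patch): a caller of
 * netdev_enumerate_types(), using the sset API from "sset.h". */
static void
example_log_types(void)
{
    struct sset types = SSET_INITIALIZER(&types);
    const char *type;

    netdev_enumerate_types(&types);
    SSET_FOR_EACH (type, &types) {
        VLOG_INFO("registered netdev type: %s", type);
    }
    sset_destroy(&types);
}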
/* Check that the network device name is not the same as any of the registered
* Returns true if there is a name conflict, false otherwise. */
bool
netdev_is_reserved_name(const char *name)
+ OVS_EXCLUDED(netdev_mutex)
{
- struct shash_node *node;
+ struct netdev_registered_class *rc;
netdev_initialize();
- SHASH_FOR_EACH (node, &netdev_classes) {
- const char *dpif_port;
- dpif_port = netdev_vport_class_get_dpif_port(node->data);
+
+ ovs_rwlock_rdlock(&netdev_class_rwlock);
+ HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
+ const char *dpif_port = netdev_vport_class_get_dpif_port(rc->class);
if (dpif_port && !strcmp(dpif_port, name)) {
+ ovs_rwlock_unlock(&netdev_class_rwlock);
return true;
}
}
+ ovs_rwlock_unlock(&netdev_class_rwlock);
if (!strncmp(name, "ovs-", 4)) {
struct sset types;
* before they can be used. */
int
netdev_open(const char *name, const char *type, struct netdev **netdevp)
+ OVS_EXCLUDED(netdev_mutex)
{
struct netdev *netdev;
int error;
netdev_initialize();
+ ovs_rwlock_rdlock(&netdev_class_rwlock);
+ ovs_mutex_lock(&netdev_mutex);
netdev = shash_find_data(&netdev_shash, name);
if (!netdev) {
- const struct netdev_class *class;
+ struct netdev_registered_class *rc;
- class = netdev_lookup_provider(type);
- if (class) {
- netdev = class->alloc();
+ rc = netdev_lookup_class(type && type[0] ? type : "system");
+ if (rc) {
+ netdev = rc->class->alloc();
if (netdev) {
memset(netdev, 0, sizeof *netdev);
- netdev->netdev_class = class;
+ netdev->netdev_class = rc->class;
netdev->name = xstrdup(name);
netdev->node = shash_add(&netdev_shash, name, netdev);
list_init(&netdev->saved_flags_list);
- error = class->construct(netdev);
- if (error) {
- class->dealloc(netdev);
+ error = rc->class->construct(netdev);
+ if (!error) {
+ int old_ref_cnt;
+
+ atomic_add(&rc->ref_cnt, 1, &old_ref_cnt);
+ } else {
+ free(netdev->name);
+ ovs_assert(list_is_empty(&netdev->saved_flags_list));
+ shash_delete(&netdev_shash, netdev->node);
+ rc->class->dealloc(netdev);
}
} else {
error = ENOMEM;
error = 0;
}
+ ovs_mutex_unlock(&netdev_mutex);
+ ovs_rwlock_unlock(&netdev_class_rwlock);
+
if (!error) {
netdev->ref_cnt++;
*netdevp = netdev;
* 'netdev_' is null. */
struct netdev *
netdev_ref(const struct netdev *netdev_)
+ OVS_EXCLUDED(netdev_mutex)
{
struct netdev *netdev = CONST_CAST(struct netdev *, netdev_);
if (netdev) {
+ ovs_mutex_lock(&netdev_mutex);
ovs_assert(netdev->ref_cnt > 0);
netdev->ref_cnt++;
+ ovs_mutex_unlock(&netdev_mutex);
}
return netdev;
}
* or NULL if none are needed. */
int
netdev_set_config(struct netdev *netdev, const struct smap *args)
+ OVS_EXCLUDED(netdev_mutex)
{
if (netdev->netdev_class->set_config) {
- struct smap no_args = SMAP_INITIALIZER(&no_args);
+ const struct smap no_args = SMAP_INITIALIZER(&no_args);
return netdev->netdev_class->set_config(netdev,
args ? args : &no_args);
} else if (args && !smap_is_empty(args)) {
* smap_destroy(). */
int
netdev_get_config(const struct netdev *netdev, struct smap *args)
+ OVS_EXCLUDED(netdev_mutex)
{
int error;
const struct netdev_tunnel_config *
netdev_get_tunnel_config(const struct netdev *netdev)
+ OVS_EXCLUDED(netdev_mutex)
{
if (netdev->netdev_class->get_tunnel_config) {
return netdev->netdev_class->get_tunnel_config(netdev);
static void
netdev_unref(struct netdev *dev)
+ OVS_RELEASES(netdev_mutex)
{
ovs_assert(dev->ref_cnt);
if (!--dev->ref_cnt) {
+ const struct netdev_class *class = dev->netdev_class;
+ struct netdev_registered_class *rc;
+ int old_ref_cnt;
+
dev->netdev_class->destruct(dev);
shash_delete(&netdev_shash, dev->node);
free(dev->name);
dev->netdev_class->dealloc(dev);
+ ovs_mutex_unlock(&netdev_mutex);
+
+ ovs_rwlock_rdlock(&netdev_class_rwlock);
+ rc = netdev_lookup_class(class->type);
+ atomic_sub(&rc->ref_cnt, 1, &old_ref_cnt);
+ ovs_assert(old_ref_cnt > 0);
+ ovs_rwlock_unlock(&netdev_class_rwlock);
+ } else {
+ ovs_mutex_unlock(&netdev_mutex);
}
}
/* Closes and destroys 'netdev'. */
void
netdev_close(struct netdev *netdev)
+ OVS_EXCLUDED(netdev_mutex)
{
if (netdev) {
+ ovs_mutex_lock(&netdev_mutex);
netdev_unref(netdev);
}
}
int
netdev_rx_open(struct netdev *netdev, struct netdev_rx **rxp)
+ OVS_EXCLUDED(netdev_mutex)
{
int error;
rx->netdev = netdev;
error = netdev->netdev_class->rx_construct(rx);
if (!error) {
+ ovs_mutex_lock(&netdev_mutex);
netdev->ref_cnt++;
+ ovs_mutex_unlock(&netdev_mutex);
+
*rxp = rx;
return 0;
}
void
netdev_rx_close(struct netdev_rx *rx)
+ OVS_EXCLUDED(netdev_mutex)
{
if (rx) {
struct netdev *netdev = rx->netdev;
do_update_flags(struct netdev *netdev, enum netdev_flags off,
enum netdev_flags on, enum netdev_flags *old_flagsp,
struct netdev_saved_flags **sfp)
+ OVS_EXCLUDED(netdev_mutex)
{
struct netdev_saved_flags *sf = NULL;
enum netdev_flags old_flags;
enum netdev_flags new_flags = (old_flags & ~off) | on;
enum netdev_flags changed_flags = old_flags ^ new_flags;
if (changed_flags) {
+ ovs_mutex_lock(&netdev_mutex);
*sfp = sf = xmalloc(sizeof *sf);
sf->netdev = netdev;
list_push_front(&netdev->saved_flags_list, &sf->node);
sf->saved_values = changed_flags & new_flags;
netdev->ref_cnt++;
+ ovs_mutex_unlock(&netdev_mutex);
}
}
* Does nothing if 'sf' is NULL. */
void
netdev_restore_flags(struct netdev_saved_flags *sf)
+ OVS_EXCLUDED(netdev_mutex)
{
if (sf) {
struct netdev *netdev = sf->netdev;
sf->saved_flags & sf->saved_values,
sf->saved_flags & ~sf->saved_values,
&old_flags);
+
+ ovs_mutex_lock(&netdev_mutex);
list_remove(&sf->node);
free(sf);
-
netdev_unref(netdev);
}
}
* The caller must free the returned netdev with netdev_close(). */
struct netdev *
netdev_from_name(const char *name)
+ OVS_EXCLUDED(netdev_mutex)
{
struct netdev *netdev;
+ ovs_mutex_lock(&netdev_mutex);
netdev = shash_find_data(&netdev_shash, name);
if (netdev) {
- netdev_ref(netdev);
+ netdev->ref_cnt++; /* Not netdev_ref(): netdev_mutex is already held. */
}
+ ovs_mutex_unlock(&netdev_mutex);
return netdev;
}
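/* Editor's sketch (not part of the patch): the caller contract.  Every
 * reference taken by netdev_from_name() or netdev_ref() must eventually be
 * released with netdev_close().  "eth0" is just an example name. */
static void
example_use_netdev(void)
{
    struct netdev *dev = netdev_from_name("eth0");
    if (dev) {
        /* ... use 'dev' ... */
        netdev_close(dev);  /* Drops the reference taken above. */
    }
}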
void
netdev_get_devices(const struct netdev_class *netdev_class,
struct shash *device_list)
+ OVS_EXCLUDED(netdev_mutex)
{
struct shash_node *node;
+
+ ovs_mutex_lock(&netdev_mutex);
SHASH_FOR_EACH (node, &netdev_shash) {
struct netdev *dev = node->data;

if (dev->netdev_class == netdev_class) {
shash_add(device_list, node->name, node->data);
}
}
+ ovs_mutex_unlock(&netdev_mutex);
}
const char *