struct netdev_bsd {
struct netdev up;
+
+ /* Never changes after initialization. */
+ char *kernel_name;
+
+ /* Protects all members below. */
+ struct ovs_mutex mutex;
+
unsigned int cache_valid;
unsigned int change_seq;
/* Used for sending packets on non-tap devices. */
pcap_t *pcap;
int fd;
-
- char *kernel_name;
};
return error;
}
+ ovs_mutex_init(&netdev->mutex, PTHREAD_MUTEX_NORMAL);
netdev->change_seq = 1;
netdev->tap_fd = -1;
netdev->kernel_name = xstrdup(netdev_->name);
/* Create a tap device by opening /dev/tap. The TAPGIFNAME ioctl is used
* to retrieve the name of the tap device. */
+ ovs_mutex_init(&netdev->mutex, PTHREAD_MUTEX_NORMAL);
netdev->tap_fd = open("/dev/tap", O_RDWR);
netdev->change_seq = 1;
if (netdev->tap_fd < 0) {
return 0;
error_unref_notifier:
+ ovs_mutex_destroy(&netdev->mutex);
cache_notifier_unref();
error:
free(kernel_name);
pcap_close(netdev->pcap);
}
free(netdev->kernel_name);
+ ovs_mutex_destroy(&netdev->mutex);
}
static void
struct netdev_rx_bsd *rx = netdev_rx_bsd_cast(rx_);
struct netdev *netdev_ = rx->up.netdev;
struct netdev_bsd *netdev = netdev_bsd_cast(netdev_);
+ int error;
if (!strcmp(netdev_get_type(netdev_), "tap")) {
rx->pcap_handle = NULL;
rx->fd = netdev->tap_fd;
+ error = 0;
} else {
- int error = netdev_bsd_open_pcap(netdev_get_kernel_name(netdev_),
- &rx->pcap_handle, &rx->fd);
- if (error) {
- return error;
+ ovs_mutex_lock(&netdev->mutex);
+ error = netdev_bsd_open_pcap(netdev_get_kernel_name(netdev_),
+ &rx->pcap_handle, &rx->fd);
+ if (!error) {
+ netdev_bsd_changed(netdev);
}
-
- netdev_bsd_changed(netdev);
+ ovs_mutex_unlock(&netdev->mutex);
}
- return 0;
+ return error;
}
static void
{
struct netdev_bsd *dev = netdev_bsd_cast(netdev_);
const char *name = netdev_get_name(netdev_);
+ int error;
+ ovs_mutex_lock(&dev->mutex);
if (dev->tap_fd < 0 && !dev->pcap) {
- int error = netdev_bsd_open_pcap(name, &dev->pcap, &dev->fd);
- if (error) {
- return error;
- }
+ error = netdev_bsd_open_pcap(name, &dev->pcap, &dev->fd);
+ } else {
+ error = 0;
}
- for (;;) {
+ while (!error) {
ssize_t retval;
if (dev->tap_fd >= 0) {
retval = write(dev->tap_fd, data, size);
if (retval < 0) {
if (errno == EINTR) {
continue;
- } else if (errno != EAGAIN) {
- VLOG_WARN_RL(&rl, "error sending Ethernet packet on %s: %s",
- name, ovs_strerror(errno));
+ } else {
+ error = errno;
+ if (error != EAGAIN) {
+ VLOG_WARN_RL(&rl, "error sending Ethernet packet on %s: "
+ "%s", name, ovs_strerror(error));
+ }
}
- return errno;
} else if (retval != size) {
VLOG_WARN_RL(&rl, "sent partial Ethernet packet (%zd bytes of "
"%zu) on %s", retval, size, name);
- return EMSGSIZE;
+ error = EMSGSIZE;
} else {
- return 0;
+ break;
}
}
+
+ ovs_mutex_unlock(&dev->mutex);
+ return error;
}
/*
{
struct netdev_bsd *dev = netdev_bsd_cast(netdev_);
+ ovs_mutex_lock(&dev->mutex);
if (dev->tap_fd >= 0) {
/* TAP device always accepts packets. */
poll_immediate_wake();
/* We haven't even tried to send a packet yet. */
poll_immediate_wake();
}
+ ovs_mutex_unlock(&dev->mutex);
}
/*
const uint8_t mac[ETH_ADDR_LEN])
{
struct netdev_bsd *netdev = netdev_bsd_cast(netdev_);
- int error;
+ int error = 0;
+ ovs_mutex_lock(&netdev->mutex);
if (!(netdev->cache_valid & VALID_ETHERADDR)
|| !eth_addr_equals(netdev->etheraddr, mac)) {
error = set_etheraddr(netdev_get_kernel_name(netdev_), AF_LINK,
memcpy(netdev->etheraddr, mac, ETH_ADDR_LEN);
netdev_bsd_changed(netdev);
}
- } else {
- error = 0;
}
+ ovs_mutex_unlock(&netdev->mutex);
+
return error;
}
uint8_t mac[ETH_ADDR_LEN])
{
struct netdev_bsd *netdev = netdev_bsd_cast(netdev_);
+ int error = 0;
+ ovs_mutex_lock(&netdev->mutex);
if (!(netdev->cache_valid & VALID_ETHERADDR)) {
- int error = get_etheraddr(netdev_get_kernel_name(netdev_),
- netdev->etheraddr);
- if (error) {
- return error;
+ error = get_etheraddr(netdev_get_kernel_name(netdev_),
+ netdev->etheraddr);
+ if (!error) {
+ netdev->cache_valid |= VALID_ETHERADDR;
}
- netdev->cache_valid |= VALID_ETHERADDR;
}
- memcpy(mac, netdev->etheraddr, ETH_ADDR_LEN);
+ if (!error) {
+ memcpy(mac, netdev->etheraddr, ETH_ADDR_LEN);
+ }
+ ovs_mutex_unlock(&netdev->mutex);
- return 0;
+ return error;
}
/*
netdev_bsd_get_mtu(const struct netdev *netdev_, int *mtup)
{
    struct netdev_bsd *netdev = netdev_bsd_cast(netdev_);
+    int error = 0;

+    ovs_mutex_lock(&netdev->mutex);
    if (!(netdev->cache_valid & VALID_MTU)) {
        struct ifreq ifr;
-        int error;

        error = af_inet_ifreq_ioctl(netdev_get_kernel_name(netdev_), &ifr,
                                    SIOCGIFMTU, "SIOCGIFMTU");
-        if (error) {
-            return error;
+        if (!error) {
+            netdev->mtu = ifr.ifr_mtu;
+            netdev->cache_valid |= VALID_MTU;
        }
-        netdev->mtu = ifr.ifr_mtu;
-        netdev->cache_valid |= VALID_MTU;
    }
+    /* Only report an MTU that was actually obtained; on failure, leave
+     * '*mtup' untouched and propagate the ioctl error to the caller. */
+    if (!error) {
+        *mtup = netdev->mtu;
+    }
+    ovs_mutex_unlock(&netdev->mutex);

-    *mtup = netdev->mtu;
-    return 0;
+    return error;
}
static int
-netdev_bsd_get_ifindex(const struct netdev *netdev)
+netdev_bsd_get_ifindex(const struct netdev *netdev_)
{
+ struct netdev_bsd *netdev = netdev_bsd_cast(netdev_);
int ifindex, error;
- error = get_ifindex(netdev, &ifindex);
+ ovs_mutex_lock(&netdev->mutex);
+ error = get_ifindex(netdev_, &ifindex);
+ ovs_mutex_unlock(&netdev->mutex);
+
return error ? -error : ifindex;
}
netdev_bsd_get_carrier(const struct netdev *netdev_, bool *carrier)
{
struct netdev_bsd *netdev = netdev_bsd_cast(netdev_);
+ int error = 0;
+ ovs_mutex_lock(&netdev->mutex);
if (!(netdev->cache_valid & VALID_CARRIER)) {
struct ifmediareq ifmr;
- int error;
memset(&ifmr, 0, sizeof(ifmr));
strncpy(ifmr.ifm_name, netdev_get_kernel_name(netdev_),
sizeof ifmr.ifm_name);
error = af_inet_ioctl(SIOCGIFMEDIA, &ifmr);
- if (error) {
+ if (!error) {
+ netdev->carrier = (ifmr.ifm_status & IFM_ACTIVE) == IFM_ACTIVE;
+ netdev->cache_valid |= VALID_CARRIER;
+
+ /* If the interface doesn't report whether the media is active,
+ * just assume it is active. */
+ if ((ifmr.ifm_status & IFM_AVALID) == 0) {
+ netdev->carrier = true;
+ }
+ } else {
VLOG_DBG_RL(&rl, "%s: ioctl(SIOCGIFMEDIA) failed: %s",
netdev_get_name(netdev_), ovs_strerror(error));
- return error;
- }
-
- netdev->carrier = (ifmr.ifm_status & IFM_ACTIVE) == IFM_ACTIVE;
- netdev->cache_valid |= VALID_CARRIER;
-
- /* If the interface doesn't report whether the media is active,
- * just assume it is active. */
- if ((ifmr.ifm_status & IFM_AVALID) == 0) {
- netdev->carrier = true;
}
}
- *carrier = netdev->carrier;
+ if (!error) {
+ *carrier = netdev->carrier;
+ }
+ ovs_mutex_unlock(&netdev->mutex);
- return 0;
+ return error;
}
static void
struct in_addr *netmask)
{
struct netdev_bsd *netdev = netdev_bsd_cast(netdev_);
+ int error = 0;
+ ovs_mutex_lock(&netdev->mutex);
if (!(netdev->cache_valid & VALID_IN4)) {
- const struct sockaddr_in *sin;
struct ifreq ifr;
- int error;
ifr.ifr_addr.sa_family = AF_INET;
error = af_inet_ifreq_ioctl(netdev_get_kernel_name(netdev_), &ifr,
SIOCGIFADDR, "SIOCGIFADDR");
- if (error) {
- return error;
- }
+ if (!error) {
+ const struct sockaddr_in *sin;
- sin = (struct sockaddr_in *) &ifr.ifr_addr;
- netdev->in4 = sin->sin_addr;
- error = af_inet_ifreq_ioctl(netdev_get_kernel_name(netdev_), &ifr,
- SIOCGIFNETMASK, "SIOCGIFNETMASK");
- if (error) {
- return error;
+ sin = (struct sockaddr_in *) &ifr.ifr_addr;
+ netdev->in4 = sin->sin_addr;
+ netdev->cache_valid |= VALID_IN4;
+ error = af_inet_ifreq_ioctl(netdev_get_kernel_name(netdev_), &ifr,
+ SIOCGIFNETMASK, "SIOCGIFNETMASK");
+ if (!error) {
+ *netmask = sin->sin_addr;
+ }
}
- netdev->netmask = sin->sin_addr;
- netdev->cache_valid |= VALID_IN4;
}
- *in4 = netdev->in4;
- *netmask = netdev->netmask;
+ if (!error) {
+ *in4 = netdev->in4;
+ *netmask = netdev->netmask;
+ }
+ ovs_mutex_unlock(&netdev->mutex);
- return in4->s_addr == INADDR_ANY ? EADDRNOTAVAIL : 0;
+ return error ? error : in4->s_addr == INADDR_ANY ? EADDRNOTAVAIL : 0;
}
/*
struct netdev_bsd *netdev = netdev_bsd_cast(netdev_);
int error;
+ ovs_mutex_lock(&netdev->mutex);
error = do_set_addr(netdev_, SIOCSIFADDR, "SIOCSIFADDR", addr);
if (!error) {
if (addr.s_addr != INADDR_ANY) {
}
netdev_bsd_changed(netdev);
}
+ ovs_mutex_unlock(&netdev->mutex);
+
return error;
}
struct netdev_dummy {
struct netdev up;
+
+ /* Protects all members below. */
+ struct ovs_mutex mutex;
+
uint8_t hwaddr[ETH_ADDR_LEN];
int mtu;
struct netdev_stats stats;
static unixctl_cb_func netdev_dummy_set_admin_state;
static int netdev_dummy_construct(struct netdev *);
-static void netdev_dummy_poll_notify(struct netdev_dummy *);
+static void netdev_dummy_poll_notify(struct netdev_dummy *netdev)
+ OVS_REQUIRES(netdev->mutex);
static void netdev_dummy_queue_packet(struct netdev_dummy *, struct ofpbuf *);
static void dummy_stream_close(struct dummy_stream *);
struct netdev_dummy *dev = node->data;
size_t i;
+ ovs_mutex_lock(&dev->mutex);
+
if (dev->pstream) {
struct stream *new_stream;
int error;
}
}
+ ovs_mutex_unlock(&dev->mutex);
netdev_close(&dev->up);
}
shash_destroy(&dummy_netdevs);
struct netdev_dummy *dev = node->data;
size_t i;
+ ovs_mutex_lock(&dev->mutex);
if (dev->pstream) {
pstream_wait(dev->pstream);
}
}
stream_recv_wait(s->stream);
}
+ ovs_mutex_unlock(&dev->mutex);
netdev_close(&dev->up);
}
shash_destroy(&dummy_netdevs);
unsigned int n;
atomic_add(&next_n, 1, &n);
+
+ ovs_mutex_init(&netdev->mutex, PTHREAD_MUTEX_NORMAL);
netdev->hwaddr[0] = 0xaa;
netdev->hwaddr[1] = 0x55;
netdev->hwaddr[2] = n >> 24;
dummy_stream_close(&netdev->streams[i]);
}
free(netdev->streams);
+ ovs_mutex_destroy(&netdev->mutex);
}
static void
struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
const char *pstream;
+ ovs_mutex_lock(&netdev->mutex);
netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);
pstream = smap_get(args, "pstream");
}
}
}
+ ovs_mutex_unlock(&netdev->mutex);
+
return 0;
}
struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
+ ovs_mutex_lock(&netdev->mutex);
list_push_back(&netdev->rxes, &rx->node);
list_init(&rx->recv_queue);
rx->recv_queue_len = 0;
+ ovs_mutex_unlock(&netdev->mutex);
return 0;
}
netdev_dummy_rx_destruct(struct netdev_rx *rx_)
{
struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
+ struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
+ ovs_mutex_lock(&netdev->mutex);
list_remove(&rx->node);
ofpbuf_list_delete(&rx->recv_queue);
+ ovs_mutex_unlock(&netdev->mutex);
}
static void
netdev_dummy_rx_recv(struct netdev_rx *rx_, void *buffer, size_t size)
{
struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
+ struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
struct ofpbuf *packet;
int retval;
- if (list_is_empty(&rx->recv_queue)) {
+ ovs_mutex_lock(&netdev->mutex);
+ if (!list_is_empty(&rx->recv_queue)) {
+ packet = ofpbuf_from_list(list_pop_front(&rx->recv_queue));
+ rx->recv_queue_len--;
+ } else {
+ packet = NULL;
+ }
+ ovs_mutex_unlock(&netdev->mutex);
+
+ if (!packet) {
return -EAGAIN;
}
- packet = ofpbuf_from_list(list_pop_front(&rx->recv_queue));
- rx->recv_queue_len--;
if (packet->size <= size) {
memcpy(buffer, packet->data, packet->size);
retval = packet->size;
netdev_dummy_rx_wait(struct netdev_rx *rx_)
{
struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
+ struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
+
+ ovs_mutex_lock(&netdev->mutex);
if (!list_is_empty(&rx->recv_queue)) {
poll_immediate_wake();
}
+ ovs_mutex_unlock(&netdev->mutex);
}
static int
netdev_dummy_rx_drain(struct netdev_rx *rx_)
{
struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
+ struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
+
+ ovs_mutex_lock(&netdev->mutex);
ofpbuf_list_delete(&rx->recv_queue);
rx->recv_queue_len = 0;
+ ovs_mutex_unlock(&netdev->mutex);
+
return 0;
}
}
}
+ ovs_mutex_lock(&dev->mutex);
dev->stats.tx_packets++;
dev->stats.tx_bytes += size;
list_push_back(&s->txq, &b->list_node);
}
}
+ ovs_mutex_unlock(&dev->mutex);
return 0;
}
{
struct netdev_dummy *dev = netdev_dummy_cast(netdev);
+ ovs_mutex_lock(&dev->mutex);
if (!eth_addr_equals(dev->hwaddr, mac)) {
memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
netdev_dummy_poll_notify(dev);
}
+ ovs_mutex_unlock(&dev->mutex);
return 0;
}
netdev_dummy_get_etheraddr(const struct netdev *netdev,
uint8_t mac[ETH_ADDR_LEN])
{
- const struct netdev_dummy *dev = netdev_dummy_cast(netdev);
+ struct netdev_dummy *dev = netdev_dummy_cast(netdev);
+ ovs_mutex_lock(&dev->mutex);
memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
+ ovs_mutex_unlock(&dev->mutex);
+
return 0;
}
static int
netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
{
- const struct netdev_dummy *dev = netdev_dummy_cast(netdev);
+ struct netdev_dummy *dev = netdev_dummy_cast(netdev);
+ ovs_mutex_lock(&dev->mutex);
*mtup = dev->mtu;
+ ovs_mutex_unlock(&dev->mutex);
+
return 0;
}
{
struct netdev_dummy *dev = netdev_dummy_cast(netdev);
+ ovs_mutex_lock(&dev->mutex);
dev->mtu = mtu;
+ ovs_mutex_unlock(&dev->mutex);
+
return 0;
}
static int
netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
- const struct netdev_dummy *dev = netdev_dummy_cast(netdev);
+ struct netdev_dummy *dev = netdev_dummy_cast(netdev);
+ ovs_mutex_lock(&dev->mutex);
*stats = dev->stats;
+ ovs_mutex_unlock(&dev->mutex);
+
return 0;
}
{
struct netdev_dummy *dev = netdev_dummy_cast(netdev);
+ ovs_mutex_lock(&dev->mutex);
dev->stats = *stats;
+ ovs_mutex_unlock(&dev->mutex);
+
return 0;
}
netdev_dummy_get_ifindex(const struct netdev *netdev)
{
struct netdev_dummy *dev = netdev_dummy_cast(netdev);
+ int ifindex;
+
+ ovs_mutex_lock(&dev->mutex);
+ ifindex = dev->ifindex;
+ ovs_mutex_unlock(&dev->mutex);
- return dev->ifindex;
+ return ifindex;
}
static int
-netdev_dummy_update_flags(struct netdev *netdev_,
- enum netdev_flags off, enum netdev_flags on,
- enum netdev_flags *old_flagsp)
+netdev_dummy_update_flags__(struct netdev_dummy *netdev,
+ enum netdev_flags off, enum netdev_flags on,
+ enum netdev_flags *old_flagsp)
+ OVS_REQUIRES(netdev->mutex)
{
- struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
-
if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
return EINVAL;
}
if (*old_flagsp != netdev->flags) {
netdev_dummy_poll_notify(netdev);
}
+
return 0;
}
+static int
+netdev_dummy_update_flags(struct netdev *netdev_,
+ enum netdev_flags off, enum netdev_flags on,
+ enum netdev_flags *old_flagsp)
+{
+ struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
+ int error;
+
+ ovs_mutex_lock(&netdev->mutex);
+ error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
+ ovs_mutex_unlock(&netdev->mutex);
+
+ return error;
+}
+
static unsigned int
-netdev_dummy_change_seq(const struct netdev *netdev)
+netdev_dummy_change_seq(const struct netdev *netdev_)
{
- return netdev_dummy_cast(netdev)->change_seq;
+ struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
+ unsigned int change_seq;
+
+ ovs_mutex_lock(&netdev->mutex);
+ change_seq = netdev->change_seq;
+ ovs_mutex_unlock(&netdev->mutex);
+
+ return change_seq;
}
\f
/* Helper functions. */
goto exit;
}
+ ovs_mutex_lock(&dummy_dev->mutex);
dummy_dev->stats.rx_packets++;
dummy_dev->stats.rx_bytes += packet->size;
-
netdev_dummy_queue_packet(dummy_dev, packet);
+ ovs_mutex_unlock(&dummy_dev->mutex);
}
unixctl_command_reply(conn, NULL);
static void
netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
+ OVS_REQUIRES(dev->mutex)
{
enum netdev_flags old_flags;
if (admin_state) {
- netdev_dummy_update_flags(&dev->up, 0, NETDEV_UP, &old_flags);
+ netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
} else {
- netdev_dummy_update_flags(&dev->up, NETDEV_UP, 0, &old_flags);
+ netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
}
}
if (netdev && is_dummy_class(netdev->netdev_class)) {
struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
+ ovs_mutex_lock(&dummy_dev->mutex);
netdev_dummy_set_admin_state__(dummy_dev, up);
+ ovs_mutex_unlock(&dummy_dev->mutex);
+
netdev_close(netdev);
} else {
unixctl_command_reply_error(conn, "Unknown Dummy Interface");
shash_init(&dummy_netdevs);
netdev_get_devices(&dummy_class, &dummy_netdevs);
SHASH_FOR_EACH (node, &dummy_netdevs) {
- struct netdev *netdev = node->data;
- netdev_dummy_set_admin_state__(netdev_dummy_cast(netdev), up);
- netdev_close(netdev);
+ struct netdev *netdev_ = node->data;
+ struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
+
+ ovs_mutex_lock(&netdev->mutex);
+ netdev_dummy_set_admin_state__(netdev, up);
+ ovs_mutex_unlock(&netdev->mutex);
+
+ netdev_close(netdev_);
}
shash_destroy(&dummy_netdevs);
}
SSET_FOR_EACH (type, &types) {
if (!netdev_unregister_provider(type)) {
struct netdev_class *class;
+ int error;
- class = xmalloc(sizeof *class);
- *class = dummy_class;
+ class = xmemdup(&dummy_class, sizeof dummy_class);
class->type = xstrdup(type);
- netdev_register_provider(class);
+ error = netdev_register_provider(class);
+ if (error) {
+ VLOG_ERR("%s: failed to register netdev provider (%s)",
+ type, ovs_strerror(error));
+ free(CONST_CAST(char *, class->type));
+ free(class);
+ }
}
}
sset_destroy(&types);
struct netdev_linux {
struct netdev up;
+ /* Protects all members below. */
+ struct ovs_mutex mutex;
+
unsigned int cache_valid;
unsigned int change_seq;
}
\f
static void netdev_linux_update(struct netdev_linux *netdev,
- const struct rtnetlink_link_change *);
+ const struct rtnetlink_link_change *)
+ OVS_REQUIRES(netdev->mutex);
static void netdev_linux_changed(struct netdev_linux *netdev,
- unsigned int ifi_flags, unsigned int mask);
+ unsigned int ifi_flags, unsigned int mask)
+ OVS_REQUIRES(netdev->mutex);
/* Returns a NETLINK_ROUTE socket listening for RTNLGRP_LINK changes, or NULL
* if no such socket could be created. */
struct netdev *netdev_ = netdev_from_name(change.ifname);
if (netdev_ && is_netdev_linux_class(netdev_->netdev_class)) {
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
+
+ ovs_mutex_lock(&netdev->mutex);
netdev_linux_update(netdev, &change);
+ ovs_mutex_unlock(&netdev->mutex);
+
netdev_close(netdev_);
}
}
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
unsigned int flags;
+ ovs_mutex_lock(&netdev->mutex);
get_flags(netdev_, &flags);
netdev_linux_changed(netdev, flags, 0);
+ ovs_mutex_unlock(&netdev->mutex);
+
netdev_close(netdev_);
}
shash_destroy(&device_shash);
static void
netdev_linux_changed(struct netdev_linux *dev,
unsigned int ifi_flags, unsigned int mask)
+ OVS_REQUIRES(dev->mutex)
{
dev->change_seq++;
if (!dev->change_seq) {
static void
netdev_linux_update(struct netdev_linux *dev,
const struct rtnetlink_link_change *change)
+ OVS_REQUIRES(dev->mutex)
{
if (change->nlmsg_type == RTM_NEWLINK) {
/* Keep drv-info */
static void
netdev_linux_common_construct(struct netdev_linux *netdev)
{
+ ovs_mutex_init(&netdev->mutex, PTHREAD_MUTEX_NORMAL);
netdev->change_seq = 1;
}
{
close(netdev->tap_fd);
}
+
+ ovs_mutex_destroy(&netdev->mutex);
}
static void
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
int error;
+ ovs_mutex_lock(&netdev->mutex);
rx->is_tap = is_tap_netdev(netdev_);
if (rx->is_tap) {
rx->fd = netdev->tap_fd;
goto error;
}
}
+ ovs_mutex_unlock(&netdev->mutex);
return 0;
if (rx->fd >= 0) {
close(rx->fd);
}
+ ovs_mutex_unlock(&netdev->mutex);
return error;
}
struct msghdr msg;
struct iovec iov;
int ifindex;
- int error;
int sock;
sock = af_packet_sock();
return -sock;
}
- error = get_ifindex(netdev_, &ifindex);
- if (error) {
- return error;
+ ifindex = netdev_get_ifindex(netdev_);
+ if (ifindex < 0) {
+ return -ifindex;
}
/* We don't bother setting most fields in sockaddr_ll because the
struct netdev_saved_flags *sf = NULL;
int error;
+ ovs_mutex_lock(&netdev->mutex);
+
if (netdev->cache_valid & VALID_ETHERADDR) {
- if (netdev->ether_addr_error) {
- return netdev->ether_addr_error;
- }
- if (eth_addr_equals(netdev->etheraddr, mac)) {
- return 0;
+ error = netdev->ether_addr_error;
+ if (error || eth_addr_equals(netdev->etheraddr, mac)) {
+ goto exit;
}
netdev->cache_valid &= ~VALID_ETHERADDR;
}
netdev_restore_flags(sf);
+exit:
+ ovs_mutex_unlock(&netdev->mutex);
return error;
}
uint8_t mac[ETH_ADDR_LEN])
{
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
+ int error;
+ ovs_mutex_lock(&netdev->mutex);
if (!(netdev->cache_valid & VALID_ETHERADDR)) {
- int error = get_etheraddr(netdev_get_name(netdev_),
- netdev->etheraddr);
-
- netdev->ether_addr_error = error;
+ netdev->ether_addr_error = get_etheraddr(netdev_get_name(netdev_),
+ netdev->etheraddr);
netdev->cache_valid |= VALID_ETHERADDR;
}
- if (!netdev->ether_addr_error) {
+ error = netdev->ether_addr_error;
+ if (!error) {
memcpy(mac, netdev->etheraddr, ETH_ADDR_LEN);
}
+ ovs_mutex_unlock(&netdev->mutex);
- return netdev->ether_addr_error;
+ return error;
}
/* Returns the maximum size of transmitted (and received) packets on 'netdev',
netdev_linux_get_mtu(const struct netdev *netdev_, int *mtup)
{
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
+ int error;
+
+ ovs_mutex_lock(&netdev->mutex);
if (!(netdev->cache_valid & VALID_MTU)) {
struct ifreq ifr;
- int error;
-
- error = af_inet_ifreq_ioctl(netdev_get_name(netdev_), &ifr,
- SIOCGIFMTU, "SIOCGIFMTU");
- netdev->netdev_mtu_error = error;
+ netdev->netdev_mtu_error = af_inet_ifreq_ioctl(
+ netdev_get_name(netdev_), &ifr, SIOCGIFMTU, "SIOCGIFMTU");
netdev->mtu = ifr.ifr_mtu;
netdev->cache_valid |= VALID_MTU;
}
- if (!netdev->netdev_mtu_error) {
+ error = netdev->netdev_mtu_error;
+ if (!error) {
*mtup = netdev->mtu;
}
- return netdev->netdev_mtu_error;
+ ovs_mutex_unlock(&netdev->mutex);
+
+ return error;
}
/* Sets the maximum size of transmitted (MTU) for given device using linux
struct ifreq ifr;
int error;
+ ovs_mutex_lock(&netdev->mutex);
if (netdev->cache_valid & VALID_MTU) {
- if (netdev->netdev_mtu_error) {
- return netdev->netdev_mtu_error;
- }
- if (netdev->mtu == mtu) {
- return 0;
+ error = netdev->netdev_mtu_error;
+ if (error || netdev->mtu == mtu) {
+ goto exit;
}
netdev->cache_valid &= ~VALID_MTU;
}
netdev->mtu = ifr.ifr_mtu;
netdev->cache_valid |= VALID_MTU;
}
+exit:
+ ovs_mutex_unlock(&netdev->mutex);
return error;
}
/* Returns the ifindex of 'netdev', if successful, as a positive number.
* On failure, returns a negative errno value. */
static int
-netdev_linux_get_ifindex(const struct netdev *netdev)
+netdev_linux_get_ifindex(const struct netdev *netdev_)
{
+ struct netdev_linux *netdev = netdev_linux_cast(netdev_);
int ifindex, error;
- error = get_ifindex(netdev, &ifindex);
+ ovs_mutex_lock(&netdev->mutex);
+ error = get_ifindex(netdev_, &ifindex);
+ ovs_mutex_unlock(&netdev->mutex);
+
return error ? -error : ifindex;
}
{
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
+ ovs_mutex_lock(&netdev->mutex);
if (netdev->miimon_interval > 0) {
*carrier = netdev->miimon;
} else {
*carrier = (netdev->ifi_flags & IFF_RUNNING) != 0;
}
+ ovs_mutex_unlock(&netdev->mutex);
return 0;
}
static long long int
-netdev_linux_get_carrier_resets(const struct netdev *netdev)
+netdev_linux_get_carrier_resets(const struct netdev *netdev_)
{
- return netdev_linux_cast(netdev)->carrier_resets;
+ struct netdev_linux *netdev = netdev_linux_cast(netdev_);
+ long long int carrier_resets;
+
+ ovs_mutex_lock(&netdev->mutex);
+ carrier_resets = netdev->carrier_resets;
+ ovs_mutex_unlock(&netdev->mutex);
+
+ return carrier_resets;
}
static int
{
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
+ ovs_mutex_lock(&netdev->mutex);
interval = interval > 0 ? MAX(interval, 100) : 0;
if (netdev->miimon_interval != interval) {
netdev->miimon_interval = interval;
timer_set_expired(&netdev->miimon_timer);
}
+ ovs_mutex_unlock(&netdev->mutex);
return 0;
}
struct netdev_linux *dev = netdev_linux_cast(netdev);
bool miimon;
- if (dev->miimon_interval <= 0 || !timer_expired(&dev->miimon_timer)) {
- netdev_close(netdev);
- continue;
- }
+ ovs_mutex_lock(&dev->mutex);
+ if (dev->miimon_interval > 0 && timer_expired(&dev->miimon_timer)) {
+ netdev_linux_get_miimon(dev->up.name, &miimon);
+ if (miimon != dev->miimon) {
+ dev->miimon = miimon;
+ netdev_linux_changed(dev, dev->ifi_flags, 0);
+ }
- netdev_linux_get_miimon(dev->up.name, &miimon);
- if (miimon != dev->miimon) {
- dev->miimon = miimon;
- netdev_linux_changed(dev, dev->ifi_flags, 0);
+ timer_set_duration(&dev->miimon_timer, dev->miimon_interval);
}
-
- timer_set_duration(&dev->miimon_timer, dev->miimon_interval);
+ ovs_mutex_unlock(&dev->mutex);
netdev_close(netdev);
}
struct netdev *netdev = node->data;
struct netdev_linux *dev = netdev_linux_cast(netdev);
+ ovs_mutex_lock(&dev->mutex);
if (dev->miimon_interval > 0) {
timer_wait(&dev->miimon_timer);
}
+ ovs_mutex_unlock(&dev->mutex);
netdev_close(netdev);
}
shash_destroy(&device_shash);
static int
netdev_linux_sys_get_stats(const struct netdev *netdev_,
- struct netdev_stats *stats)
+ struct netdev_stats *stats)
{
static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
static int use_netlink_stats;
struct netdev_stats dev_stats;
int error;
+ ovs_mutex_lock(&netdev->mutex);
get_stats_via_vport(netdev_, stats);
-
error = netdev_linux_sys_get_stats(netdev_, &dev_stats);
-
if (error) {
- if (netdev->vport_stats_error) {
- return error;
- } else {
- return 0;
+ if (!netdev->vport_stats_error) {
+ error = 0;
}
- }
-
- if (netdev->vport_stats_error) {
+ } else if (netdev->vport_stats_error) {
/* stats not available from OVS then use ioctl stats. */
*stats = dev_stats;
} else {
stats->tx_heartbeat_errors += dev_stats.tx_heartbeat_errors;
stats->tx_window_errors += dev_stats.tx_window_errors;
}
- return 0;
+ ovs_mutex_unlock(&netdev->mutex);
+
+ return error;
}
/* Retrieves current device stats for 'netdev-tap' netdev or
struct netdev_stats dev_stats;
int error;
+ ovs_mutex_lock(&netdev->mutex);
get_stats_via_vport(netdev_, stats);
-
error = netdev_linux_sys_get_stats(netdev_, &dev_stats);
if (error) {
- if (netdev->vport_stats_error) {
- return error;
- } else {
- return 0;
+ if (!netdev->vport_stats_error) {
+ error = 0;
}
- }
+ } else if (netdev->vport_stats_error) {
+ /* Transmit and receive stats will appear to be swapped relative to the
+ * other ports since we are the one sending the data, not a remote
+ * computer. For consistency, we swap them back here. This does not
+ * apply if we are getting stats from the vport layer because it always
+ * tracks stats from the perspective of the switch. */
- /* If this port is an internal port then the transmit and receive stats
- * will appear to be swapped relative to the other ports since we are the
- * one sending the data, not a remote computer. For consistency, we swap
- * them back here. This does not apply if we are getting stats from the
- * vport layer because it always tracks stats from the perspective of the
- * switch. */
- if (netdev->vport_stats_error) {
*stats = dev_stats;
swap_uint64(&stats->rx_packets, &stats->tx_packets);
swap_uint64(&stats->rx_bytes, &stats->tx_bytes);
stats->multicast += dev_stats.multicast;
stats->collisions += dev_stats.collisions;
}
- return 0;
+ ovs_mutex_unlock(&netdev->mutex);
+
+ return error;
}
static int
struct netdev_stats *stats)
{
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
+ int error;
+ ovs_mutex_lock(&netdev->mutex);
get_stats_via_vport(netdev_, stats);
- return netdev->vport_stats_error;
+ error = netdev->vport_stats_error;
+ ovs_mutex_unlock(&netdev->mutex);
+
+ return error;
}
static int
static void
netdev_linux_read_features(struct netdev_linux *netdev)
+ OVS_REQUIRES(netdev->mutex)
{
struct ethtool_cmd ecmd;
uint32_t speed;
enum netdev_features *peer)
{
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
+ int error;
+ ovs_mutex_lock(&netdev->mutex);
netdev_linux_read_features(netdev);
-
if (!netdev->get_features_error) {
*current = netdev->current;
*advertised = netdev->advertised;
*supported = netdev->supported;
*peer = 0; /* XXX */
}
- return netdev->get_features_error;
+ error = netdev->get_features_error;
+ ovs_mutex_unlock(&netdev->mutex);
+
+ return error;
}
/* Set the features advertised by 'netdev' to 'advertise'. */
static int
-netdev_linux_set_advertisements(struct netdev *netdev,
+netdev_linux_set_advertisements(struct netdev *netdev_,
enum netdev_features advertise)
{
+ struct netdev_linux *netdev = netdev_linux_cast(netdev_);
struct ethtool_cmd ecmd;
int error;
+ ovs_mutex_lock(&netdev->mutex);
+
COVERAGE_INC(netdev_get_ethtool);
memset(&ecmd, 0, sizeof ecmd);
- error = netdev_linux_do_ethtool(netdev_get_name(netdev), &ecmd,
+ error = netdev_linux_do_ethtool(netdev_get_name(netdev_), &ecmd,
ETHTOOL_GSET, "ETHTOOL_GSET");
if (error) {
- return error;
+ goto exit;
}
ecmd.advertising = 0;
ecmd.advertising |= ADVERTISED_Asym_Pause;
}
COVERAGE_INC(netdev_set_ethtool);
- return netdev_linux_do_ethtool(netdev_get_name(netdev), &ecmd,
- ETHTOOL_SSET, "ETHTOOL_SSET");
+ error = netdev_linux_do_ethtool(netdev_get_name(netdev_), &ecmd,
+ ETHTOOL_SSET, "ETHTOOL_SSET");
+
+exit:
+ ovs_mutex_unlock(&netdev->mutex);
+ return error;
}
/* Attempts to set input rate limiting (policing) policy. Returns 0 if
const char *netdev_name = netdev_get_name(netdev_);
int error;
-
kbits_burst = (!kbits_rate ? 0 /* Force to 0 if no rate specified. */
: !kbits_burst ? 1000 /* Default to 1000 kbits if 0. */
: kbits_burst); /* Stick with user-specified value. */
+ ovs_mutex_lock(&netdev->mutex);
if (netdev->cache_valid & VALID_POLICING) {
- if (netdev->netdev_policing_error) {
- return netdev->netdev_policing_error;
- }
-
- if (netdev->kbits_rate == kbits_rate &&
- netdev->kbits_burst == kbits_burst) {
+ error = netdev->netdev_policing_error;
+ if (error || (netdev->kbits_rate == kbits_rate &&
+ netdev->kbits_burst == kbits_burst)) {
/* Assume that settings haven't changed since we last set them. */
- return 0;
+ goto out;
}
netdev->cache_valid &= ~VALID_POLICING;
}
netdev->netdev_policing_error = error;
netdev->cache_valid |= VALID_POLICING;
}
+ ovs_mutex_unlock(&netdev->mutex);
return error;
}
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
int error;
+ ovs_mutex_lock(&netdev->mutex);
error = tc_query_qdisc(netdev_);
- if (error) {
- return error;
+ if (!error) {
+ *typep = netdev->tc->ops->ovs_name;
+ error = (netdev->tc->ops->qdisc_get
+ ? netdev->tc->ops->qdisc_get(netdev_, details)
+ : 0);
}
+ ovs_mutex_unlock(&netdev->mutex);
- *typep = netdev->tc->ops->ovs_name;
- return (netdev->tc->ops->qdisc_get
- ? netdev->tc->ops->qdisc_get(netdev_, details)
- : 0);
+ return error;
}
static int
return EOPNOTSUPP;
}
+ ovs_mutex_lock(&netdev->mutex);
error = tc_query_qdisc(netdev_);
if (error) {
- return error;
+ goto exit;
}
if (new_ops == netdev->tc->ops) {
- return new_ops->qdisc_set ? new_ops->qdisc_set(netdev_, details) : 0;
+ error = new_ops->qdisc_set ? new_ops->qdisc_set(netdev_, details) : 0;
} else {
/* Delete existing qdisc. */
error = tc_del_qdisc(netdev_);
if (error) {
- return error;
+ goto exit;
}
ovs_assert(netdev->tc == NULL);
/* Install new qdisc. */
error = new_ops->tc_install(netdev_, details);
ovs_assert((error == 0) == (netdev->tc != NULL));
-
- return error;
}
+
+exit:
+ ovs_mutex_unlock(&netdev->mutex);
+ return error;
}
static int
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
int error;
+ ovs_mutex_lock(&netdev->mutex);
error = tc_query_qdisc(netdev_);
- if (error) {
- return error;
- } else {
+ if (!error) {
struct tc_queue *queue = tc_find_queue(netdev_, queue_id);
- return (queue
+ error = (queue
? netdev->tc->ops->class_get(netdev_, queue, details)
: ENOENT);
}
+ ovs_mutex_unlock(&netdev->mutex);
+
+ return error;
}
static int
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
int error;
+ ovs_mutex_lock(&netdev->mutex);
error = tc_query_qdisc(netdev_);
- if (error) {
- return error;
- } else if (queue_id >= netdev->tc->ops->n_queues
- || !netdev->tc->ops->class_set) {
- return EINVAL;
+ if (!error) {
+ error = (queue_id < netdev->tc->ops->n_queues
+ && netdev->tc->ops->class_set
+ ? netdev->tc->ops->class_set(netdev_, queue_id, details)
+ : EINVAL);
}
+ ovs_mutex_unlock(&netdev->mutex);
- return netdev->tc->ops->class_set(netdev_, queue_id, details);
+ return error;
}
static int
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
int error;
+ ovs_mutex_lock(&netdev->mutex);
error = tc_query_qdisc(netdev_);
- if (error) {
- return error;
- } else if (!netdev->tc->ops->class_delete) {
- return EINVAL;
- } else {
- struct tc_queue *queue = tc_find_queue(netdev_, queue_id);
- return (queue
- ? netdev->tc->ops->class_delete(netdev_, queue)
- : ENOENT);
+ if (!error) {
+ if (netdev->tc->ops->class_delete) {
+ struct tc_queue *queue = tc_find_queue(netdev_, queue_id);
+ error = (queue
+ ? netdev->tc->ops->class_delete(netdev_, queue)
+ : ENOENT);
+ } else {
+ error = EINVAL;
+ }
}
+ ovs_mutex_unlock(&netdev->mutex);
+
+ return error;
}
static int
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
int error;
+ ovs_mutex_lock(&netdev->mutex);
error = tc_query_qdisc(netdev_);
- if (error) {
- return error;
- } else if (!netdev->tc->ops->class_get_stats) {
- return EOPNOTSUPP;
- } else {
- const struct tc_queue *queue = tc_find_queue(netdev_, queue_id);
- if (!queue) {
- return ENOENT;
+ if (!error) {
+ if (netdev->tc->ops->class_get_stats) {
+ const struct tc_queue *queue = tc_find_queue(netdev_, queue_id);
+ if (queue) {
+ stats->created = queue->created;
+ error = netdev->tc->ops->class_get_stats(netdev_, queue,
+ stats);
+ } else {
+ error = ENOENT;
+ }
+ } else {
+ error = EOPNOTSUPP;
}
- stats->created = queue->created;
- return netdev->tc->ops->class_get_stats(netdev_, queue, stats);
}
+ ovs_mutex_unlock(&netdev->mutex);
+
+ return error;
}
static bool
netdev_dump_queues_cb *cb, void *aux)
{
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
- struct tc_queue *queue, *next_queue;
- struct smap details;
- int last_error;
int error;
+ ovs_mutex_lock(&netdev->mutex);
error = tc_query_qdisc(netdev_);
- if (error) {
- return error;
- } else if (!netdev->tc->ops->class_get) {
- return EOPNOTSUPP;
- }
+ if (!error) {
+ if (netdev->tc->ops->class_get) {
+ struct tc_queue *queue, *next_queue;
+ struct smap details;
- last_error = 0;
- smap_init(&details);
- HMAP_FOR_EACH_SAFE (queue, next_queue, hmap_node,
- &netdev->tc->queues) {
- smap_clear(&details);
+ smap_init(&details);
+ HMAP_FOR_EACH_SAFE (queue, next_queue, hmap_node,
+ &netdev->tc->queues) {
+ int retval;
- error = netdev->tc->ops->class_get(netdev_, queue, &details);
- if (!error) {
- (*cb)(queue->queue_id, &details, aux);
+ smap_clear(&details);
+
+ retval = netdev->tc->ops->class_get(netdev_, queue, &details);
+ if (!retval) {
+ (*cb)(queue->queue_id, &details, aux);
+ } else {
+ error = retval;
+ }
+ }
+ smap_destroy(&details);
} else {
- last_error = error;
+ error = EOPNOTSUPP;
}
}
- smap_destroy(&details);
+ ovs_mutex_unlock(&netdev->mutex);
- return last_error;
+ return error;
}
static int
netdev_dump_queue_stats_cb *cb, void *aux)
{
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
- struct nl_dump dump;
- struct ofpbuf msg;
- int last_error;
int error;
+ ovs_mutex_lock(&netdev->mutex);
error = tc_query_qdisc(netdev_);
- if (error) {
- return error;
- } else if (!netdev->tc->ops->class_dump_stats) {
- return EOPNOTSUPP;
- }
+ if (!error) {
+ struct nl_dump dump;
- last_error = 0;
- if (!start_queue_dump(netdev_, &dump)) {
- return ENODEV;
- }
- while (nl_dump_next(&dump, &msg)) {
- error = netdev->tc->ops->class_dump_stats(netdev_, &msg, cb, aux);
- if (error) {
- last_error = error;
+ if (!netdev->tc->ops->class_dump_stats) {
+ error = EOPNOTSUPP;
+ } else if (!start_queue_dump(netdev_, &dump)) {
+ error = ENODEV;
+ } else {
+ struct ofpbuf msg;
+ int retval;
+
+ while (nl_dump_next(&dump, &msg)) {
+ retval = netdev->tc->ops->class_dump_stats(netdev_, &msg,
+ cb, aux);
+ if (retval) {
+ error = retval;
+ }
+ }
+
+ retval = nl_dump_done(&dump);
+ if (retval) {
+ error = retval;
+ }
}
}
+ ovs_mutex_unlock(&netdev->mutex);
- error = nl_dump_done(&dump);
- return error ? error : last_error;
+ return error;
}
static int
struct in_addr *address, struct in_addr *netmask)
{
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
+ int error;
+ ovs_mutex_lock(&netdev->mutex);
if (!(netdev->cache_valid & VALID_IN4)) {
- int error;
-
error = netdev_linux_get_ipv4(netdev_, &netdev->address,
SIOCGIFADDR, "SIOCGIFADDR");
- if (error) {
- return error;
+ if (!error) {
+ error = netdev_linux_get_ipv4(netdev_, &netdev->netmask,
+ SIOCGIFNETMASK, "SIOCGIFNETMASK");
+ if (!error) {
+ netdev->cache_valid |= VALID_IN4;
+ }
}
+ } else {
+ error = 0;
+ }
- error = netdev_linux_get_ipv4(netdev_, &netdev->netmask,
- SIOCGIFNETMASK, "SIOCGIFNETMASK");
- if (error) {
- return error;
+ if (!error) {
+ if (netdev->address.s_addr != INADDR_ANY) {
+ *address = netdev->address;
+ *netmask = netdev->netmask;
+ } else {
+ error = EADDRNOTAVAIL;
}
-
- netdev->cache_valid |= VALID_IN4;
}
- *address = netdev->address;
- *netmask = netdev->netmask;
- return address->s_addr == INADDR_ANY ? EADDRNOTAVAIL : 0;
+ ovs_mutex_unlock(&netdev->mutex);
+
+ return error;
}
static int
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
int error;
+ ovs_mutex_lock(&netdev->mutex);
error = do_set_addr(netdev_, SIOCSIFADDR, "SIOCSIFADDR", address);
if (!error) {
netdev->cache_valid |= VALID_IN4;
"SIOCSIFNETMASK", netmask);
}
}
+ ovs_mutex_unlock(&netdev->mutex);
+
return error;
}
netdev_linux_get_in6(const struct netdev *netdev_, struct in6_addr *in6)
{
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
+
+ ovs_mutex_lock(&netdev->mutex);
if (!(netdev->cache_valid & VALID_IN6)) {
FILE *file;
char line[128];
netdev->cache_valid |= VALID_IN6;
}
*in6 = netdev->in6;
+ ovs_mutex_unlock(&netdev->mutex);
+
return 0;
}
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
int error = 0;
+ ovs_mutex_lock(&netdev->mutex);
if (!(netdev->cache_valid & VALID_DRVINFO)) {
struct ethtool_cmd *cmd = (struct ethtool_cmd *) &netdev->drvinfo;
smap_add(smap, "driver_version", netdev->drvinfo.version);
smap_add(smap, "firmware_version", netdev->drvinfo.fw_version);
}
+ ovs_mutex_unlock(&netdev->mutex);
+
return error;
}
int old_flags, new_flags;
int error = 0;
+ ovs_mutex_lock(&netdev->mutex);
old_flags = netdev->ifi_flags;
*old_flagsp = iff_to_nd_flags(old_flags);
new_flags = (old_flags & ~nd_to_iff_flags(off)) | nd_to_iff_flags(on);
error = set_flags(netdev_get_name(netdev_), new_flags);
get_flags(netdev_, &netdev->ifi_flags);
}
+ ovs_mutex_unlock(&netdev->mutex);
+
return error;
}
static unsigned int
-netdev_linux_change_seq(const struct netdev *netdev)
+netdev_linux_change_seq(const struct netdev *netdev_)
{
- return netdev_linux_cast(netdev)->change_seq;
+ struct netdev_linux *netdev = netdev_linux_cast(netdev_);
+ unsigned int change_seq;
+
+ ovs_mutex_lock(&netdev->mutex);
+ change_seq = netdev->change_seq;
+ ovs_mutex_unlock(&netdev->mutex);
+
+ return change_seq;
}
#define NETDEV_LINUX_CLASS(NAME, CONSTRUCT, GET_STATS, SET_STATS, \
* Network device implementations may read these members but should not modify
* them. */
struct netdev {
+ /* The following do not change during the lifetime of a struct netdev. */
char *name; /* Name of network device. */
const struct netdev_class *netdev_class; /* Functions to control
this device. */
+
+ /* The following are protected by 'netdev_mutex' (internal to netdev.c). */
int ref_cnt; /* Times this devices was opened. */
struct shash_node *node; /* Pointer to element in global map. */
struct list saved_flags_list; /* Contains "struct netdev_saved_flags". */
int netdev_register_provider(const struct netdev_class *);
int netdev_unregister_provider(const char *type);
-const struct netdev_class *netdev_lookup_provider(const char *type);
extern const struct netdev_class netdev_linux_class;
extern const struct netdev_class netdev_internal_class;
struct netdev_vport {
struct netdev up;
+
+ /* Protects all members below. */
+ struct ovs_mutex mutex;
+
unsigned int change_seq;
uint8_t etheraddr[ETH_ADDR_LEN];
struct netdev_stats stats;
};
static int netdev_vport_construct(struct netdev *);
-static int get_patch_config(const struct netdev *, struct smap *args);
+static int get_patch_config(const struct netdev *netdev, struct smap *args);
static int get_tunnel_config(const struct netdev *, struct smap *args);
-static void netdev_vport_poll_notify(struct netdev_vport *);
+static void netdev_vport_poll_notify(struct netdev_vport *netdev)
+ OVS_REQUIRES(netdev->mutex);
static bool
is_vport_class(const struct netdev_class *class)
{
struct netdev_vport *netdev = netdev_vport_cast(netdev_);
+ ovs_mutex_init(&netdev->mutex, PTHREAD_MUTEX_NORMAL);
netdev->change_seq = 1;
eth_addr_random(netdev->etheraddr);
route_table_unregister();
free(netdev->peer);
+ ovs_mutex_destroy(&netdev->mutex);
}
static void
const uint8_t mac[ETH_ADDR_LEN])
{
struct netdev_vport *netdev = netdev_vport_cast(netdev_);
+
+ ovs_mutex_lock(&netdev->mutex);
memcpy(netdev->etheraddr, mac, ETH_ADDR_LEN);
netdev_vport_poll_notify(netdev);
+ ovs_mutex_unlock(&netdev->mutex);
+
return 0;
}
static int
-netdev_vport_get_etheraddr(const struct netdev *netdev,
+netdev_vport_get_etheraddr(const struct netdev *netdev_,
uint8_t mac[ETH_ADDR_LEN])
{
- memcpy(mac, netdev_vport_cast(netdev)->etheraddr, ETH_ADDR_LEN);
+ struct netdev_vport *netdev = netdev_vport_cast(netdev_);
+
+ ovs_mutex_lock(&netdev->mutex);
+ memcpy(mac, netdev->etheraddr, ETH_ADDR_LEN);
+ ovs_mutex_unlock(&netdev->mutex);
+
return 0;
}
static int
-tunnel_get_status(const struct netdev *netdev, struct smap *smap)
+tunnel_get_status(const struct netdev *netdev_, struct smap *smap)
{
+ struct netdev_vport *netdev = netdev_vport_cast(netdev_);
char iface[IFNAMSIZ];
ovs_be32 route;
- route = netdev_vport_cast(netdev)->tnl_cfg.ip_dst;
+ ovs_mutex_lock(&netdev->mutex);
+ route = netdev->tnl_cfg.ip_dst;
+ ovs_mutex_unlock(&netdev->mutex);
+
if (route_table_get_name(route, iface)) {
struct netdev *egress_netdev;
&tnl_cfg.out_key_present,
&tnl_cfg.out_key_flow);
+ ovs_mutex_lock(&dev->mutex);
dev->tnl_cfg = tnl_cfg;
netdev_vport_poll_notify(dev);
+ ovs_mutex_unlock(&dev->mutex);
return 0;
}
static int
get_tunnel_config(const struct netdev *dev, struct smap *args)
{
- const struct netdev_tunnel_config *tnl_cfg =
- &netdev_vport_cast(dev)->tnl_cfg;
+ struct netdev_vport *netdev = netdev_vport_cast(dev);
+ struct netdev_tunnel_config tnl_cfg;
+
+ ovs_mutex_lock(&netdev->mutex);
+ tnl_cfg = netdev->tnl_cfg;
+ ovs_mutex_unlock(&netdev->mutex);
- if (tnl_cfg->ip_dst) {
- smap_add_format(args, "remote_ip", IP_FMT, IP_ARGS(tnl_cfg->ip_dst));
- } else if (tnl_cfg->ip_dst_flow) {
+ if (tnl_cfg.ip_dst) {
+ smap_add_format(args, "remote_ip", IP_FMT, IP_ARGS(tnl_cfg.ip_dst));
+ } else if (tnl_cfg.ip_dst_flow) {
smap_add(args, "remote_ip", "flow");
}
- if (tnl_cfg->ip_src) {
- smap_add_format(args, "local_ip", IP_FMT, IP_ARGS(tnl_cfg->ip_src));
- } else if (tnl_cfg->ip_src_flow) {
+ if (tnl_cfg.ip_src) {
+ smap_add_format(args, "local_ip", IP_FMT, IP_ARGS(tnl_cfg.ip_src));
+ } else if (tnl_cfg.ip_src_flow) {
smap_add(args, "local_ip", "flow");
}
- if (tnl_cfg->in_key_flow && tnl_cfg->out_key_flow) {
+ if (tnl_cfg.in_key_flow && tnl_cfg.out_key_flow) {
smap_add(args, "key", "flow");
- } else if (tnl_cfg->in_key_present && tnl_cfg->out_key_present
- && tnl_cfg->in_key == tnl_cfg->out_key) {
- smap_add_format(args, "key", "%"PRIu64, ntohll(tnl_cfg->in_key));
+ } else if (tnl_cfg.in_key_present && tnl_cfg.out_key_present
+ && tnl_cfg.in_key == tnl_cfg.out_key) {
+ smap_add_format(args, "key", "%"PRIu64, ntohll(tnl_cfg.in_key));
} else {
- if (tnl_cfg->in_key_flow) {
+ if (tnl_cfg.in_key_flow) {
smap_add(args, "in_key", "flow");
- } else if (tnl_cfg->in_key_present) {
+ } else if (tnl_cfg.in_key_present) {
smap_add_format(args, "in_key", "%"PRIu64,
- ntohll(tnl_cfg->in_key));
+ ntohll(tnl_cfg.in_key));
}
- if (tnl_cfg->out_key_flow) {
+ if (tnl_cfg.out_key_flow) {
smap_add(args, "out_key", "flow");
- } else if (tnl_cfg->out_key_present) {
+ } else if (tnl_cfg.out_key_present) {
smap_add_format(args, "out_key", "%"PRIu64,
- ntohll(tnl_cfg->out_key));
+ ntohll(tnl_cfg.out_key));
}
}
- if (tnl_cfg->ttl_inherit) {
+ if (tnl_cfg.ttl_inherit) {
smap_add(args, "ttl", "inherit");
- } else if (tnl_cfg->ttl != DEFAULT_TTL) {
- smap_add_format(args, "ttl", "%"PRIu8, tnl_cfg->ttl);
+ } else if (tnl_cfg.ttl != DEFAULT_TTL) {
+ smap_add_format(args, "ttl", "%"PRIu8, tnl_cfg.ttl);
}
- if (tnl_cfg->tos_inherit) {
+ if (tnl_cfg.tos_inherit) {
smap_add(args, "tos", "inherit");
- } else if (tnl_cfg->tos) {
- smap_add_format(args, "tos", "0x%x", tnl_cfg->tos);
+ } else if (tnl_cfg.tos) {
+ smap_add_format(args, "tos", "0x%x", tnl_cfg.tos);
}
- if (tnl_cfg->dst_port) {
- uint16_t dst_port = ntohs(tnl_cfg->dst_port);
+ if (tnl_cfg.dst_port) {
+ uint16_t dst_port = ntohs(tnl_cfg.dst_port);
const char *type = netdev_get_type(dev);
if ((!strcmp("vxlan", type) && dst_port != VXLAN_DST_PORT) ||
}
}
- if (tnl_cfg->csum) {
+ if (tnl_cfg.csum) {
smap_add(args, "csum", "true");
}
- if (!tnl_cfg->dont_fragment) {
+ if (!tnl_cfg.dont_fragment) {
smap_add(args, "df_default", "false");
}
if (netdev_vport_is_patch(netdev_)) {
struct netdev_vport *netdev = netdev_vport_cast(netdev_);
+
+ ovs_mutex_lock(&netdev->mutex);
if (netdev->peer) {
peer = xstrdup(netdev->peer);
}
+ ovs_mutex_unlock(&netdev->mutex);
}
return peer;
{
if (is_vport_class(netdev_get_class(netdev))) {
struct netdev_vport *dev = netdev_vport_cast(netdev);
+
+ ovs_mutex_lock(&dev->mutex);
dev->stats.rx_packets += stats->n_packets;
dev->stats.rx_bytes += stats->n_bytes;
+ ovs_mutex_unlock(&dev->mutex);
}
}
{
if (is_vport_class(netdev_get_class(netdev))) {
struct netdev_vport *dev = netdev_vport_cast(netdev);
+
+ ovs_mutex_lock(&dev->mutex);
dev->stats.tx_packets += stats->n_packets;
dev->stats.tx_bytes += stats->n_bytes;
+ ovs_mutex_unlock(&dev->mutex);
}
}
{
struct netdev_vport *dev = netdev_vport_cast(dev_);
+ ovs_mutex_lock(&dev->mutex);
if (dev->peer) {
smap_add(args, "peer", dev->peer);
}
+ ovs_mutex_unlock(&dev->mutex);
+
return 0;
}
return EINVAL;
}
+ ovs_mutex_lock(&dev->mutex);
free(dev->peer);
dev->peer = xstrdup(peer);
netdev_vport_poll_notify(dev);
+ ovs_mutex_unlock(&dev->mutex);
+
return 0;
}
get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
struct netdev_vport *dev = netdev_vport_cast(netdev);
- memcpy(stats, &dev->stats, sizeof *stats);
+
+ ovs_mutex_lock(&dev->mutex);
+ *stats = dev->stats;
+ ovs_mutex_unlock(&dev->mutex);
+
return 0;
}
\f
TUNNEL_CLASS("vxlan", "vxlan_system"),
TUNNEL_CLASS("lisp", "lisp_system")
};
- static bool inited;
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
- int i;
+ if (ovsthread_once_start(&once)) {
+ int i;
- if (!inited) {
- inited = true;
for (i = 0; i < ARRAY_SIZE(vport_classes); i++) {
netdev_register_provider(&vport_classes[i].netdev_class);
}
+ ovsthread_once_done(&once);
}
}
enum netdev_flags saved_values;
};
-static struct shash netdev_classes = SHASH_INITIALIZER(&netdev_classes);
+/* Protects 'netdev_shash' and the mutable members of struct netdev. */
+static struct ovs_mutex netdev_mutex = OVS_MUTEX_INITIALIZER;
/* All created network devices. */
-static struct shash netdev_shash = SHASH_INITIALIZER(&netdev_shash);
+static struct shash netdev_shash OVS_GUARDED_BY(netdev_mutex)
+ = SHASH_INITIALIZER(&netdev_shash);
+
+/* Protects 'netdev_classes' against insertions or deletions.
+ *
+ * This is an rwlock, chosen not for performance but so that recursive read
+ * acquisition is possible when calling into providers: netdev_run() calls
+ * into provider 'run' functions, which might reasonably want to call one of
+ * the netdev functions that takes netdev_class_rwlock read-only. */
+static struct ovs_rwlock netdev_class_rwlock OVS_ACQ_BEFORE(netdev_mutex)
+ = OVS_RWLOCK_INITIALIZER;
+
+/* Contains 'struct netdev_registered_class'es. */
+static struct hmap netdev_classes OVS_GUARDED_BY(netdev_class_rwlock)
+ = HMAP_INITIALIZER(&netdev_classes);
+
+struct netdev_registered_class {
+ struct hmap_node hmap_node; /* In 'netdev_classes', by class->type. */
+ const struct netdev_class *class;
+ atomic_int ref_cnt; /* Number of 'struct netdev's of this class. */
+};
/* This is set pretty low because we probably won't learn anything from the
* additional log messages. */
static void
netdev_initialize(void)
+ OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
{
- static bool inited;
-
- if (!inited) {
- inited = true;
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
+ if (ovsthread_once_start(&once)) {
fatal_signal_add_hook(restore_all_flags, NULL, NULL, true);
netdev_vport_patch_register();
netdev_register_provider(&netdev_tap_class);
netdev_register_provider(&netdev_bsd_class);
#endif
+
+ ovsthread_once_done(&once);
}
}
* main poll loop. */
void
netdev_run(void)
+ OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
{
- struct shash_node *node;
- SHASH_FOR_EACH(node, &netdev_classes) {
- const struct netdev_class *netdev_class = node->data;
- if (netdev_class->run) {
- netdev_class->run();
- }
+ struct netdev_registered_class *rc;
+
+ ovs_rwlock_rdlock(&netdev_class_rwlock);
+ HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
+ rc->class->run();
}
+ ovs_rwlock_unlock(&netdev_class_rwlock);
}
/* Arranges for poll_block() to wake up when netdev_run() needs to be called.
* main poll loop. */
void
netdev_wait(void)
+ OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
{
- struct shash_node *node;
- SHASH_FOR_EACH(node, &netdev_classes) {
- const struct netdev_class *netdev_class = node->data;
- if (netdev_class->wait) {
- netdev_class->wait();
+ struct netdev_registered_class *rc;
+
+ ovs_rwlock_rdlock(&netdev_class_rwlock);
+ HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
+ rc->class->wait();
+ }
+ ovs_rwlock_unlock(&netdev_class_rwlock);
+}
+
+static struct netdev_registered_class *
+netdev_lookup_class(const char *type)
+ OVS_REQ_RDLOCK(netdev_class_rwlock)
+{
+ struct netdev_registered_class *rc;
+
+ HMAP_FOR_EACH_WITH_HASH (rc, hmap_node, hash_string(type, 0),
+ &netdev_classes) {
+ if (!strcmp(type, rc->class->type)) {
+ return rc;
}
}
+ return NULL;
}
/* Initializes and registers a new netdev provider. After successful
* registration, new netdevs of that type can be opened using netdev_open(). */
int
netdev_register_provider(const struct netdev_class *new_class)
+ OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
{
- if (shash_find(&netdev_classes, new_class->type)) {
+ int error;
+
+ ovs_rwlock_wrlock(&netdev_class_rwlock);
+ if (netdev_lookup_class(new_class->type)) {
VLOG_WARN("attempted to register duplicate netdev provider: %s",
new_class->type);
- return EEXIST;
- }
-
- if (new_class->init) {
- int error = new_class->init();
- if (error) {
+ error = EEXIST;
+ } else {
+ error = new_class->init ? new_class->init() : 0;
+ if (!error) {
+ struct netdev_registered_class *rc;
+
+ rc = xmalloc(sizeof *rc);
+ hmap_insert(&netdev_classes, &rc->hmap_node,
+ hash_string(new_class->type, 0));
+ rc->class = new_class;
+ atomic_init(&rc->ref_cnt, 0);
+ } else {
VLOG_ERR("failed to initialize %s network device class: %s",
new_class->type, ovs_strerror(error));
- return error;
}
}
+ ovs_rwlock_unlock(&netdev_class_rwlock);
- shash_add(&netdev_classes, new_class->type, new_class);
-
- return 0;
+ return error;
}
/* Unregisters a netdev provider. 'type' must have been previously
* new netdevs of that type cannot be opened using netdev_open(). */
int
netdev_unregister_provider(const char *type)
+ OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
{
- struct shash_node *del_node, *netdev_node;
+ struct netdev_registered_class *rc;
+ int error;
- del_node = shash_find(&netdev_classes, type);
- if (!del_node) {
+ ovs_rwlock_wrlock(&netdev_class_rwlock);
+ rc = netdev_lookup_class(type);
+ if (!rc) {
VLOG_WARN("attempted to unregister a netdev provider that is not "
"registered: %s", type);
- return EAFNOSUPPORT;
- }
+ error = EAFNOSUPPORT;
+ } else {
+ int ref_cnt;
- SHASH_FOR_EACH (netdev_node, &netdev_shash) {
- struct netdev *netdev = netdev_node->data;
- if (!strcmp(netdev->netdev_class->type, type)) {
+ atomic_read(&rc->ref_cnt, &ref_cnt);
+ if (!ref_cnt) {
+ hmap_remove(&netdev_classes, &rc->hmap_node);
+ free(rc);
+ error = 0;
+ } else {
VLOG_WARN("attempted to unregister in use netdev provider: %s",
type);
- return EBUSY;
+ error = EBUSY;
}
}
+ ovs_rwlock_unlock(&netdev_class_rwlock);
- shash_delete(&netdev_classes, del_node);
-
- return 0;
-}
-
-const struct netdev_class *
-netdev_lookup_provider(const char *type)
-{
- netdev_initialize();
- return shash_find_data(&netdev_classes, type && type[0] ? type : "system");
+ return error;
}
/* Clears 'types' and enumerates the types of all currently registered netdev
* providers into it. The caller must first initialize the sset. */
void
netdev_enumerate_types(struct sset *types)
+ OVS_EXCLUDED(netdev_mutex)
{
- struct shash_node *node;
+ struct netdev_registered_class *rc;
netdev_initialize();
sset_clear(types);
- SHASH_FOR_EACH(node, &netdev_classes) {
- const struct netdev_class *netdev_class = node->data;
- sset_add(types, netdev_class->type);
+ ovs_rwlock_rdlock(&netdev_class_rwlock);
+ HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
+ sset_add(types, rc->class->type);
}
+ ovs_rwlock_unlock(&netdev_class_rwlock);
}
/* Check that the network device name is not the same as any of the registered
* Returns true if there is a name conflict, false otherwise. */
bool
netdev_is_reserved_name(const char *name)
+ OVS_EXCLUDED(netdev_mutex)
{
- struct shash_node *node;
+ struct netdev_registered_class *rc;
netdev_initialize();
- SHASH_FOR_EACH (node, &netdev_classes) {
- const char *dpif_port;
- dpif_port = netdev_vport_class_get_dpif_port(node->data);
+
+ ovs_rwlock_rdlock(&netdev_class_rwlock);
+ HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
+ const char *dpif_port = netdev_vport_class_get_dpif_port(rc->class);
if (dpif_port && !strcmp(dpif_port, name)) {
+ ovs_rwlock_unlock(&netdev_class_rwlock);
return true;
}
}
+ ovs_rwlock_unlock(&netdev_class_rwlock);
if (!strncmp(name, "ovs-", 4)) {
struct sset types;
* before they can be used. */
int
netdev_open(const char *name, const char *type, struct netdev **netdevp)
+ OVS_EXCLUDED(netdev_mutex)
{
struct netdev *netdev;
int error;
netdev_initialize();
+ ovs_rwlock_rdlock(&netdev_class_rwlock);
+ ovs_mutex_lock(&netdev_mutex);
netdev = shash_find_data(&netdev_shash, name);
if (!netdev) {
- const struct netdev_class *class;
+ struct netdev_registered_class *rc;
- class = netdev_lookup_provider(type);
- if (class) {
- netdev = class->alloc();
+ rc = netdev_lookup_class(type && type[0] ? type : "system");
+ if (rc) {
+ netdev = rc->class->alloc();
if (netdev) {
memset(netdev, 0, sizeof *netdev);
- netdev->netdev_class = class;
+ netdev->netdev_class = rc->class;
netdev->name = xstrdup(name);
netdev->node = shash_add(&netdev_shash, name, netdev);
list_init(&netdev->saved_flags_list);
- error = class->construct(netdev);
- if (error) {
- class->dealloc(netdev);
+ error = rc->class->construct(netdev);
+ if (!error) {
+ int old_ref_cnt;
+
+ atomic_add(&rc->ref_cnt, 1, &old_ref_cnt);
+ } else {
+ rc->class->dealloc(netdev);
}
} else {
error = ENOMEM;
error = 0;
}
+ ovs_mutex_unlock(&netdev_mutex);
+ ovs_rwlock_unlock(&netdev_class_rwlock);
+
if (!error) {
netdev->ref_cnt++;
*netdevp = netdev;
* 'netdev_' is null. */
struct netdev *
netdev_ref(const struct netdev *netdev_)
+ OVS_EXCLUDED(netdev_mutex)
{
struct netdev *netdev = CONST_CAST(struct netdev *, netdev_);
if (netdev) {
+ ovs_mutex_lock(&netdev_mutex);
ovs_assert(netdev->ref_cnt > 0);
netdev->ref_cnt++;
+ ovs_mutex_unlock(&netdev_mutex);
}
return netdev;
}
* or NULL if none are needed. */
int
netdev_set_config(struct netdev *netdev, const struct smap *args)
+ OVS_EXCLUDED(netdev_mutex)
{
if (netdev->netdev_class->set_config) {
- struct smap no_args = SMAP_INITIALIZER(&no_args);
+ const struct smap no_args = SMAP_INITIALIZER(&no_args);
return netdev->netdev_class->set_config(netdev,
args ? args : &no_args);
} else if (args && !smap_is_empty(args)) {
* smap_destroy(). */
int
netdev_get_config(const struct netdev *netdev, struct smap *args)
+ OVS_EXCLUDED(netdev_mutex)
{
int error;
const struct netdev_tunnel_config *
netdev_get_tunnel_config(const struct netdev *netdev)
+ OVS_EXCLUDED(netdev_mutex)
{
if (netdev->netdev_class->get_tunnel_config) {
return netdev->netdev_class->get_tunnel_config(netdev);
static void
netdev_unref(struct netdev *dev)
+ OVS_RELEASES(netdev_mutex)
{
ovs_assert(dev->ref_cnt);
if (!--dev->ref_cnt) {
+ const struct netdev_class *class = dev->netdev_class;
+ struct netdev_registered_class *rc;
+ int old_ref_cnt;
+
dev->netdev_class->destruct(dev);
shash_delete(&netdev_shash, dev->node);
free(dev->name);
dev->netdev_class->dealloc(dev);
+ ovs_mutex_unlock(&netdev_mutex);
+
+ ovs_rwlock_rdlock(&netdev_class_rwlock);
+ rc = netdev_lookup_class(class->type);
+ atomic_sub(&rc->ref_cnt, 1, &old_ref_cnt);
+ ovs_assert(old_ref_cnt > 0);
+ ovs_rwlock_unlock(&netdev_class_rwlock);
+ } else {
+ ovs_mutex_unlock(&netdev_mutex);
}
}
/* Closes and destroys 'netdev'. */
void
netdev_close(struct netdev *netdev)
+ OVS_EXCLUDED(netdev_mutex)
{
if (netdev) {
+ ovs_mutex_lock(&netdev_mutex);
netdev_unref(netdev);
}
}
int
netdev_rx_open(struct netdev *netdev, struct netdev_rx **rxp)
+ OVS_EXCLUDED(netdev_mutex)
{
int error;
rx->netdev = netdev;
error = netdev->netdev_class->rx_construct(rx);
if (!error) {
+ ovs_mutex_lock(&netdev_mutex);
netdev->ref_cnt++;
+ ovs_mutex_unlock(&netdev_mutex);
+
*rxp = rx;
return 0;
}
void
netdev_rx_close(struct netdev_rx *rx)
+ OVS_EXCLUDED(netdev_mutex)
{
if (rx) {
struct netdev *netdev = rx->netdev;
do_update_flags(struct netdev *netdev, enum netdev_flags off,
enum netdev_flags on, enum netdev_flags *old_flagsp,
struct netdev_saved_flags **sfp)
+ OVS_EXCLUDED(netdev_mutex)
{
struct netdev_saved_flags *sf = NULL;
enum netdev_flags old_flags;
enum netdev_flags new_flags = (old_flags & ~off) | on;
enum netdev_flags changed_flags = old_flags ^ new_flags;
if (changed_flags) {
+ ovs_mutex_lock(&netdev_mutex);
*sfp = sf = xmalloc(sizeof *sf);
sf->netdev = netdev;
list_push_front(&netdev->saved_flags_list, &sf->node);
sf->saved_values = changed_flags & new_flags;
netdev->ref_cnt++;
+ ovs_mutex_unlock(&netdev_mutex);
}
}
* Does nothing if 'sf' is NULL. */
void
netdev_restore_flags(struct netdev_saved_flags *sf)
+ OVS_EXCLUDED(netdev_mutex)
{
if (sf) {
struct netdev *netdev = sf->netdev;
sf->saved_flags & sf->saved_values,
sf->saved_flags & ~sf->saved_values,
&old_flags);
+
+ ovs_mutex_lock(&netdev_mutex);
list_remove(&sf->node);
free(sf);
-
netdev_unref(netdev);
}
}
* The caller must free the returned netdev with netdev_close(). */
struct netdev *
netdev_from_name(const char *name)
+ OVS_EXCLUDED(netdev_mutex)
{
struct netdev *netdev;
+ ovs_mutex_lock(&netdev_mutex);
netdev = shash_find_data(&netdev_shash, name);
if (netdev) {
- netdev_ref(netdev);
+ netdev->ref_cnt++;
}
+ ovs_mutex_unlock(&netdev_mutex);
return netdev;
}
void
netdev_get_devices(const struct netdev_class *netdev_class,
struct shash *device_list)
+ OVS_EXCLUDED(netdev_mutex)
{
struct shash_node *node;
+
+ ovs_mutex_lock(&netdev_mutex);
SHASH_FOR_EACH (node, &netdev_shash) {
struct netdev *dev = node->data;
shash_add(device_list, node->name, node->data);
}
}
+ ovs_mutex_unlock(&netdev_mutex);
}
const char *
* Every port on a switch must have a corresponding netdev that must minimally
* support a few operations, such as the ability to read the netdev's MTU.
* The PORTING file at the top of the source tree has more information in the
- * "Writing a netdev Provider" section. */
+ * "Writing a netdev Provider" section.
+ *
+ * Thread-safety
+ * =============
+ *
+ * Most of the netdev functions are fully thread-safe: they may be called from
+ * any number of threads on the same or different netdev objects. The
+ * exceptions are:
+ *
+ * netdev_rx_recv()
+ * netdev_rx_wait()
+ * netdev_rx_drain()
+ *
+ * These functions are conditionally thread-safe: they may be called from
+ * different threads only on different netdev_rx objects. (The client may
+ * create multiple netdev_rx objects for a single netdev and access each
+ * of those from a different thread.) */
struct netdev;
struct netdev_class;