X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=lib%2Fnetdev.c;h=6aca2f495644d00219bffd1b6acdf3ab92cc9ea7;hb=dbcfd2fd1b2def1a2e4b0638f67174759b53a1bb;hp=74f5f53eec32a29a7adb0a0336ab3fab0ffa33d7;hpb=ac60863f78e412004c5b69f5a64a49bc6f0bc46d;p=sliver-openvswitch.git

diff --git a/lib/netdev.c b/lib/netdev.c
index 74f5f53ee..6aca2f495 100644
--- a/lib/netdev.c
+++ b/lib/netdev.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -24,6 +24,7 @@
 #include <string.h>
 #include <unistd.h>
 
+#include "connectivity.h"
 #include "coverage.h"
 #include "dpif.h"
 #include "dynamic-string.h"
@@ -36,6 +37,7 @@
 #include "openflow/openflow.h"
 #include "packets.h"
 #include "poll-loop.h"
+#include "seq.h"
 #include "shash.h"
 #include "smap.h"
 #include "sset.h"
@@ -65,15 +67,14 @@ static struct shash netdev_shash OVS_GUARDED_BY(netdev_mutex)
 
 /* Protects 'netdev_classes' against insertions or deletions.
  *
- * This is not an rwlock for performance reasons but to allow recursive
- * acquisition when calling into providers. For example, netdev_run() calls
- * into provider 'run' functions, which might reasonably want to call one of
- * the netdev functions that takes netdev_class_rwlock read-only. */
-static struct ovs_rwlock netdev_class_rwlock OVS_ACQ_BEFORE(netdev_mutex)
-    = OVS_RWLOCK_INITIALIZER;
+ * This is a recursive mutex to allow recursive acquisition when calling into
+ * providers. For example, netdev_run() calls into provider 'run' functions,
+ * which might reasonably want to call one of the netdev functions that takes
+ * netdev_class_mutex. */
+static struct ovs_mutex netdev_class_mutex OVS_ACQ_BEFORE(netdev_mutex);
 
 /* Contains 'struct netdev_registered_class'es. */
-static struct hmap netdev_classes OVS_GUARDED_BY(netdev_class_rwlock)
+static struct hmap netdev_classes OVS_GUARDED_BY(netdev_class_mutex)
     = HMAP_INITIALIZER(&netdev_classes);
 
 struct netdev_registered_class {
@@ -91,15 +92,17 @@ void update_device_args(struct netdev *, const struct shash *args);
 
 static void
 netdev_initialize(void)
-    OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
+    OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
 {
     static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
 
     if (ovsthread_once_start(&once)) {
+        ovs_mutex_init_recursive(&netdev_class_mutex);
+
         fatal_signal_add_hook(restore_all_flags, NULL, NULL, true);
         netdev_vport_patch_register();
 
-#ifdef LINUX_DATAPATH
+#ifdef __linux__
         netdev_register_provider(&netdev_linux_class);
         netdev_register_provider(&netdev_internal_class);
         netdev_register_provider(&netdev_tap_class);
@@ -122,17 +125,17 @@ netdev_initialize(void)
  * main poll loop. */
 void
 netdev_run(void)
-    OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
+    OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
 {
     struct netdev_registered_class *rc;
 
-    ovs_rwlock_rdlock(&netdev_class_rwlock);
+    ovs_mutex_lock(&netdev_class_mutex);
     HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
         if (rc->class->run) {
             rc->class->run();
         }
     }
-    ovs_rwlock_unlock(&netdev_class_rwlock);
+    ovs_mutex_unlock(&netdev_class_mutex);
 }
 
 /* Arranges for poll_block() to wake up when netdev_run() needs to be called.
@@ -141,22 +144,22 @@ netdev_run(void)
  * main poll loop. */
 void
 netdev_wait(void)
-    OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
+    OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
 {
     struct netdev_registered_class *rc;
 
-    ovs_rwlock_rdlock(&netdev_class_rwlock);
+    ovs_mutex_lock(&netdev_class_mutex);
     HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
         if (rc->class->wait) {
             rc->class->wait();
         }
     }
-    ovs_rwlock_unlock(&netdev_class_rwlock);
+    ovs_mutex_unlock(&netdev_class_mutex);
 }
 
 static struct netdev_registered_class *
 netdev_lookup_class(const char *type)
-    OVS_REQ_RDLOCK(netdev_class_rwlock)
+    OVS_REQ_RDLOCK(netdev_class_mutex)
 {
     struct netdev_registered_class *rc;
 
@@ -173,11 +176,11 @@ netdev_lookup_class(const char *type)
  * registration, new netdevs of that type can be opened using netdev_open(). */
 int
 netdev_register_provider(const struct netdev_class *new_class)
-    OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
+    OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
 {
     int error;
 
-    ovs_rwlock_wrlock(&netdev_class_rwlock);
+    ovs_mutex_lock(&netdev_class_mutex);
     if (netdev_lookup_class(new_class->type)) {
         VLOG_WARN("attempted to register duplicate netdev provider: %s",
                   new_class->type);
@@ -197,7 +200,7 @@ netdev_register_provider(const struct netdev_class *new_class)
                      new_class->type, ovs_strerror(error));
         }
     }
-    ovs_rwlock_unlock(&netdev_class_rwlock);
+    ovs_mutex_unlock(&netdev_class_mutex);
 
     return error;
 }
@@ -207,12 +210,12 @@ netdev_register_provider(const struct netdev_class *new_class)
  * new netdevs of that type cannot be opened using netdev_open(). */
 int
 netdev_unregister_provider(const char *type)
-    OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
+    OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
 {
     struct netdev_registered_class *rc;
     int error;
 
-    ovs_rwlock_wrlock(&netdev_class_rwlock);
+    ovs_mutex_lock(&netdev_class_mutex);
     rc = netdev_lookup_class(type);
     if (!rc) {
         VLOG_WARN("attempted to unregister a netdev provider that is not "
@@ -224,6 +227,7 @@ netdev_unregister_provider(const char *type)
         atomic_read(&rc->ref_cnt, &ref_cnt);
         if (!ref_cnt) {
             hmap_remove(&netdev_classes, &rc->hmap_node);
+            atomic_destroy(&rc->ref_cnt);
             free(rc);
             error = 0;
         } else {
@@ -232,7 +236,7 @@
             error = EBUSY;
         }
     }
-    ovs_rwlock_unlock(&netdev_class_rwlock);
+    ovs_mutex_unlock(&netdev_class_mutex);
 
     return error;
 }
@@ -248,11 +252,11 @@ netdev_enumerate_types(struct sset *types)
     netdev_initialize();
     sset_clear(types);
 
-    ovs_rwlock_rdlock(&netdev_class_rwlock);
+    ovs_mutex_lock(&netdev_class_mutex);
     HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
         sset_add(types, rc->class->type);
     }
-    ovs_rwlock_unlock(&netdev_class_rwlock);
+    ovs_mutex_unlock(&netdev_class_mutex);
 }
 
 /* Check that the network device name is not the same as any of the registered
@@ -268,15 +272,15 @@ netdev_is_reserved_name(const char *name)
 
     netdev_initialize();
 
-    ovs_rwlock_rdlock(&netdev_class_rwlock);
+    ovs_mutex_lock(&netdev_class_mutex);
     HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
         const char *dpif_port = netdev_vport_class_get_dpif_port(rc->class);
         if (dpif_port && !strcmp(dpif_port, name)) {
-            ovs_rwlock_unlock(&netdev_class_rwlock);
+            ovs_mutex_unlock(&netdev_class_mutex);
             return true;
         }
     }
-    ovs_rwlock_unlock(&netdev_class_rwlock);
+    ovs_mutex_unlock(&netdev_class_mutex);
 
     if (!strncmp(name, "ovs-", 4)) {
         struct sset types;
@@ -312,7 +316,7 @@ netdev_open(const char *name, const char *type, struct netdev **netdevp)
 
     netdev_initialize();
 
-    ovs_rwlock_rdlock(&netdev_class_rwlock);
+    ovs_mutex_lock(&netdev_class_mutex);
     ovs_mutex_lock(&netdev_mutex);
     netdev = shash_find_data(&netdev_shash, name);
     if (!netdev) {
@@ -333,6 +337,7 @@ netdev_open(const char *name, const char *type, struct netdev **netdevp)
                     int old_ref_cnt;
 
                     atomic_add(&rc->ref_cnt, 1, &old_ref_cnt);
+                    seq_change(connectivity_seq_get());
                 } else {
                     free(netdev->name);
                     ovs_assert(list_is_empty(&netdev->saved_flags_list));
@@ -352,7 +357,7 @@ netdev_open(const char *name, const char *type, struct netdev **netdevp)
     }
 
     ovs_mutex_unlock(&netdev_mutex);
-    ovs_rwlock_unlock(&netdev_class_rwlock);
+    ovs_mutex_unlock(&netdev_class_mutex);
 
     if (!error) {
         netdev->ref_cnt++;
@@ -388,13 +393,19 @@ netdev_set_config(struct netdev *netdev, const struct smap *args)
 {
     if (netdev->netdev_class->set_config) {
         const struct smap no_args = SMAP_INITIALIZER(&no_args);
-        return netdev->netdev_class->set_config(netdev,
-                                                args ? args : &no_args);
+        int error;
+
+        error = netdev->netdev_class->set_config(netdev,
+                                                 args ? args : &no_args);
+        if (error) {
+            VLOG_WARN("%s: could not set configuration (%s)",
+                      netdev_get_name(netdev), ovs_strerror(error));
+        }
+        return error;
     } else if (args && !smap_is_empty(args)) {
         VLOG_WARN("%s: arguments provided to device that is not configurable",
                   netdev_get_name(netdev));
     }
-
     return 0;
 }
 
@@ -452,11 +463,11 @@ netdev_unref(struct netdev *dev)
         dev->netdev_class->dealloc(dev);
         ovs_mutex_unlock(&netdev_mutex);
 
-        ovs_rwlock_rdlock(&netdev_class_rwlock);
+        ovs_mutex_lock(&netdev_class_mutex);
         rc = netdev_lookup_class(class->type);
         atomic_sub(&rc->ref_cnt, 1, &old_ref_cnt);
         ovs_assert(old_ref_cnt > 0);
-        ovs_rwlock_unlock(&netdev_class_rwlock);
+        ovs_mutex_unlock(&netdev_class_mutex);
     } else {
         ovs_mutex_unlock(&netdev_mutex);
     }
@@ -492,6 +503,13 @@ netdev_parse_name(const char *netdev_name_, char **name, char **type)
     }
 }
 
+/* Attempts to open a netdev_rx handle for obtaining packets received on
+ * 'netdev'. On success, returns 0 and stores a nonnull 'netdev_rx *' into
+ * '*rxp'. On failure, returns a positive errno value and stores NULL into
+ * '*rxp'.
+ *
+ * Some kinds of network devices might not support receiving packets. This
+ * function returns EOPNOTSUPP in that case.*/
 int
 netdev_rx_open(struct netdev *netdev, struct netdev_rx **rxp)
     OVS_EXCLUDED(netdev_mutex)
@@ -523,6 +541,7 @@ netdev_rx_open(struct netdev *netdev, struct netdev_rx **rxp)
     return error;
 }
 
+/* Closes 'rx'. */
 void
 netdev_rx_close(struct netdev_rx *rx)
     OVS_EXCLUDED(netdev_mutex)
@@ -535,6 +554,29 @@ netdev_rx_close(struct netdev_rx *rx)
     }
 }
 
+/* Attempts to receive a packet from 'rx' into the tailroom of 'buffer', which
+ * must initially be empty. If successful, returns 0 and increments
+ * 'buffer->size' by the number of bytes in the received packet, otherwise a
+ * positive errno value.
+ *
+ * Returns EAGAIN immediately if no packet is ready to be received.
+ *
+ * Returns EMSGSIZE, and discards the packet, if the received packet is longer
+ * than 'ofpbuf_tailroom(buffer)'.
+ *
+ * Implementations may make use of VLAN_HEADER_LEN bytes of tailroom to
+ * add a VLAN header which is obtained out-of-band to the packet. If
+ * this occurs then VLAN_HEADER_LEN bytes of tailroom will no longer be
+ * available for the packet, otherwise it may be used for the packet
+ * itself.
+ *
+ * It is advised that the tailroom of 'buffer' should be
+ * VLAN_HEADER_LEN bytes longer than the MTU to allow space for an
+ * out-of-band VLAN header to be added to the packet. At the very least,
+ * 'buffer' must have at least ETH_TOTAL_MIN bytes of tailroom.
+ *
+ * This function may be set to null if it would always return EOPNOTSUPP
+ * anyhow. */
 int
 netdev_rx_recv(struct netdev_rx *rx, struct ofpbuf *buffer)
 {
@@ -543,26 +585,27 @@ netdev_rx_recv(struct netdev_rx *rx, struct ofpbuf *buffer)
     ovs_assert(buffer->size == 0);
     ovs_assert(ofpbuf_tailroom(buffer) >= ETH_TOTAL_MIN);
 
-    retval = rx->netdev->netdev_class->rx_recv(rx, buffer->data,
-                                               ofpbuf_tailroom(buffer));
-    if (retval >= 0) {
+    retval = rx->netdev->netdev_class->rx_recv(rx, buffer);
+    if (!retval) {
         COVERAGE_INC(netdev_received);
-        buffer->size += retval;
         if (buffer->size < ETH_TOTAL_MIN) {
             ofpbuf_put_zeros(buffer, ETH_TOTAL_MIN - buffer->size);
         }
         return 0;
     } else {
-        return -retval;
+        return retval;
     }
 }
 
+/* Arranges for poll_block() to wake up when a packet is ready to be received
+ * on 'rx'. */
 void
 netdev_rx_wait(struct netdev_rx *rx)
 {
     rx->netdev->netdev_class->rx_wait(rx);
 }
 
+/* Discards any packets ready to be received on 'rx'. */
 int
 netdev_rx_drain(struct netdev_rx *rx)
 {
@@ -1496,18 +1539,6 @@ netdev_dump_queue_stats(const struct netdev *netdev,
             : EOPNOTSUPP);
 }
 
-/* Returns a sequence number which indicates changes in one of 'netdev''s
- * properties. The returned sequence will be nonzero so that callers have a
- * value which they may use as a reset when tracking 'netdev'.
- *
- * The returned sequence number will change whenever 'netdev''s flags,
- * features, ethernet address, or carrier changes. It may change for other
- * reasons as well, or no reason at all. */
-unsigned int
-netdev_change_seq(const struct netdev *netdev)
-{
-    return netdev->netdev_class->change_seq(netdev);
-}
 
 /* Returns the class type of 'netdev'.
  *
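
The hunks above change netdev_rx_recv() from a length-returning call into one that fills a caller-supplied ofpbuf, and the new comment spells out the tailroom expectations (at least ETH_TOTAL_MIN, ideally MTU plus VLAN_HEADER_LEN). As a rough caller-side sketch of that contract -- not part of this commit; the device name, the "system" type, the MTU fallback, and the exact buffer sizing are illustrative assumptions -- a receive loop might look like this:

/* Hypothetical caller, not from this diff: open a device, size a buffer per
 * the netdev_rx_recv() comment above, and block until one packet arrives. */
#include <errno.h>

#include "netdev.h"
#include "ofpbuf.h"
#include "packets.h"
#include "poll-loop.h"

static int
receive_one_packet(const char *devname, struct ofpbuf *buf)
{
    struct netdev *netdev;
    struct netdev_rx *rx;
    int mtu, error;

    error = netdev_open(devname, "system", &netdev);
    if (error) {
        return error;
    }

    error = netdev_rx_open(netdev, &rx);   /* EOPNOTSUPP if rx unsupported. */
    if (error) {
        netdev_close(netdev);
        return error;
    }

    if (netdev_get_mtu(netdev, &mtu)) {
        mtu = ETH_PAYLOAD_MAX;             /* Assumed fallback if no MTU. */
    }

    /* Tailroom: Ethernet header + MTU, plus VLAN_HEADER_LEN spare bytes for
     * a VLAN header obtained out-of-band, as advised in the comment above. */
    ofpbuf_init(buf, ETH_HEADER_LEN + mtu + VLAN_HEADER_LEN);

    for (;;) {
        error = netdev_rx_recv(rx, buf);   /* 0, EAGAIN, EMSGSIZE, ... */
        if (error != EAGAIN) {
            break;
        }
        netdev_rx_wait(rx);
        poll_block();
    }

    netdev_rx_close(rx);
    netdev_close(netdev);
    return error;                          /* 0: 'buf' now holds the packet. */
}

On success the caller owns the packet data in 'buf' and should eventually ofpbuf_uninit() it; a long-running loop would instead reuse one buffer with ofpbuf_clear() between packets, since netdev_rx_recv() requires the buffer to start empty.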