datapath.c \
dp_notify.c \
flow.c \
- genl_exec.c \
tunnel.c \
vlan.c \
vport.c \
compat.h \
datapath.h \
flow.h \
- genl_exec.h \
tunnel.h \
vlan.h \
vport.h \
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
+#include <linux/genetlink.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "checksum.h"
#include "datapath.h"
#include "flow.h"
-#include "genl_exec.h"
#include "vlan.h"
#include "tunnel.h"
#include "vport-internal_dev.h"
/**
* DOC: Locking:
*
- * Writes to device state (add/remove datapath, port, set operations on vports,
- * etc.) are protected by RTNL.
- *
- * Writes to other state (flow table modifications, set miscellaneous datapath
- * parameters, etc.) are protected by genl_mutex. The RTNL lock nests inside
- * genl_mutex.
+ * All writes are protected by ovs_mutex, taken via ovs_lock(). This
+ * covers writes to device state (add/remove datapath or port, set
+ * operations on vports, etc.) as well as writes to other state (flow
+ * table modifications, miscellaneous datapath parameter changes, etc.).
*
* Reads are protected by RCU.
*
* There are a few special cases (mostly stats) that have their own
* synchronization but they nest under all of above and don't interact with
* each other.
+ *
+ * The RTNL lock nests inside ovs_mutex.
*/
+static DEFINE_MUTEX(ovs_mutex);
+
+void ovs_lock(void)
+{
+ mutex_lock(&ovs_mutex);
+}
+
+void ovs_unlock(void)
+{
+ mutex_unlock(&ovs_mutex);
+}
+
+#ifdef CONFIG_LOCKDEP
+int lockdep_ovsl_is_held(void)
+{
+ if (debug_locks)
+ return lockdep_is_held(&ovs_mutex);
+ else
+ return 1;
+}
+#endif
+
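For illustration (editor's sketch, not part of the patch): the reader/writer split described in the DOC comment above. Readers take only rcu_read_lock(); writers serialize on ovs_mutex and use ovsl_dereference() (defined in datapath.h) to fetch RCU pointers for update. The function name is hypothetical.

static void example_table_swap(struct datapath *dp, struct flow_table *new_table)
{
 struct flow_table *old_table;

 ovs_lock();
 old_table = ovsl_dereference(dp->table); /* ovs_mutex held for update */
 rcu_assign_pointer(dp->table, new_table); /* publish to RCU readers */
 ovs_flow_tbl_deferred_destroy(old_table); /* freed after an RCU grace period */
 ovs_unlock();
}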
static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
const struct dp_upcall_info *);
static int queue_userspace_packet(struct net *, int dp_ifindex,
 struct sk_buff *,
 const struct dp_upcall_info *);
-/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
+/* Must be called with rcu_read_lock or ovs_mutex. */
static struct datapath *get_dp(struct net *net, int dp_ifindex)
{
struct datapath *dp = NULL;
return dp;
}
-/* Must be called with rcu_read_lock or RTNL lock. */
+/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
- struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL);
+ struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
return vport->ops->get_name(vport);
}
return NULL;
}
-/* Called with RTNL lock and genl_lock. */
+/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
struct vport *vport;
return vport;
}
-/* Called with RTNL lock. */
+/* Must be called with ovs_mutex. */
void ovs_dp_detach_port(struct vport *p)
{
- ASSERT_RTNL();
+ ASSERT_OVSL();
/* First drop references to device. */
hlist_del_rcu(&p->dp_hash_node);
return err;
}
-/* Called with genl_mutex. */
+/* Called with ovs_mutex. */
static int flush_flows(struct datapath *dp)
{
struct flow_table *old_table;
struct flow_table *new_table;
- old_table = genl_dereference(dp->table);
+ old_table = ovsl_dereference(dp->table);
new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
if (!new_table)
return -ENOMEM;
static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
int i;
- struct flow_table *table = genl_dereference(dp->table);
+ struct flow_table *table = ovsl_dereference(dp->table);
stats->n_flows = ovs_flow_tbl_count(table);
+ nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
}
-/* Called with genl_lock. */
+/* Called with ovs_mutex. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
struct sk_buff *skb, u32 portid,
u32 seq, u32 flags, u8 cmd)
u8 tcp_flags;
int err;
- sf_acts = rcu_dereference_protected(flow->sf_acts,
- lockdep_genl_is_held());
+ sf_acts = ovsl_dereference(flow->sf_acts);
ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
if (!ovs_header)
{
const struct sw_flow_actions *sf_acts;
- sf_acts = rcu_dereference_protected(flow->sf_acts,
- lockdep_genl_is_held());
+ sf_acts = ovsl_dereference(flow->sf_acts);
return genlmsg_new(ovs_flow_cmd_msg_size(sf_acts), GFP_KERNEL);
}
goto error;
}
+ ovs_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
error = -ENODEV;
if (!dp)
- goto err_kfree;
+ goto err_unlock_ovs;
- table = genl_dereference(dp->table);
+ table = ovsl_dereference(dp->table);
flow = ovs_flow_tbl_lookup(table, &key, key_len);
if (!flow) {
/* Bail out if we're not allowed to create a new flow. */
error = -ENOENT;
if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
- goto err_kfree;
+ goto err_unlock_ovs;
/* Expand table, if necessary, to make room. */
if (ovs_flow_tbl_need_to_expand(table)) {
if (!IS_ERR(new_table)) {
rcu_assign_pointer(dp->table, new_table);
ovs_flow_tbl_deferred_destroy(table);
- table = genl_dereference(dp->table);
+ table = ovsl_dereference(dp->table);
}
}
flow = ovs_flow_alloc();
if (IS_ERR(flow)) {
error = PTR_ERR(flow);
- goto err_kfree;
+ goto err_unlock_ovs;
}
clear_stats(flow);
error = -EEXIST;
if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
- goto err_kfree;
+ goto err_unlock_ovs;
/* Update actions. */
- old_acts = rcu_dereference_protected(flow->sf_acts,
- lockdep_genl_is_held());
+ old_acts = ovsl_dereference(flow->sf_acts);
rcu_assign_pointer(flow->sf_acts, acts);
ovs_flow_deferred_free_acts(old_acts);
spin_unlock_bh(&flow->lock);
}
}
+ ovs_unlock();
if (!IS_ERR(reply))
ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
return 0;
+err_unlock_ovs:
+ ovs_unlock();
err_kfree:
kfree(acts);
error:
if (err)
return err;
+ ovs_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
- if (!dp)
- return -ENODEV;
+ if (!dp) {
+ err = -ENODEV;
+ goto unlock;
+ }
- table = genl_dereference(dp->table);
+ table = ovsl_dereference(dp->table);
flow = ovs_flow_tbl_lookup(table, &key, key_len);
- if (!flow)
- return -ENOENT;
+ if (!flow) {
+ err = -ENOENT;
+ goto unlock;
+ }
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
info->snd_seq, OVS_FLOW_CMD_NEW);
- if (IS_ERR(reply))
- return PTR_ERR(reply);
+ if (IS_ERR(reply)) {
+ err = PTR_ERR(reply);
+ goto unlock;
+ }
+ ovs_unlock();
return genlmsg_reply(reply, info);
+unlock:
+ ovs_unlock();
+ return err;
}
static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
int err;
int key_len;
+ ovs_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
- if (!dp)
- return -ENODEV;
-
- if (!a[OVS_FLOW_ATTR_KEY])
- return flush_flows(dp);
+ if (!dp) {
+ err = -ENODEV;
+ goto unlock;
+ }
+ if (!a[OVS_FLOW_ATTR_KEY]) {
+ err = flush_flows(dp);
+ goto unlock;
+ }
err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
if (err)
- return err;
+ goto unlock;
- table = genl_dereference(dp->table);
+ table = ovsl_dereference(dp->table);
flow = ovs_flow_tbl_lookup(table, &key, key_len);
- if (!flow)
- return -ENOENT;
+ if (!flow) {
+ err = -ENOENT;
+ goto unlock;
+ }
reply = ovs_flow_cmd_alloc_info(flow);
- if (!reply)
- return -ENOMEM;
+ if (!reply) {
+ err = -ENOMEM;
+ goto unlock;
+ }
ovs_flow_tbl_remove(table, flow);
BUG_ON(err < 0);
ovs_flow_deferred_free(flow);
+ ovs_unlock();
ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
return 0;
+unlock:
+ ovs_unlock();
+ return err;
}
static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct datapath *dp;
struct flow_table *table;
+ ovs_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
- if (!dp)
+ if (!dp) {
+ ovs_unlock();
return -ENODEV;
+ }
- table = genl_dereference(dp->table);
+ table = ovsl_dereference(dp->table);
for (;;) {
struct sw_flow *flow;
cb->args[0] = bucket;
cb->args[1] = obj;
}
+ ovs_unlock();
return skb->len;
}
return CHECK_NUL_STRING(a[OVS_DP_ATTR_NAME], IFNAMSIZ - 1);
}
-/* Called with genl_mutex and optionally with RTNL lock also. */
+/* Called with ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
struct ovs_header *ovs_header,
struct nlattr *a[OVS_DP_ATTR_MAX + 1])
if (err)
goto err;
- rtnl_lock();
+ ovs_lock();
err = -ENOMEM;
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (dp == NULL)
- goto err_unlock_rtnl;
+ goto err_unlock_ovs;
ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
list_add_tail(&dp->list_node, &ovs_net->dps);
- rtnl_unlock();
+ ovs_unlock();
ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
return 0;
err_destroy_local_port:
- ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
+ ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
err_destroy_ports_array:
kfree(dp->ports);
err_destroy_percpu:
free_percpu(dp->stats_percpu);
err_destroy_table:
- ovs_flow_tbl_destroy(genl_dereference(dp->table));
+ ovs_flow_tbl_destroy(ovsl_dereference(dp->table));
err_free_dp:
release_net(ovs_dp_get_net(dp));
kfree(dp);
-err_unlock_rtnl:
- rtnl_unlock();
+err_unlock_ovs:
+ ovs_unlock();
err:
return err;
}
-/* Called with genl_mutex. */
+/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{
int i;
- rtnl_lock();
-
for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
struct vport *vport;
struct hlist_node *n;
}
list_del(&dp->list_node);
- ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
- /* rtnl_unlock() will wait until all the references to devices that
- * are pending unregistration have been dropped. We do it here to
- * ensure that any internal devices (which contain DP pointers) are
- * fully destroyed before freeing the datapath.
- */
- rtnl_unlock();
+ /* OVSP_LOCAL is the datapath's internal port. Detach it last so that
+ * every port in the datapath is destroyed before the datapath itself
+ * is freed.
+ */
+ ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
call_rcu(&dp->rcu, destroy_dp_rcu);
}
if (err)
return err;
+ ovs_lock();
dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
err = PTR_ERR(dp);
if (IS_ERR(dp))
- return err;
+ goto unlock;
reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
info->snd_seq, OVS_DP_CMD_DEL);
err = PTR_ERR(reply);
if (IS_ERR(reply))
- return err;
+ goto unlock;
__dp_destroy(dp);
+ ovs_unlock();
ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
return 0;
+unlock:
+ ovs_unlock();
+ return err;
}
static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
+ ovs_lock();
dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
+ err = PTR_ERR(dp);
if (IS_ERR(dp))
- return PTR_ERR(dp);
+ goto unlock;
reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
info->snd_seq, OVS_DP_CMD_NEW);
err = PTR_ERR(reply);
netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
ovs_dp_datapath_multicast_group.id, err);
- return 0;
+ err = 0;
+ goto unlock;
}
+ ovs_unlock();
ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
return 0;
+unlock:
+ ovs_unlock();
+ return err;
}
static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
+ ovs_lock();
dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
- if (IS_ERR(dp))
- return PTR_ERR(dp);
+ if (IS_ERR(dp)) {
+ err = PTR_ERR(dp);
+ goto unlock;
+ }
reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
info->snd_seq, OVS_DP_CMD_NEW);
- if (IS_ERR(reply))
- return PTR_ERR(reply);
+ if (IS_ERR(reply)) {
+ err = PTR_ERR(reply);
+ goto unlock;
+ }
+ ovs_unlock();
return genlmsg_reply(reply, info);
+
+unlock:
+ ovs_unlock();
+ return err;
}
static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
int skip = cb->args[0];
int i = 0;
+ ovs_lock();
list_for_each_entry(dp, &ovs_net->dps, list_node) {
if (i >= skip &&
ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
break;
i++;
}
+ ovs_unlock();
cb->args[0] = i;
.name = OVS_VPORT_MCGROUP
};
-/* Called with RTNL lock or RCU read lock. */
+/* Called with ovs_mutex or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
u32 portid, u32 seq, u32 flags, u8 cmd)
{
return err;
}
-/* Called with RTNL lock or RCU read lock. */
+/* Called with ovs_mutex or RCU read lock. */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
u32 seq, u8 cmd)
{
return CHECK_NUL_STRING(a[OVS_VPORT_ATTR_NAME], IFNAMSIZ - 1);
}
-/* Called with RTNL lock or RCU read lock. */
+/* Called with ovs_mutex or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
struct ovs_header *ovs_header,
struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
if (!dp)
return ERR_PTR(-ENODEV);
- vport = ovs_vport_rtnl_rcu(dp, port_no);
+ vport = ovs_vport_ovsl_rcu(dp, port_no);
if (!vport)
return ERR_PTR(-ENODEV);
return vport;
if (err)
goto exit;
- rtnl_lock();
+ ovs_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
err = -ENODEV;
if (!dp)
if (port_no >= DP_MAX_PORTS)
goto exit_unlock;
- vport = ovs_vport_rtnl(dp, port_no);
+ vport = ovs_vport_ovsl(dp, port_no);
err = -EBUSY;
if (vport)
goto exit_unlock;
err = -EFBIG;
goto exit_unlock;
}
- vport = ovs_vport_rtnl(dp, port_no);
+ vport = ovs_vport_ovsl(dp, port_no);
if (!vport)
break;
}
ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
exit_unlock:
- rtnl_unlock();
+ ovs_unlock();
exit:
return err;
}
if (err)
goto exit;
- rtnl_lock();
+ ovs_lock();
vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
err = PTR_ERR(vport);
if (IS_ERR(vport))
ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
- rtnl_unlock();
+ ovs_unlock();
return 0;
exit_free:
kfree_skb(reply);
exit_unlock:
- rtnl_unlock();
+ ovs_unlock();
exit:
return err;
}
if (err)
goto exit;
- rtnl_lock();
+ ovs_lock();
vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
err = PTR_ERR(vport);
if (IS_ERR(vport))
ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
exit_unlock:
- rtnl_unlock();
+ ovs_unlock();
exit:
return err;
}
return err;
}
-static int __rehash_flow_table(void *dummy)
+static void rehash_flow_table(struct work_struct *work)
{
struct datapath *dp;
struct net *net;
+ ovs_lock();
rtnl_lock();
for_each_net(net) {
struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
list_for_each_entry(dp, &ovs_net->dps, list_node) {
- struct flow_table *old_table = genl_dereference(dp->table);
+ struct flow_table *old_table = ovsl_dereference(dp->table);
struct flow_table *new_table;
new_table = ovs_flow_tbl_rehash(old_table);
}
}
rtnl_unlock();
- return 0;
-}
-
-static void rehash_flow_table(struct work_struct *work)
-{
- genl_exec(__rehash_flow_table, NULL);
+ ovs_unlock();
schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
}
-static int dp_destroy_all(void *data)
-{
- struct datapath *dp, *dp_next;
- struct ovs_net *ovs_net = data;
-
- list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
- __dp_destroy(dp);
-
- return 0;
-}
-
static int __net_init ovs_init_net(struct net *net)
{
struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
INIT_LIST_HEAD(&ovs_net->dps);
+ INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
return 0;
}
static void __net_exit ovs_exit_net(struct net *net)
{
+ struct datapath *dp, *dp_next;
struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
- genl_exec(dp_destroy_all, ovs_net);
+ ovs_lock();
+ list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
+ __dp_destroy(dp);
+ ovs_unlock();
+
+ cancel_work_sync(&ovs_net->dp_notify_work);
}
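/* Editor's note on ordering: ovs_dp_notify_wq() takes ovs_lock() itself,
 * so cancel_work_sync() must run only after ovs_unlock() above; waiting
 * for a queued work item while holding ovs_mutex could deadlock. */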
static struct pernet_operations ovs_net_ops = {
pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
VERSION);
- err = genl_exec_init();
- if (err)
- goto error;
-
err = ovs_workqueues_init();
if (err)
- goto error_genl_exec;
+ goto error;
err = ovs_flow_init();
if (err)
ovs_flow_exit();
error_wq:
ovs_workqueues_exit();
-error_genl_exec:
- genl_exec_exit();
error:
return err;
}
ovs_vport_exit();
ovs_flow_exit();
ovs_workqueues_exit();
- genl_exec_exit();
}
module_init(dp_init);
* struct datapath - datapath for flow-based packet switching
* @rcu: RCU callback head for deferred destruction.
* @list_node: Element in global 'dps' list.
- * @table: Current flow table. Protected by genl_lock and RCU.
+ * @table: Current flow table. Protected by ovs_mutex and RCU.
* @ports: Hash table for ports. %OVSP_LOCAL port always exists. Protected by
- * RTNL and RCU.
+ * ovs_mutex and RCU.
* @stats_percpu: Per-CPU datapath statistics.
* @net: Reference to net namespace.
*
struct ovs_net {
struct list_head dps;
struct vport_net vport_net;
+ struct work_struct dp_notify_work;
};
extern int ovs_net_id;
+void ovs_lock(void);
+void ovs_unlock(void);
+
+#ifdef CONFIG_LOCKDEP
+int lockdep_ovsl_is_held(void);
+#else
+#define lockdep_ovsl_is_held() 1
+#endif
+
+#define ASSERT_OVSL() WARN_ON(unlikely(!lockdep_ovsl_is_held()))
+#define ovsl_dereference(p) \
+ rcu_dereference_protected(p, lockdep_ovsl_is_held())
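Editor's sketch (hypothetical helper, not in the patch) of how the pieces divide up: rcu_dereference() on read paths, ovsl_dereference() on update paths, and ASSERT_OVSL() where a function requires the lock without touching an RCU pointer:

static inline struct flow_table *example_get_table(struct datapath *dp)
{
 /* ovsl_dereference() already folds in the lockdep check, so an
 * explicit ASSERT_OVSL() is only needed in functions that require
 * the lock without reading an RCU pointer. */
 return ovsl_dereference(dp->table);
}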
static inline struct net *ovs_dp_get_net(struct datapath *dp)
{
return ovs_lookup_vport(dp, port_no);
}
-static inline struct vport *ovs_vport_rtnl_rcu(const struct datapath *dp, int port_no)
+static inline struct vport *ovs_vport_ovsl_rcu(const struct datapath *dp, int port_no)
{
- WARN_ON_ONCE(!rcu_read_lock_held() && !rtnl_is_locked());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
return ovs_lookup_vport(dp, port_no);
}
-static inline struct vport *ovs_vport_rtnl(const struct datapath *dp, int port_no)
+static inline struct vport *ovs_vport_ovsl(const struct datapath *dp, int port_no)
{
- ASSERT_RTNL();
+ ASSERT_OVSL();
return ovs_lookup_vport(dp, port_no);
}
u8 cmd);
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb);
+void ovs_dp_notify_wq(struct work_struct *work);
#endif /* datapath.h */
#include "vport-internal_dev.h"
#include "vport-netdev.h"
+static void dp_detach_port_notify(struct vport *vport)
+{
+ struct sk_buff *notify;
+ struct datapath *dp;
+
+ dp = vport->dp;
+ notify = ovs_vport_cmd_build_info(vport, 0, 0,
+ OVS_VPORT_CMD_DEL);
+ ovs_dp_detach_port(vport);
+ if (IS_ERR(notify)) {
+ netlink_set_err(GENL_SOCK(ovs_dp_get_net(dp)), 0,
+ ovs_dp_vport_multicast_group.id,
+ PTR_ERR(notify));
+ return;
+ }
+
+ genlmsg_multicast_netns(ovs_dp_get_net(dp), notify, 0,
+ ovs_dp_vport_multicast_group.id,
+ GFP_KERNEL);
+}
+
+void ovs_dp_notify_wq(struct work_struct *work)
+{
+ struct ovs_net *ovs_net = container_of(work, struct ovs_net, dp_notify_work);
+ struct datapath *dp;
+
+ ovs_lock();
+ list_for_each_entry(dp, &ovs_net->dps, list_node) {
+ int i;
+
+ for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
+ struct vport *vport;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {
+ struct netdev_vport *netdev_vport;
+
+ if (vport->ops->type != OVS_VPORT_TYPE_NETDEV)
+ continue;
+
+ netdev_vport = netdev_vport_priv(vport);
+ if (netdev_vport->dev->reg_state == NETREG_UNREGISTERED ||
+ netdev_vport->dev->reg_state == NETREG_UNREGISTERING)
+ dp_detach_port_notify(vport);
+ }
+ }
+ }
+ ovs_unlock();
+}
+
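/* Editor's note: the deferral above exists because dp_device_event() runs
 * from the netdev notifier chain with RTNL already held. RTNL nests inside
 * ovs_mutex (see the DOC comment in datapath.c), so taking ovs_mutex in
 * the notifier would invert the lock order; queuing dp_notify_work lets
 * the detach happen later under ovs_lock() with the correct ordering. */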
static int dp_device_event(struct notifier_block *unused, unsigned long event,
void *ptr)
{
+ struct ovs_net *ovs_net;
struct net_device *dev = ptr;
- struct vport *vport;
+ struct vport *vport = NULL;
- if (ovs_is_internal_dev(dev))
- vport = ovs_internal_dev_get_vport(dev);
- else
+ if (!ovs_is_internal_dev(dev))
vport = ovs_netdev_get_vport(dev);
if (!vport)
return NOTIFY_DONE;
- switch (event) {
- case NETDEV_UNREGISTER:
- if (!ovs_is_internal_dev(dev)) {
- struct sk_buff *notify;
- struct datapath *dp = vport->dp;
-
- notify = ovs_vport_cmd_build_info(vport, 0, 0,
- OVS_VPORT_CMD_DEL);
- ovs_dp_detach_port(vport);
- if (IS_ERR(notify)) {
- netlink_set_err(GENL_SOCK(ovs_dp_get_net(dp)), 0,
- ovs_dp_vport_multicast_group.id,
- PTR_ERR(notify));
- break;
- }
-
- genlmsg_multicast_netns(ovs_dp_get_net(dp), notify, 0,
- ovs_dp_vport_multicast_group.id,
- GFP_KERNEL);
- }
- break;
-
+ if (event == NETDEV_UNREGISTER) {
+ ovs_net = net_generic(dev_net(dev), ovs_net_id);
+ schedule_work(&ovs_net->dp_notify_work);
}
return NOTIFY_DONE;
+++ /dev/null
-/*
- * Copyright (c) 2007-2012 Nicira, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/version.h>
-#include <linux/completion.h>
-#include <net/genetlink.h>
-#include "genl_exec.h"
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
-
-static DEFINE_MUTEX(genl_exec_lock);
-
-static genl_exec_func_t genl_exec_function;
-static int genl_exec_function_ret;
-static void *genl_exec_data;
-static struct completion done;
-
-static struct sk_buff *genlmsg_skb;
-
-static int genl_exec_cmd(struct sk_buff *dummy, struct genl_info *dummy2)
-{
- genl_exec_function_ret = genl_exec_function(genl_exec_data);
- complete(&done);
- return 0;
-}
-
-enum exec_cmd {
- GENL_EXEC_UNSPEC,
- GENL_EXEC_RUN,
-};
-
-static struct genl_family genl_exec_family = {
- .id = GENL_ID_GENERATE,
- .name = "ovs_genl_exec",
- .version = 1,
-};
-
-static struct genl_ops genl_exec_ops[] = {
- {
- .cmd = GENL_EXEC_RUN,
- .doit = genl_exec_cmd,
- .flags = CAP_NET_ADMIN,
- },
-};
-
-int genl_exec_init(void)
-{
- int err;
-
- err = genl_register_family_with_ops(&genl_exec_family,
- genl_exec_ops, ARRAY_SIZE(genl_exec_ops));
-
- if (err)
- return err;
-
- genlmsg_skb = genlmsg_new(0, GFP_KERNEL);
- if (!genlmsg_skb) {
- genl_unregister_family(&genl_exec_family);
- return -ENOMEM;
- }
- return 0;
-}
-
-void genl_exec_exit(void)
-{
- kfree_skb(genlmsg_skb);
- genl_unregister_family(&genl_exec_family);
-}
-
-/* genl_lock() is not exported from older kernel.
- * Following function allows any function to be executed with
- * genl_mutex held. */
-
-int genl_exec(genl_exec_func_t func, void *data)
-{
- int ret;
-
- mutex_lock(&genl_exec_lock);
-
- init_completion(&done);
- skb_get(genlmsg_skb);
- genlmsg_put(genlmsg_skb, 0, 0, &genl_exec_family,
- NLM_F_REQUEST, GENL_EXEC_RUN);
-
- genl_exec_function = func;
- genl_exec_data = data;
-
- /* There is no need to send msg to current namespace. */
- ret = genlmsg_unicast(&init_net, genlmsg_skb, 0);
-
- if (!ret) {
- wait_for_completion(&done);
- ret = genl_exec_function_ret;
- } else {
- pr_err("genl_exec send error %d\n", ret);
- }
-
- /* Wait for genetlink to kfree skb. */
- while (skb_shared(genlmsg_skb))
- cpu_relax();
-
- genlmsg_skb->data = genlmsg_skb->head;
- skb_reset_tail_pointer(genlmsg_skb);
-
- mutex_unlock(&genl_exec_lock);
-
- return ret;
-}
-
-#else
-
-int genl_exec(genl_exec_func_t func, void *data)
-{
- int ret;
-
- genl_lock();
- ret = func(data);
- genl_unlock();
- return ret;
-}
-
-int genl_exec_init(void)
-{
- return 0;
-}
-
-void genl_exec_exit(void)
-{
-}
-#endif
+++ /dev/null
-/*
- * Copyright (c) 2007-2012 Nicira, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#ifndef GENL_EXEC_H
-#define GENL_EXEC_H 1
-
-typedef int (*genl_exec_func_t)(void *data);
-int genl_exec(genl_exec_func_t func, void *data);
-int genl_exec_init(void);
-void genl_exec_exit(void);
-
-#endif /* genl_exec.h */
linux/compat/include/linux/err.h \
linux/compat/include/linux/etherdevice.h \
linux/compat/include/linux/flex_array.h \
- linux/compat/include/linux/genetlink.h \
linux/compat/include/linux/icmp.h \
linux/compat/include/linux/icmpv6.h \
linux/compat/include/linux/if.h \
+++ /dev/null
-#ifndef __GENETLINK_WRAPPER_H
-#define __GENETLINK_WRAPPER_H 1
-
-#include <linux/version.h>
-#include_next <linux/genetlink.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)
-#ifdef CONFIG_PROVE_LOCKING
-static inline int lockdep_genl_is_held(void)
-{
- return 1;
-}
-#endif
-#endif
-
-#ifndef genl_dereference
-#include <linux/rcupdate.h>
-
-#define genl_dereference(p) \
- rcu_dereference_protected(p, lockdep_genl_is_held())
-#endif
-
-#endif /* linux/genetlink.h wrapper */
struct vport *vport;
ovs_net = net_generic(net, ovs_net_id);
- if (rtnl_dereference(ovs_net->vport_net.gre_vport))
+ if (ovsl_dereference(ovs_net->vport_net.gre_vport))
return ERR_PTR(-EEXIST);
vport = ovs_tnl_create(parms, &ovs_gre_vport_ops, &gre_tnl_ops);
struct vport *vport;
ovs_net = net_generic(net, ovs_net_id);
- if (rtnl_dereference(ovs_net->vport_net.gre64_vport))
+ if (ovsl_dereference(ovs_net->vport_net.gre64_vport))
return ERR_PTR(-EEXIST);
vport = ovs_tnl_create(parms, &ovs_gre64_vport_ops, &gre64_tnl_ops);
if (vport->port_no == OVSP_LOCAL)
netdev_vport->dev->features |= NETIF_F_NETNS_LOCAL;
+ rtnl_lock();
err = register_netdevice(netdev_vport->dev);
if (err)
goto error_free_netdev;
dev_set_promiscuity(netdev_vport->dev, 1);
+ rtnl_unlock();
netif_start_queue(netdev_vport->dev);
return vport;
error_free_netdev:
+ rtnl_unlock();
free_netdev(netdev_vport->dev);
error_free_vport:
ovs_vport_free(vport);
struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
netif_stop_queue(netdev_vport->dev);
+ rtnl_lock();
dev_set_promiscuity(netdev_vport->dev, -1);
/* unregister_netdevice() waits for an RCU grace period. */
unregister_netdevice(netdev_vport->dev);
+
+ rtnl_unlock();
}
static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
goto error_put;
}
+ rtnl_lock();
err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
vport);
if (err)
- goto error_put;
+ goto error_unlock;
dev_set_promiscuity(netdev_vport->dev, 1);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
dev_disable_lro(netdev_vport->dev);
#endif
netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
+ rtnl_unlock();
return vport;
+error_unlock:
+ rtnl_unlock();
error_put:
dev_put(netdev_vport->dev);
error_free_vport:
{
struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+ rtnl_lock();
netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
netdev_rx_handler_unregister(netdev_vport->dev);
dev_set_promiscuity(netdev_vport->dev, -1);
+ rtnl_unlock();
call_rcu(&netdev_vport->rcu, free_port_rcu);
}
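/* Editor's note: callers of the vport ops no longer hold RTNL, so each
 * implementation above takes rtnl_lock() itself, scoped to just the netdev
 * calls that require it (register/unregister_netdevice(), promiscuity
 * changes, rx-handler registration and unregistration). */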
static const struct vport_ops **vport_ops_list;
static int n_vport_types;
-/* Protected by RCU read lock for reading, RTNL lock for writing. */
+/* Protected by RCU read lock for reading, ovs_mutex for writing. */
static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024
*
* @name: name of port to find
*
- * Must be called with RTNL or RCU read lock.
+ * Must be called with ovs_mutex or RCU read lock.
*/
struct vport *ovs_vport_locate(struct net *net, const char *name)
{
* @parms: Information about new vport.
*
* Creates a new vport with the specified configuration (which is dependent on
- * device type). RTNL lock must be held.
+ * device type). ovs_mutex must be held.
*/
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
int err = 0;
int i;
- ASSERT_RTNL();
-
for (i = 0; i < n_vport_types; i++) {
if (vport_ops_list[i]->type == parms->type) {
struct hlist_head *bucket;
* @port: New configuration.
*
* Modifies an existing device with the specified configuration (which is
- * dependent on device type). RTNL lock must be held.
+ * dependent on device type). ovs_mutex must be held.
*/
int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
{
- ASSERT_RTNL();
-
if (!vport->ops->set_options)
return -EOPNOTSUPP;
return vport->ops->set_options(vport, options);
* @vport: vport to delete.
*
* Detaches @vport from its datapath and destroys it. It is possible to fail
- * for reasons such as lack of memory. RTNL lock must be held.
+ * for reasons such as lack of memory. ovs_mutex must be held.
*/
void ovs_vport_del(struct vport *vport)
{
- ASSERT_RTNL();
+ ASSERT_OVSL();
hlist_del_rcu(&vport->hash_node);
-
vport->ops->destroy(vport);
}
* support setting the stats, in which case the result will always be
* -EOPNOTSUPP.
*
- * Must be called with RTNL lock.
+ * Must be called with ovs_mutex.
*/
void ovs_vport_set_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
- ASSERT_RTNL();
-
spin_lock_bh(&vport->stats_lock);
vport->offset_stats = *stats;
spin_unlock_bh(&vport->stats_lock);
*
* Retrieves transmit, receive, and error stats for the given device.
*
- * Must be called with RTNL lock or rcu_read_lock.
+ * Must be called with ovs_mutex or rcu_read_lock.
*/
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
* negative error code if a real error occurred. If an error occurs, @skb is
* left unmodified.
*
- * Must be called with RTNL lock or rcu_read_lock.
+ * Must be called with ovs_mutex or rcu_read_lock.
*/
int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
{
* @vport: vport on which to send the packet
* @skb: skb to send
*
- * Sends the given packet and returns the length of data sent. Either RTNL
- * lock or rcu_read_lock must be held.
+ * Sends the given packet and returns the length of data sent. Either
+ * ovs_mutex or rcu_read_lock must be held.
*/
int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
int (*init)(void);
void (*exit)(void);
- /* Called with RTNL lock. */
+ /* Called with ovs_mutex. */
struct vport *(*create)(const struct vport_parms *);
void (*destroy)(struct vport *);
int (*set_options)(struct vport *, struct nlattr *);
int (*get_options)(const struct vport *, struct sk_buff *);
- /* Called with rcu_read_lock or RTNL lock. */
+ /* Called with rcu_read_lock or ovs_mutex. */
const char *(*get_name)(const struct vport *);
void (*get_config)(const struct vport *, void *);
int (*get_ifindex)(const struct vport *);