/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira Networks.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
#include "checksum.h"
#include "datapath.h"
#include "flow.h"
+#include "genl_exec.h"
#include "vlan.h"
#include "tunnel.h"
#include "vport-internal_dev.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
- LINUX_VERSION_CODE > KERNEL_VERSION(3,2,0)
+ LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)
#error Kernels before 2.6.18 or after 3.2 are not supported by this version of Open vSwitch.
#endif
-int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
-EXPORT_SYMBOL(dp_ioctl_hook);
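+/* Rebuild each datapath's flow table via ovs_flow_tbl_rehash() every
+ * 10 minutes, presumably to limit the impact of pathological hash
+ * collisions in long-lived tables; see rehash_flow_table() below.
+ */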
+#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
+static void rehash_flow_table(struct work_struct *work);
+static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
+
+int (*ovs_dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
+EXPORT_SYMBOL(ovs_dp_ioctl_hook);
/**
* DOC: Locking:
rcu_read_lock();
dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
if (dev) {
- struct vport *vport = internal_dev_get_vport(dev);
+ struct vport *vport = ovs_internal_dev_get_vport(dev);
if (vport)
dp = vport->dp;
}
return dp;
}
-/* Must be called with genl_mutex. */
-static struct flow_table *get_table_protected(struct datapath *dp)
-{
- return rcu_dereference_protected(dp->table, lockdep_genl_is_held());
-}
-
/* Must be called with rcu_read_lock or RTNL lock. */
-static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
-{
- return rcu_dereference_rtnl(dp->ports[port_no]);
-}
-
-/* Must be called with rcu_read_lock or RTNL lock. */
-const char *dp_name(const struct datapath *dp)
+const char *ovs_dp_name(const struct datapath *dp)
{
struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]);
return vport->ops->get_name(vport);
rcu_read_lock();
- local = get_vport_protected(dp, OVSP_LOCAL);
+ local = rcu_dereference(dp->ports[OVSP_LOCAL]);
if (local)
ifindex = local->ops->get_ifindex(local);
else
{
struct datapath *dp = container_of(rcu, struct datapath, rcu);
- flow_tbl_destroy((__force struct flow_table *)dp->table);
+ ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
free_percpu(dp->stats_percpu);
kobject_put(&dp->ifobj);
}
{
struct vport *vport;
- vport = vport_add(parms);
+ vport = ovs_vport_add(parms);
if (!IS_ERR(vport)) {
struct datapath *dp = parms->dp;
}
/* Called with RTNL lock. */
-void dp_detach_port(struct vport *p)
+void ovs_dp_detach_port(struct vport *p)
{
ASSERT_RTNL();
if (p->port_no != OVSP_LOCAL)
- dp_sysfs_del_if(p);
+ ovs_dp_sysfs_del_if(p);
dp_ifinfo_notify(RTM_DELLINK, p);
/* First drop references to device. */
rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
/* Then destroy it. */
- vport_del(p);
+ ovs_vport_del(p);
}
/* Must be called with rcu_read_lock. */
-void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
+void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
struct datapath *dp = p->dp;
struct sw_flow *flow;
int key_len;
/* Extract flow from 'skb' into 'key'. */
- error = flow_extract(skb, p->port_no, &key, &key_len);
+ error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
if (unlikely(error)) {
kfree_skb(skb);
return;
}
/* Look up flow. */
- flow = flow_tbl_lookup(rcu_dereference(dp->table),
- &key, key_len);
+ flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table),
+ &key, key_len);
if (unlikely(!flow)) {
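+ /* No matching flow: hand the packet to userspace via a Netlink
+  * upcall. The upcall path copies the packet data, so the original
+  * skb is consumed just below.
+  */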
struct dp_upcall_info upcall;
upcall.key = &key;
upcall.userdata = NULL;
upcall.pid = p->upcall_pid;
- dp_upcall(dp, skb, &upcall);
+ ovs_dp_upcall(dp, skb, &upcall);
consume_skb(skb);
stats_counter = &stats->n_missed;
goto out;
}
stats_counter = &stats->n_hit;
- flow_used(OVS_CB(skb)->flow, skb);
- execute_actions(dp, skb);
+ ovs_flow_used(OVS_CB(skb)->flow, skb);
+ ovs_execute_actions(dp, skb);
out:
/* Update datapath statistics. */
-
- write_seqcount_begin(&stats->seqlock);
+ u64_stats_update_begin(&stats->sync);
(*stats_counter)++;
- write_seqcount_end(&stats->seqlock);
+ u64_stats_update_end(&stats->sync);
}
static struct genl_family dp_packet_genl_family = {
.maxattr = OVS_PACKET_ATTR_MAX
};
-int dp_upcall(struct datapath *dp, struct sk_buff *skb,
- const struct dp_upcall_info *upcall_info)
+int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
+ const struct dp_upcall_info *upcall_info)
{
struct dp_stats_percpu *stats;
int dp_ifindex;
err:
stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
- write_seqcount_begin(&stats->seqlock);
+ u64_stats_update_begin(&stats->sync);
stats->n_lost++;
- write_seqcount_end(&stats->seqlock);
+ u64_stats_update_end(&stats->sync);
return err;
}
break;
if (skb == segs && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
- /* The initial flow key extracted by flow_extract() in
- * this case is for a first fragment, so we need to
+ /* The initial flow key extracted by ovs_flow_extract()
+ * in this case is for a first fragment, so we need to
* properly mark later fragments.
*/
later_key = *upcall_info->key;
const struct dp_upcall_info *upcall_info)
{
struct ovs_header *upcall;
+ struct sk_buff *nskb = NULL;
struct sk_buff *user_skb; /* to be queued to userspace */
struct nlattr *nla;
unsigned int len;
int err;
- err = vlan_deaccel_tag(skb);
- if (unlikely(err))
- return err;
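+ /* Deaccelerate the vlan tag on a clone so the caller's skb is left
+  * unmodified; vlan_deaccel_tag() is assumed to free the clone itself
+  * on failure, so a plain return here should not leak nskb.
+  */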
+ if (vlan_tx_tag_present(skb)) {
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+
+ err = vlan_deaccel_tag(nskb);
+ if (err)
+ return err;
+
+ skb = nskb;
+ }
- if (nla_attr_size(skb->len) > USHRT_MAX)
- return -EFBIG;
+ if (nla_attr_size(skb->len) > USHRT_MAX) {
+ err = -EFBIG;
+ goto out;
+ }
len = sizeof(struct ovs_header);
len += nla_total_size(skb->len);
len += nla_total_size(8);
user_skb = genlmsg_new(len, GFP_ATOMIC);
- if (!user_skb)
- return -ENOMEM;
+ if (!user_skb) {
+ err = -ENOMEM;
+ goto out;
+ }
upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
0, upcall_info->cmd);
upcall->dp_ifindex = dp_ifindex;
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
- flow_to_nlattrs(upcall_info->key, user_skb);
+ ovs_flow_to_nlattrs(upcall_info->key, user_skb);
nla_nest_end(user_skb, nla);
if (upcall_info->userdata)
skb_copy_and_csum_dev(skb, nla_data(nla));
- return genlmsg_unicast(&init_net, user_skb, upcall_info->pid);
+ err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid);
+
+out:
+ kfree_skb(nskb);
+ return err;
}
/* Called with genl_mutex. */
if (!dp)
return -ENODEV;
- old_table = get_table_protected(dp);
- new_table = flow_tbl_alloc(TBL_MIN_BUCKETS);
+ old_table = genl_dereference(dp->table);
+ new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
if (!new_table)
return -ENOMEM;
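+ /* Publish the empty table first, then free the old one only after
+  * an RCU grace period so concurrent readers stay safe.
+  */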
rcu_assign_pointer(dp->table, new_table);
- flow_tbl_deferred_destroy(old_table);
+ ovs_flow_tbl_deferred_destroy(old_table);
return 0;
}
packet->protocol = htons(ETH_P_802_2);
/* Build an sw_flow for sending this packet. */
- flow = flow_alloc();
+ flow = ovs_flow_alloc();
err = PTR_ERR(flow);
if (IS_ERR(flow))
goto err_kfree_skb;
- err = flow_extract(packet, -1, &flow->key, &key_len);
+ err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
if (err)
goto err_flow_put;
- err = flow_metadata_from_nlattrs(&flow->key.phy.priority,
- &flow->key.phy.in_port,
- &flow->key.phy.tun_id,
- a[OVS_PACKET_ATTR_KEY]);
+ err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority,
+ &flow->key.phy.in_port,
+ &flow->key.phy.tun_id,
+ a[OVS_PACKET_ATTR_KEY]);
if (err)
goto err_flow_put;
if (err)
goto err_flow_put;
- flow->hash = flow_hash(&flow->key, key_len);
+ flow->hash = ovs_flow_hash(&flow->key, key_len);
- acts = flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
+ acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
err = PTR_ERR(acts);
if (IS_ERR(acts))
goto err_flow_put;
goto err_unlock;
local_bh_disable();
- err = execute_actions(dp, packet);
+ err = ovs_execute_actions(dp, packet);
local_bh_enable();
rcu_read_unlock();
- flow_put(flow);
+ ovs_flow_put(flow);
return err;
err_unlock:
rcu_read_unlock();
err_flow_put:
- flow_put(flow);
+ ovs_flow_put(flow);
err_kfree_skb:
kfree_skb(packet);
err:
static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
int i;
- struct flow_table *table = get_table_protected(dp);
+ struct flow_table *table = genl_dereference(dp->table);
- stats->n_flows = flow_tbl_count(table);
+ stats->n_flows = ovs_flow_tbl_count(table);
stats->n_hit = stats->n_missed = stats->n_lost = 0;
for_each_possible_cpu(i) {
const struct dp_stats_percpu *percpu_stats;
struct dp_stats_percpu local_stats;
- unsigned seqcount;
+ unsigned int start;
percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
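+ /* u64_stats gives 32-bit readers a consistent snapshot of the
+  * 64-bit counters without the cost of a full seqlock.
+  */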
do {
- seqcount = read_seqcount_begin(&percpu_stats->seqlock);
+ start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
local_stats = *percpu_stats;
- } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
+ } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
stats->n_hit += local_stats.n_hit;
stats->n_missed += local_stats.n_missed;
.maxattr = OVS_FLOW_ATTR_MAX
};
-static struct genl_multicast_group dp_flow_multicast_group = {
+static struct genl_multicast_group ovs_dp_flow_multicast_group = {
.name = OVS_FLOW_MCGROUP
};
nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
if (!nla)
goto nla_put_failure;
- err = flow_to_nlattrs(&flow->key, skb);
+ err = ovs_flow_to_nlattrs(&flow->key, skb);
if (err)
goto error;
nla_nest_end(skb, nla);
spin_unlock_bh(&flow->lock);
if (used)
- NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, flow_used_time(used));
+ NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used));
if (stats.n_packets)
NLA_PUT(skb, OVS_FLOW_ATTR_STATS,
error = -EINVAL;
if (!a[OVS_FLOW_ATTR_KEY])
goto error;
- error = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+ error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
if (error)
goto error;
if (!dp)
goto error;
- table = get_table_protected(dp);
- flow = flow_tbl_lookup(table, &key, key_len);
+ table = genl_dereference(dp->table);
+ flow = ovs_flow_tbl_lookup(table, &key, key_len);
if (!flow) {
struct sw_flow_actions *acts;
goto error;
/* Expand table, if necessary, to make room. */
- if (flow_tbl_need_to_expand(table)) {
+ if (ovs_flow_tbl_need_to_expand(table)) {
struct flow_table *new_table;
- new_table = flow_tbl_expand(table);
+ new_table = ovs_flow_tbl_expand(table);
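+ /* Expansion failure is not fatal: fall back to inserting into the
+  * existing, smaller table.
+  */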
if (!IS_ERR(new_table)) {
rcu_assign_pointer(dp->table, new_table);
- flow_tbl_deferred_destroy(table);
- table = get_table_protected(dp);
+ ovs_flow_tbl_deferred_destroy(table);
+ table = genl_dereference(dp->table);
}
}
/* Allocate flow. */
- flow = flow_alloc();
+ flow = ovs_flow_alloc();
if (IS_ERR(flow)) {
error = PTR_ERR(flow);
goto error;
clear_stats(flow);
/* Obtain actions. */
- acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
+ acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
error = PTR_ERR(acts);
if (IS_ERR(acts))
goto error_free_flow;
rcu_assign_pointer(flow->sf_acts, acts);
/* Put flow in bucket. */
- flow->hash = flow_hash(&key, key_len);
- flow_tbl_insert(table, flow);
+ flow->hash = ovs_flow_hash(&key, key_len);
+ ovs_flow_tbl_insert(table, flow);
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
info->snd_seq,
old_acts->actions_len))) {
struct sw_flow_actions *new_acts;
- new_acts = flow_actions_alloc(acts_attrs);
+ new_acts = ovs_flow_actions_alloc(acts_attrs);
error = PTR_ERR(new_acts);
if (IS_ERR(new_acts))
goto error;
rcu_assign_pointer(flow->sf_acts, new_acts);
- flow_deferred_free_acts(old_acts);
+ ovs_flow_deferred_free_acts(old_acts);
}
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
if (!IS_ERR(reply))
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_flow_multicast_group.id, info->nlhdr,
+ GFP_KERNEL);
else
netlink_set_err(INIT_NET_GENL_SOCK, 0,
- dp_flow_multicast_group.id, PTR_ERR(reply));
+ ovs_dp_flow_multicast_group.id,
+ PTR_ERR(reply));
return 0;
error_free_flow:
- flow_put(flow);
+ ovs_flow_put(flow);
error:
return error;
}
if (!a[OVS_FLOW_ATTR_KEY])
return -EINVAL;
- err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+ err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
if (err)
return err;
if (!dp)
return -ENODEV;
- table = get_table_protected(dp);
- flow = flow_tbl_lookup(table, &key, key_len);
+ table = genl_dereference(dp->table);
+ flow = ovs_flow_tbl_lookup(table, &key, key_len);
if (!flow)
return -ENOENT;
if (!a[OVS_FLOW_ATTR_KEY])
return flush_flows(ovs_header->dp_ifindex);
- err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+ err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
if (err)
return err;
if (!dp)
return -ENODEV;
- table = get_table_protected(dp);
- flow = flow_tbl_lookup(table, &key, key_len);
+ table = genl_dereference(dp->table);
+ flow = ovs_flow_tbl_lookup(table, &key, key_len);
if (!flow)
return -ENOENT;
if (!reply)
return -ENOMEM;
- flow_tbl_remove(table, flow);
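+ /* Unlink the flow now; it is actually freed only after an RCU
+  * grace period, via ovs_flow_deferred_free() below.
+  */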
+ ovs_flow_tbl_remove(table, flow);
err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
info->snd_seq, 0, OVS_FLOW_CMD_DEL);
BUG_ON(err < 0);
- flow_deferred_free(flow);
+ ovs_flow_deferred_free(flow);
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
return 0;
}
{
struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
struct datapath *dp;
+ struct flow_table *table;
dp = get_dp(ovs_header->dp_ifindex);
if (!dp)
return -ENODEV;
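+ /* genl_mutex is held across the whole dump, so dp->table cannot
+  * change underneath us; fetch it once instead of per iteration.
+  */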
+ table = genl_dereference(dp->table);
+
for (;;) {
struct sw_flow *flow;
u32 bucket, obj;
bucket = cb->args[0];
obj = cb->args[1];
- flow = flow_tbl_next(get_table_protected(dp), &bucket, &obj);
+ flow = ovs_flow_tbl_next(table, &bucket, &obj);
if (!flow)
break;
.maxattr = OVS_DP_ATTR_MAX
};
-static struct genl_multicast_group dp_datapath_multicast_group = {
+static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
.name = OVS_DATAPATH_MCGROUP
};
u32 pid, u32 seq, u32 flags, u8 cmd)
{
struct ovs_header *ovs_header;
- struct nlattr *nla;
+ struct ovs_dp_stats dp_stats;
int err;
ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
ovs_header->dp_ifindex = get_dpifindex(dp);
rcu_read_lock();
- err = nla_put_string(skb, OVS_DP_ATTR_NAME, dp_name(dp));
+ err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
rcu_read_unlock();
if (err)
goto nla_put_failure;
- nla = nla_reserve(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats));
- if (!nla)
- goto nla_put_failure;
- get_dp_stats(dp, nla_data(nla));
+ get_dp_stats(dp, &dp_stats);
+ NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats);
return genlmsg_end(skb, ovs_header);
struct vport *vport;
rcu_read_lock();
- vport = vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
+ vport = ovs_vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
rcu_read_unlock();
}
/* Allocate table. */
err = -ENOMEM;
- rcu_assign_pointer(dp->table, flow_tbl_alloc(TBL_MIN_BUCKETS));
+ rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
if (!dp->table)
goto err_free_dp;
goto err_destroy_local_port;
list_add_tail(&dp->list_node, &dps);
- dp_sysfs_add_dp(dp);
+ ovs_dp_sysfs_add_dp(dp);
rtnl_unlock();
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_datapath_multicast_group.id, info->nlhdr,
+ GFP_KERNEL);
return 0;
err_destroy_local_port:
- dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
+ ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
err_destroy_percpu:
free_percpu(dp->stats_percpu);
err_destroy_table:
- flow_tbl_destroy(get_table_protected(dp));
+ ovs_flow_tbl_destroy(genl_dereference(dp->table));
err_free_dp:
kfree(dp);
err_put_module:
list_for_each_entry_safe(vport, next_vport, &dp->port_list, node)
if (vport->port_no != OVSP_LOCAL)
- dp_detach_port(vport);
+ ovs_dp_detach_port(vport);
- dp_sysfs_del_dp(dp);
+ ovs_dp_sysfs_del_dp(dp);
list_del(&dp->list_node);
- dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
+ ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
/* rtnl_unlock() will wait until all the references to devices that
* are pending unregistration have been dropped. We do it here to
module_put(THIS_MODULE);
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_datapath_multicast_group.id, info->nlhdr,
+ GFP_KERNEL);
return 0;
if (IS_ERR(reply)) {
err = PTR_ERR(reply);
netlink_set_err(INIT_NET_GENL_SOCK, 0,
- dp_datapath_multicast_group.id, err);
+ ovs_dp_datapath_multicast_group.id, err);
return 0;
}
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_datapath_multicast_group.id, info->nlhdr,
+ GFP_KERNEL);
+
return 0;
}
int i = 0;
list_for_each_entry(dp, &dps, list_node) {
- if (i < skip)
- continue;
- if (ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
+ if (i >= skip &&
+ ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
OVS_DP_CMD_NEW) < 0)
break;
.maxattr = OVS_VPORT_ATTR_MAX
};
-struct genl_multicast_group dp_vport_multicast_group = {
+struct genl_multicast_group ovs_dp_vport_multicast_group = {
.name = OVS_VPORT_MCGROUP
};
u32 pid, u32 seq, u32 flags, u8 cmd)
{
struct ovs_header *ovs_header;
- struct nlattr *nla;
+ struct ovs_vport_stats vport_stats;
int err;
ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport));
NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
- nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS,
- sizeof(struct ovs_vport_stats));
- if (!nla)
- goto nla_put_failure;
-
- vport_get_stats(vport, nla_data(nla));
+ ovs_vport_get_stats(vport, &vport_stats);
+ NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
+ &vport_stats);
NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN,
vport->ops->get_addr(vport));
- err = vport_get_options(vport, skb);
+ err = ovs_vport_get_options(vport, skb);
if (err == -EMSGSIZE)
goto error;
struct vport *vport;
if (a[OVS_VPORT_ATTR_NAME]) {
- vport = vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
+ vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
if (!vport)
return ERR_PTR(-ENODEV);
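+ /* A vport looked up by name must still belong to the datapath
+  * the caller asked about, if one was specified.
+  */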
+ if (ovs_header->dp_ifindex &&
+ ovs_header->dp_ifindex != get_dpifindex(vport->dp))
+ return ERR_PTR(-ENODEV);
return vport;
} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
if (!dp)
return ERR_PTR(-ENODEV);
- vport = get_vport_protected(dp, port_no);
+ vport = rcu_dereference_rtnl(dp->ports[port_no]);
if (!vport)
return ERR_PTR(-ENOENT);
return vport;
int err = 0;
if (a[OVS_VPORT_ATTR_STATS])
- vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
+ ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
if (a[OVS_VPORT_ATTR_ADDRESS])
- err = vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS]));
+ err = ovs_vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS]));
return err;
}
if (port_no >= DP_MAX_PORTS)
goto exit_unlock;
- vport = get_vport_protected(dp, port_no);
+ vport = rtnl_dereference(dp->ports[port_no]);
err = -EBUSY;
if (vport)
goto exit_unlock;
err = -EFBIG;
goto exit_unlock;
}
- vport = get_vport_protected(dp, port_no);
+ vport = rtnl_dereference(dp->ports[port_no]);
if (!vport)
break;
}
if (IS_ERR(vport))
goto exit_unlock;
- dp_sysfs_add_if(vport);
+ ovs_dp_sysfs_add_if(vport);
err = change_vport(vport, a);
if (!err) {
err = PTR_ERR(reply);
}
if (err) {
- dp_detach_port(vport);
+ ovs_dp_detach_port(vport);
goto exit_unlock;
}
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
-
+ ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
exit_unlock:
rtnl_unlock();
err = -EINVAL;
if (!err && a[OVS_VPORT_ATTR_OPTIONS])
- err = vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
+ err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
if (!err)
err = change_vport(vport, a);
if (!err && a[OVS_VPORT_ATTR_UPCALL_PID])
if (IS_ERR(reply)) {
err = PTR_ERR(reply);
netlink_set_err(INIT_NET_GENL_SOCK, 0,
- dp_vport_multicast_group.id, err);
+ ovs_dp_vport_multicast_group.id, err);
return 0;
}
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
exit_unlock:
rtnl_unlock();
if (IS_ERR(reply))
goto exit_unlock;
- dp_detach_port(vport);
+ ovs_dp_detach_port(vport);
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
exit_unlock:
rtnl_unlock();
for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
struct vport *vport;
- vport = get_vport_protected(dp, port_no);
+ vport = rcu_dereference(dp->ports[port_no]);
if (!vport)
continue;
static const struct genl_family_and_ops dp_genl_families[] = {
{ &dp_datapath_genl_family,
dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
- &dp_datapath_multicast_group },
+ &ovs_dp_datapath_multicast_group },
{ &dp_vport_genl_family,
dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
- &dp_vport_multicast_group },
+ &ovs_dp_vport_multicast_group },
{ &dp_flow_genl_family,
dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
- &dp_flow_multicast_group },
+ &ovs_dp_flow_multicast_group },
{ &dp_packet_genl_family,
dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
NULL },
return err;
}
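+/* Runs via genl_exec() so that genl_mutex is held, which makes the
+ * genl_dereference() of each dp->table safe.
+ */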
+static int __rehash_flow_table(void *dummy)
+{
+ struct datapath *dp;
+
+ list_for_each_entry(dp, &dps, list_node) {
+ struct flow_table *old_table = genl_dereference(dp->table);
+ struct flow_table *new_table;
+
+ new_table = ovs_flow_tbl_rehash(old_table);
+ if (!IS_ERR(new_table)) {
+ rcu_assign_pointer(dp->table, new_table);
+ ovs_flow_tbl_deferred_destroy(old_table);
+ }
+ }
+ return 0;
+}
+
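+/* Delayed-work callback: rehash every flow table under genl_mutex,
+ * then re-arm for the next interval.
+ */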
+static void rehash_flow_table(struct work_struct *work)
+{
+ genl_exec(__rehash_flow_table, NULL);
+ schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+}
+
static int __init dp_init(void)
{
struct sk_buff *dummy_skb;
pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
VERSION BUILDNR);
- err = tnl_init();
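+ /* genl_exec must come up before anything that uses it; the rehash
+  * work scheduled below runs through genl_exec().
+  */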
+ err = genl_exec_init();
if (err)
goto error;
- err = flow_init();
+ err = ovs_workqueues_init();
+ if (err)
+ goto error_genl_exec;
+
+ err = ovs_tnl_init();
+ if (err)
+ goto error_wq;
+
+ err = ovs_flow_init();
if (err)
goto error_tnl_exit;
- err = vport_init();
+ err = ovs_vport_init();
if (err)
goto error_flow_exit;
- err = register_netdevice_notifier(&dp_device_notifier);
+ err = register_netdevice_notifier(&ovs_dp_device_notifier);
if (err)
goto error_vport_exit;
if (err < 0)
goto error_unreg_notifier;
+ schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+
return 0;
error_unreg_notifier:
- unregister_netdevice_notifier(&dp_device_notifier);
+ unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_vport_exit:
- vport_exit();
+ ovs_vport_exit();
error_flow_exit:
- flow_exit();
+ ovs_flow_exit();
error_tnl_exit:
- tnl_exit();
+ ovs_tnl_exit();
+error_wq:
+ ovs_workqueues_exit();
+error_genl_exec:
+ genl_exec_exit();
error:
return err;
}
static void dp_cleanup(void)
{
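+ /* Stop the periodic rehash before the infrastructure it relies on
+  * is torn down.
+  */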
+ cancel_delayed_work_sync(&rehash_flow_wq);
rcu_barrier();
dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
- unregister_netdevice_notifier(&dp_device_notifier);
- vport_exit();
- flow_exit();
- tnl_exit();
+ unregister_netdevice_notifier(&ovs_dp_device_notifier);
+ ovs_vport_exit();
+ ovs_flow_exit();
+ ovs_tnl_exit();
+ ovs_workqueues_exit();
+ genl_exec_exit();
}
module_init(dp_init);