/*
- * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
+ * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
* Distributed under the terms of the GNU GPL version 2.
*
* Significant portions of this file may be copied from parts of the Linux
/* Functions for managing the dp interface/device. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
-#include <linux/random.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <asm/bug.h>
+#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
-#include <linux/workqueue.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>
+#include <net/genetlink.h>
+#include <linux/compat.h>
#include "openvswitch/datapath-protocol.h"
+#include "checksum.h"
#include "datapath.h"
#include "actions.h"
#include "flow.h"
+#include "loop_counter.h"
+#include "odp-compat.h"
#include "table.h"
#include "vport-internal_dev.h"
-#include "compat.h"
-
-
int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);
* dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
* lock first.
*
- * It is safe to access the datapath and dp_port structures with just
+ * It is safe to access the datapath and vport structures with just
* dp_mutex.
*/
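
This ordering is relied on throughout the file: every write-side path (create_dp(), destroy_dp(), vport attach/detach) acquires the locks in the same sequence. A minimal sketch of a compliant writer, using only names already declared here:

    rtnl_lock();
    mutex_lock(&dp_mutex);
    /* ... modify dps[] or per-datapath state ... */
    mutex_unlock(&dp_mutex);
    rtnl_unlock();          /* release in reverse order of acquisition */
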
-static struct datapath *dps[ODP_MAX];
+static struct datapath __rcu *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);
-/* Number of milliseconds between runs of the maintenance thread. */
-#define MAINT_SLEEP_MSECS 1000
-
-static int new_dp_port(struct datapath *, struct odp_port *, int port_no);
+static struct vport *new_vport(const struct vport_parms *);
/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
if (dp_idx < 0 || dp_idx >= ODP_MAX)
return NULL;
- return rcu_dereference(dps[dp_idx]);
+ return rcu_dereference_check(dps[dp_idx], rcu_read_lock_held() ||
+ lockdep_is_held(&dp_mutex));
}
EXPORT_SYMBOL_GPL(get_dp);
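
The new rcu_dereference_check() condition turns the comment above get_dp() into something lockdep can verify: a caller must hold either the RCU read lock or dp_mutex. A reader-side sketch (hypothetical caller):

    rcu_read_lock();
    dp = get_dp(dp_idx);
    if (dp) {
        /* dp, and anything reached from it via rcu_dereference(),
         * is only guaranteed stable inside this read-side section. */
    }
    rcu_read_unlock();
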
return dp;
}
+static struct tbl *get_table_protected(struct datapath *dp)
+{
+ return rcu_dereference_protected(dp->table,
+ lockdep_is_held(&dp->mutex));
+}
+
+static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
+{
+ return rcu_dereference_protected(dp->ports[port_no],
+ lockdep_is_held(&dp->mutex));
+}
+
/* Must be called with rcu_read_lock or RTNL lock. */
const char *dp_name(const struct datapath *dp)
{
- return vport_get_name(dp->ports[ODPP_LOCAL]->vport);
+ return vport_get_name(rcu_dereference_rtnl(dp->ports[ODPP_LOCAL]));
}
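
rcu_dereference_rtnl() is the RTNL-aware sibling of the check in get_dp(); on kernels of this era it expands to approximately the following, so the "rcu_read_lock or RTNL lock" contract is enforced rather than merely documented:

    /* Approximate expansion (see <linux/rtnetlink.h>): */
    rcu_dereference_check(dp->ports[ODPP_LOCAL],
                          rcu_read_lock_held() || lockdep_rtnl_is_held());
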
static inline size_t br_nlmsg_size(void)
}
static int dp_fill_ifinfo(struct sk_buff *skb,
- const struct dp_port *port,
+ const struct vport *port,
int event, unsigned int flags)
{
- const struct datapath *dp = port->dp;
- int ifindex = vport_get_ifindex(port->vport);
- int iflink = vport_get_iflink(port->vport);
+ struct datapath *dp = port->dp;
+ int ifindex = vport_get_ifindex(port);
+ int iflink = vport_get_iflink(port);
struct ifinfomsg *hdr;
struct nlmsghdr *nlh;
hdr->__ifi_pad = 0;
hdr->ifi_type = ARPHRD_ETHER;
hdr->ifi_index = ifindex;
- hdr->ifi_flags = vport_get_flags(port->vport);
+ hdr->ifi_flags = vport_get_flags(port);
hdr->ifi_change = 0;
- NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port->vport));
- NLA_PUT_U32(skb, IFLA_MASTER, vport_get_ifindex(dp->ports[ODPP_LOCAL]->vport));
- NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port->vport));
+ NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
+ NLA_PUT_U32(skb, IFLA_MASTER,
+ vport_get_ifindex(get_vport_protected(dp, ODPP_LOCAL)));
+ NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
NLA_PUT_U8(skb, IFLA_OPERSTATE,
- vport_is_running(port->vport)
- ? vport_get_operstate(port->vport)
+ vport_is_running(port)
+ ? vport_get_operstate(port)
: IF_OPER_DOWN);
#endif
- NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN,
- vport_get_addr(port->vport));
+ NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));
if (ifindex != iflink)
		NLA_PUT_U32(skb, IFLA_LINK, iflink);
return -EMSGSIZE;
}
-static void dp_ifinfo_notify(int event, struct dp_port *port)
+static void dp_ifinfo_notify(int event, struct vport *port)
{
struct sk_buff *skb;
int err = -ENOBUFS;
static int create_dp(int dp_idx, const char __user *devnamep)
{
- struct odp_port internal_dev_port;
+ struct vport_parms parms;
char devname[IFNAMSIZ];
+ struct vport *vport;
struct datapath *dp;
int err;
int i;
if (devnamep) {
- err = -EFAULT;
- if (strncpy_from_user(devname, devnamep, IFNAMSIZ) < 0)
+ int retval = strncpy_from_user(devname, devnamep, IFNAMSIZ);
+ if (retval < 0) {
+ err = -EFAULT;
+ goto err;
+ } else if (retval >= IFNAMSIZ) {
+ err = -ENAMETOOLONG;
goto err;
- devname[IFNAMSIZ - 1] = '\0';
+ }
} else {
- snprintf(devname, sizeof devname, "of%d", dp_idx);
+ snprintf(devname, sizeof(devname), "of%d", dp_idx);
}
rtnl_lock();
goto err_put_module;
err = -ENOMEM;
- dp = kzalloc(sizeof *dp, GFP_KERNEL);
+ dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (dp == NULL)
goto err_put_module;
INIT_LIST_HEAD(&dp->port_list);
mutex_init(&dp->mutex);
+ mutex_lock(&dp->mutex);
dp->dp_idx = dp_idx;
for (i = 0; i < DP_N_QUEUES; i++)
skb_queue_head_init(&dp->queues[i]);
/* Allocate table. */
err = -ENOMEM;
- rcu_assign_pointer(dp->table, tbl_create(0));
+ rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
if (!dp->table)
goto err_free_dp;
/* Set up our datapath device. */
- BUILD_BUG_ON(sizeof(internal_dev_port.devname) != sizeof(devname));
- strcpy(internal_dev_port.devname, devname);
- internal_dev_port.flags = ODP_PORT_INTERNAL;
- err = new_dp_port(dp, &internal_dev_port, ODPP_LOCAL);
- if (err) {
+ parms.name = devname;
+ parms.type = ODP_VPORT_TYPE_INTERNAL;
+ parms.options = NULL;
+ parms.dp = dp;
+ parms.port_no = ODPP_LOCAL;
+ vport = new_vport(&parms);
+ if (IS_ERR(vport)) {
+ err = PTR_ERR(vport);
if (err == -EBUSY)
err = -EEXIST;
dp->drop_frags = 0;
dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
- if (!dp->stats_percpu)
+ if (!dp->stats_percpu) {
+ err = -ENOMEM;
goto err_destroy_local_port;
+ }
rcu_assign_pointer(dps[dp_idx], dp);
+ dp_sysfs_add_dp(dp);
+
+ mutex_unlock(&dp->mutex);
mutex_unlock(&dp_mutex);
rtnl_unlock();
- dp_sysfs_add_dp(dp);
-
return 0;
err_destroy_local_port:
- dp_detach_port(dp->ports[ODPP_LOCAL], 1);
+ dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
err_destroy_table:
- tbl_destroy(dp->table, NULL);
+ tbl_destroy(get_table_protected(dp), NULL);
err_free_dp:
+ mutex_unlock(&dp->mutex);
kfree(dp);
err_put_module:
module_put(THIS_MODULE);
return err;
}
-static void do_destroy_dp(struct datapath *dp)
+static void destroy_dp_rcu(struct rcu_head *rcu)
{
- struct dp_port *p, *n;
+ struct datapath *dp = container_of(rcu, struct datapath, rcu);
int i;
- list_for_each_entry_safe (p, n, &dp->port_list, node)
- if (p->port_no != ODPP_LOCAL)
- dp_detach_port(p, 1);
-
- dp_sysfs_del_dp(dp);
-
- rcu_assign_pointer(dps[dp->dp_idx], NULL);
-
- dp_detach_port(dp->ports[ODPP_LOCAL], 1);
-
- tbl_destroy(dp->table, flow_free_tbl);
-
for (i = 0; i < DP_N_QUEUES; i++)
skb_queue_purge(&dp->queues[i]);
- for (i = 0; i < DP_MAX_GROUPS; i++)
- kfree(dp->groups[i]);
+
+ tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
free_percpu(dp->stats_percpu);
kobject_put(&dp->ifobj);
- module_put(THIS_MODULE);
}
static int destroy_dp(int dp_idx)
{
struct datapath *dp;
- int err;
+ int err = 0;
+ struct vport *p, *n;
rtnl_lock();
mutex_lock(&dp_mutex);
dp = get_dp(dp_idx);
- err = -ENODEV;
- if (!dp)
- goto err_unlock;
+ if (!dp) {
+ err = -ENODEV;
+ goto out;
+ }
- do_destroy_dp(dp);
- err = 0;
+ mutex_lock(&dp->mutex);
-err_unlock:
+ list_for_each_entry_safe (p, n, &dp->port_list, node)
+ if (p->port_no != ODPP_LOCAL)
+ dp_detach_port(p);
+
+ dp_sysfs_del_dp(dp);
+ rcu_assign_pointer(dps[dp->dp_idx], NULL);
+ dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
+
+ mutex_unlock(&dp->mutex);
+ call_rcu(&dp->rcu, destroy_dp_rcu);
+ module_put(THIS_MODULE);
+
+out:
mutex_unlock(&dp_mutex);
rtnl_unlock();
return err;
}
-static void release_dp_port(struct kobject *kobj)
-{
- struct dp_port *p = container_of(kobj, struct dp_port, kobj);
- kfree(p);
-}
-
-static struct kobj_type brport_ktype = {
-#ifdef CONFIG_SYSFS
- .sysfs_ops = &brport_sysfs_ops,
-#endif
- .release = release_dp_port
-};
-
-/* Called with RTNL lock and dp_mutex. */
-static int new_dp_port(struct datapath *dp, struct odp_port *odp_port, int port_no)
+/* Called with RTNL lock and dp->mutex. */
+static struct vport *new_vport(const struct vport_parms *parms)
{
struct vport *vport;
- struct dp_port *p;
- int err;
-
- vport = vport_locate(odp_port->devname);
- if (!vport) {
- vport_lock();
-
- if (odp_port->flags & ODP_PORT_INTERNAL)
- vport = __vport_add(odp_port->devname, "internal", NULL);
- else
- vport = __vport_add(odp_port->devname, "netdev", NULL);
-
- vport_unlock();
-
- if (IS_ERR(vport))
- return PTR_ERR(vport);
- }
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
+ vport_lock();
+ vport = vport_add(parms);
+ if (!IS_ERR(vport)) {
+ struct datapath *dp = parms->dp;
- p->port_no = port_no;
- p->dp = dp;
- atomic_set(&p->sflow_pool, 0);
+ rcu_assign_pointer(dp->ports[parms->port_no], vport);
+ list_add_rcu(&vport->node, &dp->port_list);
+ dp->n_ports++;
- err = vport_attach(vport, p);
- if (err) {
- kfree(p);
- return err;
+ dp_ifinfo_notify(RTM_NEWLINK, vport);
}
+ vport_unlock();
- rcu_assign_pointer(dp->ports[port_no], p);
- list_add_rcu(&p->node, &dp->port_list);
- dp->n_ports++;
-
- /* Initialize kobject for bridge. This will be added as
- * /sys/class/net/<devname>/brport later, if sysfs is enabled. */
- p->kobj.kset = NULL;
- kobject_init(&p->kobj, &brport_ktype);
-
- dp_ifinfo_notify(RTM_NEWLINK, p);
-
- return 0;
-}
-
-static int attach_port(int dp_idx, struct odp_port __user *portp)
-{
- struct datapath *dp;
- struct odp_port port;
- int port_no;
- int err;
-
- err = -EFAULT;
- if (copy_from_user(&port, portp, sizeof port))
- goto out;
- port.devname[IFNAMSIZ - 1] = '\0';
-
- rtnl_lock();
- dp = get_dp_locked(dp_idx);
- err = -ENODEV;
- if (!dp)
- goto out_unlock_rtnl;
-
- for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
- if (!dp->ports[port_no])
- goto got_port_no;
- err = -EFBIG;
- goto out_unlock_dp;
-
-got_port_no:
- err = new_dp_port(dp, &port, port_no);
- if (err)
- goto out_unlock_dp;
-
- if (!(port.flags & ODP_PORT_INTERNAL))
- set_internal_devs_mtu(dp);
- dp_sysfs_add_if(dp->ports[port_no]);
-
- err = __put_user(port_no, &portp->port);
-
-out_unlock_dp:
- mutex_unlock(&dp->mutex);
-out_unlock_rtnl:
- rtnl_unlock();
-out:
- return err;
+ return vport;
}
-int dp_detach_port(struct dp_port *p, int may_delete)
+int dp_detach_port(struct vport *p)
{
- struct vport *vport = p->vport;
int err;
ASSERT_RTNL();
list_del_rcu(&p->node);
rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
- err = vport_detach(vport);
- if (err)
- return err;
-
- /* Then wait until no one is still using it, and destroy it. */
- synchronize_rcu();
-
- if (may_delete) {
- const char *port_type = vport_get_type(vport);
-
- if (!strcmp(port_type, "netdev") || !strcmp(port_type, "internal")) {
- vport_lock();
- __vport_del(vport);
- vport_unlock();
- }
- }
-
- kobject_put(&p->kobj);
-
- return 0;
-}
-
-static int detach_port(int dp_idx, int port_no)
-{
- struct dp_port *p;
- struct datapath *dp;
- int err;
-
- err = -EINVAL;
- if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)
- goto out;
-
- rtnl_lock();
- dp = get_dp_locked(dp_idx);
- err = -ENODEV;
- if (!dp)
- goto out_unlock_rtnl;
-
- p = dp->ports[port_no];
- err = -ENOENT;
- if (!p)
- goto out_unlock_dp;
+ /* Then destroy it. */
+ vport_lock();
+ err = vport_del(p);
+ vport_unlock();
- err = dp_detach_port(p, 1);
-
-out_unlock_dp:
- mutex_unlock(&dp->mutex);
-out_unlock_rtnl:
- rtnl_unlock();
-out:
return err;
}
-/* Must be called with rcu_read_lock and with bottom-halves disabled. */
-void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb)
+/* Must be called with rcu_read_lock. */
+void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
struct datapath *dp = p->dp;
struct dp_stats_percpu *stats;
- struct odp_flow_key key;
- struct tbl_node *flow_node;
-
- WARN_ON_ONCE(skb_shared(skb));
- skb_warn_if_lro(skb);
+ int stats_counter_off;
+ struct sw_flow_actions *acts;
+ struct loop_counter *loop;
+ int error;
- OVS_CB(skb)->dp_port = p;
- compute_ip_summed(skb, false);
+ OVS_CB(skb)->vport = p;
- /* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
- stats = percpu_ptr(dp->stats_percpu, smp_processor_id());
+ if (!OVS_CB(skb)->flow) {
+ struct sw_flow_key key;
+ struct tbl_node *flow_node;
+ bool is_frag;
- if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
- if (dp->drop_frags) {
+ /* Extract flow from 'skb' into 'key'. */
+ error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key, &is_frag);
+ if (unlikely(error)) {
kfree_skb(skb);
- stats->n_frags++;
return;
}
- }
-
- flow_node = tbl_lookup(rcu_dereference(dp->table), &key, flow_hash(&key), flow_cmp);
- if (flow_node) {
- struct sw_flow *flow = flow_cast(flow_node);
- struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts);
- flow_used(flow, skb);
- execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
- GFP_ATOMIC);
- stats->n_hit++;
- } else {
- stats->n_missed++;
- dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
- }
-}
-
-#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
-/* This code is based on a skb_checksum_setup from net/dev/core.c from a
- * combination of Lenny's 2.6.26 Xen kernel and Xen's
- * linux-2.6.18-92.1.10.el5.xs5.0.0.394.644. We can't call this function
- * directly because it isn't exported in all versions. */
-static int skb_pull_up_to(struct sk_buff *skb, void *ptr)
-{
- if (ptr < (void *)skb->tail)
- return 1;
- if (__pskb_pull_tail(skb,
- ptr - (void *)skb->data - skb_headlen(skb))) {
- return 1;
- } else {
- return 0;
- }
-}
-int vswitch_skb_checksum_setup(struct sk_buff *skb)
-{
- struct iphdr *iph;
- unsigned char *th;
- int err = -EPROTO;
- __u16 csum_start, csum_offset;
+ if (is_frag && dp->drop_frags) {
+ kfree_skb(skb);
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
+ goto out;
+ }
- if (!skb->proto_csum_blank)
- return 0;
+ /* Look up flow. */
+ flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
+ flow_hash(&key), flow_cmp);
+ if (unlikely(!flow_node)) {
+ struct dp_upcall_info upcall;
+
+ upcall.type = _ODPL_MISS_NR;
+ upcall.key = &key;
+ upcall.userdata = 0;
+ upcall.sample_pool = 0;
+ upcall.actions = NULL;
+ upcall.actions_len = 0;
+ dp_upcall(dp, skb, &upcall);
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
+ goto out;
+ }
- if (skb->protocol != htons(ETH_P_IP))
- goto out;
+ OVS_CB(skb)->flow = flow_cast(flow_node);
+ }
- if (!skb_pull_up_to(skb, skb_network_header(skb) + sizeof(struct iphdr)))
- goto out;
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
+ flow_used(OVS_CB(skb)->flow, skb);
- iph = ip_hdr(skb);
- th = skb_network_header(skb) + 4 * iph->ihl;
+ acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
- csum_start = th - skb->head;
- switch (iph->protocol) {
- case IPPROTO_TCP:
- csum_offset = offsetof(struct tcphdr, check);
- break;
- case IPPROTO_UDP:
- csum_offset = offsetof(struct udphdr, check);
- break;
- default:
- if (net_ratelimit())
- printk(KERN_ERR "Attempting to checksum a non-"
- "TCP/UDP packet, dropping a protocol"
- " %d packet", iph->protocol);
- goto out;
+ /* Check whether we've looped too much. */
+ loop = loop_get_counter();
+ if (unlikely(++loop->count > MAX_LOOPS))
+ loop->looping = true;
+ if (unlikely(loop->looping)) {
+ loop_suppress(dp, acts);
+ kfree_skb(skb);
+ goto out_loop;
}
- if (!skb_pull_up_to(skb, th + csum_offset + 2))
- goto out;
-
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb->proto_csum_blank = 0;
+ /* Execute actions. */
+ execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
+ acts->actions_len);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
- skb->csum_start = csum_start;
- skb->csum_offset = csum_offset;
-#else
- skb_set_transport_header(skb, csum_start - skb_headroom(skb));
- skb->csum = csum_offset;
-#endif
+ /* Check whether sub-actions looped too much. */
+ if (unlikely(loop->looping))
+ loop_suppress(dp, acts);
- err = 0;
+out_loop:
+ /* Decrement loop counter. */
+ if (!--loop->count)
+ loop->looping = false;
+ loop_put_counter();
out:
- return err;
-}
-#endif /* CONFIG_XEN && HAVE_PROTO_DATA_VALID */
-
- /* Types of checksums that we can receive (these all refer to L4 checksums):
- * 1. CHECKSUM_NONE: Device that did not compute checksum, contains full
- * (though not verified) checksum in packet but not in skb->csum. Packets
- * from the bridge local port will also have this type.
- * 2. CHECKSUM_COMPLETE (CHECKSUM_HW): Good device that computes checksums,
- * also the GRE module. This is the same as CHECKSUM_NONE, except it has
- * a valid skb->csum. Importantly, both contain a full checksum (not
- * verified) in the packet itself. The only difference is that if the
- * packet gets to L4 processing on this machine (not in DomU) we won't
- * have to recompute the checksum to verify. Most hardware devices do not
- * produce packets with this type, even if they support receive checksum
- * offloading (they produce type #5).
- * 3. CHECKSUM_PARTIAL (CHECKSUM_HW): Packet without full checksum and needs to
- * be computed if it is sent off box. Unfortunately on earlier kernels,
- * this case is impossible to distinguish from #2, despite having opposite
- * meanings. Xen adds an extra field on earlier kernels (see #4) in order
- * to distinguish the different states. The only real user of this type
- * with bridging is Xen (on later kernels).
- * 4. CHECKSUM_UNNECESSARY (with proto_csum_blank true): This packet was
- * generated locally by a Xen DomU and has a partial checksum. If it is
- * handled on this machine (Dom0 or DomU), then the checksum will not be
- * computed. If it goes off box, the checksum in the packet needs to be
- * completed. Calling skb_checksum_setup converts this to CHECKSUM_HW
- * (CHECKSUM_PARTIAL) so that the checksum can be completed. In later
- * kernels, this combination is replaced with CHECKSUM_PARTIAL.
- * 5. CHECKSUM_UNNECESSARY (with proto_csum_blank false): Packet with a correct
- * full checksum or using a protocol without a checksum. skb->csum is
- * undefined. This is common from devices with receive checksum
- * offloading. This is somewhat similar to CHECKSUM_NONE, except that
- * nobody will try to verify the checksum with CHECKSUM_UNNECESSARY.
- *
- * Note that on earlier kernels, CHECKSUM_COMPLETE and CHECKSUM_PARTIAL are
- * both defined as CHECKSUM_HW. Normally the meaning of CHECKSUM_HW is clear
- * based on whether it is on the transmit or receive path. After the datapath
- * it will be interpreted as CHECKSUM_PARTIAL.  If the packet already has a
- * checksum, we will panic. Since we can receive packets with checksums, we
- * assume that all CHECKSUM_HW packets have checksums and map them to
- * CHECKSUM_NONE, which has a similar meaning (it is only different if the
- * packet is processed by the local IP stack, in which case it will need to
- * be reverified). If we receive a packet with CHECKSUM_HW that really means
- * CHECKSUM_PARTIAL, it will be sent with the wrong checksum. However, there
- * shouldn't be any devices that do this with bridging.
- *
- * The bridge has similar behavior and this function closely resembles
- * skb_forward_csum(). It is slightly different because we are only concerned
- * with bridging and not other types of forwarding and can get away with
- * slightly more optimal behavior. */
-void
-compute_ip_summed(struct sk_buff *skb, bool xmit)
-{
- /* For our convenience these defines change repeatedly between kernel
- * versions, so we can't just copy them over... */
- switch (skb->ip_summed) {
- case CHECKSUM_NONE:
- OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
- break;
- case CHECKSUM_UNNECESSARY:
- OVS_CB(skb)->ip_summed = OVS_CSUM_UNNECESSARY;
- break;
-#ifdef CHECKSUM_HW
- /* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE.
- * However, we should only get CHECKSUM_PARTIAL packets from Xen, which
- * uses some special fields to represent this (see below). Since we
- * can only make one type work, pick the one that actually happens in
- * practice.
- *
- * The one exception to this is if we are on the transmit path
- * (basically after skb_checksum_setup() has been run) the type has
- * already been converted, so we should stay with that. */
- case CHECKSUM_HW:
- if (!xmit)
- OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
- else
- OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
+ /* Update datapath statistics. */
+ local_bh_disable();
+ stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
- break;
-#else
- case CHECKSUM_COMPLETE:
- OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
- break;
- case CHECKSUM_PARTIAL:
- OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
- break;
-#endif
- default:
- printk(KERN_ERR "openvswitch: unknown checksum type %d\n",
- skb->ip_summed);
- /* None seems the safest... */
- OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
- }
-
-#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
- /* Xen has a special way of representing CHECKSUM_PARTIAL on older
- * kernels. It should not be set on the transmit path though. */
- if (skb->proto_csum_blank)
- OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
-
- WARN_ON_ONCE(skb->proto_csum_blank && xmit);
-#endif
+ write_seqcount_begin(&stats->seqlock);
+ (*(u64 *)((u8 *)stats + stats_counter_off))++;
+ write_seqcount_end(&stats->seqlock);
+
+ local_bh_enable();
}
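
The write_seqcount_begin()/end() pair replaces bare counter increments so that get_dp_stats(), later in this patch, can read a consistent snapshot without taking a lock. The read side pairs with it like this (a sketch; the real loop is in get_dp_stats()):

    unsigned seq;
    struct dp_stats_percpu copy;

    do {
        seq = read_seqcount_begin(&stats->seqlock);
        copy = *stats;              /* snapshot all four counters */
    } while (read_seqcount_retry(&stats->seqlock, seq));
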
-void
-forward_ip_summed(struct sk_buff *skb)
+static void copy_and_csum_skb(struct sk_buff *skb, void *to)
{
-#ifdef CHECKSUM_HW
- if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
- skb->ip_summed = CHECKSUM_NONE;
-#endif
+ u16 csum_start, csum_offset;
+ __wsum csum;
+
+ get_skb_csum_pointers(skb, &csum_start, &csum_offset);
+ csum_start -= skb_headroom(skb);
+ BUG_ON(csum_start >= skb_headlen(skb));
+
+ skb_copy_bits(skb, 0, to, csum_start);
+
+ csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
+ skb->len - csum_start, 0);
+ *(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
}
/* Append each packet in 'skb' list to 'queue'. There will be only one packet
* unless we broke up a GSO packet. */
-static int
-queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
- int queue_no, u32 arg)
+static int queue_control_packets(struct datapath *dp, struct sk_buff *skb,
+ const struct dp_upcall_info *upcall_info)
{
struct sk_buff *nskb;
int port_no;
int err;
- if (OVS_CB(skb)->dp_port)
- port_no = OVS_CB(skb)->dp_port->port_no;
+ if (OVS_CB(skb)->vport)
+ port_no = OVS_CB(skb)->vport->port_no;
else
port_no = ODPP_LOCAL;
do {
- struct odp_msg *header;
+ struct odp_packet *upcall;
+ struct sk_buff *user_skb; /* to be queued to userspace */
+ struct nlattr *nla;
+ unsigned int len;
nskb = skb->next;
skb->next = NULL;
- /* If a checksum-deferred packet is forwarded to the
- * controller, correct the pointers and checksum. This happens
- * on a regular basis only on Xen, on which VMs can pass up
- * packets that do not have their checksum computed.
- */
- err = vswitch_skb_checksum_setup(skb);
- if (err)
+ len = sizeof(struct odp_packet);
+ len += nla_total_size(4); /* ODP_PACKET_ATTR_TYPE. */
+ len += nla_total_size(skb->len);
+ len += nla_total_size(FLOW_BUFSIZE);
+ if (upcall_info->userdata)
+ len += nla_total_size(8);
+ if (upcall_info->sample_pool)
+ len += nla_total_size(4);
+ if (upcall_info->actions_len)
+ len += nla_total_size(upcall_info->actions_len);
+
+ user_skb = alloc_skb(len, GFP_ATOMIC);
+ if (!user_skb)
goto err_kfree_skbs;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
- /* Until 2.6.22, the start of the transport header was
- * also the start of data to be checksummed. Linux
- * 2.6.22 introduced the csum_start field for this
- * purpose, but we should point the transport header to
- * it anyway for backward compatibility, as
- * dev_queue_xmit() does even in 2.6.28. */
- skb_set_transport_header(skb, skb->csum_start -
- skb_headroom(skb));
-#endif
+ upcall = (struct odp_packet *)__skb_put(user_skb, sizeof(*upcall));
+ upcall->dp_idx = dp->dp_idx;
+
+ nla_put_u32(user_skb, ODP_PACKET_ATTR_TYPE, upcall_info->type);
+
+ nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_KEY);
+ flow_to_nlattrs(upcall_info->key, user_skb);
+ nla_nest_end(user_skb, nla);
+
+ if (upcall_info->userdata)
+ nla_put_u64(user_skb, ODP_PACKET_ATTR_USERDATA, upcall_info->userdata);
+ if (upcall_info->sample_pool)
+ nla_put_u32(user_skb, ODP_PACKET_ATTR_SAMPLE_POOL, upcall_info->sample_pool);
+ if (upcall_info->actions_len) {
+ const struct nlattr *actions = upcall_info->actions;
+ u32 actions_len = upcall_info->actions_len;
- err = skb_checksum_help(skb);
- if (err)
- goto err_kfree_skbs;
+ nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_ACTIONS);
+ memcpy(__skb_put(user_skb, actions_len), actions, actions_len);
+ nla_nest_end(user_skb, nla);
}
- err = skb_cow(skb, sizeof *header);
- if (err)
- goto err_kfree_skbs;
+ nla = __nla_reserve(user_skb, ODP_PACKET_ATTR_PACKET, skb->len);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ copy_and_csum_skb(skb, nla_data(nla));
+ else
+ skb_copy_bits(skb, 0, nla_data(nla), skb->len);
- header = (struct odp_msg*)__skb_push(skb, sizeof *header);
- header->type = queue_no;
- header->length = skb->len;
- header->port = port_no;
- header->reserved = 0;
- header->arg = arg;
- skb_queue_tail(queue, skb);
+ upcall->len = user_skb->len;
+ skb_queue_tail(&dp->queues[upcall_info->type], user_skb);
+ kfree_skb(skb);
skb = nskb;
} while (skb);
return 0;
return err;
}
-int
-dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
- u32 arg)
+int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
{
struct dp_stats_percpu *stats;
struct sk_buff_head *queue;
int err;
WARN_ON_ONCE(skb_shared(skb));
- BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR && queue_no != _ODPL_SFLOW_NR);
- queue = &dp->queues[queue_no];
+ BUG_ON(upcall_info->type >= DP_N_QUEUES);
+
+ queue = &dp->queues[upcall_info->type];
err = -ENOBUFS;
if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
goto err_kfree_skb;
forward_ip_summed(skb);
+ err = vswitch_skb_checksum_setup(skb);
+ if (err)
+ goto err_kfree_skb;
+
/* Break apart GSO packets into their component pieces. Otherwise
* userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
if (skb_is_gso(skb)) {
- struct sk_buff *nskb = skb_gso_segment(skb, 0);
- if (nskb) {
- kfree_skb(skb);
- skb = nskb;
- if (unlikely(IS_ERR(skb))) {
- err = PTR_ERR(skb);
- goto err;
- }
- } else {
- /* XXX This case might not be possible. It's hard to
- * tell from the skb_gso_segment() code and comment. */
+ struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
+
+ kfree_skb(skb);
+ skb = nskb;
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ goto err;
}
}
- err = queue_control_packets(skb, queue, queue_no, arg);
+ err = queue_control_packets(dp, skb, upcall_info);
wake_up_interruptible(&dp->waitqueue);
return err;
err_kfree_skb:
kfree_skb(skb);
err:
- stats = percpu_ptr(dp->stats_percpu, get_cpu());
+ local_bh_disable();
+ stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+ write_seqcount_begin(&stats->seqlock);
stats->n_lost++;
- put_cpu();
+ write_seqcount_end(&stats->seqlock);
+
+ local_bh_enable();
return err;
}
static int flush_flows(struct datapath *dp)
{
- struct tbl *old_table = rcu_dereference(dp->table);
+ struct tbl *old_table = get_table_protected(dp);
struct tbl *new_table;
- new_table = tbl_create(0);
+ new_table = tbl_create(TBL_MIN_BUCKETS);
if (!new_table)
return -ENOMEM;
return 0;
}
-static int validate_actions(const struct sw_flow_actions *actions)
+static int validate_actions(const struct nlattr *actions, u32 actions_len)
{
- unsigned int i;
+ const struct nlattr *a;
+ int rem;
+
+ nla_for_each_attr(a, actions, actions_len, rem) {
+ static const u32 action_lens[ODPAT_MAX + 1] = {
+ [ODPAT_OUTPUT] = 4,
+ [ODPAT_CONTROLLER] = 8,
+ [ODPAT_SET_DL_TCI] = 2,
+ [ODPAT_STRIP_VLAN] = 0,
+ [ODPAT_SET_DL_SRC] = ETH_ALEN,
+ [ODPAT_SET_DL_DST] = ETH_ALEN,
+ [ODPAT_SET_NW_SRC] = 4,
+ [ODPAT_SET_NW_DST] = 4,
+ [ODPAT_SET_NW_TOS] = 1,
+ [ODPAT_SET_TP_SRC] = 2,
+ [ODPAT_SET_TP_DST] = 2,
+ [ODPAT_SET_TUNNEL] = 8,
+ [ODPAT_SET_PRIORITY] = 4,
+ [ODPAT_POP_PRIORITY] = 0,
+ [ODPAT_DROP_SPOOFED_ARP] = 0,
+ };
+ int type = nla_type(a);
+
+ if (type > ODPAT_MAX || nla_len(a) != action_lens[type])
+ return -EINVAL;
- for (i = 0; i < actions->n_actions; i++) {
- const union odp_action *a = &actions->actions[i];
- switch (a->type) {
- case ODPAT_OUTPUT:
- if (a->output.port >= DP_MAX_PORTS)
- return -EINVAL;
- break;
+ switch (type) {
+ case ODPAT_UNSPEC:
+ return -EINVAL;
- case ODPAT_OUTPUT_GROUP:
- if (a->output_group.group >= DP_MAX_GROUPS)
- return -EINVAL;
+ case ODPAT_CONTROLLER:
+ case ODPAT_STRIP_VLAN:
+ case ODPAT_SET_DL_SRC:
+ case ODPAT_SET_DL_DST:
+ case ODPAT_SET_NW_SRC:
+ case ODPAT_SET_NW_DST:
+ case ODPAT_SET_TP_SRC:
+ case ODPAT_SET_TP_DST:
+ case ODPAT_SET_TUNNEL:
+ case ODPAT_SET_PRIORITY:
+ case ODPAT_POP_PRIORITY:
+ case ODPAT_DROP_SPOOFED_ARP:
+ /* No validation needed. */
break;
- case ODPAT_SET_VLAN_VID:
- if (a->vlan_vid.vlan_vid & htons(~VLAN_VID_MASK))
+ case ODPAT_OUTPUT:
+ if (nla_get_u32(a) >= DP_MAX_PORTS)
return -EINVAL;
break;
- case ODPAT_SET_VLAN_PCP:
- if (a->vlan_pcp.vlan_pcp
- & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT))
+ case ODPAT_SET_DL_TCI:
+ if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
return -EINVAL;
break;
case ODPAT_SET_NW_TOS:
- if (a->nw_tos.nw_tos & INET_ECN_MASK)
+ if (nla_get_u8(a) & INET_ECN_MASK)
return -EINVAL;
break;
default:
- if (a->type >= ODPAT_N_ACTIONS)
- return -EOPNOTSUPP;
- break;
+ return -EOPNOTSUPP;
}
}
+ if (rem > 0)
+ return -EINVAL;
+
return 0;
}
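
With the switch from union odp_action to Netlink attributes, an action list is a flat run of struct nlattr whose payload sizes must match action_lens[] exactly, and validate_actions() rejects anything else. For instance, a single output action is a 4-byte u32 attribute; a sketch of building one with the standard helper (skb and out_port assumed):

    /* One ODPAT_OUTPUT action; 4-byte payload per action_lens[]. */
    if (nla_put_u32(skb, ODPAT_OUTPUT, out_port))
        return -EMSGSIZE;
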
struct sw_flow_actions *actions;
int error;
- actions = flow_actions_alloc(flow->n_actions);
+ actions = flow_actions_alloc(flow->actions_len);
error = PTR_ERR(actions);
if (IS_ERR(actions))
goto error;
error = -EFAULT;
- if (copy_from_user(actions->actions, flow->actions,
- flow->n_actions * sizeof(union odp_action)))
+ if (copy_from_user(actions->actions,
+ (struct nlattr __user __force *)flow->actions,
+ flow->actions_len))
goto error_free_actions;
- error = validate_actions(actions);
+ error = validate_actions(actions->actions, actions->actions_len);
if (error)
goto error_free_actions;
static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
{
- if (flow->used.tv_sec) {
- stats->used_sec = flow->used.tv_sec;
- stats->used_nsec = flow->used.tv_nsec;
+ if (flow->used) {
+ struct timespec offset_ts, used, now_mono;
+
+ ktime_get_ts(&now_mono);
+ jiffies_to_timespec(jiffies - flow->used, &offset_ts);
+ set_normalized_timespec(&used, now_mono.tv_sec - offset_ts.tv_sec,
+ now_mono.tv_nsec - offset_ts.tv_nsec);
+
+ stats->used_sec = used.tv_sec;
+ stats->used_nsec = used.tv_nsec;
} else {
stats->used_sec = 0;
stats->used_nsec = 0;
}
+
stats->n_packets = flow->packet_count;
stats->n_bytes = flow->byte_count;
- stats->ip_tos = flow->ip_tos;
+ stats->reserved = 0;
stats->tcp_flags = flow->tcp_flags;
stats->error = 0;
}
static void clear_stats(struct sw_flow *flow)
{
- flow->used.tv_sec = flow->used.tv_nsec = 0;
+ flow->used = 0;
flow->tcp_flags = 0;
- flow->ip_tos = 0;
flow->packet_count = 0;
flow->byte_count = 0;
}
static int expand_table(struct datapath *dp)
{
- struct tbl *old_table = rcu_dereference(dp->table);
+ struct tbl *old_table = get_table_protected(dp);
struct tbl *new_table;
new_table = tbl_expand(old_table);
return 0;
}
-static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
+static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
+ struct odp_flow_stats *stats)
{
- struct odp_flow_put uf;
struct tbl_node *flow_node;
+ struct sw_flow_key key;
struct sw_flow *flow;
struct tbl *table;
- struct odp_flow_stats stats;
+ struct sw_flow_actions *acts = NULL;
int error;
+ u32 hash;
- error = -EFAULT;
- if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
- goto error;
- memset(uf.flow.key.reserved, 0, sizeof uf.flow.key.reserved);
+ error = flow_copy_from_user(&key, (const struct nlattr __force __user *)uf->flow.key,
+ uf->flow.key_len);
+ if (error)
+ return error;
- table = rcu_dereference(dp->table);
- flow_node = tbl_lookup(table, &uf.flow.key, flow_hash(&uf.flow.key), flow_cmp);
+ hash = flow_hash(&key);
+ table = get_table_protected(dp);
+ flow_node = tbl_lookup(table, &key, hash, flow_cmp);
if (!flow_node) {
/* No such flow. */
- struct sw_flow_actions *acts;
-
error = -ENOENT;
- if (!(uf.flags & ODPPF_CREATE))
+ if (!(uf->flags & ODPPF_CREATE))
goto error;
/* Expand table, if necessary, to make room. */
error = expand_table(dp);
if (error)
goto error;
- table = rcu_dereference(dp->table);
+ table = get_table_protected(dp);
}
/* Allocate flow. */
- error = -ENOMEM;
- flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
- if (flow == NULL)
+ flow = flow_alloc();
+ if (IS_ERR(flow)) {
+ error = PTR_ERR(flow);
goto error;
- flow->key = uf.flow.key;
- spin_lock_init(&flow->lock);
+ }
+ flow->key = key;
clear_stats(flow);
/* Obtain actions. */
- acts = get_actions(&uf.flow);
+ acts = get_actions(&uf->flow);
error = PTR_ERR(acts);
if (IS_ERR(acts))
goto error_free_flow;
rcu_assign_pointer(flow->sf_acts, acts);
/* Put flow in bucket. */
- error = tbl_insert(table, &flow->tbl_node, flow_hash(&flow->key));
+ error = tbl_insert(table, &flow->tbl_node, hash);
if (error)
goto error_free_flow_acts;
- memset(&stats, 0, sizeof(struct odp_flow_stats));
+ memset(stats, 0, sizeof(struct odp_flow_stats));
} else {
/* We found a matching flow. */
struct sw_flow_actions *old_acts, *new_acts;
- unsigned long int flags;
flow = flow_cast(flow_node);
/* Bail out if we're not allowed to modify an existing flow. */
error = -EEXIST;
- if (!(uf.flags & ODPPF_MODIFY))
+ if (!(uf->flags & ODPPF_MODIFY))
goto error;
/* Swap actions. */
- new_acts = get_actions(&uf.flow);
+ new_acts = get_actions(&uf->flow);
error = PTR_ERR(new_acts);
if (IS_ERR(new_acts))
goto error;
- old_acts = rcu_dereference(flow->sf_acts);
- if (old_acts->n_actions != new_acts->n_actions ||
+
+ old_acts = rcu_dereference_protected(flow->sf_acts,
+ lockdep_is_held(&dp->mutex));
+ if (old_acts->actions_len != new_acts->actions_len ||
memcmp(old_acts->actions, new_acts->actions,
- sizeof(union odp_action) * old_acts->n_actions)) {
+ old_acts->actions_len)) {
rcu_assign_pointer(flow->sf_acts, new_acts);
flow_deferred_free_acts(old_acts);
} else {
}
/* Fetch stats, then clear them if necessary. */
- spin_lock_irqsave(&flow->lock, flags);
- get_stats(flow, &stats);
- if (uf.flags & ODPPF_ZERO_STATS)
+ spin_lock_bh(&flow->lock);
+ get_stats(flow, stats);
+ if (uf->flags & ODPPF_ZERO_STATS)
clear_stats(flow);
- spin_unlock_irqrestore(&flow->lock, flags);
+ spin_unlock_bh(&flow->lock);
}
- /* Copy stats to userspace. */
- if (__copy_to_user(&ufp->flow.stats, &stats,
- sizeof(struct odp_flow_stats)))
- return -EFAULT;
return 0;
error_free_flow_acts:
- kfree(flow->sf_acts);
+ kfree(acts);
error_free_flow:
- kmem_cache_free(flow_cache, flow);
+ flow->sf_acts = NULL;
+ flow_put(flow);
error:
return error;
}
-static int put_actions(const struct sw_flow *flow, struct odp_flow __user *ufp)
+static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
{
- union odp_action __user *actions;
- struct sw_flow_actions *sf_acts;
- u32 n_actions;
+ struct odp_flow_stats stats;
+ struct odp_flow_put uf;
+ int error;
- if (__get_user(actions, &ufp->actions) ||
- __get_user(n_actions, &ufp->n_actions))
+ if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
return -EFAULT;
- if (!n_actions)
- return 0;
+ error = do_put_flow(dp, &uf, &stats);
+ if (error)
+ return error;
- sf_acts = rcu_dereference(flow->sf_acts);
- if (__put_user(sf_acts->n_actions, &ufp->n_actions) ||
- (actions && copy_to_user(actions, sf_acts->actions,
- sizeof(union odp_action) *
- min(sf_acts->n_actions, n_actions))))
+ if (copy_to_user(&ufp->flow.stats, &stats,
+ sizeof(struct odp_flow_stats)))
return -EFAULT;
return 0;
}
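
For reference, the shape of a request that reaches do_put_flow(): userspace fills struct odp_flow_put with Netlink-formatted key and actions plus the ODPPF_* flags validated above. A hedged sketch (field names as used in this file; attribute-buffer construction omitted):

    struct odp_flow_put put;

    memset(&put, 0, sizeof put);
    put.flow.key = key_attrs;              /* struct nlattr run */
    put.flow.key_len = key_attrs_len;
    put.flow.actions = action_attrs;
    put.flow.actions_len = action_attrs_len;
    put.flags = ODPPF_CREATE | ODPPF_ZERO_STATS;
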
-static int answer_query(struct sw_flow *flow, u32 query_flags,
- struct odp_flow __user *ufp)
+static int do_answer_query(struct datapath *dp, struct sw_flow *flow,
+ u32 query_flags,
+ struct odp_flow_stats __user *ustats,
+ struct nlattr __user *actions,
+ u32 __user *actions_lenp)
{
+ struct sw_flow_actions *sf_acts;
struct odp_flow_stats stats;
- unsigned long int flags;
+ u32 actions_len;
- spin_lock_irqsave(&flow->lock, flags);
+ spin_lock_bh(&flow->lock);
get_stats(flow, &stats);
-
- if (query_flags & ODPFF_ZERO_TCP_FLAGS) {
+ if (query_flags & ODPFF_ZERO_TCP_FLAGS)
flow->tcp_flags = 0;
- }
- spin_unlock_irqrestore(&flow->lock, flags);
- if (__copy_to_user(&ufp->stats, &stats, sizeof(struct odp_flow_stats)))
+ spin_unlock_bh(&flow->lock);
+
+ if (copy_to_user(ustats, &stats, sizeof(struct odp_flow_stats)) ||
+ get_user(actions_len, actions_lenp))
+ return -EFAULT;
+
+ if (!actions_len)
+ return 0;
+
+ sf_acts = rcu_dereference_protected(flow->sf_acts,
+ lockdep_is_held(&dp->mutex));
+ if (put_user(sf_acts->actions_len, actions_lenp) ||
+ (actions && copy_to_user(actions, sf_acts->actions,
+ min(sf_acts->actions_len, actions_len))))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int answer_query(struct datapath *dp, struct sw_flow *flow,
+ u32 query_flags, struct odp_flow __user *ufp)
+{
+ struct nlattr __user *actions;
+
+ if (get_user(actions, (struct nlattr __user * __user *)&ufp->actions))
return -EFAULT;
- return put_actions(flow, ufp);
+
+ return do_answer_query(dp, flow, query_flags,
+ &ufp->stats, actions, &ufp->actions_len);
}
-static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
+static struct sw_flow *do_del_flow(struct datapath *dp, const struct nlattr __user *key, u32 key_len)
{
- struct tbl *table = rcu_dereference(dp->table);
- struct odp_flow uf;
+ struct tbl *table = get_table_protected(dp);
struct tbl_node *flow_node;
- struct sw_flow *flow;
+ struct sw_flow_key swkey;
int error;
- error = -EFAULT;
- if (copy_from_user(&uf, ufp, sizeof uf))
- goto error;
- memset(uf.key.reserved, 0, sizeof uf.key.reserved);
+ error = flow_copy_from_user(&swkey, key, key_len);
+ if (error)
+ return ERR_PTR(error);
- flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
- error = -ENOENT;
+ flow_node = tbl_lookup(table, &swkey, flow_hash(&swkey), flow_cmp);
if (!flow_node)
- goto error;
+ return ERR_PTR(-ENOENT);
error = tbl_remove(table, flow_node);
if (error)
- goto error;
+ return ERR_PTR(error);
- /* XXX These statistics might lose a few packets, since other CPUs can
- * be using this flow. We used to synchronize_rcu() to make sure that
- * we get completely accurate stats, but that blows our performance,
- * badly. */
+ /* XXX Returned flow_node's statistics might lose a few packets, since
+ * other CPUs can be using this flow. We used to synchronize_rcu() to
+ * make sure that we get completely accurate stats, but that blows our
+ * performance, badly. */
+ return flow_cast(flow_node);
+}
- flow = flow_cast(flow_node);
- error = answer_query(flow, 0, ufp);
- flow_deferred_free(flow);
+static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
+{
+ struct sw_flow *flow;
+ struct odp_flow uf;
+ int error;
-error:
+ if (copy_from_user(&uf, ufp, sizeof(uf)))
+ return -EFAULT;
+
+ flow = do_del_flow(dp, (const struct nlattr __force __user *)uf.key, uf.key_len);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ error = answer_query(dp, flow, 0, ufp);
+ flow_deferred_free(flow);
return error;
}
-static int query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
+static int do_query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
- struct tbl *table = rcu_dereference(dp->table);
- int i;
+ struct tbl *table = get_table_protected(dp);
+ u32 i;
+
for (i = 0; i < flowvec->n_flows; i++) {
- struct __user odp_flow *ufp = &flowvec->flows[i];
+ struct odp_flow __user *ufp = (struct odp_flow __user __force *)&flowvec->flows[i];
+ struct sw_flow_key key;
struct odp_flow uf;
struct tbl_node *flow_node;
int error;
- if (__copy_from_user(&uf, ufp, sizeof uf))
+ if (copy_from_user(&uf, ufp, sizeof(uf)))
return -EFAULT;
- memset(uf.key.reserved, 0, sizeof uf.key.reserved);
- flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
+ error = flow_copy_from_user(&key, (const struct nlattr __force __user *)uf.key, uf.key_len);
+ if (error)
+ return error;
+
+	flow_node = tbl_lookup(table, &key, flow_hash(&key), flow_cmp);
if (!flow_node)
- error = __put_user(ENOENT, &ufp->stats.error);
+ error = put_user(ENOENT, &ufp->stats.error);
else
- error = answer_query(flow_cast(flow_node), uf.flags, ufp);
+ error = answer_query(dp, flow_cast(flow_node), uf.flags, ufp);
if (error)
return -EFAULT;
}
return flowvec->n_flows;
}
-struct list_flows_cbdata {
- struct odp_flow __user *uflows;
- int n_flows;
- int listed_flows;
-};
-
-static int list_flow(struct tbl_node *node, void *cbdata_)
+static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
+ int (*function)(struct datapath *,
+ const struct odp_flowvec *))
{
- struct sw_flow *flow = flow_cast(node);
- struct list_flows_cbdata *cbdata = cbdata_;
- struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
- int error;
+ struct odp_flowvec __user *uflowvec;
+ struct odp_flowvec flowvec;
+ int retval;
- if (__copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
+ uflowvec = (struct odp_flowvec __user *)argp;
+ if (copy_from_user(&flowvec, uflowvec, sizeof(flowvec)))
return -EFAULT;
- error = answer_query(flow, 0, ufp);
- if (error)
- return error;
- if (cbdata->listed_flows >= cbdata->n_flows)
- return cbdata->listed_flows;
- return 0;
+ if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
+ return -EINVAL;
+
+ retval = function(dp, &flowvec);
+ return (retval < 0 ? retval
+ : retval == flowvec.n_flows ? 0
+ : put_user(retval, &uflowvec->n_flows));
}
-static int list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
+static struct sw_flow *do_dump_flow(struct datapath *dp, u32 __user *state)
{
- struct list_flows_cbdata cbdata;
- int error;
+ struct tbl *table = get_table_protected(dp);
+ struct tbl_node *tbl_node;
+ u32 bucket, obj;
- if (!flowvec->n_flows)
- return 0;
+ if (get_user(bucket, &state[0]) || get_user(obj, &state[1]))
+ return ERR_PTR(-EFAULT);
+
+ tbl_node = tbl_next(table, &bucket, &obj);
+
+ if (put_user(bucket, &state[0]) || put_user(obj, &state[1]))
+ return ERR_PTR(-EFAULT);
- cbdata.uflows = flowvec->flows;
- cbdata.n_flows = flowvec->n_flows;
- cbdata.listed_flows = 0;
- error = tbl_foreach(rcu_dereference(dp->table), list_flow, &cbdata);
- return error ? error : cbdata.listed_flows;
+ return tbl_node ? flow_cast(tbl_node) : NULL;
}
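
The two u32s form a resumable cursor (hash bucket, object within bucket) kept in userspace, so the kernel holds no per-dump state between calls. A hypothetical userspace loop driving a dump (the ioctl request name is assumed here; key-buffer setup in 'flow' omitted):

    struct odp_flow_dump dump;
    struct odp_flow flow;

    memset(&dump, 0, sizeof dump);   /* state[] = {0, 0}: start over */
    dump.flow = &flow;
    for (;;) {
        if (ioctl(fd, ODP_FLOW_DUMP, &dump))   /* name assumed */
            break;
        if (flow.flags & ODPFF_EOF)
            break;
        /* consume flow.key / flow.stats here */
    }
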
-static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
- int (*function)(struct datapath *,
- const struct odp_flowvec *))
+static int dump_flow(struct datapath *dp, struct odp_flow_dump __user *udumpp)
{
- struct odp_flowvec __user *uflowvec;
- struct odp_flowvec flowvec;
- int retval;
+ struct odp_flow __user *uflowp;
+ struct nlattr __user *ukey;
+ struct sw_flow *flow;
+	u32 key_len;
+	int retval;
- uflowvec = (struct odp_flowvec __user *)argp;
- if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
- copy_from_user(&flowvec, uflowvec, sizeof flowvec))
+ flow = do_dump_flow(dp, udumpp->state);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ if (get_user(uflowp, (struct odp_flow __user *__user*)&udumpp->flow))
return -EFAULT;
- if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
- return -EINVAL;
+ if (!flow)
+ return put_user(ODPFF_EOF, &uflowp->flags);
- if (!access_ok(VERIFY_WRITE, flowvec.flows,
- flowvec.n_flows * sizeof(struct odp_flow)))
+ if (put_user(0, &uflowp->flags) ||
+ get_user(ukey, (struct nlattr __user * __user*)&uflowp->key) ||
+ get_user(key_len, &uflowp->key_len))
return -EFAULT;
- retval = function(dp, &flowvec);
- return (retval < 0 ? retval
- : retval == flowvec.n_flows ? 0
- : __put_user(retval, &uflowvec->n_flows));
+	retval = flow_copy_to_user(ukey, &flow->key, key_len);
+	if (retval < 0)
+		return retval;
+	if (put_user(retval, &uflowp->key_len))
+ return -EFAULT;
+
+ return answer_query(dp, flow, 0, uflowp);
}
-static int do_execute(struct datapath *dp, const struct odp_execute *executep)
+static int do_execute(struct datapath *dp, const struct odp_execute *execute)
{
- struct odp_execute execute;
- struct odp_flow_key key;
+ struct sw_flow_key key;
struct sk_buff *skb;
struct sw_flow_actions *actions;
struct ethhdr *eth;
+ bool is_frag;
int err;
- err = -EFAULT;
- if (copy_from_user(&execute, executep, sizeof execute))
- goto error;
-
err = -EINVAL;
- if (execute.length < ETH_HLEN || execute.length > 65535)
+ if (execute->length < ETH_HLEN || execute->length > 65535)
goto error;
- err = -ENOMEM;
- actions = flow_actions_alloc(execute.n_actions);
- if (!actions)
+ actions = flow_actions_alloc(execute->actions_len);
+ if (IS_ERR(actions)) {
+ err = PTR_ERR(actions);
goto error;
+ }
err = -EFAULT;
- if (copy_from_user(actions->actions, execute.actions,
- execute.n_actions * sizeof *execute.actions))
+ if (copy_from_user(actions->actions,
+ (struct nlattr __user __force *)execute->actions, execute->actions_len))
goto error_free_actions;
- err = validate_actions(actions);
+ err = validate_actions(actions->actions, execute->actions_len);
if (err)
goto error_free_actions;
err = -ENOMEM;
- skb = alloc_skb(execute.length, GFP_KERNEL);
+ skb = alloc_skb(execute->length, GFP_KERNEL);
if (!skb)
goto error_free_actions;
- if (execute.in_port < DP_MAX_PORTS)
- OVS_CB(skb)->dp_port = dp->ports[execute.in_port];
- else
- OVS_CB(skb)->dp_port = NULL;
-
err = -EFAULT;
- if (copy_from_user(skb_put(skb, execute.length), execute.data,
- execute.length))
+ if (copy_from_user(skb_put(skb, execute->length),
+ (const void __user __force *)execute->data,
+ execute->length))
goto error_free_skb;
skb_reset_mac_header(skb);
else
skb->protocol = htons(ETH_P_802_2);
- flow_extract(skb, execute.in_port, &key);
- err = execute_actions(dp, skb, &key, actions->actions,
- actions->n_actions, GFP_KERNEL);
+ err = flow_extract(skb, -1, &key, &is_frag);
+ if (err)
+ goto error_free_skb;
+
+ rcu_read_lock();
+ err = execute_actions(dp, skb, &key, actions->actions, actions->actions_len);
+ rcu_read_unlock();
+
kfree(actions);
return err;
return err;
}
+static int execute_packet(struct datapath *dp, const struct odp_execute __user *executep)
+{
+ struct odp_execute execute;
+
+ if (copy_from_user(&execute, executep, sizeof(execute)))
+ return -EFAULT;
+
+ return do_execute(dp, &execute);
+}
+
static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
- struct tbl *table = rcu_dereference(dp->table);
struct odp_stats stats;
int i;
- stats.n_flows = tbl_count(table);
- stats.cur_capacity = tbl_n_buckets(table);
- stats.max_capacity = TBL_MAX_BUCKETS;
stats.n_ports = dp->n_ports;
stats.max_ports = DP_MAX_PORTS;
- stats.max_groups = DP_MAX_GROUPS;
stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
for_each_possible_cpu(i) {
- const struct dp_stats_percpu *s;
- s = percpu_ptr(dp->stats_percpu, i);
- stats.n_frags += s->n_frags;
- stats.n_hit += s->n_hit;
- stats.n_missed += s->n_missed;
- stats.n_lost += s->n_lost;
+ const struct dp_stats_percpu *percpu_stats;
+ struct dp_stats_percpu local_stats;
+ unsigned seqcount;
+
+ percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
+
+ do {
+ seqcount = read_seqcount_begin(&percpu_stats->seqlock);
+ local_stats = *percpu_stats;
+ } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
+
+ stats.n_frags += local_stats.n_frags;
+ stats.n_hit += local_stats.n_hit;
+ stats.n_missed += local_stats.n_missed;
+ stats.n_lost += local_stats.n_lost;
}
stats.max_miss_queue = DP_MAX_QUEUE_LEN;
stats.max_action_queue = DP_MAX_QUEUE_LEN;
- return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
+ return copy_to_user(statsp, &stats, sizeof(stats)) ? -EFAULT : 0;
}
/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int dp_min_mtu(const struct datapath *dp)
{
- struct dp_port *p;
+ struct vport *p;
int mtu = 0;
ASSERT_RTNL();
/* Skip any internal ports, since that's what we're trying to
* set. */
- if (is_internal_vport(p->vport))
+ if (is_internal_vport(p))
continue;
- dev_mtu = vport_get_mtu(p->vport);
+ dev_mtu = vport_get_mtu(p);
if (!mtu || dev_mtu < mtu)
mtu = dev_mtu;
}
}
/* Sets the MTU of all datapath devices to the minimum of the ports. Must
- * be called with RTNL lock and dp_mutex. */
+ * be called with RTNL lock. */
void set_internal_devs_mtu(const struct datapath *dp)
{
- struct dp_port *p;
+ struct vport *p;
int mtu;
ASSERT_RTNL();
mtu = dp_min_mtu(dp);
list_for_each_entry_rcu (p, &dp->port_list, node) {
- if (is_internal_vport(p->vport))
- vport_set_mtu(p->vport, mtu);
+ if (is_internal_vport(p))
+ vport_set_mtu(p, mtu);
}
}
-static int
-put_port(const struct dp_port *p, struct odp_port __user *uop)
+static int get_listen_mask(const struct file *f)
{
- struct odp_port op;
+ return (long)f->private_data;
+}
- memset(&op, 0, sizeof op);
+static void set_listen_mask(struct file *f, int listen_mask)
+{
+ f->private_data = (void*)(long)listen_mask;
+}
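
The listen mask is per open file, stored directly in file->private_data as an integer, so separate opens of the same datapath can subscribe to different upcall queues. Illustratively (bit layout per openvswitch/datapath-protocol.h; _ODPL_MISS_NR appears earlier in this file):

    /* Subscribe this open file to flow-miss upcalls (sketch): */
    set_listen_mask(f, get_listen_mask(f) | (1 << _ODPL_MISS_NR));
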
+
+static const struct nla_policy vport_policy[ODP_VPORT_ATTR_MAX + 1] = {
+ [ODP_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+ [ODP_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
+ [ODP_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
+ [ODP_VPORT_ATTR_STATS] = { .len = sizeof(struct rtnl_link_stats64) },
+ [ODP_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
+ [ODP_VPORT_ATTR_MTU] = { .type = NLA_U32 },
+ [ODP_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
+};
+
+static int copy_vport_to_user(void __user *dst, struct vport *vport, uint32_t total_len)
+{
+ struct odp_vport *odp_vport;
+ struct sk_buff *skb;
+ struct nlattr *nla;
+ int ifindex, iflink;
+ int err;
+
+ skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ err = -ENOMEM;
+ if (!skb)
+ goto exit;
rcu_read_lock();
- strncpy(op.devname, vport_get_name(p->vport), sizeof op.devname);
- rcu_read_unlock();
+ odp_vport = (struct odp_vport*)__skb_put(skb, sizeof(struct odp_vport));
+ odp_vport->dp_idx = vport->dp->dp_idx;
+ odp_vport->total_len = total_len;
+
+ NLA_PUT_U32(skb, ODP_VPORT_ATTR_PORT_NO, vport->port_no);
+ NLA_PUT_U32(skb, ODP_VPORT_ATTR_TYPE, vport_get_type(vport));
+ NLA_PUT_STRING(skb, ODP_VPORT_ATTR_NAME, vport_get_name(vport));
+
+ nla = nla_reserve(skb, ODP_VPORT_ATTR_STATS, sizeof(struct rtnl_link_stats64));
+ if (!nla)
+ goto nla_put_failure;
+ if (vport_get_stats(vport, nla_data(nla)))
+ __skb_trim(skb, skb->len - nla->nla_len);
+
+ NLA_PUT(skb, ODP_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));
+
+ NLA_PUT_U32(skb, ODP_VPORT_ATTR_MTU, vport_get_mtu(vport));
+
+	err = vport_get_options(vport, skb);
+	if (err)
+		goto exit_unlock;
+
+ ifindex = vport_get_ifindex(vport);
+ if (ifindex > 0)
+ NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFINDEX, ifindex);
- op.port = p->port_no;
- op.flags = is_internal_vport(p->vport) ? ODP_PORT_INTERNAL : 0;
+ iflink = vport_get_iflink(vport);
+ if (iflink > 0)
+ NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFLINK, iflink);
- return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
+ err = -EMSGSIZE;
+ if (skb->len > total_len)
+ goto exit_unlock;
+
+ odp_vport->len = skb->len;
+ err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
+ goto exit_unlock;
+
+nla_put_failure:
+ err = -EMSGSIZE;
+exit_unlock:
+ rcu_read_unlock();
+ kfree_skb(skb);
+exit:
+ return err;
}
-static int
-query_port(struct datapath *dp, struct odp_port __user *uport)
+static struct sk_buff *copy_vport_from_user(struct odp_vport __user *uodp_vport,
+ struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
- struct odp_port port;
+ struct odp_vport *odp_vport;
+ struct sk_buff *skb;
+ u32 len;
+ int err;
- if (copy_from_user(&port, uport, sizeof port))
- return -EFAULT;
+ if (get_user(len, &uodp_vport->len))
+ return ERR_PTR(-EFAULT);
+ if (len < sizeof(struct odp_vport))
+ return ERR_PTR(-EINVAL);
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ err = -EFAULT;
+ if (copy_from_user(__skb_put(skb, len), uodp_vport, len))
+ goto error_free_skb;
- if (port.devname[0]) {
- struct vport *vport;
- struct dp_port *dp_port;
- int err = 0;
+ odp_vport = (struct odp_vport *)skb->data;
+ err = -EINVAL;
+ if (odp_vport->len != len)
+ goto error_free_skb;
- port.devname[IFNAMSIZ - 1] = '\0';
+ err = nla_parse(a, ODP_VPORT_ATTR_MAX, (struct nlattr *)(skb->data + sizeof(struct odp_vport)),
+ skb->len - sizeof(struct odp_vport), vport_policy);
+ if (err)
+ goto error_free_skb;
- vport_lock();
- rcu_read_lock();
+ err = VERIFY_NUL_STRING(a[ODP_VPORT_ATTR_NAME], IFNAMSIZ - 1);
+ if (err)
+ goto error_free_skb;
+
+ return skb;
- vport = vport_locate(port.devname);
+error_free_skb:
+ kfree_skb(skb);
+ return ERR_PTR(err);
+}
+
+
+/* Called without any locks (or with RTNL lock).
+ * Returns holding vport->dp->mutex.
+ */
+static struct vport *lookup_vport(struct odp_vport *odp_vport,
+ struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
+{
+ struct datapath *dp;
+ struct vport *vport;
+
+ if (a[ODP_VPORT_ATTR_NAME]) {
+ int dp_idx, port_no;
+
+ retry:
+ vport_lock();
+ vport = vport_locate(nla_data(a[ODP_VPORT_ATTR_NAME]));
if (!vport) {
- err = -ENODEV;
- goto error_unlock;
+ vport_unlock();
+ return ERR_PTR(-ENODEV);
}
+ dp_idx = vport->dp->dp_idx;
+ port_no = vport->port_no;
+ vport_unlock();
+
+ dp = get_dp_locked(dp_idx);
+ if (!dp)
+ goto retry;
- dp_port = vport_get_dp_port(vport);
- if (!dp_port || dp_port->dp != dp) {
- err = -ENOENT;
- goto error_unlock;
+ vport = get_vport_protected(dp, port_no);
+ if (!vport ||
+ strcmp(vport_get_name(vport), nla_data(a[ODP_VPORT_ATTR_NAME]))) {
+ mutex_unlock(&dp->mutex);
+ goto retry;
}
- port.port = dp_port->port_no;
+ return vport;
+ } else if (a[ODP_VPORT_ATTR_PORT_NO]) {
+ u32 port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);
-error_unlock:
- rcu_read_unlock();
- vport_unlock();
+ if (port_no >= DP_MAX_PORTS)
+ return ERR_PTR(-EINVAL);
- if (err)
- return err;
- } else {
- if (port.port >= DP_MAX_PORTS)
- return -EINVAL;
- if (!dp->ports[port.port])
- return -ENOENT;
- }
+ dp = get_dp_locked(odp_vport->dp_idx);
+ if (!dp)
+ return ERR_PTR(-ENODEV);
- return put_port(dp->ports[port.port], uport);
+ vport = get_vport_protected(dp, port_no);
+ if (!vport) {
+ mutex_unlock(&dp->mutex);
+ return ERR_PTR(-ENOENT);
+ }
+ return vport;
+ } else
+ return ERR_PTR(-EINVAL);
}
-static int
-list_ports(struct datapath *dp, struct odp_portvec __user *pvp)
+static int change_vport(struct vport *vport, struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
- struct odp_portvec pv;
- struct dp_port *p;
- int idx;
+ int err = 0;
+ if (a[ODP_VPORT_ATTR_STATS])
+ err = vport_set_stats(vport, nla_data(a[ODP_VPORT_ATTR_STATS]));
+ if (!err && a[ODP_VPORT_ATTR_ADDRESS])
+ err = vport_set_addr(vport, nla_data(a[ODP_VPORT_ATTR_ADDRESS]));
+ if (!err && a[ODP_VPORT_ATTR_MTU])
+ err = vport_set_mtu(vport, nla_get_u32(a[ODP_VPORT_ATTR_MTU]));
+ return err;
+}
- if (copy_from_user(&pv, pvp, sizeof pv))
- return -EFAULT;
+static int attach_vport(struct odp_vport __user *uodp_vport)
+{
+ struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
+ struct odp_vport *odp_vport;
+ struct vport_parms parms;
+ struct vport *vport;
+ struct sk_buff *skb;
+ struct datapath *dp;
+ u32 port_no;
+ int err;
+
+ skb = copy_vport_from_user(uodp_vport, a);
+ err = PTR_ERR(skb);
+ if (IS_ERR(skb))
+ goto exit;
+ odp_vport = (struct odp_vport *)skb->data;
+
+ err = -EINVAL;
+ if (!a[ODP_VPORT_ATTR_NAME] || !a[ODP_VPORT_ATTR_TYPE])
+ goto exit_kfree_skb;
+
+ rtnl_lock();
+
+ dp = get_dp_locked(odp_vport->dp_idx);
+ err = -ENODEV;
+ if (!dp)
+ goto exit_unlock_rtnl;
+
+ if (a[ODP_VPORT_ATTR_PORT_NO]) {
+ port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);
+
+ err = -EFBIG;
+ if (port_no >= DP_MAX_PORTS)
+ goto exit_unlock_dp;
- idx = 0;
- if (pv.n_ports) {
- list_for_each_entry_rcu (p, &dp->port_list, node) {
- if (put_port(p, &pv.ports[idx]))
- return -EFAULT;
- if (idx++ >= pv.n_ports)
+ vport = get_vport_protected(dp, port_no);
+ err = -EBUSY;
+ if (vport)
+ goto exit_unlock_dp;
+ } else {
+ for (port_no = 1; ; port_no++) {
+ if (port_no >= DP_MAX_PORTS) {
+ err = -EFBIG;
+ goto exit_unlock_dp;
+ }
+ vport = get_vport_protected(dp, port_no);
+ if (!vport)
break;
}
}
- return put_user(dp->n_ports, &pvp->n_ports);
-}
-/* RCU callback for freeing a dp_port_group */
-static void free_port_group(struct rcu_head *rcu)
-{
- struct dp_port_group *g = container_of(rcu, struct dp_port_group, rcu);
- kfree(g);
+ parms.name = nla_data(a[ODP_VPORT_ATTR_NAME]);
+ parms.type = nla_get_u32(a[ODP_VPORT_ATTR_TYPE]);
+ parms.options = a[ODP_VPORT_ATTR_OPTIONS];
+ parms.dp = dp;
+ parms.port_no = port_no;
+
+ vport = new_vport(&parms);
+ err = PTR_ERR(vport);
+ if (IS_ERR(vport))
+ goto exit_unlock_dp;
+
+ set_internal_devs_mtu(dp);
+ dp_sysfs_add_if(vport);
+
+ err = change_vport(vport, a);
+ if (err) {
+ dp_detach_port(vport);
+ goto exit_unlock_dp;
+ }
+
+ err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);
+
+exit_unlock_dp:
+ mutex_unlock(&dp->mutex);
+exit_unlock_rtnl:
+ rtnl_unlock();
+exit_kfree_skb:
+ kfree_skb(skb);
+exit:
+ return err;
}
-static int
-set_port_group(struct datapath *dp, const struct odp_port_group __user *upg)
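+
+/* Handles ODP_VPORT_SET: applies the attributes supplied with 'uodp_vport'
+ * to the vport that it names or numbers. */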
+static int set_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
{
- struct odp_port_group pg;
- struct dp_port_group *new_group, *old_group;
- int error;
-
- error = -EFAULT;
- if (copy_from_user(&pg, upg, sizeof pg))
- goto error;
+ struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
+ struct vport *vport;
+ struct sk_buff *skb;
+ int err;
- error = -EINVAL;
- if (pg.n_ports > DP_MAX_PORTS || pg.group >= DP_MAX_GROUPS)
- goto error;
+ skb = copy_vport_from_user(uodp_vport, a);
+ err = PTR_ERR(skb);
+ if (IS_ERR(skb))
+ goto exit;
- error = -ENOMEM;
- new_group = kmalloc(sizeof *new_group + sizeof(u16) * pg.n_ports,
- GFP_KERNEL);
- if (!new_group)
- goto error;
+ rtnl_lock();
+ vport = lookup_vport((struct odp_vport *)skb->data, a);
+ err = PTR_ERR(vport);
+ if (IS_ERR(vport))
+ goto exit_free;
- new_group->n_ports = pg.n_ports;
- error = -EFAULT;
- if (copy_from_user(new_group->ports, pg.ports,
- sizeof(u16) * pg.n_ports))
- goto error_free;
-
- old_group = rcu_dereference(dp->groups[pg.group]);
- rcu_assign_pointer(dp->groups[pg.group], new_group);
- if (old_group)
- call_rcu(&old_group->rcu, free_port_group);
- return 0;
+ err = 0;
+ if (a[ODP_VPORT_ATTR_OPTIONS])
+ err = vport_set_options(vport, a[ODP_VPORT_ATTR_OPTIONS]);
+ if (!err)
+ err = change_vport(vport, a);
-error_free:
- kfree(new_group);
-error:
- return error;
+ mutex_unlock(&vport->dp->mutex);
+exit_free:
+ kfree_skb(skb);
+ rtnl_unlock();
+exit:
+ return err;
}
-static int
-get_port_group(struct datapath *dp, struct odp_port_group *upg)
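+
+/* Handles ODP_VPORT_DEL: detaches the vport named or numbered by
+ * 'uodp_vport' from its datapath. */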
+static int del_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
{
- struct odp_port_group pg;
- struct dp_port_group *g;
- u16 n_copy;
-
- if (copy_from_user(&pg, upg, sizeof pg))
- return -EFAULT;
+ struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
+ struct datapath *dp;
+ struct vport *vport;
+ struct sk_buff *skb;
+ int err;
- if (pg.group >= DP_MAX_GROUPS)
- return -EINVAL;
+ skb = copy_vport_from_user(uodp_vport, a);
+ err = PTR_ERR(skb);
+ if (IS_ERR(skb))
+ goto exit;
- g = dp->groups[pg.group];
- n_copy = g ? min_t(int, g->n_ports, pg.n_ports) : 0;
- if (n_copy && copy_to_user(pg.ports, g->ports, n_copy * sizeof(u16)))
- return -EFAULT;
+ rtnl_lock();
+ vport = lookup_vport((struct odp_vport *)skb->data, a);
+ err = PTR_ERR(vport);
+ if (IS_ERR(vport))
+ goto exit_free;
+ dp = vport->dp;
- if (put_user(g ? g->n_ports : 0, &upg->n_ports))
- return -EFAULT;
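+ /* The local port is destroyed only with its datapath. */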
+ err = -EINVAL;
+ if (vport->port_no == ODPP_LOCAL)
+ goto exit_free;
- return 0;
+ err = dp_detach_port(vport);
+ mutex_unlock(&dp->mutex);
+exit_free:
+ kfree_skb(skb);
+ rtnl_unlock();
+exit:
+ return err;
}
-static int get_listen_mask(const struct file *f)
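+
+/* Handles ODP_VPORT_GET: copies a description of the vport named or numbered
+ * by 'uodp_vport' back to userspace. */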
+static int get_vport(struct odp_vport __user *uodp_vport)
{
- return (long)f->private_data;
+ struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
+ struct odp_vport *odp_vport;
+ struct vport *vport;
+ struct sk_buff *skb;
+ int err;
+
+ skb = copy_vport_from_user(uodp_vport, a);
+ err = PTR_ERR(skb);
+ if (IS_ERR(skb))
+ goto exit;
+ odp_vport = (struct odp_vport *)skb->data;
+
+ vport = lookup_vport(odp_vport, a);
+ err = PTR_ERR(vport);
+ if (IS_ERR(vport))
+ goto exit_free;
+
+ err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);
+ mutex_unlock(&vport->dp->mutex);
+exit_free:
+ kfree_skb(skb);
+exit:
+ return err;
}
-static void set_listen_mask(struct file *f, int listen_mask)
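+
+/* Handles ODP_VPORT_DUMP: copies out the first vport whose port number is at
+ * or above ODP_VPORT_ATTR_PORT_NO (or 0 if that attribute is absent), and
+ * returns -ENODEV when no vports remain.  Userspace can thus iterate over
+ * all vports by restarting each dump at the previous port number plus one. */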
+static int dump_vport(struct odp_vport __user *uodp_vport)
{
- f->private_data = (void*)(long)listen_mask;
+ struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
+ struct odp_vport *odp_vport;
+ struct sk_buff *skb;
+ struct datapath *dp;
+ u32 port_no;
+ int err;
+
+ skb = copy_vport_from_user(uodp_vport, a);
+ err = PTR_ERR(skb);
+ if (IS_ERR(skb))
+ goto exit;
+ odp_vport = (struct odp_vport *)skb->data;
+
+ dp = get_dp_locked(odp_vport->dp_idx);
+ err = -ENODEV;
+ if (!dp)
+ goto exit_free;
+
+ port_no = 0;
+ if (a[ODP_VPORT_ATTR_PORT_NO])
+ port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);
+ for (; port_no < DP_MAX_PORTS; port_no++) {
+ struct vport *vport = get_vport_protected(dp, port_no);
+ if (vport) {
+ err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);
+ goto exit_unlock_dp;
+ }
+ }
+ err = -ENODEV;
+
+exit_unlock_dp:
+ mutex_unlock(&dp->mutex);
+exit_free:
+ kfree_skb(skb);
+exit:
+ return err;
}
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
int dp_idx = iminor(f->f_dentry->d_inode);
struct datapath *dp;
- int drop_frags, listeners, port_no;
+ int drop_frags, listeners;
unsigned int sflow_probability;
int err;
err = destroy_dp(dp_idx);
goto exit;
- case ODP_PORT_ATTACH:
- err = attach_port(dp_idx, (struct odp_port __user *)argp);
+ case ODP_VPORT_NEW:
+ err = attach_vport((struct odp_vport __user *)argp);
goto exit;
- case ODP_PORT_DETACH:
- err = get_user(port_no, (int __user *)argp);
- if (!err)
- err = detach_port(dp_idx, port_no);
- goto exit;
-
- case ODP_VPORT_ADD:
- err = vport_add((struct odp_vport_add __user *)argp);
- goto exit;
-
- case ODP_VPORT_MOD:
- err = vport_mod((struct odp_vport_mod __user *)argp);
+ case ODP_VPORT_GET:
+ err = get_vport((struct odp_vport __user *)argp);
goto exit;
case ODP_VPORT_DEL:
- err = vport_del((char __user *)argp);
+ err = del_vport(cmd, (struct odp_vport __user *)argp);
goto exit;
- case ODP_VPORT_STATS_GET:
- err = vport_stats_get((struct odp_vport_stats_req __user *)argp);
+ case ODP_VPORT_SET:
+ err = set_vport(cmd, (struct odp_vport __user *)argp);
goto exit;
- case ODP_VPORT_ETHER_GET:
- err = vport_ether_get((struct odp_vport_ether __user *)argp);
- goto exit;
-
- case ODP_VPORT_ETHER_SET:
- err = vport_ether_set((struct odp_vport_ether __user *)argp);
- goto exit;
-
- case ODP_VPORT_MTU_GET:
- err = vport_mtu_get((struct odp_vport_mtu __user *)argp);
- goto exit;
-
- case ODP_VPORT_MTU_SET:
- err = vport_mtu_set((struct odp_vport_mtu __user *)argp);
+ case ODP_VPORT_DUMP:
+ err = dump_vport((struct odp_vport __user *)argp);
goto exit;
}
dp->sflow_probability = sflow_probability;
break;
- case ODP_PORT_QUERY:
- err = query_port(dp, (struct odp_port __user *)argp);
- break;
-
- case ODP_PORT_LIST:
- err = list_ports(dp, (struct odp_portvec __user *)argp);
- break;
-
- case ODP_PORT_GROUP_SET:
- err = set_port_group(dp, (struct odp_port_group __user *)argp);
- break;
-
- case ODP_PORT_GROUP_GET:
- err = get_port_group(dp, (struct odp_port_group __user *)argp);
- break;
-
case ODP_FLOW_FLUSH:
err = flush_flows(dp);
break;
break;
case ODP_FLOW_GET:
- err = do_flowvec_ioctl(dp, argp, query_flows);
+ err = do_flowvec_ioctl(dp, argp, do_query_flows);
break;
- case ODP_FLOW_LIST:
- err = do_flowvec_ioctl(dp, argp, list_flows);
+ case ODP_FLOW_DUMP:
+ err = dump_flow(dp, (struct odp_flow_dump __user *)argp);
break;
case ODP_EXECUTE:
- err = do_execute(dp, (struct odp_execute __user *)argp);
+ err = execute_packet(dp, (struct odp_execute __user *)argp);
break;
default:
return 0;
}
-ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes,
- loff_t *ppos)
+#ifdef CONFIG_COMPAT
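+/* Copies an odp_flow in from 32-bit userspace, widening the embedded key and
+ * actions pointers with compat_ptr(). */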
+static int compat_get_flow(struct odp_flow *flow, const struct compat_odp_flow __user *compat)
+{
+ compat_uptr_t key, actions;
+
+ if (!access_ok(VERIFY_READ, compat, sizeof(struct compat_odp_flow)) ||
+ __copy_from_user(&flow->stats, &compat->stats, sizeof(struct odp_flow_stats)) ||
+ __get_user(key, &compat->key) ||
+ __get_user(flow->key_len, &compat->key_len) ||
+ __get_user(actions, &compat->actions) ||
+ __get_user(flow->actions_len, &compat->actions_len) ||
+ __get_user(flow->flags, &compat->flags))
+ return -EFAULT;
+
+ flow->key = (struct nlattr __force *)compat_ptr(key);
+ flow->actions = (struct nlattr __force *)compat_ptr(actions);
+ return 0;
+}
+
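+/* 32-bit handler for ODP_FLOW_PUT. */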
+static int compat_put_flow(struct datapath *dp, struct compat_odp_flow_put __user *ufp)
+{
+ struct odp_flow_stats stats;
+ struct odp_flow_put fp;
+ int error;
+
+ if (compat_get_flow(&fp.flow, &ufp->flow) ||
+ get_user(fp.flags, &ufp->flags))
+ return -EFAULT;
+
+ error = do_put_flow(dp, &fp, &stats);
+ if (error)
+ return error;
+
+ if (copy_to_user(&ufp->flow.stats, &stats,
+ sizeof(struct odp_flow_stats)))
+ return -EFAULT;
+
+ return 0;
+}
+
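+/* Fetches the flow's 32-bit actions pointer from 'ufp', widens it, and
+ * delegates to the common do_answer_query(). */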
+static int compat_answer_query(struct datapath *dp, struct sw_flow *flow,
+ u32 query_flags,
+ struct compat_odp_flow __user *ufp)
+{
+ compat_uptr_t actions;
+
+ if (get_user(actions, &ufp->actions))
+ return -EFAULT;
+
+ return do_answer_query(dp, flow, query_flags, &ufp->stats,
+ compat_ptr(actions), &ufp->actions_len);
+}
+
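+/* 32-bit handler for ODP_FLOW_DEL. */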
+static int compat_del_flow(struct datapath *dp, struct compat_odp_flow __user *ufp)
+{
+ struct sw_flow *flow;
+ struct odp_flow uf;
+ int error;
+
+ if (compat_get_flow(&uf, ufp))
+ return -EFAULT;
+
+ flow = do_del_flow(dp, (const struct nlattr __force __user *)uf.key, uf.key_len);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ error = compat_answer_query(dp, flow, 0, ufp);
+ flow_deferred_free(flow);
+ return error;
+}
+
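+/* 32-bit handler for ODP_FLOW_GET: looks up each flow in 'flows' and fills
+ * in its stats and actions. */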
+static int compat_query_flows(struct datapath *dp,
+ struct compat_odp_flow __user *flows,
+ u32 n_flows)
+{
+ struct tbl *table = get_table_protected(dp);
+ u32 i;
+
+ for (i = 0; i < n_flows; i++) {
+ struct compat_odp_flow __user *ufp = &flows[i];
+ struct odp_flow uf;
+ struct tbl_node *flow_node;
+ struct sw_flow_key key;
+ int error;
+
+ if (compat_get_flow(&uf, ufp))
+ return -EFAULT;
+
+ error = flow_copy_from_user(&key, (const struct nlattr __force __user *) uf.key, uf.key_len);
+ if (error)
+ return error;
+
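+ /* A lookup miss is reported through the per-flow stats.error
+ * field; only a fault aborts the whole vector. */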
+ flow_node = tbl_lookup(table, &key, flow_hash(&key), flow_cmp);
+ if (!flow_node)
+ error = put_user(ENOENT, &ufp->stats.error);
+ else
+ error = compat_answer_query(dp, flow_cast(flow_node),
+ uf.flags, ufp);
+ if (error)
+ return -EFAULT;
+ }
+ return n_flows;
+}
+
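+/* 32-bit handler for ODP_FLOW_DUMP. */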
+static int compat_dump_flow(struct datapath *dp, struct compat_odp_flow_dump __user *udumpp)
+{
+ struct compat_odp_flow __user *uflowp;
+ compat_uptr_t compat_ufp;
+ struct sw_flow *flow;
+ compat_uptr_t ukey;
+ u32 key_len;
+ int retval;
+
+ flow = do_dump_flow(dp, udumpp->state);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ if (get_user(compat_ufp, &udumpp->flow))
+ return -EFAULT;
+ uflowp = compat_ptr(compat_ufp);
+
+ if (!flow)
+ return put_user(ODPFF_EOF, &uflowp->flags);
+
+ if (put_user(0, &uflowp->flags) ||
+ get_user(ukey, &uflowp->key) ||
+ get_user(key_len, &uflowp->key_len))
+ return -EFAULT;
+
+ /* flow_copy_to_user() returns a negative errno on failure; check it
+ * in a signed variable, since 'key_len' is unsigned and would make
+ * the comparison below always false. */
+ retval = flow_copy_to_user(compat_ptr(ukey), &flow->key, key_len);
+ if (retval < 0)
+ return retval;
+ if (put_user(retval, &uflowp->key_len))
+ return -EFAULT;
+
+ return compat_answer_query(dp, flow, 0, uflowp);
+}
+
+static int compat_flowvec_ioctl(struct datapath *dp, unsigned long argp,
+ int (*function)(struct datapath *,
+ struct compat_odp_flow __user *,
+ u32 n_flows))
+{
+ struct compat_odp_flowvec __user *uflowvec;
+ struct compat_odp_flow __user *flows;
+ struct compat_odp_flowvec flowvec;
+ int retval;
+
+ uflowvec = compat_ptr(argp);
+ if (!access_ok(VERIFY_WRITE, uflowvec, sizeof(*uflowvec)) ||
+ copy_from_user(&flowvec, uflowvec, sizeof(flowvec)))
+ return -EFAULT;
+
+ if (flowvec.n_flows > INT_MAX / sizeof(struct compat_odp_flow))
+ return -EINVAL;
+
+ flows = compat_ptr(flowvec.flows);
+ if (!access_ok(VERIFY_WRITE, flows,
+ flowvec.n_flows * sizeof(struct compat_odp_flow)))
+ return -EFAULT;
+
+ retval = function(dp, flows, flowvec.n_flows);
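+ /* A negative retval is an error.  If every requested flow was
+ * returned, n_flows needs no update; otherwise write back the number
+ * actually copied out. */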
+ return (retval < 0 ? retval
+ : retval == flowvec.n_flows ? 0
+ : put_user(retval, &uflowvec->n_flows));
+}
+
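+/* 32-bit handler for ODP_EXECUTE: widens the embedded actions and data
+ * pointers, then hands off to the common do_execute(). */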
+static int compat_execute(struct datapath *dp, const struct compat_odp_execute __user *uexecute)
+{
+ struct odp_execute execute;
+ compat_uptr_t actions;
+ compat_uptr_t data;
+
+ if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
+ __get_user(actions, &uexecute->actions) ||
+ __get_user(execute.actions_len, &uexecute->actions_len) ||
+ __get_user(data, &uexecute->data) ||
+ __get_user(execute.length, &uexecute->length))
+ return -EFAULT;
+
+ execute.actions = (struct nlattr __force *)compat_ptr(actions);
+ execute.data = (const void __force *)compat_ptr(data);
+
+ return do_execute(dp, &execute);
+}
+
+static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
+{
+ int dp_idx = iminor(f->f_dentry->d_inode);
+ struct datapath *dp;
+ int err;
+
+ switch (cmd) {
+ case ODP_DP_DESTROY:
+ case ODP_FLOW_FLUSH:
+ /* Ioctls that don't need any translation at all. */
+ return openvswitch_ioctl(f, cmd, argp);
+
+ case ODP_DP_CREATE:
+ case ODP_VPORT_NEW:
+ case ODP_VPORT_DEL:
+ case ODP_VPORT_GET:
+ case ODP_VPORT_SET:
+ case ODP_VPORT_DUMP:
+ case ODP_DP_STATS:
+ case ODP_GET_DROP_FRAGS:
+ case ODP_SET_DROP_FRAGS:
+ case ODP_SET_LISTEN_MASK:
+ case ODP_GET_LISTEN_MASK:
+ case ODP_SET_SFLOW_PROBABILITY:
+ case ODP_GET_SFLOW_PROBABILITY:
+ /* Ioctls that just need their pointer argument extended. */
+ return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
+ }
+
+ dp = get_dp_locked(dp_idx);
+ err = -ENODEV;
+ if (!dp)
+ goto exit;
+
+ switch (cmd) {
+ case ODP_FLOW_PUT32:
+ err = compat_put_flow(dp, compat_ptr(argp));
+ break;
+
+ case ODP_FLOW_DEL32:
+ err = compat_del_flow(dp, compat_ptr(argp));
+ break;
+
+ case ODP_FLOW_GET32:
+ err = compat_flowvec_ioctl(dp, argp, compat_query_flows);
+ break;
+
+ case ODP_FLOW_DUMP32:
+ err = compat_dump_flow(dp, compat_ptr(argp));
+ break;
+
+ case ODP_EXECUTE32:
+ err = compat_execute(dp, compat_ptr(argp));
+ break;
+
+ default:
+ err = -ENOIOCTLCMD;
+ break;
+ }
+ mutex_unlock(&dp->mutex);
+exit:
+ return err;
+}
+#endif
+
+static ssize_t openvswitch_read(struct file *f, char __user *buf,
+ size_t nbytes, loff_t *ppos)
{
- /* XXX is there sufficient synchronization here? */
int listeners = get_listen_mask(f);
int dp_idx = iminor(f->f_dentry->d_inode);
- struct datapath *dp = get_dp(dp_idx);
+ struct datapath *dp = get_dp_locked(dp_idx);
struct sk_buff *skb;
- struct iovec __user iov;
- size_t copy_bytes;
+ struct iovec iov;
int retval;
if (!dp)
}
}
success:
- copy_bytes = min_t(size_t, skb->len, nbytes);
+ mutex_unlock(&dp->mutex);
+
iov.iov_base = buf;
- iov.iov_len = copy_bytes;
+ iov.iov_len = min_t(size_t, skb->len, nbytes);
retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
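+ /* On success, return the full skb->len, even if only nbytes were
+ * copied, so the caller can tell when its buffer was too small. */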
if (!retval)
- retval = copy_bytes;
+ retval = skb->len;
+
kfree_skb(skb);
+ return retval;
error:
+ mutex_unlock(&dp->mutex);
return retval;
}
static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
- /* XXX is there sufficient synchronization here? */
int dp_idx = iminor(file->f_dentry->d_inode);
- struct datapath *dp = get_dp(dp_idx);
+ struct datapath *dp = get_dp_locked(dp_idx);
unsigned int mask;
if (dp) {
poll_wait(file, &dp->waitqueue, wait);
if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
mask |= POLLIN | POLLRDNORM;
+ mutex_unlock(&dp->mutex);
} else {
mask = POLLIN | POLLRDNORM | POLLHUP;
}
return mask;
}
-struct file_operations openvswitch_fops = {
- /* XXX .aio_read = openvswitch_aio_read, */
+static struct file_operations openvswitch_fops = {
+ .owner = THIS_MODULE,
.read = openvswitch_read,
.poll = openvswitch_poll,
.unlocked_ioctl = openvswitch_ioctl,
- /* XXX .fasync = openvswitch_fasync, */
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = openvswitch_compat_ioctl,
+#endif
};
static int major;