/*
- * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
+ * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
* Distributed under the terms of the GNU GPL version 2.
*
* Significant portions of this file may be copied from parts of the Linux
#include <linux/compat.h>
#include "openvswitch/datapath-protocol.h"
+#include "checksum.h"
#include "datapath.h"
#include "actions.h"
#include "flow.h"
#include "table.h"
#include "vport-internal_dev.h"
-#include "compat.h"
-
int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);
* dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
* lock first.
*
- * It is safe to access the datapath and dp_port structures with just
+ * It is safe to access the datapath and vport structures with just
* dp_mutex.
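+ *
+ * For example, destroy_dp() below takes and releases both locks in the
+ * required order:
+ *
+ *	rtnl_lock();
+ *	mutex_lock(&dp_mutex);
+ *	...
+ *	mutex_unlock(&dp_mutex);
+ *	rtnl_unlock();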
*/
-static struct datapath *dps[ODP_MAX];
+static struct datapath __rcu *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);
-static int new_dp_port(struct datapath *, struct odp_port *, int port_no);
+static int new_vport(struct datapath *, struct odp_port *, int port_no);
/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
if (dp_idx < 0 || dp_idx >= ODP_MAX)
return NULL;
- return rcu_dereference(dps[dp_idx]);
+ return rcu_dereference_check(dps[dp_idx], rcu_read_lock_held() ||
+ lockdep_is_held(&dp_mutex));
}
EXPORT_SYMBOL_GPL(get_dp);
return dp;
}
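+/* Accessors for pointers that are RCU-protected for readers but may only be
+ * updated under dp->mutex; rcu_dereference_protected() documents this and
+ * lets lockdep check that the mutex is actually held. */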
+static struct tbl *get_table_protected(struct datapath *dp)
+{
+ return rcu_dereference_protected(dp->table,
+ lockdep_is_held(&dp->mutex));
+}
+
+static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
+{
+ return rcu_dereference_protected(dp->ports[port_no],
+ lockdep_is_held(&dp->mutex));
+}
+
/* Must be called with rcu_read_lock or RTNL lock. */
const char *dp_name(const struct datapath *dp)
{
- return vport_get_name(dp->ports[ODPP_LOCAL]->vport);
+ return vport_get_name(rcu_dereference_rtnl(dp->ports[ODPP_LOCAL]));
}
static inline size_t br_nlmsg_size(void)
}
static int dp_fill_ifinfo(struct sk_buff *skb,
- const struct dp_port *port,
+ const struct vport *port,
int event, unsigned int flags)
{
- const struct datapath *dp = port->dp;
- int ifindex = vport_get_ifindex(port->vport);
- int iflink = vport_get_iflink(port->vport);
+ struct datapath *dp = port->dp;
+ int ifindex = vport_get_ifindex(port);
+ int iflink = vport_get_iflink(port);
struct ifinfomsg *hdr;
struct nlmsghdr *nlh;
hdr->__ifi_pad = 0;
hdr->ifi_type = ARPHRD_ETHER;
hdr->ifi_index = ifindex;
- hdr->ifi_flags = vport_get_flags(port->vport);
+ hdr->ifi_flags = vport_get_flags(port);
hdr->ifi_change = 0;
- NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port->vport));
- NLA_PUT_U32(skb, IFLA_MASTER, vport_get_ifindex(dp->ports[ODPP_LOCAL]->vport));
- NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port->vport));
+ NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
+ NLA_PUT_U32(skb, IFLA_MASTER,
+ vport_get_ifindex(get_vport_protected(dp, ODPP_LOCAL)));
+ NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
NLA_PUT_U8(skb, IFLA_OPERSTATE,
- vport_is_running(port->vport)
- ? vport_get_operstate(port->vport)
+ vport_is_running(port)
+ ? vport_get_operstate(port)
: IF_OPER_DOWN);
#endif
- NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN,
- vport_get_addr(port->vport));
+ NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));
if (ifindex != iflink)
-		NLA_PUT_U32(skb, IFLA_LINK,iflink);
+		NLA_PUT_U32(skb, IFLA_LINK, iflink);
return -EMSGSIZE;
}
-static void dp_ifinfo_notify(int event, struct dp_port *port)
+static void dp_ifinfo_notify(int event, struct vport *port)
{
struct sk_buff *skb;
int err = -ENOBUFS;
goto err;
}
} else {
- snprintf(devname, sizeof devname, "of%d", dp_idx);
+ snprintf(devname, sizeof(devname), "of%d", dp_idx);
}
rtnl_lock();
goto err_put_module;
err = -ENOMEM;
- dp = kzalloc(sizeof *dp, GFP_KERNEL);
+ dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (dp == NULL)
goto err_put_module;
INIT_LIST_HEAD(&dp->port_list);
mutex_init(&dp->mutex);
+ mutex_lock(&dp->mutex);
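+	/* Hold dp->mutex through the rest of setup so that the
+	 * lockdep-checked accessors above can be used before the new
+	 * datapath is published in dps[]. */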
dp->dp_idx = dp_idx;
for (i = 0; i < DP_N_QUEUES; i++)
skb_queue_head_init(&dp->queues[i]);
/* Allocate table. */
err = -ENOMEM;
- rcu_assign_pointer(dp->table, tbl_create(0));
+ rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
if (!dp->table)
goto err_free_dp;
/* Set up our datapath device. */
BUILD_BUG_ON(sizeof(internal_dev_port.devname) != sizeof(devname));
strcpy(internal_dev_port.devname, devname);
- internal_dev_port.flags = ODP_PORT_INTERNAL;
- err = new_dp_port(dp, &internal_dev_port, ODPP_LOCAL);
+ strcpy(internal_dev_port.type, "internal");
+ err = new_vport(dp, &internal_dev_port, ODPP_LOCAL);
if (err) {
if (err == -EBUSY)
err = -EEXIST;
dp->drop_frags = 0;
dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
- if (!dp->stats_percpu)
+ if (!dp->stats_percpu) {
+ err = -ENOMEM;
goto err_destroy_local_port;
+ }
rcu_assign_pointer(dps[dp_idx], dp);
+ dp_sysfs_add_dp(dp);
+
+ mutex_unlock(&dp->mutex);
mutex_unlock(&dp_mutex);
rtnl_unlock();
- dp_sysfs_add_dp(dp);
-
return 0;
err_destroy_local_port:
- dp_detach_port(dp->ports[ODPP_LOCAL], 1);
+ dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
err_destroy_table:
- tbl_destroy(dp->table, NULL);
+ tbl_destroy(get_table_protected(dp), NULL);
err_free_dp:
+ mutex_unlock(&dp->mutex);
kfree(dp);
err_put_module:
module_put(THIS_MODULE);
return err;
}
-static void do_destroy_dp(struct datapath *dp)
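+/* Final teardown, run from call_rcu() once no reader can still hold a
+ * reference to 'dp': purges the queues and frees the flow table and
+ * per-CPU stats. */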
+static void destroy_dp_rcu(struct rcu_head *rcu)
{
- struct dp_port *p, *n;
+ struct datapath *dp = container_of(rcu, struct datapath, rcu);
int i;
- list_for_each_entry_safe (p, n, &dp->port_list, node)
- if (p->port_no != ODPP_LOCAL)
- dp_detach_port(p, 1);
-
- dp_sysfs_del_dp(dp);
-
- rcu_assign_pointer(dps[dp->dp_idx], NULL);
-
- dp_detach_port(dp->ports[ODPP_LOCAL], 1);
-
- tbl_destroy(dp->table, flow_free_tbl);
-
for (i = 0; i < DP_N_QUEUES; i++)
skb_queue_purge(&dp->queues[i]);
+
+ tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
free_percpu(dp->stats_percpu);
kobject_put(&dp->ifobj);
- module_put(THIS_MODULE);
}
static int destroy_dp(int dp_idx)
{
struct datapath *dp;
- int err;
+ int err = 0;
+ struct vport *p, *n;
rtnl_lock();
mutex_lock(&dp_mutex);
dp = get_dp(dp_idx);
- err = -ENODEV;
- if (!dp)
- goto err_unlock;
+ if (!dp) {
+ err = -ENODEV;
+ goto out;
+ }
- do_destroy_dp(dp);
- err = 0;
+ mutex_lock(&dp->mutex);
-err_unlock:
+ list_for_each_entry_safe (p, n, &dp->port_list, node)
+ if (p->port_no != ODPP_LOCAL)
+ dp_detach_port(p);
+
+ dp_sysfs_del_dp(dp);
+ rcu_assign_pointer(dps[dp->dp_idx], NULL);
+ dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
+
+ mutex_unlock(&dp->mutex);
+ call_rcu(&dp->rcu, destroy_dp_rcu);
+ module_put(THIS_MODULE);
+
+out:
mutex_unlock(&dp_mutex);
rtnl_unlock();
return err;
}
-static void release_dp_port(struct kobject *kobj)
-{
- struct dp_port *p = container_of(kobj, struct dp_port, kobj);
- kfree(p);
-}
-
-static struct kobj_type brport_ktype = {
-#ifdef CONFIG_SYSFS
- .sysfs_ops = &brport_sysfs_ops,
-#endif
- .release = release_dp_port
-};
-
-/* Called with RTNL lock and dp_mutex. */
-static int new_dp_port(struct datapath *dp, struct odp_port *odp_port, int port_no)
+/* Called with RTNL lock and dp->mutex. */
+static int new_vport(struct datapath *dp, struct odp_port *odp_port, int port_no)
{
+ struct vport_parms parms;
struct vport *vport;
- struct dp_port *p;
- int err;
-
- vport = vport_locate(odp_port->devname);
- if (!vport) {
- struct vport_parms parms;
-
- parms.name = odp_port->devname;
- parms.type = odp_port->flags & ODP_PORT_INTERNAL ? "internal" : "netdev";
- parms.config = NULL;
-
- vport_lock();
- vport = vport_add(&parms);
- vport_unlock();
- if (IS_ERR(vport))
- return PTR_ERR(vport);
- }
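+	/* vport_parms carries the owning datapath and port number, so
+	 * vport_add() both creates the port and attaches it to 'dp' in a
+	 * single step. */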
+ parms.name = odp_port->devname;
+ parms.type = odp_port->type;
+ parms.config = odp_port->config;
+ parms.dp = dp;
+ parms.port_no = port_no;
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
+ vport_lock();
+ vport = vport_add(&parms);
+ vport_unlock();
- p->port_no = port_no;
- p->dp = dp;
- p->vport = vport;
- atomic_set(&p->sflow_pool, 0);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
- err = vport_attach(vport, p);
- if (err) {
- kfree(p);
- return err;
- }
-
- rcu_assign_pointer(dp->ports[port_no], p);
- list_add_rcu(&p->node, &dp->port_list);
+ rcu_assign_pointer(dp->ports[port_no], vport);
+ list_add_rcu(&vport->node, &dp->port_list);
dp->n_ports++;
- /* Initialize kobject for bridge. This will be added as
- * /sys/class/net/<devname>/brport later, if sysfs is enabled. */
- p->kobj.kset = NULL;
- kobject_init(&p->kobj, &brport_ktype);
-
- dp_ifinfo_notify(RTM_NEWLINK, p);
+ dp_ifinfo_notify(RTM_NEWLINK, vport);
return 0;
}
int err;
err = -EFAULT;
- if (copy_from_user(&port, portp, sizeof port))
+ if (copy_from_user(&port, portp, sizeof(port)))
goto out;
port.devname[IFNAMSIZ - 1] = '\0';
+ port.type[VPORT_TYPE_SIZE - 1] = '\0';
rtnl_lock();
dp = get_dp_locked(dp_idx);
goto out_unlock_dp;
got_port_no:
- err = new_dp_port(dp, &port, port_no);
+ err = new_vport(dp, &port, port_no);
if (err)
goto out_unlock_dp;
set_internal_devs_mtu(dp);
- dp_sysfs_add_if(dp->ports[port_no]);
+ dp_sysfs_add_if(get_vport_protected(dp, port_no));
err = put_user(port_no, &portp->port);
return err;
}
-int dp_detach_port(struct dp_port *p, int may_delete)
+int dp_detach_port(struct vport *p)
{
- struct vport *vport = p->vport;
int err;
ASSERT_RTNL();
list_del_rcu(&p->node);
rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
- err = vport_detach(vport);
- if (err)
- return err;
-
- /* Then wait until no one is still using it, and destroy it. */
- synchronize_rcu();
-
- if (may_delete) {
- const char *port_type = vport_get_type(vport);
-
- if (!strcmp(port_type, "netdev") || !strcmp(port_type, "internal")) {
- vport_lock();
- vport_del(vport);
- vport_unlock();
- }
- }
+ /* Then destroy it. */
+ vport_lock();
+ err = vport_del(p);
+ vport_unlock();
- kobject_put(&p->kobj);
-
- return 0;
+ return err;
}
static int detach_port(int dp_idx, int port_no)
{
- struct dp_port *p;
+ struct vport *p;
struct datapath *dp;
int err;
if (!dp)
goto out_unlock_rtnl;
- p = dp->ports[port_no];
+ p = get_vport_protected(dp, port_no);
err = -ENOENT;
if (!p)
goto out_unlock_dp;
- err = dp_detach_port(p, 1);
+ err = dp_detach_port(p);
out_unlock_dp:
mutex_unlock(&dp->mutex);
}
/* Must be called with rcu_read_lock. */
-void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb)
+void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
struct datapath *dp = p->dp;
struct dp_stats_percpu *stats;
struct loop_counter *loop;
int error;
- OVS_CB(skb)->dp_port = p;
+ OVS_CB(skb)->vport = p;
if (!OVS_CB(skb)->flow) {
struct odp_flow_key key;
flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
flow_hash(&key), flow_cmp);
if (unlikely(!flow_node)) {
- dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
+ dp_output_control(dp, skb, _ODPL_MISS_NR,
+ (__force u64)OVS_CB(skb)->tun_id);
stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
goto out;
}
OVS_CB(skb)->flow = flow_cast(flow_node);
}
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
flow_used(OVS_CB(skb)->flow, skb);
acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
loop->looping = true;
if (unlikely(loop->looping)) {
loop_suppress(dp, acts);
+ kfree_skb(skb);
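+		/* loop_suppress() does not free the skb, so drop it here to
+		 * avoid a leak. */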
goto out_loop;
}
/* Execute actions. */
execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
- acts->n_actions);
- stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
+ acts->actions_len);
/* Check whether sub-actions looped too much. */
if (unlikely(loop->looping))
local_bh_enable();
}
-#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
-/* This code is based on skb_checksum_setup() from Xen's net/dev/core.c. We
- * can't call this function directly because it isn't exported in all
- * versions. */
-int vswitch_skb_checksum_setup(struct sk_buff *skb)
-{
- struct iphdr *iph;
- unsigned char *th;
- int err = -EPROTO;
- __u16 csum_start, csum_offset;
-
- if (!skb->proto_csum_blank)
- return 0;
-
- if (skb->protocol != htons(ETH_P_IP))
- goto out;
-
- if (!pskb_may_pull(skb, skb_network_header(skb) + sizeof(struct iphdr) - skb->data))
- goto out;
-
- iph = ip_hdr(skb);
- th = skb_network_header(skb) + 4 * iph->ihl;
-
- csum_start = th - skb->head;
- switch (iph->protocol) {
- case IPPROTO_TCP:
- csum_offset = offsetof(struct tcphdr, check);
- break;
- case IPPROTO_UDP:
- csum_offset = offsetof(struct udphdr, check);
- break;
- default:
- if (net_ratelimit())
- pr_err("Attempting to checksum a non-TCP/UDP packet, "
- "dropping a protocol %d packet",
- iph->protocol);
- goto out;
- }
-
- if (!pskb_may_pull(skb, th + csum_offset + 2 - skb->data))
- goto out;
-
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb->proto_csum_blank = 0;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
- skb->csum_start = csum_start;
- skb->csum_offset = csum_offset;
-#else
- skb_set_transport_header(skb, csum_start - skb_headroom(skb));
- skb->csum = csum_offset;
-#endif
-
- err = 0;
-
-out:
- return err;
-}
-#endif /* CONFIG_XEN && HAVE_PROTO_DATA_VALID */
-
- /* Types of checksums that we can receive (these all refer to L4 checksums):
- * 1. CHECKSUM_NONE: Device that did not compute checksum, contains full
- * (though not verified) checksum in packet but not in skb->csum. Packets
- * from the bridge local port will also have this type.
- * 2. CHECKSUM_COMPLETE (CHECKSUM_HW): Good device that computes checksums,
- * also the GRE module. This is the same as CHECKSUM_NONE, except it has
- * a valid skb->csum. Importantly, both contain a full checksum (not
- * verified) in the packet itself. The only difference is that if the
- * packet gets to L4 processing on this machine (not in DomU) we won't
- * have to recompute the checksum to verify. Most hardware devices do not
- * produce packets with this type, even if they support receive checksum
- * offloading (they produce type #5).
- * 3. CHECKSUM_PARTIAL (CHECKSUM_HW): Packet without full checksum and needs to
- * be computed if it is sent off box. Unfortunately on earlier kernels,
- * this case is impossible to distinguish from #2, despite having opposite
- * meanings. Xen adds an extra field on earlier kernels (see #4) in order
- * to distinguish the different states.
- * 4. CHECKSUM_UNNECESSARY (with proto_csum_blank true): This packet was
- * generated locally by a Xen DomU and has a partial checksum. If it is
- * handled on this machine (Dom0 or DomU), then the checksum will not be
- * computed. If it goes off box, the checksum in the packet needs to be
- * completed. Calling skb_checksum_setup converts this to CHECKSUM_HW
- * (CHECKSUM_PARTIAL) so that the checksum can be completed. In later
- * kernels, this combination is replaced with CHECKSUM_PARTIAL.
- * 5. CHECKSUM_UNNECESSARY (with proto_csum_blank false): Packet with a correct
- * full checksum or using a protocol without a checksum. skb->csum is
- * undefined. This is common from devices with receive checksum
- * offloading. This is somewhat similar to CHECKSUM_NONE, except that
- * nobody will try to verify the checksum with CHECKSUM_UNNECESSARY.
- *
- * Note that on earlier kernels, CHECKSUM_COMPLETE and CHECKSUM_PARTIAL are
- * both defined as CHECKSUM_HW. Normally the meaning of CHECKSUM_HW is clear
- * based on whether it is on the transmit or receive path. After the datapath
- * it will be intepreted as CHECKSUM_PARTIAL. If the packet already has a
- * checksum, we will panic. Since we can receive packets with checksums, we
- * assume that all CHECKSUM_HW packets have checksums and map them to
- * CHECKSUM_NONE, which has a similar meaning (the it is only different if the
- * packet is processed by the local IP stack, in which case it will need to
- * be reverified). If we receive a packet with CHECKSUM_HW that really means
- * CHECKSUM_PARTIAL, it will be sent with the wrong checksum. However, there
- * shouldn't be any devices that do this with bridging. */
-void compute_ip_summed(struct sk_buff *skb, bool xmit)
-{
- /* For our convenience these defines change repeatedly between kernel
- * versions, so we can't just copy them over... */
- switch (skb->ip_summed) {
- case CHECKSUM_NONE:
- OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
- break;
- case CHECKSUM_UNNECESSARY:
- OVS_CB(skb)->ip_summed = OVS_CSUM_UNNECESSARY;
- break;
-#ifdef CHECKSUM_HW
- /* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE.
- * However, on the receive side we should only get CHECKSUM_PARTIAL
- * packets from Xen, which uses some special fields to represent this
- * (see below). Since we can only make one type work, pick the one
- * that actually happens in practice.
- *
- * On the transmit side (basically after skb_checksum_setup()
- * has been run or on internal dev transmit), packets with
- * CHECKSUM_COMPLETE aren't generated, so assume CHECKSUM_PARTIAL. */
- case CHECKSUM_HW:
- if (!xmit)
- OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
- else
- OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
-
- break;
-#else
- case CHECKSUM_COMPLETE:
- OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
- break;
- case CHECKSUM_PARTIAL:
- OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
- break;
-#endif
- default:
- pr_err("unknown checksum type %d\n", skb->ip_summed);
- /* None seems the safest... */
- OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
- }
-
-#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
- /* Xen has a special way of representing CHECKSUM_PARTIAL on older
- * kernels. It should not be set on the transmit path though. */
- if (skb->proto_csum_blank)
- OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
-
- WARN_ON_ONCE(skb->proto_csum_blank && xmit);
-#endif
-}
-
-/* This function closely resembles skb_forward_csum() used by the bridge. It
- * is slightly different because we are only concerned with bridging and not
- * other types of forwarding and can get away with slightly more optimal
- * behavior.*/
-void forward_ip_summed(struct sk_buff *skb)
-{
-#ifdef CHECKSUM_HW
- if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
- skb->ip_summed = CHECKSUM_NONE;
-#endif
-}
-
/* Append each packet in the 'skb' list to 'queue'. There will be only one packet
* unless we broke up a GSO packet. */
static int queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
- int queue_no, u32 arg)
+ int queue_no, u64 arg)
{
struct sk_buff *nskb;
int port_no;
int err;
- if (OVS_CB(skb)->dp_port)
- port_no = OVS_CB(skb)->dp_port->port_no;
+ if (OVS_CB(skb)->vport)
+ port_no = OVS_CB(skb)->vport->port_no;
else
port_no = ODPP_LOCAL;
nskb = skb->next;
skb->next = NULL;
- err = skb_cow(skb, sizeof *header);
+ err = skb_cow(skb, sizeof(*header));
if (err)
goto err_kfree_skbs;
- header = (struct odp_msg*)__skb_push(skb, sizeof *header);
+	header = (struct odp_msg *)__skb_push(skb, sizeof(*header));
header->type = queue_no;
header->length = skb->len;
header->port = port_no;
- header->reserved = 0;
header->arg = arg;
skb_queue_tail(queue, skb);
}
int dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
- u32 arg)
+ u64 arg)
{
struct dp_stats_percpu *stats;
struct sk_buff_head *queue;
* userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
if (skb_is_gso(skb)) {
struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
- if (nskb) {
- kfree_skb(skb);
- skb = nskb;
- if (unlikely(IS_ERR(skb))) {
- err = PTR_ERR(skb);
- goto err;
- }
- } else {
- /* XXX This case might not be possible. It's hard to
- * tell from the skb_gso_segment() code and comment. */
+
+ kfree_skb(skb);
+ skb = nskb;
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ goto err;
}
}
static int flush_flows(struct datapath *dp)
{
- struct tbl *old_table = rcu_dereference(dp->table);
+ struct tbl *old_table = get_table_protected(dp);
struct tbl *new_table;
- new_table = tbl_create(0);
+ new_table = tbl_create(TBL_MIN_BUCKETS);
if (!new_table)
return -ENOMEM;
return 0;
}
-static int validate_actions(const struct sw_flow_actions *actions)
-{
- unsigned int i;
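+/* Actions arrive as a stream of Netlink attributes.  Every ODPAT_* type has
+ * a fixed-size payload, so validation is a table lookup on the length plus
+ * a few per-type value checks. */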
+static int validate_actions(const struct nlattr *actions, u32 actions_len)
+{
+ const struct nlattr *a;
+ int rem;
+
+ nla_for_each_attr(a, actions, actions_len, rem) {
+ static const u32 action_lens[ODPAT_MAX + 1] = {
+ [ODPAT_OUTPUT] = 4,
+ [ODPAT_CONTROLLER] = 8,
+ [ODPAT_SET_DL_TCI] = 2,
+ [ODPAT_STRIP_VLAN] = 0,
+ [ODPAT_SET_DL_SRC] = ETH_ALEN,
+ [ODPAT_SET_DL_DST] = ETH_ALEN,
+ [ODPAT_SET_NW_SRC] = 4,
+ [ODPAT_SET_NW_DST] = 4,
+ [ODPAT_SET_NW_TOS] = 1,
+ [ODPAT_SET_TP_SRC] = 2,
+ [ODPAT_SET_TP_DST] = 2,
+ [ODPAT_SET_TUNNEL] = 8,
+ [ODPAT_SET_PRIORITY] = 4,
+ [ODPAT_POP_PRIORITY] = 0,
+ [ODPAT_DROP_SPOOFED_ARP] = 0,
+ };
+ int type = nla_type(a);
+
+ if (type > ODPAT_MAX || nla_len(a) != action_lens[type])
+ return -EINVAL;
- for (i = 0; i < actions->n_actions; i++) {
- const union odp_action *a = &actions->actions[i];
+ switch (type) {
+ case ODPAT_UNSPEC:
+ return -EINVAL;
- switch (a->type) {
case ODPAT_CONTROLLER:
case ODPAT_STRIP_VLAN:
case ODPAT_SET_DL_SRC:
break;
case ODPAT_OUTPUT:
- if (a->output.port >= DP_MAX_PORTS)
+ if (nla_get_u32(a) >= DP_MAX_PORTS)
return -EINVAL;
break;
case ODPAT_SET_DL_TCI:
- if (a->dl_tci.tci & htons(VLAN_CFI_MASK))
+ if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
return -EINVAL;
break;
case ODPAT_SET_NW_TOS:
- if (a->nw_tos.nw_tos & INET_ECN_MASK)
+ if (nla_get_u8(a) & INET_ECN_MASK)
return -EINVAL;
break;
}
}
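+	/* nla_for_each_attr() stops early if it hits a malformed attribute,
+	 * leaving the unparsed byte count in 'rem'. */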
+ if (rem > 0)
+ return -EINVAL;
+
return 0;
}
struct sw_flow_actions *actions;
int error;
- actions = flow_actions_alloc(flow->n_actions);
+ actions = flow_actions_alloc(flow->actions_len);
error = PTR_ERR(actions);
if (IS_ERR(actions))
goto error;
error = -EFAULT;
- if (copy_from_user(actions->actions, flow->actions,
- flow->n_actions * sizeof(union odp_action)))
+ if (copy_from_user(actions->actions,
+ (struct nlattr __user __force *)flow->actions,
+ flow->actions_len))
goto error_free_actions;
- error = validate_actions(actions);
+ error = validate_actions(actions->actions, actions->actions_len);
if (error)
goto error_free_actions;
static int expand_table(struct datapath *dp)
{
- struct tbl *old_table = rcu_dereference(dp->table);
+ struct tbl *old_table = get_table_protected(dp);
struct tbl *new_table;
new_table = tbl_expand(old_table);
struct tbl_node *flow_node;
struct sw_flow *flow;
struct tbl *table;
+ struct sw_flow_actions *acts = NULL;
int error;
+ u32 hash;
- table = rcu_dereference(dp->table);
- flow_node = tbl_lookup(table, &uf->flow.key, flow_hash(&uf->flow.key), flow_cmp);
+ hash = flow_hash(&uf->flow.key);
+ table = get_table_protected(dp);
+ flow_node = tbl_lookup(table, &uf->flow.key, hash, flow_cmp);
if (!flow_node) {
/* No such flow. */
- struct sw_flow_actions *acts;
-
error = -ENOENT;
if (!(uf->flags & ODPPF_CREATE))
goto error;
error = expand_table(dp);
if (error)
goto error;
- table = rcu_dereference(dp->table);
+ table = get_table_protected(dp);
}
/* Allocate flow. */
rcu_assign_pointer(flow->sf_acts, acts);
/* Put flow in bucket. */
- error = tbl_insert(table, &flow->tbl_node, flow_hash(&flow->key));
+ error = tbl_insert(table, &flow->tbl_node, hash);
if (error)
goto error_free_flow_acts;
error = PTR_ERR(new_acts);
if (IS_ERR(new_acts))
goto error;
- old_acts = rcu_dereference(flow->sf_acts);
- if (old_acts->n_actions != new_acts->n_actions ||
+
+ old_acts = rcu_dereference_protected(flow->sf_acts,
+ lockdep_is_held(&dp->mutex));
+ if (old_acts->actions_len != new_acts->actions_len ||
memcmp(old_acts->actions, new_acts->actions,
- sizeof(union odp_action) * old_acts->n_actions)) {
+ old_acts->actions_len)) {
rcu_assign_pointer(flow->sf_acts, new_acts);
flow_deferred_free_acts(old_acts);
} else {
return 0;
error_free_flow_acts:
- kfree(flow->sf_acts);
+ kfree(acts);
error_free_flow:
flow->sf_acts = NULL;
flow_put(flow);
return 0;
}
-static int do_answer_query(struct sw_flow *flow, u32 query_flags,
+static int do_answer_query(struct datapath *dp, struct sw_flow *flow,
+ u32 query_flags,
struct odp_flow_stats __user *ustats,
- union odp_action __user *actions,
- u32 __user *n_actionsp)
+ struct nlattr __user *actions,
+ u32 __user *actions_lenp)
{
struct sw_flow_actions *sf_acts;
struct odp_flow_stats stats;
- u32 n_actions;
+ u32 actions_len;
spin_lock_bh(&flow->lock);
get_stats(flow, &stats);
spin_unlock_bh(&flow->lock);
if (copy_to_user(ustats, &stats, sizeof(struct odp_flow_stats)) ||
- get_user(n_actions, n_actionsp))
+ get_user(actions_len, actions_lenp))
return -EFAULT;
- if (!n_actions)
+ if (!actions_len)
return 0;
- sf_acts = rcu_dereference(flow->sf_acts);
- if (put_user(sf_acts->n_actions, n_actionsp) ||
+ sf_acts = rcu_dereference_protected(flow->sf_acts,
+ lockdep_is_held(&dp->mutex));
+ if (put_user(sf_acts->actions_len, actions_lenp) ||
(actions && copy_to_user(actions, sf_acts->actions,
- sizeof(union odp_action) *
- min(sf_acts->n_actions, n_actions))))
+ min(sf_acts->actions_len, actions_len))))
return -EFAULT;
return 0;
}
-static int answer_query(struct sw_flow *flow, u32 query_flags,
- struct odp_flow __user *ufp)
+static int answer_query(struct datapath *dp, struct sw_flow *flow,
+ u32 query_flags, struct odp_flow __user *ufp)
{
- union odp_action *actions;
+ struct nlattr __user *actions;
- if (get_user(actions, &ufp->actions))
+ if (get_user(actions, (struct nlattr __user * __user *)&ufp->actions))
return -EFAULT;
- return do_answer_query(flow, query_flags,
- &ufp->stats, actions, &ufp->n_actions);
+ return do_answer_query(dp, flow, query_flags,
+ &ufp->stats, actions, &ufp->actions_len);
}
static struct sw_flow *do_del_flow(struct datapath *dp, struct odp_flow_key *key)
{
- struct tbl *table = rcu_dereference(dp->table);
+ struct tbl *table = get_table_protected(dp);
struct tbl_node *flow_node;
int error;
struct odp_flow uf;
int error;
- if (copy_from_user(&uf, ufp, sizeof uf))
+ if (copy_from_user(&uf, ufp, sizeof(uf)))
return -EFAULT;
flow = do_del_flow(dp, &uf.key);
if (IS_ERR(flow))
return PTR_ERR(flow);
- error = answer_query(flow, 0, ufp);
+ error = answer_query(dp, flow, 0, ufp);
flow_deferred_free(flow);
return error;
}
static int do_query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
- struct tbl *table = rcu_dereference(dp->table);
+ struct tbl *table = get_table_protected(dp);
u32 i;
for (i = 0; i < flowvec->n_flows; i++) {
- struct odp_flow __user *ufp = &flowvec->flows[i];
+ struct odp_flow __user *ufp = (struct odp_flow __user __force *)&flowvec->flows[i];
struct odp_flow uf;
struct tbl_node *flow_node;
int error;
- if (copy_from_user(&uf, ufp, sizeof uf))
+ if (copy_from_user(&uf, ufp, sizeof(uf)))
return -EFAULT;
flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
if (!flow_node)
error = put_user(ENOENT, &ufp->stats.error);
else
- error = answer_query(flow_cast(flow_node), uf.flags, ufp);
+ error = answer_query(dp, flow_cast(flow_node), uf.flags, ufp);
if (error)
return -EFAULT;
}
return flowvec->n_flows;
}
-struct list_flows_cbdata {
- struct odp_flow __user *uflows;
- u32 n_flows;
- u32 listed_flows;
-};
-
-static int list_flow(struct tbl_node *node, void *cbdata_)
-{
- struct sw_flow *flow = flow_cast(node);
- struct list_flows_cbdata *cbdata = cbdata_;
- struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
- int error;
-
- if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
- return -EFAULT;
- error = answer_query(flow, 0, ufp);
- if (error)
- return error;
-
- if (cbdata->listed_flows >= cbdata->n_flows)
- return cbdata->listed_flows;
- return 0;
-}
-
-static int do_list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
-{
- struct list_flows_cbdata cbdata;
- int error;
-
- if (!flowvec->n_flows)
- return 0;
-
- cbdata.uflows = flowvec->flows;
- cbdata.n_flows = flowvec->n_flows;
- cbdata.listed_flows = 0;
-
- error = tbl_foreach(rcu_dereference(dp->table), list_flow, &cbdata);
- return error ? error : cbdata.listed_flows;
-}
-
static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
int (*function)(struct datapath *,
const struct odp_flowvec *))
int retval;
uflowvec = (struct odp_flowvec __user *)argp;
- if (copy_from_user(&flowvec, uflowvec, sizeof flowvec))
+ if (copy_from_user(&flowvec, uflowvec, sizeof(flowvec)))
return -EFAULT;
if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
: put_user(retval, &uflowvec->n_flows));
}
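+/* ODP_FLOW_DUMP replaces the old ODP_FLOW_LIST interface: userspace holds a
+ * two-u32 cursor (hash bucket, object within bucket) in 'state' and calls
+ * the ioctl repeatedly; ODPFF_EOF is set in the returned odp_flow once the
+ * table is exhausted. */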
+static struct sw_flow *do_dump_flow(struct datapath *dp, u32 __user *state)
+{
+ struct tbl *table = get_table_protected(dp);
+ struct tbl_node *tbl_node;
+ u32 bucket, obj;
+
+ if (get_user(bucket, &state[0]) || get_user(obj, &state[1]))
+ return ERR_PTR(-EFAULT);
+
+ tbl_node = tbl_next(table, &bucket, &obj);
+
+ if (put_user(bucket, &state[0]) || put_user(obj, &state[1]))
+ return ERR_PTR(-EFAULT);
+
+ return tbl_node ? flow_cast(tbl_node) : NULL;
+}
+
+static int dump_flow(struct datapath *dp, struct odp_flow_dump __user *udumpp)
+{
+ struct odp_flow __user *uflowp;
+ struct sw_flow *flow;
+
+ flow = do_dump_flow(dp, udumpp->state);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+	if (get_user(uflowp, (struct odp_flow __user * __user *)&udumpp->flow))
+ return -EFAULT;
+
+ if (!flow)
+ return put_user(ODPFF_EOF, &uflowp->flags);
+
+ if (copy_to_user(&uflowp->key, &flow->key, sizeof(struct odp_flow_key)) ||
+ put_user(0, &uflowp->flags))
+ return -EFAULT;
+ return answer_query(dp, flow, 0, uflowp);
+}
+
static int do_execute(struct datapath *dp, const struct odp_execute *execute)
{
struct odp_flow_key key;
if (execute->length < ETH_HLEN || execute->length > 65535)
goto error;
- actions = flow_actions_alloc(execute->n_actions);
+ actions = flow_actions_alloc(execute->actions_len);
if (IS_ERR(actions)) {
err = PTR_ERR(actions);
goto error;
}
err = -EFAULT;
- if (copy_from_user(actions->actions, execute->actions,
- execute->n_actions * sizeof *execute->actions))
+ if (copy_from_user(actions->actions,
+ (struct nlattr __user __force *)execute->actions, execute->actions_len))
goto error_free_actions;
- err = validate_actions(actions);
+ err = validate_actions(actions->actions, execute->actions_len);
if (err)
goto error_free_actions;
goto error_free_actions;
err = -EFAULT;
- if (copy_from_user(skb_put(skb, execute->length), execute->data,
+ if (copy_from_user(skb_put(skb, execute->length),
+ (const void __user __force *)execute->data,
execute->length))
goto error_free_skb;
goto error_free_skb;
rcu_read_lock();
- err = execute_actions(dp, skb, &key, actions->actions, actions->n_actions);
+ err = execute_actions(dp, skb, &key, actions->actions, actions->actions_len);
rcu_read_unlock();
kfree(actions);
{
struct odp_execute execute;
- if (copy_from_user(&execute, executep, sizeof execute))
+ if (copy_from_user(&execute, executep, sizeof(execute)))
return -EFAULT;
return do_execute(dp, &execute);
static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
- struct tbl *table = rcu_dereference(dp->table);
+ struct tbl *table = get_table_protected(dp);
struct odp_stats stats;
int i;
}
stats.max_miss_queue = DP_MAX_QUEUE_LEN;
stats.max_action_queue = DP_MAX_QUEUE_LEN;
- return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
+ return copy_to_user(statsp, &stats, sizeof(stats)) ? -EFAULT : 0;
}
/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int dp_min_mtu(const struct datapath *dp)
{
- struct dp_port *p;
+ struct vport *p;
int mtu = 0;
ASSERT_RTNL();
/* Skip any internal ports, since that's what we're trying to
* set. */
- if (is_internal_vport(p->vport))
+ if (is_internal_vport(p))
continue;
- dev_mtu = vport_get_mtu(p->vport);
+ dev_mtu = vport_get_mtu(p);
if (!mtu || dev_mtu < mtu)
mtu = dev_mtu;
}
* be called with RTNL lock. */
void set_internal_devs_mtu(const struct datapath *dp)
{
- struct dp_port *p;
+ struct vport *p;
int mtu;
ASSERT_RTNL();
mtu = dp_min_mtu(dp);
list_for_each_entry_rcu (p, &dp->port_list, node) {
- if (is_internal_vport(p->vport))
- vport_set_mtu(p->vport, mtu);
+ if (is_internal_vport(p))
+ vport_set_mtu(p, mtu);
}
}
-static int put_port(const struct dp_port *p, struct odp_port __user *uop)
+static int put_port(const struct vport *p, struct odp_port __user *uop)
{
struct odp_port op;
- memset(&op, 0, sizeof op);
+ memset(&op, 0, sizeof(op));
rcu_read_lock();
- strncpy(op.devname, vport_get_name(p->vport), sizeof op.devname);
+ strncpy(op.devname, vport_get_name(p), sizeof(op.devname));
+ strncpy(op.type, vport_get_type(p), sizeof(op.type));
+ vport_get_config(p, op.config);
rcu_read_unlock();
op.port = p->port_no;
- op.flags = is_internal_vport(p->vport) ? ODP_PORT_INTERNAL : 0;
- return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
+ return copy_to_user(uop, &op, sizeof(op)) ? -EFAULT : 0;
}
static int query_port(struct datapath *dp, struct odp_port __user *uport)
{
struct odp_port port;
+ struct vport *vport;
- if (copy_from_user(&port, uport, sizeof port))
+ if (copy_from_user(&port, uport, sizeof(port)))
return -EFAULT;
if (port.devname[0]) {
- struct vport *vport;
- struct dp_port *dp_port;
- int err = 0;
-
port.devname[IFNAMSIZ - 1] = '\0';
vport_lock();
- rcu_read_lock();
-
vport = vport_locate(port.devname);
- if (!vport) {
- err = -ENODEV;
- goto error_unlock;
- }
-
- dp_port = vport_get_dp_port(vport);
- if (!dp_port || dp_port->dp != dp) {
- err = -ENOENT;
- goto error_unlock;
- }
-
- port.port = dp_port->port_no;
-
-error_unlock:
- rcu_read_unlock();
vport_unlock();
- if (err)
- return err;
+ if (!vport)
+ return -ENODEV;
+ if (vport->dp != dp)
+ return -ENOENT;
} else {
if (port.port >= DP_MAX_PORTS)
return -EINVAL;
- if (!dp->ports[port.port])
+
+ vport = get_vport_protected(dp, port.port);
+ if (!vport)
return -ENOENT;
}
- return put_port(dp->ports[port.port], uport);
+ return put_port(vport, uport);
}
static int do_list_ports(struct datapath *dp, struct odp_port __user *uports,
{
int idx = 0;
if (n_ports) {
- struct dp_port *p;
+ struct vport *p;
list_for_each_entry_rcu (p, &dp->port_list, node) {
if (put_port(p, &uports[idx]))
struct odp_portvec pv;
int retval;
- if (copy_from_user(&pv, upv, sizeof pv))
+ if (copy_from_user(&pv, upv, sizeof(pv)))
return -EFAULT;
- retval = do_list_ports(dp, pv.ports, pv.n_ports);
+ retval = do_list_ports(dp, (struct odp_port __user __force *)pv.ports,
+ pv.n_ports);
if (retval < 0)
return retval;
err = destroy_dp(dp_idx);
goto exit;
- case ODP_PORT_ATTACH:
+ case ODP_VPORT_ATTACH:
err = attach_port(dp_idx, (struct odp_port __user *)argp);
goto exit;
- case ODP_PORT_DETACH:
+ case ODP_VPORT_DETACH:
err = get_user(port_no, (int __user *)argp);
if (!err)
err = detach_port(dp_idx, port_no);
goto exit;
- case ODP_VPORT_ADD:
- err = vport_user_add((struct odp_vport_add __user *)argp);
- goto exit;
-
case ODP_VPORT_MOD:
- err = vport_user_mod((struct odp_vport_mod __user *)argp);
- goto exit;
-
- case ODP_VPORT_DEL:
- err = vport_user_del((char __user *)argp);
+ err = vport_user_mod((struct odp_port __user *)argp);
goto exit;
case ODP_VPORT_STATS_GET:
dp->sflow_probability = sflow_probability;
break;
- case ODP_PORT_QUERY:
+ case ODP_VPORT_QUERY:
err = query_port(dp, (struct odp_port __user *)argp);
break;
- case ODP_PORT_LIST:
+ case ODP_VPORT_LIST:
err = list_ports(dp, (struct odp_portvec __user *)argp);
break;
err = do_flowvec_ioctl(dp, argp, do_query_flows);
break;
- case ODP_FLOW_LIST:
- err = do_flowvec_ioctl(dp, argp, do_list_flows);
+ case ODP_FLOW_DUMP:
+ err = dump_flow(dp, (struct odp_flow_dump __user *)argp);
break;
case ODP_EXECUTE:
struct compat_odp_portvec pv;
int retval;
- if (copy_from_user(&pv, upv, sizeof pv))
+ if (copy_from_user(&pv, upv, sizeof(pv)))
return -EFAULT;
retval = do_list_ports(dp, compat_ptr(pv.ports), pv.n_ports);
__copy_from_user(&flow->stats, &compat->stats, sizeof(struct odp_flow_stats)) ||
__copy_from_user(&flow->key, &compat->key, sizeof(struct odp_flow_key)) ||
__get_user(actions, &compat->actions) ||
- __get_user(flow->n_actions, &compat->n_actions) ||
+ __get_user(flow->actions_len, &compat->actions_len) ||
__get_user(flow->flags, &compat->flags))
return -EFAULT;
- flow->actions = compat_ptr(actions);
+ flow->actions = (struct nlattr __force *)compat_ptr(actions);
return 0;
}
return 0;
}
-static int compat_answer_query(struct sw_flow *flow, u32 query_flags,
+static int compat_answer_query(struct datapath *dp, struct sw_flow *flow,
+ u32 query_flags,
struct compat_odp_flow __user *ufp)
{
compat_uptr_t actions;
if (get_user(actions, &ufp->actions))
return -EFAULT;
- return do_answer_query(flow, query_flags, &ufp->stats,
- compat_ptr(actions), &ufp->n_actions);
+ return do_answer_query(dp, flow, query_flags, &ufp->stats,
+ compat_ptr(actions), &ufp->actions_len);
}
static int compat_del_flow(struct datapath *dp, struct compat_odp_flow __user *ufp)
if (IS_ERR(flow))
return PTR_ERR(flow);
- error = compat_answer_query(flow, 0, ufp);
+ error = compat_answer_query(dp, flow, 0, ufp);
flow_deferred_free(flow);
return error;
}
-static int compat_query_flows(struct datapath *dp, struct compat_odp_flow *flows, u32 n_flows)
+static int compat_query_flows(struct datapath *dp,
+ struct compat_odp_flow __user *flows,
+ u32 n_flows)
{
- struct tbl *table = rcu_dereference(dp->table);
+ struct tbl *table = get_table_protected(dp);
u32 i;
for (i = 0; i < n_flows; i++) {
if (!flow_node)
error = put_user(ENOENT, &ufp->stats.error);
else
- error = compat_answer_query(flow_cast(flow_node), uf.flags, ufp);
+ error = compat_answer_query(dp, flow_cast(flow_node),
+ uf.flags, ufp);
if (error)
return -EFAULT;
}
return n_flows;
}
-struct compat_list_flows_cbdata {
- struct compat_odp_flow __user *uflows;
- u32 n_flows;
- u32 listed_flows;
-};
-
-static int compat_list_flow(struct tbl_node *node, void *cbdata_)
+static int compat_dump_flow(struct datapath *dp, struct compat_odp_flow_dump __user *udumpp)
{
- struct sw_flow *flow = flow_cast(node);
- struct compat_list_flows_cbdata *cbdata = cbdata_;
- struct compat_odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
- int error;
-
- if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
- return -EFAULT;
- error = compat_answer_query(flow, 0, ufp);
- if (error)
- return error;
-
- if (cbdata->listed_flows >= cbdata->n_flows)
- return cbdata->listed_flows;
- return 0;
-}
+ struct compat_odp_flow __user *uflowp;
+ compat_uptr_t compat_ufp;
+ struct sw_flow *flow;
-static int compat_list_flows(struct datapath *dp, struct compat_odp_flow *flows, u32 n_flows)
-{
- struct compat_list_flows_cbdata cbdata;
- int error;
+ flow = do_dump_flow(dp, udumpp->state);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
- if (!n_flows)
- return 0;
+ if (get_user(compat_ufp, &udumpp->flow))
+ return -EFAULT;
+ uflowp = compat_ptr(compat_ufp);
- cbdata.uflows = flows;
- cbdata.n_flows = n_flows;
- cbdata.listed_flows = 0;
+ if (!flow)
+ return put_user(ODPFF_EOF, &uflowp->flags);
- error = tbl_foreach(rcu_dereference(dp->table), compat_list_flow, &cbdata);
- return error ? error : cbdata.listed_flows;
+ if (copy_to_user(&uflowp->key, &flow->key, sizeof(struct odp_flow_key)) ||
+ put_user(0, &uflowp->flags))
+ return -EFAULT;
+ return compat_answer_query(dp, flow, 0, uflowp);
}
static int compat_flowvec_ioctl(struct datapath *dp, unsigned long argp,
int (*function)(struct datapath *,
- struct compat_odp_flow *,
+ struct compat_odp_flow __user *,
u32 n_flows))
{
struct compat_odp_flowvec __user *uflowvec;
int retval;
uflowvec = compat_ptr(argp);
- if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
- copy_from_user(&flowvec, uflowvec, sizeof flowvec))
+ if (!access_ok(VERIFY_WRITE, uflowvec, sizeof(*uflowvec)) ||
+ copy_from_user(&flowvec, uflowvec, sizeof(flowvec)))
return -EFAULT;
if (flowvec.n_flows > INT_MAX / sizeof(struct compat_odp_flow))
if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
__get_user(actions, &uexecute->actions) ||
- __get_user(execute.n_actions, &uexecute->n_actions) ||
+ __get_user(execute.actions_len, &uexecute->actions_len) ||
__get_user(data, &uexecute->data) ||
__get_user(execute.length, &uexecute->length))
return -EFAULT;
- execute.actions = compat_ptr(actions);
- execute.data = compat_ptr(data);
+ execute.actions = (struct nlattr __force *)compat_ptr(actions);
+ execute.data = (const void __force *)compat_ptr(data);
return do_execute(dp, &execute);
}
return openvswitch_ioctl(f, cmd, argp);
case ODP_DP_CREATE:
- case ODP_PORT_ATTACH:
- case ODP_PORT_DETACH:
- case ODP_VPORT_DEL:
+ case ODP_VPORT_ATTACH:
+ case ODP_VPORT_DETACH:
+ case ODP_VPORT_MOD:
case ODP_VPORT_MTU_SET:
case ODP_VPORT_MTU_GET:
case ODP_VPORT_ETHER_SET:
case ODP_GET_LISTEN_MASK:
case ODP_SET_SFLOW_PROBABILITY:
case ODP_GET_SFLOW_PROBABILITY:
- case ODP_PORT_QUERY:
+ case ODP_VPORT_QUERY:
/* Ioctls that just need their pointer argument extended. */
return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
-
- case ODP_VPORT_ADD32:
- return compat_vport_user_add(compat_ptr(argp));
-
- case ODP_VPORT_MOD32:
- return compat_vport_user_mod(compat_ptr(argp));
}
dp = get_dp_locked(dp_idx);
goto exit;
switch (cmd) {
- case ODP_PORT_LIST32:
+ case ODP_VPORT_LIST32:
err = compat_list_ports(dp, compat_ptr(argp));
break;
err = compat_flowvec_ioctl(dp, argp, compat_query_flows);
break;
- case ODP_FLOW_LIST32:
- err = compat_flowvec_ioctl(dp, argp, compat_list_flows);
+ case ODP_FLOW_DUMP32:
+ err = compat_dump_flow(dp, compat_ptr(argp));
break;
case ODP_EXECUTE32:
return -EFAULT;
}
-ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes,
- loff_t *ppos)
+static ssize_t openvswitch_read(struct file *f, char __user *buf,
+ size_t nbytes, loff_t *ppos)
{
- /* XXX is there sufficient synchronization here? */
int listeners = get_listen_mask(f);
int dp_idx = iminor(f->f_dentry->d_inode);
- struct datapath *dp = get_dp(dp_idx);
+ struct datapath *dp = get_dp_locked(dp_idx);
struct sk_buff *skb;
size_t copy_bytes, tot_copy_bytes;
int retval;
}
}
success:
+ mutex_unlock(&dp->mutex);
+
copy_bytes = tot_copy_bytes = min_t(size_t, skb->len, nbytes);
retval = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (copy_bytes == skb->len) {
__wsum csum = 0;
- unsigned int csum_start, csum_offset;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
- csum_start = skb->csum_start - skb_headroom(skb);
- csum_offset = skb->csum_offset;
-#else
- csum_start = skb_transport_header(skb) - skb->data;
- csum_offset = skb->csum;
-#endif
+ u16 csum_start, csum_offset;
+
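+			/* get_skb_csum_pointers() hides the csum_start and
+			 * csum_offset layout differences between kernel
+			 * versions. */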
+ get_skb_csum_pointers(skb, &csum_start, &csum_offset);
+ csum_start -= skb_headroom(skb);
+
BUG_ON(csum_start >= skb_headlen(skb));
retval = skb_copy_and_csum_datagram(skb, csum_start, buf + csum_start,
copy_bytes - csum_start, &csum);
copy_bytes = csum_start;
csump = (__sum16 __user *)(buf + csum_start + csum_offset);
- BUG_ON((char *)csump + sizeof(__sum16) > buf + nbytes);
+ BUG_ON((char __user *)csump + sizeof(__sum16) >
+ buf + nbytes);
put_user(csum_fold(csum), csump);
}
} else
}
if (!retval) {
- struct iovec __user iov;
+ struct iovec iov;
iov.iov_base = buf;
iov.iov_len = copy_bytes;
retval = tot_copy_bytes;
kfree_skb(skb);
+ return retval;
error:
+ mutex_unlock(&dp->mutex);
return retval;
}
static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
- /* XXX is there sufficient synchronization here? */
int dp_idx = iminor(file->f_dentry->d_inode);
- struct datapath *dp = get_dp(dp_idx);
+ struct datapath *dp = get_dp_locked(dp_idx);
unsigned int mask;
if (dp) {
poll_wait(file, &dp->waitqueue, wait);
if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
mask |= POLLIN | POLLRDNORM;
+ mutex_unlock(&dp->mutex);
} else {
mask = POLLIN | POLLRDNORM | POLLHUP;
}
return mask;
}
-struct file_operations openvswitch_fops = {
- /* XXX .aio_read = openvswitch_aio_read, */
+static struct file_operations openvswitch_fops = {
+ .owner = THIS_MODULE,
.read = openvswitch_read,
.poll = openvswitch_poll,
.unlocked_ioctl = openvswitch_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = openvswitch_compat_ioctl,
#endif
- /* XXX .fasync = openvswitch_fasync, */
};
static int major;