/* Functions for managing the dp interface/device. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
-#include <linux/random.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <asm/bug.h>
+#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
-#include <linux/workqueue.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>
+#include <linux/compat.h>
#include "openvswitch/datapath-protocol.h"
#include "datapath.h"
#include "actions.h"
#include "flow.h"
+#include "odp-compat.h"
+#include "table.h"
#include "vport-internal_dev.h"
#include "compat.h"
static struct datapath *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);
-/* Number of milliseconds between runs of the maintenance thread. */
-#define MAINT_SLEEP_MSECS 1000
+/* We limit the number of times that we pass into dp_process_received_packet()
+ * to avoid blowing out the stack in the event that we have a loop. */
+struct loop_counter {
+ int count; /* Recursion depth. */
+ bool looping; /* Loop detected? */
+};
+
+#define DP_MAX_LOOPS 5
+
+/* We use a separate counter for each CPU for both interrupt and non-interrupt
+ * context in order to keep the limit deterministic for a given packet. */
+struct percpu_loop_counters {
+ struct loop_counter counters[2];
+};
+
+static DEFINE_PER_CPU(struct percpu_loop_counters, dp_loop_counters);
static int new_dp_port(struct datapath *, struct odp_port *, int port_no);
int i;
if (devnamep) {
- err = -EFAULT;
- if (strncpy_from_user(devname, devnamep, IFNAMSIZ - 1) < 0)
+ int retval = strncpy_from_user(devname, devnamep, IFNAMSIZ);
+ if (retval < 0) {
+ err = -EFAULT;
+ goto err;
+ } else if (retval >= IFNAMSIZ) {
+ err = -ENAMETOOLONG;
goto err;
- devname[IFNAMSIZ - 1] = '\0';
+ }
} else {
snprintf(devname, sizeof devname, "of%d", dp_idx);
}
/* Allocate table. */
err = -ENOMEM;
- rcu_assign_pointer(dp->table, dp_table_create(DP_L1_SIZE));
+ rcu_assign_pointer(dp->table, tbl_create(0));
if (!dp->table)
goto err_free_dp;
/* Set up our datapath device. */
- strncpy(internal_dev_port.devname, devname, IFNAMSIZ - 1);
+ BUILD_BUG_ON(sizeof(internal_dev_port.devname) != sizeof(devname));
+ strcpy(internal_dev_port.devname, devname);
internal_dev_port.flags = ODP_PORT_INTERNAL;
err = new_dp_port(dp, &internal_dev_port, ODPP_LOCAL);
if (err) {
err_destroy_local_port:
dp_detach_port(dp->ports[ODPP_LOCAL], 1);
err_destroy_table:
- dp_table_destroy(dp->table, 0);
+ tbl_destroy(dp->table, NULL);
err_free_dp:
kfree(dp);
err_put_module:
dp_detach_port(dp->ports[ODPP_LOCAL], 1);
- dp_table_destroy(dp->table, 1);
+ tbl_destroy(dp->table, flow_free_tbl);
for (i = 0; i < DP_N_QUEUES; i++)
skb_queue_purge(&dp->queues[i]);
- for (i = 0; i < DP_MAX_GROUPS; i++)
- kfree(dp->groups[i]);
free_percpu(dp->stats_percpu);
kobject_put(&dp->ifobj);
module_put(THIS_MODULE);
vport_lock();
if (odp_port->flags & ODP_PORT_INTERNAL)
- vport = __vport_add(odp_port->devname, "internal", NULL);
+ vport = vport_add(odp_port->devname, "internal", NULL);
else
- vport = __vport_add(odp_port->devname, "netdev", NULL);
+ vport = vport_add(odp_port->devname, "netdev", NULL);
vport_unlock();
p->port_no = port_no;
p->dp = dp;
+ p->vport = vport;
atomic_set(&p->sflow_pool, 0);
err = vport_attach(vport, p);
if (err)
goto out_unlock_dp;
- if (!(port.flags & ODP_PORT_INTERNAL))
- set_internal_devs_mtu(dp);
+ set_internal_devs_mtu(dp);
dp_sysfs_add_if(dp->ports[port_no]);
- err = __put_user(port_no, &portp->port);
+ err = put_user(port_no, &portp->port);
out_unlock_dp:
mutex_unlock(&dp->mutex);
if (!strcmp(port_type, "netdev") || !strcmp(port_type, "internal")) {
vport_lock();
- __vport_del(vport);
+ vport_del(vport);
vport_unlock();
}
}
return err;
}
-/* Must be called with rcu_read_lock and with bottom-halves disabled. */
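+/* Clearing the flow's action list both drops the current packet (with no
+ * actions to execute, the skb is freed) and keeps the flow from looping
+ * again until userspace replaces its actions. */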
+static void suppress_loop(struct datapath *dp, struct sw_flow_actions *actions)
+{
+ if (net_ratelimit())
+ pr_warn("%s: flow looped %d times, dropping\n",
+ dp_name(dp), DP_MAX_LOOPS);
+ actions->n_actions = 0;
+}
+
+/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb)
{
struct datapath *dp = p->dp;
struct dp_stats_percpu *stats;
- struct odp_flow_key key;
- struct sw_flow *flow;
-
- WARN_ON_ONCE(skb_shared(skb));
- skb_warn_if_lro(skb);
+ int stats_counter_off;
+ struct sw_flow_actions *acts;
+ struct loop_counter *loop;
+ int error;
OVS_CB(skb)->dp_port = p;
- compute_ip_summed(skb, false);
- /* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
- stats = percpu_ptr(dp->stats_percpu, smp_processor_id());
+ if (!OVS_CB(skb)->flow) {
+ struct odp_flow_key key;
+ struct tbl_node *flow_node;
+ bool is_frag;
- if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
- if (dp->drop_frags) {
+ /* Extract flow from 'skb' into 'key'. */
+ error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key, &is_frag);
+ if (unlikely(error)) {
kfree_skb(skb);
- stats->n_frags++;
return;
}
- }
- flow = dp_table_lookup(rcu_dereference(dp->table), &key);
- if (flow) {
- struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts);
- flow_used(flow, skb);
- execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
- GFP_ATOMIC);
- stats->n_hit++;
- } else {
- stats->n_missed++;
- dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
+ if (is_frag && dp->drop_frags) {
+ kfree_skb(skb);
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
+ goto out;
+ }
+
+ /* Look up flow. */
+ flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
+ flow_hash(&key), flow_cmp);
+ if (unlikely(!flow_node)) {
+ dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
+ goto out;
+ }
+
+ OVS_CB(skb)->flow = flow_cast(flow_node);
}
-}
-#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
-/* This code is based on a skb_checksum_setup from net/dev/core.c from a
- * combination of Lenny's 2.6.26 Xen kernel and Xen's
- * linux-2.6.18-92.1.10.el5.xs5.0.0.394.644. We can't call this function
- * directly because it isn't exported in all versions. */
-static int skb_pull_up_to(struct sk_buff *skb, void *ptr)
-{
- if (ptr < (void *)skb->tail)
- return 1;
- if (__pskb_pull_tail(skb,
- ptr - (void *)skb->data - skb_headlen(skb))) {
- return 1;
- } else {
- return 0;
+ flow_used(OVS_CB(skb)->flow, skb);
+
+ acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
+
+ /* Check whether we've looped too much. */
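+ /* get_cpu_var() disables preemption until put_cpu_var() below; the
+ * !!in_interrupt() index selects the interrupt or non-interrupt
+ * counter for this CPU. */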
+ loop = &get_cpu_var(dp_loop_counters).counters[!!in_interrupt()];
+ if (unlikely(++loop->count > DP_MAX_LOOPS))
+ loop->looping = true;
+ if (unlikely(loop->looping)) {
+ suppress_loop(dp, acts);
+ goto out_loop;
}
+
+ /* Execute actions. */
+ execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
+ acts->n_actions);
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
+
+ /* Check whether sub-actions looped too much. */
+ if (unlikely(loop->looping))
+ suppress_loop(dp, acts);
+
+out_loop:
+ /* Decrement loop counter. */
+ if (!--loop->count)
+ loop->looping = false;
+ put_cpu_var(dp_loop_counters);
+
+out:
+ /* Update datapath statistics. */
+ local_bh_disable();
+ stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
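+ /* Bottom halves are disabled so a softirq on this CPU cannot enter the
+ * seqcount write side concurrently; bump the counter chosen above via
+ * its byte offset into the per-CPU stats struct. */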
+ write_seqcount_begin(&stats->seqlock);
+ (*(u64 *)((u8 *)stats + stats_counter_off))++;
+ write_seqcount_end(&stats->seqlock);
+
+ local_bh_enable();
}
+#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
+/* This code is based on skb_checksum_setup() from Xen's net/core/dev.c. We
+ * can't call this function directly because it isn't exported in all
+ * versions. */
int vswitch_skb_checksum_setup(struct sk_buff *skb)
{
struct iphdr *iph;
if (skb->protocol != htons(ETH_P_IP))
goto out;
- if (!skb_pull_up_to(skb, skb_network_header(skb) + sizeof(struct iphdr)))
+ if (!pskb_may_pull(skb, skb_network_header(skb) + sizeof(struct iphdr) - skb->data))
goto out;
iph = ip_hdr(skb);
break;
default:
if (net_ratelimit())
- printk(KERN_ERR "Attempting to checksum a non-"
- "TCP/UDP packet, dropping a protocol"
- " %d packet", iph->protocol);
+ pr_err("Attempting to checksum a non-TCP/UDP packet, "
+ "dropping a protocol %d packet",
+ iph->protocol);
goto out;
}
- if (!skb_pull_up_to(skb, th + csum_offset + 2))
+ if (!pskb_may_pull(skb, th + csum_offset + 2 - skb->data))
goto out;
skb->ip_summed = CHECKSUM_PARTIAL;
* be computed if it is sent off box. Unfortunately on earlier kernels,
* this case is impossible to distinguish from #2, despite having opposite
* meanings. Xen adds an extra field on earlier kernels (see #4) in order
- * to distinguish the different states. The only real user of this type
- * with bridging is Xen (on later kernels).
+ * to distinguish the different states.
* 4. CHECKSUM_UNNECESSARY (with proto_csum_blank true): This packet was
* generated locally by a Xen DomU and has a partial checksum. If it is
* handled on this machine (Dom0 or DomU), then the checksum will not be
* packet is processed by the local IP stack, in which case it will need to
* be reverified). If we receive a packet with CHECKSUM_HW that really means
* CHECKSUM_PARTIAL, it will be sent with the wrong checksum. However, there
- * shouldn't be any devices that do this with bridging.
- *
- * The bridge has similar behavior and this function closely resembles
- * skb_forward_csum(). It is slightly different because we are only concerned
- * with bridging and not other types of forwarding and can get away with
- * slightly more optimal behavior.*/
-void
-compute_ip_summed(struct sk_buff *skb, bool xmit)
+ * shouldn't be any devices that do this with bridging. */
+void compute_ip_summed(struct sk_buff *skb, bool xmit)
{
/* For our convenience these defines change repeatedly between kernel
* versions, so we can't just copy them over... */
break;
#ifdef CHECKSUM_HW
/* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE.
- * However, we should only get CHECKSUM_PARTIAL packets from Xen, which
- * uses some special fields to represent this (see below). Since we
- * can only make one type work, pick the one that actually happens in
- * practice.
+ * However, on the receive side we should only get CHECKSUM_PARTIAL
+ * packets from Xen, which uses some special fields to represent this
+ * (see below). Since we can only make one type work, pick the one
+ * that actually happens in practice.
*
- * The one exception to this is if we are on the transmit path
- * (basically after skb_checksum_setup() has been run) the type has
- * already been converted, so we should stay with that. */
+ * On the transmit side (basically after skb_checksum_setup()
+ * has been run or on internal dev transmit), packets with
+ * CHECKSUM_COMPLETE aren't generated, so assume CHECKSUM_PARTIAL. */
case CHECKSUM_HW:
if (!xmit)
OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
break;
#endif
default:
- printk(KERN_ERR "openvswitch: unknown checksum type %d\n",
- skb->ip_summed);
+ pr_err("unknown checksum type %d\n", skb->ip_summed);
/* None seems the safest... */
OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
- }
+ }
#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
/* Xen has a special way of representing CHECKSUM_PARTIAL on older
#endif
}
-void
-forward_ip_summed(struct sk_buff *skb)
+/* This function closely resembles skb_forward_csum() used by the bridge. It
+ * is slightly different because we are only concerned with bridging and not
+ * other types of forwarding and can get away with slightly more optimal
+ * behavior. */
+void forward_ip_summed(struct sk_buff *skb)
{
#ifdef CHECKSUM_HW
if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
/* Append each packet in 'skb' list to 'queue'. There will be only one packet
* unless we broke up a GSO packet. */
-static int
-queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
- int queue_no, u32 arg)
+static int queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
+ int queue_no, u32 arg)
{
struct sk_buff *nskb;
int port_no;
nskb = skb->next;
skb->next = NULL;
- /* If a checksum-deferred packet is forwarded to the
- * controller, correct the pointers and checksum. This happens
- * on a regular basis only on Xen, on which VMs can pass up
- * packets that do not have their checksum computed.
- */
- err = vswitch_skb_checksum_setup(skb);
- if (err)
- goto err_kfree_skbs;
-#ifndef CHECKSUM_HW
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
- /* Until 2.6.22, the start of the transport header was
- * also the start of data to be checksummed. Linux
- * 2.6.22 introduced the csum_start field for this
- * purpose, but we should point the transport header to
- * it anyway for backward compatibility, as
- * dev_queue_xmit() does even in 2.6.28. */
- skb_set_transport_header(skb, skb->csum_start -
- skb_headroom(skb));
-#endif
- err = skb_checksum_help(skb);
- if (err)
- goto err_kfree_skbs;
- }
-#else
- if (skb->ip_summed == CHECKSUM_HW) {
- err = skb_checksum_help(skb, 0);
- if (err)
- goto err_kfree_skbs;
- }
-#endif
-
err = skb_cow(skb, sizeof *header);
if (err)
goto err_kfree_skbs;
return err;
}
-int
-dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
- u32 arg)
+int dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
+ u32 arg)
{
struct dp_stats_percpu *stats;
struct sk_buff_head *queue;
forward_ip_summed(skb);
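+ /* If a checksum-deferred packet is forwarded to the controller,
+ * correct the pointers and checksum. This happens on a regular basis
+ * only on Xen, on which VMs can pass up packets that do not have their
+ * checksum computed. */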
+ err = vswitch_skb_checksum_setup(skb);
+ if (err)
+ goto err_kfree_skb;
+
/* Break apart GSO packets into their component pieces. Otherwise
* userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
if (skb_is_gso(skb)) {
- struct sk_buff *nskb = skb_gso_segment(skb, 0);
+ struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
if (nskb) {
kfree_skb(skb);
skb = nskb;
err_kfree_skb:
kfree_skb(skb);
err:
- stats = percpu_ptr(dp->stats_percpu, get_cpu());
+ local_bh_disable();
+ stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+ write_seqcount_begin(&stats->seqlock);
stats->n_lost++;
- put_cpu();
+ write_seqcount_end(&stats->seqlock);
+
+ local_bh_enable();
return err;
}
static int flush_flows(struct datapath *dp)
{
- dp->n_flows = 0;
- return dp_table_flush(dp);
+ struct tbl *old_table = rcu_dereference(dp->table);
+ struct tbl *new_table;
+
+ new_table = tbl_create(0);
+ if (!new_table)
+ return -ENOMEM;
+
+ rcu_assign_pointer(dp->table, new_table);
+
+ tbl_deferred_destroy(old_table, flow_free_tbl);
+
+ return 0;
}
static int validate_actions(const struct sw_flow_actions *actions)
for (i = 0; i < actions->n_actions; i++) {
const union odp_action *a = &actions->actions[i];
- switch (a->type) {
- case ODPAT_OUTPUT:
- if (a->output.port >= DP_MAX_PORTS)
- return -EINVAL;
- break;
- case ODPAT_OUTPUT_GROUP:
- if (a->output_group.group >= DP_MAX_GROUPS)
- return -EINVAL;
+ switch (a->type) {
+ case ODPAT_CONTROLLER:
+ case ODPAT_STRIP_VLAN:
+ case ODPAT_SET_DL_SRC:
+ case ODPAT_SET_DL_DST:
+ case ODPAT_SET_NW_SRC:
+ case ODPAT_SET_NW_DST:
+ case ODPAT_SET_TP_SRC:
+ case ODPAT_SET_TP_DST:
+ case ODPAT_SET_TUNNEL:
+ case ODPAT_SET_PRIORITY:
+ case ODPAT_POP_PRIORITY:
+ case ODPAT_DROP_SPOOFED_ARP:
+ /* No validation needed. */
break;
- case ODPAT_SET_VLAN_VID:
- if (a->vlan_vid.vlan_vid & htons(~VLAN_VID_MASK))
+ case ODPAT_OUTPUT:
+ if (a->output.port >= DP_MAX_PORTS)
return -EINVAL;
break;
- case ODPAT_SET_VLAN_PCP:
- if (a->vlan_pcp.vlan_pcp
- & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT))
+ case ODPAT_SET_DL_TCI:
+ if (a->dl_tci.tci & htons(VLAN_CFI_MASK))
return -EINVAL;
break;
break;
default:
- if (a->type >= ODPAT_N_ACTIONS)
- return -EOPNOTSUPP;
- break;
+ return -EOPNOTSUPP;
}
}
static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
{
- if (flow->used.tv_sec) {
- stats->used_sec = flow->used.tv_sec;
- stats->used_nsec = flow->used.tv_nsec;
+ if (flow->used) {
+ struct timespec offset_ts, used, now_mono;
+
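+ /* 'flow->used' is a jiffies timestamp; convert it to a monotonic
+ * time for userspace: now_mono - (jiffies - flow->used). */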
+ ktime_get_ts(&now_mono);
+ jiffies_to_timespec(jiffies - flow->used, &offset_ts);
+ set_normalized_timespec(&used, now_mono.tv_sec - offset_ts.tv_sec,
+ now_mono.tv_nsec - offset_ts.tv_nsec);
+
+ stats->used_sec = used.tv_sec;
+ stats->used_nsec = used.tv_nsec;
} else {
stats->used_sec = 0;
stats->used_nsec = 0;
}
+
stats->n_packets = flow->packet_count;
stats->n_bytes = flow->byte_count;
- stats->ip_tos = flow->ip_tos;
+ stats->reserved = 0;
stats->tcp_flags = flow->tcp_flags;
stats->error = 0;
}
static void clear_stats(struct sw_flow *flow)
{
- flow->used.tv_sec = flow->used.tv_nsec = 0;
+ flow->used = 0;
flow->tcp_flags = 0;
- flow->ip_tos = 0;
flow->packet_count = 0;
flow->byte_count = 0;
}
-static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
+static int expand_table(struct datapath *dp)
{
- struct odp_flow_put uf;
+ struct tbl *old_table = rcu_dereference(dp->table);
+ struct tbl *new_table;
+
+ new_table = tbl_expand(old_table);
+ if (IS_ERR(new_table))
+ return PTR_ERR(new_table);
+
+ rcu_assign_pointer(dp->table, new_table);
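+ /* tbl_expand() moved the flow nodes into 'new_table', so destroy the
+ * old table with a NULL destructor to free only its buckets. */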
+ tbl_deferred_destroy(old_table, NULL);
+
+ return 0;
+}
+
+static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
+ struct odp_flow_stats *stats)
+{
+ struct tbl_node *flow_node;
struct sw_flow *flow;
- struct dp_table *table;
- struct odp_flow_stats stats;
+ struct tbl *table;
int error;
- error = -EFAULT;
- if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
- goto error;
- memset(uf.flow.key.reserved, 0, sizeof uf.flow.key.reserved);
-
table = rcu_dereference(dp->table);
- flow = dp_table_lookup(table, &uf.flow.key);
- if (!flow) {
+ flow_node = tbl_lookup(table, &uf->flow.key, flow_hash(&uf->flow.key), flow_cmp);
+ if (!flow_node) {
/* No such flow. */
struct sw_flow_actions *acts;
error = -ENOENT;
- if (!(uf.flags & ODPPF_CREATE))
+ if (!(uf->flags & ODPPF_CREATE))
goto error;
/* Expand table, if necessary, to make room. */
- if (dp->n_flows >= table->n_buckets) {
- error = -ENOSPC;
- if (table->n_buckets >= DP_MAX_BUCKETS)
- goto error;
-
- error = dp_table_expand(dp);
+ if (tbl_count(table) >= tbl_n_buckets(table)) {
+ error = expand_table(dp);
if (error)
goto error;
table = rcu_dereference(dp->table);
}
/* Allocate flow. */
- error = -ENOMEM;
- flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
- if (flow == NULL)
+ flow = flow_alloc();
+ if (IS_ERR(flow)) {
+ error = PTR_ERR(flow);
goto error;
- flow->key = uf.flow.key;
- spin_lock_init(&flow->lock);
+ }
+ flow->key = uf->flow.key;
clear_stats(flow);
/* Obtain actions. */
- acts = get_actions(&uf.flow);
+ acts = get_actions(&uf->flow);
error = PTR_ERR(acts);
if (IS_ERR(acts))
goto error_free_flow;
rcu_assign_pointer(flow->sf_acts, acts);
/* Put flow in bucket. */
- error = dp_table_insert(table, flow);
+ error = tbl_insert(table, &flow->tbl_node, flow_hash(&flow->key));
if (error)
goto error_free_flow_acts;
- dp->n_flows++;
- memset(&stats, 0, sizeof(struct odp_flow_stats));
+
+ memset(stats, 0, sizeof(struct odp_flow_stats));
} else {
/* We found a matching flow. */
struct sw_flow_actions *old_acts, *new_acts;
- unsigned long int flags;
+
+ flow = flow_cast(flow_node);
/* Bail out if we're not allowed to modify an existing flow. */
error = -EEXIST;
- if (!(uf.flags & ODPPF_MODIFY))
+ if (!(uf->flags & ODPPF_MODIFY))
goto error;
/* Swap actions. */
- new_acts = get_actions(&uf.flow);
+ new_acts = get_actions(&uf->flow);
error = PTR_ERR(new_acts);
if (IS_ERR(new_acts))
goto error;
}
/* Fetch stats, then clear them if necessary. */
- spin_lock_irqsave(&flow->lock, flags);
- get_stats(flow, &stats);
- if (uf.flags & ODPPF_ZERO_STATS)
+ spin_lock_bh(&flow->lock);
+ get_stats(flow, stats);
+ if (uf->flags & ODPPF_ZERO_STATS)
clear_stats(flow);
- spin_unlock_irqrestore(&flow->lock, flags);
+ spin_unlock_bh(&flow->lock);
}
- /* Copy stats to userspace. */
- if (__copy_to_user(&ufp->flow.stats, &stats,
- sizeof(struct odp_flow_stats)))
- return -EFAULT;
return 0;
error_free_flow_acts:
kfree(flow->sf_acts);
error_free_flow:
- kmem_cache_free(flow_cache, flow);
+ flow->sf_acts = NULL;
+ flow_put(flow);
error:
return error;
}
-static int put_actions(const struct sw_flow *flow, struct odp_flow __user *ufp)
+static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
+{
+ struct odp_flow_stats stats;
+ struct odp_flow_put uf;
+ int error;
+
+ if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
+ return -EFAULT;
+
+ error = do_put_flow(dp, &uf, &stats);
+ if (error)
+ return error;
+
+ if (copy_to_user(&ufp->flow.stats, &stats,
+ sizeof(struct odp_flow_stats)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int do_answer_query(struct sw_flow *flow, u32 query_flags,
+ struct odp_flow_stats __user *ustats,
+ union odp_action __user *actions,
+ u32 __user *n_actionsp)
{
- union odp_action __user *actions;
struct sw_flow_actions *sf_acts;
+ struct odp_flow_stats stats;
u32 n_actions;
- if (__get_user(actions, &ufp->actions) ||
- __get_user(n_actions, &ufp->n_actions))
+ spin_lock_bh(&flow->lock);
+ get_stats(flow, &stats);
+ if (query_flags & ODPFF_ZERO_TCP_FLAGS)
+ flow->tcp_flags = 0;
+
+ spin_unlock_bh(&flow->lock);
+
+ if (copy_to_user(ustats, &stats, sizeof(struct odp_flow_stats)) ||
+ get_user(n_actions, n_actionsp))
return -EFAULT;
if (!n_actions)
return 0;
sf_acts = rcu_dereference(flow->sf_acts);
- if (__put_user(sf_acts->n_actions, &ufp->n_actions) ||
+ if (put_user(sf_acts->n_actions, n_actionsp) ||
(actions && copy_to_user(actions, sf_acts->actions,
sizeof(union odp_action) *
min(sf_acts->n_actions, n_actions))))
static int answer_query(struct sw_flow *flow, u32 query_flags,
struct odp_flow __user *ufp)
{
- struct odp_flow_stats stats;
- unsigned long int flags;
+ union odp_action __user *actions;
- spin_lock_irqsave(&flow->lock, flags);
- get_stats(flow, &stats);
+ if (get_user(actions, &ufp->actions))
+ return -EFAULT;
- if (query_flags & ODPFF_ZERO_TCP_FLAGS) {
- flow->tcp_flags = 0;
- }
- spin_unlock_irqrestore(&flow->lock, flags);
+ return do_answer_query(flow, query_flags,
+ &ufp->stats, actions, &ufp->n_actions);
+}
- if (__copy_to_user(&ufp->stats, &stats, sizeof(struct odp_flow_stats)))
- return -EFAULT;
- return put_actions(flow, ufp);
+static struct sw_flow *do_del_flow(struct datapath *dp, struct odp_flow_key *key)
+{
+ struct tbl *table = rcu_dereference(dp->table);
+ struct tbl_node *flow_node;
+ int error;
+
+ flow_node = tbl_lookup(table, key, flow_hash(key), flow_cmp);
+ if (!flow_node)
+ return ERR_PTR(-ENOENT);
+
+ error = tbl_remove(table, flow_node);
+ if (error)
+ return ERR_PTR(error);
+
+ /* XXX The returned flow's statistics might lose a few packets, since
+ * other CPUs can be using this flow. We used to synchronize_rcu() to
+ * make sure that we get completely accurate stats, but that blows our
+ * performance, badly. */
+ return flow_cast(flow_node);
}
static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
{
- struct dp_table *table = rcu_dereference(dp->table);
- struct odp_flow uf;
struct sw_flow *flow;
+ struct odp_flow uf;
int error;
- error = -EFAULT;
if (copy_from_user(&uf, ufp, sizeof uf))
- goto error;
- memset(uf.key.reserved, 0, sizeof uf.key.reserved);
-
- flow = dp_table_lookup(table, &uf.key);
- error = -ENOENT;
- if (!flow)
- goto error;
+ return -EFAULT;
- /* XXX redundant lookup */
- error = dp_table_delete(table, flow);
- if (error)
- goto error;
+ flow = do_del_flow(dp, &uf.key);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
- /* XXX These statistics might lose a few packets, since other CPUs can
- * be using this flow. We used to synchronize_rcu() to make sure that
- * we get completely accurate stats, but that blows our performance,
- * badly. */
- dp->n_flows--;
error = answer_query(flow, 0, ufp);
flow_deferred_free(flow);
-
-error:
return error;
}
-static int query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
+static int do_query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
- struct dp_table *table = rcu_dereference(dp->table);
- int i;
+ struct tbl *table = rcu_dereference(dp->table);
+ u32 i;
+
for (i = 0; i < flowvec->n_flows; i++) {
- struct __user odp_flow *ufp = &flowvec->flows[i];
+ struct odp_flow __user *ufp = &flowvec->flows[i];
struct odp_flow uf;
- struct sw_flow *flow;
+ struct tbl_node *flow_node;
int error;
- if (__copy_from_user(&uf, ufp, sizeof uf))
+ if (copy_from_user(&uf, ufp, sizeof uf))
return -EFAULT;
- memset(uf.key.reserved, 0, sizeof uf.key.reserved);
- flow = dp_table_lookup(table, &uf.key);
- if (!flow)
- error = __put_user(ENOENT, &ufp->stats.error);
+ flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
+ if (!flow_node)
+ error = put_user(ENOENT, &ufp->stats.error);
else
- error = answer_query(flow, uf.flags, ufp);
+ error = answer_query(flow_cast(flow_node), uf.flags, ufp);
if (error)
return -EFAULT;
}
struct list_flows_cbdata {
struct odp_flow __user *uflows;
- int n_flows;
- int listed_flows;
+ u32 n_flows;
+ u32 listed_flows;
};
-static int list_flow(struct sw_flow *flow, void *cbdata_)
+static int list_flow(struct tbl_node *node, void *cbdata_)
{
+ struct sw_flow *flow = flow_cast(node);
struct list_flows_cbdata *cbdata = cbdata_;
struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
int error;
- if (__copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
+ if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
return -EFAULT;
error = answer_query(flow, 0, ufp);
if (error)
return 0;
}
-static int list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
+static int do_list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
struct list_flows_cbdata cbdata;
int error;
cbdata.uflows = flowvec->flows;
cbdata.n_flows = flowvec->n_flows;
cbdata.listed_flows = 0;
- error = dp_table_foreach(rcu_dereference(dp->table),
- list_flow, &cbdata);
+
+ error = tbl_foreach(rcu_dereference(dp->table), list_flow, &cbdata);
return error ? error : cbdata.listed_flows;
}
int retval;
uflowvec = (struct odp_flowvec __user *)argp;
- if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
- copy_from_user(&flowvec, uflowvec, sizeof flowvec))
+ if (copy_from_user(&flowvec, uflowvec, sizeof flowvec))
return -EFAULT;
if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
return -EINVAL;
- if (!access_ok(VERIFY_WRITE, flowvec.flows,
- flowvec.n_flows * sizeof(struct odp_flow)))
- return -EFAULT;
-
retval = function(dp, &flowvec);
return (retval < 0 ? retval
: retval == flowvec.n_flows ? 0
- : __put_user(retval, &uflowvec->n_flows));
+ : put_user(retval, &uflowvec->n_flows));
}
-static int do_execute(struct datapath *dp, const struct odp_execute *executep)
+static int do_execute(struct datapath *dp, const struct odp_execute *execute)
{
- struct odp_execute execute;
struct odp_flow_key key;
struct sk_buff *skb;
struct sw_flow_actions *actions;
struct ethhdr *eth;
+ bool is_frag;
int err;
- err = -EFAULT;
- if (copy_from_user(&execute, executep, sizeof execute))
- goto error;
-
err = -EINVAL;
- if (execute.length < ETH_HLEN || execute.length > 65535)
+ if (execute->length < ETH_HLEN || execute->length > 65535)
goto error;
- err = -ENOMEM;
- actions = flow_actions_alloc(execute.n_actions);
- if (!actions)
+ actions = flow_actions_alloc(execute->n_actions);
+ if (IS_ERR(actions)) {
+ err = PTR_ERR(actions);
goto error;
+ }
err = -EFAULT;
- if (copy_from_user(actions->actions, execute.actions,
- execute.n_actions * sizeof *execute.actions))
+ if (copy_from_user(actions->actions, execute->actions,
+ execute->n_actions * sizeof *execute->actions))
goto error_free_actions;
err = validate_actions(actions);
goto error_free_actions;
err = -ENOMEM;
- skb = alloc_skb(execute.length, GFP_KERNEL);
+ skb = alloc_skb(execute->length, GFP_KERNEL);
if (!skb)
goto error_free_actions;
- if (execute.in_port < DP_MAX_PORTS)
- OVS_CB(skb)->dp_port = dp->ports[execute.in_port];
- else
- OVS_CB(skb)->dp_port = NULL;
-
err = -EFAULT;
- if (copy_from_user(skb_put(skb, execute.length), execute.data,
- execute.length))
+ if (copy_from_user(skb_put(skb, execute->length), execute->data,
+ execute->length))
goto error_free_skb;
skb_reset_mac_header(skb);
else
skb->protocol = htons(ETH_P_802_2);
- flow_extract(skb, execute.in_port, &key);
- err = execute_actions(dp, skb, &key, actions->actions,
- actions->n_actions, GFP_KERNEL);
+ err = flow_extract(skb, -1, &key, &is_frag);
+ if (err)
+ goto error_free_skb;
+
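+ /* execute_actions() may dereference RCU-protected ports and flows. */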
+ rcu_read_lock();
+ err = execute_actions(dp, skb, &key, actions->actions, actions->n_actions);
+ rcu_read_unlock();
+
kfree(actions);
return err;
return err;
}
+static int execute_packet(struct datapath *dp, const struct odp_execute __user *executep)
+{
+ struct odp_execute execute;
+
+ if (copy_from_user(&execute, executep, sizeof execute))
+ return -EFAULT;
+
+ return do_execute(dp, &execute);
+}
+
static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
+ struct tbl *table = rcu_dereference(dp->table);
struct odp_stats stats;
int i;
- stats.n_flows = dp->n_flows;
- stats.cur_capacity = rcu_dereference(dp->table)->n_buckets;
- stats.max_capacity = DP_MAX_BUCKETS;
+ stats.n_flows = tbl_count(table);
+ stats.cur_capacity = tbl_n_buckets(table);
+ stats.max_capacity = TBL_MAX_BUCKETS;
stats.n_ports = dp->n_ports;
stats.max_ports = DP_MAX_PORTS;
- stats.max_groups = DP_MAX_GROUPS;
stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
for_each_possible_cpu(i) {
- const struct dp_stats_percpu *s;
- s = percpu_ptr(dp->stats_percpu, i);
- stats.n_frags += s->n_frags;
- stats.n_hit += s->n_hit;
- stats.n_missed += s->n_missed;
- stats.n_lost += s->n_lost;
+ const struct dp_stats_percpu *percpu_stats;
+ struct dp_stats_percpu local_stats;
+ unsigned seqcount;
+
+ percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
+
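+ /* Retry until the snapshot was not torn by a concurrent writer. */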
+ do {
+ seqcount = read_seqcount_begin(&percpu_stats->seqlock);
+ local_stats = *percpu_stats;
+ } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
+
+ stats.n_frags += local_stats.n_frags;
+ stats.n_hit += local_stats.n_hit;
+ stats.n_missed += local_stats.n_missed;
+ stats.n_lost += local_stats.n_lost;
}
stats.max_miss_queue = DP_MAX_QUEUE_LEN;
stats.max_action_queue = DP_MAX_QUEUE_LEN;
}
/* Sets the MTU of all datapath devices to the minimum of the ports. Must
- * be called with RTNL lock and dp_mutex. */
+ * be called with RTNL lock. */
void set_internal_devs_mtu(const struct datapath *dp)
{
struct dp_port *p;
}
}
-static int
-put_port(const struct dp_port *p, struct odp_port __user *uop)
+static int put_port(const struct dp_port *p, struct odp_port __user *uop)
{
struct odp_port op;
return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
}
-static int
-query_port(struct datapath *dp, struct odp_port __user *uport)
+static int query_port(struct datapath *dp, struct odp_port __user *uport)
{
struct odp_port port;
return put_port(dp->ports[port.port], uport);
}
-static int
-list_ports(struct datapath *dp, struct odp_portvec __user *pvp)
+static int do_list_ports(struct datapath *dp, struct odp_port __user *uports,
+ int n_ports)
{
- struct odp_portvec pv;
- struct dp_port *p;
- int idx;
+ int idx = 0;
+ if (n_ports) {
+ struct dp_port *p;
- if (copy_from_user(&pv, pvp, sizeof pv))
- return -EFAULT;
-
- idx = 0;
- if (pv.n_ports) {
list_for_each_entry_rcu (p, &dp->port_list, node) {
- if (put_port(p, &pv.ports[idx]))
- return -EFAULT;
- if (idx++ >= pv.n_ports)
- break;
+ if (idx >= n_ports)
+ break;
+ if (put_port(p, &uports[idx]))
+ return -EFAULT;
+ idx++;
}
}
- return put_user(dp->n_ports, &pvp->n_ports);
-}
-
-/* RCU callback for freeing a dp_port_group */
-static void free_port_group(struct rcu_head *rcu)
-{
- struct dp_port_group *g = container_of(rcu, struct dp_port_group, rcu);
- kfree(g);
+ return idx;
}
-static int
-set_port_group(struct datapath *dp, const struct odp_port_group __user *upg)
+static int list_ports(struct datapath *dp, struct odp_portvec __user *upv)
{
- struct odp_port_group pg;
- struct dp_port_group *new_group, *old_group;
- int error;
-
- error = -EFAULT;
- if (copy_from_user(&pg, upg, sizeof pg))
- goto error;
-
- error = -EINVAL;
- if (pg.n_ports > DP_MAX_PORTS || pg.group >= DP_MAX_GROUPS)
- goto error;
-
- error = -ENOMEM;
- new_group = kmalloc(sizeof *new_group + sizeof(u16) * pg.n_ports,
- GFP_KERNEL);
- if (!new_group)
- goto error;
-
- new_group->n_ports = pg.n_ports;
- error = -EFAULT;
- if (copy_from_user(new_group->ports, pg.ports,
- sizeof(u16) * pg.n_ports))
- goto error_free;
-
- old_group = rcu_dereference(dp->groups[pg.group]);
- rcu_assign_pointer(dp->groups[pg.group], new_group);
- if (old_group)
- call_rcu(&old_group->rcu, free_port_group);
- return 0;
-
-error_free:
- kfree(new_group);
-error:
- return error;
-}
-
-static int
-get_port_group(struct datapath *dp, struct odp_port_group *upg)
-{
- struct odp_port_group pg;
- struct dp_port_group *g;
- u16 n_copy;
-
- if (copy_from_user(&pg, upg, sizeof pg))
- return -EFAULT;
-
- if (pg.group >= DP_MAX_GROUPS)
- return -EINVAL;
+ struct odp_portvec pv;
+ int retval;
- g = dp->groups[pg.group];
- n_copy = g ? min_t(int, g->n_ports, pg.n_ports) : 0;
- if (n_copy && copy_to_user(pg.ports, g->ports, n_copy * sizeof(u16)))
+ if (copy_from_user(&pv, upv, sizeof pv))
return -EFAULT;
- if (put_user(g ? g->n_ports : 0, &upg->n_ports))
- return -EFAULT;
+ retval = do_list_ports(dp, pv.ports, pv.n_ports);
+ if (retval < 0)
+ return retval;
- return 0;
+ return put_user(retval, &upv->n_ports);
}
static int get_listen_mask(const struct file *f)
goto exit;
case ODP_VPORT_ADD:
- err = vport_add((struct odp_vport_add __user *)argp);
+ err = vport_user_add((struct odp_vport_add __user *)argp);
goto exit;
case ODP_VPORT_MOD:
- err = vport_mod((struct odp_vport_mod __user *)argp);
+ err = vport_user_mod((struct odp_vport_mod __user *)argp);
goto exit;
case ODP_VPORT_DEL:
- err = vport_del((char __user *)argp);
+ err = vport_user_del((char __user *)argp);
goto exit;
case ODP_VPORT_STATS_GET:
- err = vport_stats_get((struct odp_vport_stats_req __user *)argp);
+ err = vport_user_stats_get((struct odp_vport_stats_req __user *)argp);
+ goto exit;
+
+ case ODP_VPORT_STATS_SET:
+ err = vport_user_stats_set((struct odp_vport_stats_req __user *)argp);
goto exit;
case ODP_VPORT_ETHER_GET:
- err = vport_ether_get((struct odp_vport_ether __user *)argp);
+ err = vport_user_ether_get((struct odp_vport_ether __user *)argp);
goto exit;
case ODP_VPORT_ETHER_SET:
- err = vport_ether_set((struct odp_vport_ether __user *)argp);
+ err = vport_user_ether_set((struct odp_vport_ether __user *)argp);
goto exit;
case ODP_VPORT_MTU_GET:
- err = vport_mtu_get((struct odp_vport_mtu __user *)argp);
+ err = vport_user_mtu_get((struct odp_vport_mtu __user *)argp);
goto exit;
case ODP_VPORT_MTU_SET:
- err = vport_mtu_set((struct odp_vport_mtu __user *)argp);
+ err = vport_user_mtu_set((struct odp_vport_mtu __user *)argp);
goto exit;
}
err = list_ports(dp, (struct odp_portvec __user *)argp);
break;
- case ODP_PORT_GROUP_SET:
- err = set_port_group(dp, (struct odp_port_group __user *)argp);
- break;
-
- case ODP_PORT_GROUP_GET:
- err = get_port_group(dp, (struct odp_port_group __user *)argp);
- break;
-
case ODP_FLOW_FLUSH:
err = flush_flows(dp);
break;
break;
case ODP_FLOW_GET:
- err = do_flowvec_ioctl(dp, argp, query_flows);
+ err = do_flowvec_ioctl(dp, argp, do_query_flows);
break;
case ODP_FLOW_LIST:
- err = do_flowvec_ioctl(dp, argp, list_flows);
+ err = do_flowvec_ioctl(dp, argp, do_list_flows);
break;
case ODP_EXECUTE:
- err = do_execute(dp, (struct odp_execute __user *)argp);
+ err = execute_packet(dp, (struct odp_execute __user *)argp);
break;
default:
return 0;
}
+#ifdef CONFIG_COMPAT
+static int compat_list_ports(struct datapath *dp, struct compat_odp_portvec __user *upv)
+{
+ struct compat_odp_portvec pv;
+ int retval;
+
+ if (copy_from_user(&pv, upv, sizeof pv))
+ return -EFAULT;
+
+ retval = do_list_ports(dp, compat_ptr(pv.ports), pv.n_ports);
+ if (retval < 0)
+ return retval;
+
+ return put_user(retval, &upv->n_ports);
+}
+
+static int compat_get_flow(struct odp_flow *flow, const struct compat_odp_flow __user *compat)
+{
+ compat_uptr_t actions;
+
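+ /* The 32-bit ABI carries pointers as compat_uptr_t, so fetch 'actions'
+ * separately and widen it with compat_ptr() below. */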
+ if (!access_ok(VERIFY_READ, compat, sizeof(struct compat_odp_flow)) ||
+ __copy_from_user(&flow->stats, &compat->stats, sizeof(struct odp_flow_stats)) ||
+ __copy_from_user(&flow->key, &compat->key, sizeof(struct odp_flow_key)) ||
+ __get_user(actions, &compat->actions) ||
+ __get_user(flow->n_actions, &compat->n_actions) ||
+ __get_user(flow->flags, &compat->flags))
+ return -EFAULT;
+
+ flow->actions = compat_ptr(actions);
+ return 0;
+}
+
+static int compat_put_flow(struct datapath *dp, struct compat_odp_flow_put __user *ufp)
+{
+ struct odp_flow_stats stats;
+ struct odp_flow_put fp;
+ int error;
+
+ if (compat_get_flow(&fp.flow, &ufp->flow) ||
+ get_user(fp.flags, &ufp->flags))
+ return -EFAULT;
+
+ error = do_put_flow(dp, &fp, &stats);
+ if (error)
+ return error;
+
+ if (copy_to_user(&ufp->flow.stats, &stats,
+ sizeof(struct odp_flow_stats)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int compat_answer_query(struct sw_flow *flow, u32 query_flags,
+ struct compat_odp_flow __user *ufp)
+{
+ compat_uptr_t actions;
+
+ if (get_user(actions, &ufp->actions))
+ return -EFAULT;
+
+ return do_answer_query(flow, query_flags, &ufp->stats,
+ compat_ptr(actions), &ufp->n_actions);
+}
+
+static int compat_del_flow(struct datapath *dp, struct compat_odp_flow __user *ufp)
+{
+ struct sw_flow *flow;
+ struct odp_flow uf;
+ int error;
+
+ if (compat_get_flow(&uf, ufp))
+ return -EFAULT;
+
+ flow = do_del_flow(dp, &uf.key);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ error = compat_answer_query(flow, 0, ufp);
+ flow_deferred_free(flow);
+ return error;
+}
+
+static int compat_query_flows(struct datapath *dp, struct compat_odp_flow __user *flows, u32 n_flows)
+{
+ struct tbl *table = rcu_dereference(dp->table);
+ u32 i;
+
+ for (i = 0; i < n_flows; i++) {
+ struct compat_odp_flow __user *ufp = &flows[i];
+ struct odp_flow uf;
+ struct tbl_node *flow_node;
+ int error;
+
+ if (compat_get_flow(&uf, ufp))
+ return -EFAULT;
+
+ flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
+ if (!flow_node)
+ error = put_user(ENOENT, &ufp->stats.error);
+ else
+ error = compat_answer_query(flow_cast(flow_node), uf.flags, ufp);
+ if (error)
+ return -EFAULT;
+ }
+ return n_flows;
+}
+
+struct compat_list_flows_cbdata {
+ struct compat_odp_flow __user *uflows;
+ u32 n_flows;
+ u32 listed_flows;
+};
+
+static int compat_list_flow(struct tbl_node *node, void *cbdata_)
+{
+ struct sw_flow *flow = flow_cast(node);
+ struct compat_list_flows_cbdata *cbdata = cbdata_;
+ struct compat_odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
+ int error;
+
+ if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
+ return -EFAULT;
+ error = compat_answer_query(flow, 0, ufp);
+ if (error)
+ return error;
+
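+ /* A nonzero return value stops tbl_foreach() once the caller's buffer
+ * is full. */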
+ if (cbdata->listed_flows >= cbdata->n_flows)
+ return cbdata->listed_flows;
+ return 0;
+}
+
+static int compat_list_flows(struct datapath *dp, struct compat_odp_flow __user *flows, u32 n_flows)
+{
+ struct compat_list_flows_cbdata cbdata;
+ int error;
+
+ if (!n_flows)
+ return 0;
+
+ cbdata.uflows = flows;
+ cbdata.n_flows = n_flows;
+ cbdata.listed_flows = 0;
+
+ error = tbl_foreach(rcu_dereference(dp->table), compat_list_flow, &cbdata);
+ return error ? error : cbdata.listed_flows;
+}
+
+static int compat_flowvec_ioctl(struct datapath *dp, unsigned long argp,
+ int (*function)(struct datapath *,
+ struct compat_odp_flow __user *,
+ u32 n_flows))
+{
+ struct compat_odp_flowvec __user *uflowvec;
+ struct compat_odp_flow __user *flows;
+ struct compat_odp_flowvec flowvec;
+ int retval;
+
+ uflowvec = compat_ptr(argp);
+ if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
+ copy_from_user(&flowvec, uflowvec, sizeof flowvec))
+ return -EFAULT;
+
+ if (flowvec.n_flows > INT_MAX / sizeof(struct compat_odp_flow))
+ return -EINVAL;
+
+ flows = compat_ptr(flowvec.flows);
+ if (!access_ok(VERIFY_WRITE, flows,
+ flowvec.n_flows * sizeof(struct compat_odp_flow)))
+ return -EFAULT;
+
+ retval = function(dp, flows, flowvec.n_flows);
+ return (retval < 0 ? retval
+ : retval == flowvec.n_flows ? 0
+ : put_user(retval, &uflowvec->n_flows));
+}
+
+static int compat_execute(struct datapath *dp, const struct compat_odp_execute __user *uexecute)
+{
+ struct odp_execute execute;
+ compat_uptr_t actions;
+ compat_uptr_t data;
+
+ if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
+ __get_user(actions, &uexecute->actions) ||
+ __get_user(execute.n_actions, &uexecute->n_actions) ||
+ __get_user(data, &uexecute->data) ||
+ __get_user(execute.length, &uexecute->length))
+ return -EFAULT;
+
+ execute.actions = compat_ptr(actions);
+ execute.data = compat_ptr(data);
+
+ return do_execute(dp, &execute);
+}
+
+static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
+{
+ int dp_idx = iminor(f->f_dentry->d_inode);
+ struct datapath *dp;
+ int err;
+
+ switch (cmd) {
+ case ODP_DP_DESTROY:
+ case ODP_FLOW_FLUSH:
+ /* Ioctls that don't need any translation at all. */
+ return openvswitch_ioctl(f, cmd, argp);
+
+ case ODP_DP_CREATE:
+ case ODP_PORT_ATTACH:
+ case ODP_PORT_DETACH:
+ case ODP_VPORT_DEL:
+ case ODP_VPORT_MTU_SET:
+ case ODP_VPORT_MTU_GET:
+ case ODP_VPORT_ETHER_SET:
+ case ODP_VPORT_ETHER_GET:
+ case ODP_VPORT_STATS_SET:
+ case ODP_VPORT_STATS_GET:
+ case ODP_DP_STATS:
+ case ODP_GET_DROP_FRAGS:
+ case ODP_SET_DROP_FRAGS:
+ case ODP_SET_LISTEN_MASK:
+ case ODP_GET_LISTEN_MASK:
+ case ODP_SET_SFLOW_PROBABILITY:
+ case ODP_GET_SFLOW_PROBABILITY:
+ case ODP_PORT_QUERY:
+ /* Ioctls that just need their pointer argument extended. */
+ return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
+
+ case ODP_VPORT_ADD32:
+ return compat_vport_user_add(compat_ptr(argp));
+
+ case ODP_VPORT_MOD32:
+ return compat_vport_user_mod(compat_ptr(argp));
+ }
+
+ dp = get_dp_locked(dp_idx);
+ err = -ENODEV;
+ if (!dp)
+ goto exit;
+
+ switch (cmd) {
+ case ODP_PORT_LIST32:
+ err = compat_list_ports(dp, compat_ptr(argp));
+ break;
+
+ case ODP_FLOW_PUT32:
+ err = compat_put_flow(dp, compat_ptr(argp));
+ break;
+
+ case ODP_FLOW_DEL32:
+ err = compat_del_flow(dp, compat_ptr(argp));
+ break;
+
+ case ODP_FLOW_GET32:
+ err = compat_flowvec_ioctl(dp, argp, compat_query_flows);
+ break;
+
+ case ODP_FLOW_LIST32:
+ err = compat_flowvec_ioctl(dp, argp, compat_list_flows);
+ break;
+
+ case ODP_EXECUTE32:
+ err = compat_execute(dp, compat_ptr(argp));
+ break;
+
+ default:
+ err = -ENOIOCTLCMD;
+ break;
+ }
+ mutex_unlock(&dp->mutex);
+exit:
+ return err;
+}
+#endif
+
+/* Unfortunately this function is not exported, so this is a verbatim copy
+ * from net/core/datagram.c in 2.6.30. */
+static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
+ u8 __user *to, int len,
+ __wsum *csump)
+{
+ int start = skb_headlen(skb);
+ int pos = 0;
+ int i, copy = start - offset;
+
+ /* Copy header. */
+ if (copy > 0) {
+ int err = 0;
+ if (copy > len)
+ copy = len;
+ *csump = csum_and_copy_to_user(skb->data + offset, to, copy,
+ *csump, &err);
+ if (err)
+ goto fault;
+ if ((len -= copy) == 0)
+ return 0;
+ offset += copy;
+ to += copy;
+ pos = copy;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ int end;
+
+ WARN_ON(start > offset + len);
+
+ end = start + skb_shinfo(skb)->frags[i].size;
+ if ((copy = end - offset) > 0) {
+ __wsum csum2;
+ int err = 0;
+ u8 *vaddr;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct page *page = frag->page;
+
+ if (copy > len)
+ copy = len;
+ vaddr = kmap(page);
+ csum2 = csum_and_copy_to_user(vaddr +
+ frag->page_offset +
+ offset - start,
+ to, copy, 0, &err);
+ kunmap(page);
+ if (err)
+ goto fault;
+ *csump = csum_block_add(*csump, csum2, pos);
+ if (!(len -= copy))
+ return 0;
+ offset += copy;
+ to += copy;
+ pos += copy;
+ }
+ start = end;
+ }
+
+ if (skb_shinfo(skb)->frag_list) {
+ struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+ for (; list; list=list->next) {
+ int end;
+
+ WARN_ON(start > offset + len);
+
+ end = start + list->len;
+ if ((copy = end - offset) > 0) {
+ __wsum csum2 = 0;
+ if (copy > len)
+ copy = len;
+ if (skb_copy_and_csum_datagram(list,
+ offset - start,
+ to, copy,
+ &csum2))
+ goto fault;
+ *csump = csum_block_add(*csump, csum2, pos);
+ if ((len -= copy) == 0)
+ return 0;
+ offset += copy;
+ to += copy;
+ pos += copy;
+ }
+ start = end;
+ }
+ }
+ if (!len)
+ return 0;
+
+fault:
+ return -EFAULT;
+}
+
ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes,
loff_t *ppos)
{
int dp_idx = iminor(f->f_dentry->d_inode);
struct datapath *dp = get_dp(dp_idx);
struct sk_buff *skb;
- struct iovec __user iov;
- size_t copy_bytes;
+ size_t copy_bytes, tot_copy_bytes;
int retval;
if (!dp)
}
}
success:
- copy_bytes = min_t(size_t, skb->len, nbytes);
- iov.iov_base = buf;
- iov.iov_len = copy_bytes;
- retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
+ copy_bytes = tot_copy_bytes = min_t(size_t, skb->len, nbytes);
+
+ retval = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
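+ /* If the entire packet fits in the user buffer, compute the checksum
+ * while copying and write the folded result into the copy at
+ * csum_offset. Otherwise checksum the skb in place before the
+ * partial copy. */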
+ if (copy_bytes == skb->len) {
+ __wsum csum = 0;
+ unsigned int csum_start, csum_offset;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
+ csum_start = skb->csum_start - skb_headroom(skb);
+ csum_offset = skb->csum_offset;
+#else
+ csum_start = skb_transport_header(skb) - skb->data;
+ csum_offset = skb->csum;
+#endif
+ BUG_ON(csum_start >= skb_headlen(skb));
+ retval = skb_copy_and_csum_datagram(skb, csum_start, buf + csum_start,
+ copy_bytes - csum_start, &csum);
+ if (!retval) {
+ __sum16 __user *csump;
+
+ copy_bytes = csum_start;
+ csump = (__sum16 __user *)(buf + csum_start + csum_offset);
+
+ BUG_ON((char *)csump + sizeof(__sum16) > buf + nbytes);
+ put_user(csum_fold(csum), csump);
+ }
+ } else
+ retval = skb_checksum_help(skb);
+ }
+
+ if (!retval) {
+ struct iovec __user iov;
+
+ iov.iov_base = buf;
+ iov.iov_len = copy_bytes;
+ retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
+ }
+
if (!retval)
- retval = copy_bytes;
+ retval = tot_copy_bytes;
+
kfree_skb(skb);
error:
.read = openvswitch_read,
.poll = openvswitch_poll,
.unlocked_ioctl = openvswitch_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = openvswitch_compat_ioctl,
+#endif
/* XXX .fasync = openvswitch_fasync, */
};