/* Functions for managing the dp interface/device. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
-#include <linux/random.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <asm/bug.h>
-#include <asm/highmem.h>
+#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
-#include <linux/workqueue.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>
#include <linux/compat.h>
static struct datapath *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);
-/* Number of milliseconds between runs of the maintenance thread. */
-#define MAINT_SLEEP_MSECS 1000
+/* We limit the number of times that we recurse into
+ * dp_process_received_packet() to avoid blowing out the stack in the event
+ * that the flow actions form a loop. */
+struct loop_counter {
+ int count; /* Recursion depth on this CPU/context. */
+ bool looping; /* Loop detected? */
+};
+
+#define DP_MAX_LOOPS 5
+
+/* We use a separate counter for each CPU for both interrupt and non-interrupt
+ * context in order to keep the limit deterministic for a given packet. */
+struct percpu_loop_counters {
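+ /* counters[0] is used from process context and counters[1] from
+ * interrupt (hard- or soft-IRQ) context, as selected by the
+ * !!in_interrupt() index below, so each CPU/context pair keeps an
+ * independent recursion count. */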
+ struct loop_counter counters[2];
+};
+
+static DEFINE_PER_CPU(struct percpu_loop_counters, dp_loop_counters);
static int new_dp_port(struct datapath *, struct odp_port *, int port_no);
for (i = 0; i < DP_N_QUEUES; i++)
skb_queue_purge(&dp->queues[i]);
- for (i = 0; i < DP_MAX_GROUPS; i++)
- kfree(dp->groups[i]);
free_percpu(dp->stats_percpu);
kobject_put(&dp->ifobj);
module_put(THIS_MODULE);
p->port_no = port_no;
p->dp = dp;
+ p->vport = vport;
atomic_set(&p->sflow_pool, 0);
err = vport_attach(vport, p);
return err;
}
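+/* A loop was detected: warn (rate-limited) and truncate the flow's action
+ * list so that no further actions execute. Since 'actions' is the flow's
+ * shared action set, later packets hitting this flow are dropped as well
+ * until its actions are replaced. */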
+static void suppress_loop(struct datapath *dp, struct sw_flow_actions *actions)
+{
+ if (net_ratelimit())
+ pr_warn("%s: flow looped %d times, dropping\n",
+ dp_name(dp), DP_MAX_LOOPS);
+ actions->n_actions = 0;
+}
+
/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb)
{
struct datapath *dp = p->dp;
struct dp_stats_percpu *stats;
int stats_counter_off;
- struct odp_flow_key key;
- struct tbl_node *flow_node;
-
- WARN_ON_ONCE(skb_shared(skb));
- skb_warn_if_lro(skb);
+ struct sw_flow_actions *acts;
+ struct loop_counter *loop;
+ int error;
OVS_CB(skb)->dp_port = p;
- if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
- if (dp->drop_frags) {
+ if (!OVS_CB(skb)->flow) {
+ struct odp_flow_key key;
+ struct tbl_node *flow_node;
+ bool is_frag;
+
+ /* Extract flow from 'skb' into 'key'. */
+ error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key, &is_frag);
+ if (unlikely(error)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ if (is_frag && dp->drop_frags) {
kfree_skb(skb);
stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
goto out;
}
+
+ /* Look up flow. */
+ flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
+ flow_hash(&key), flow_cmp);
+ if (unlikely(!flow_node)) {
+ dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
+ goto out;
+ }
+
+ OVS_CB(skb)->flow = flow_cast(flow_node);
}
- flow_node = tbl_lookup(rcu_dereference(dp->table), &key, flow_hash(&key), flow_cmp);
- if (flow_node) {
- struct sw_flow *flow = flow_cast(flow_node);
- struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts);
- flow_used(flow, skb);
- execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
- GFP_ATOMIC);
- stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
- } else {
- stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
- dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
+ flow_used(OVS_CB(skb)->flow, skb);
+
+ acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
+
+ /* Check whether we've looped too much. */
+ loop = &get_cpu_var(dp_loop_counters).counters[!!in_interrupt()];
+ if (unlikely(++loop->count > DP_MAX_LOOPS))
+ loop->looping = true;
+ if (unlikely(loop->looping)) {
+ suppress_loop(dp, acts);
+ goto out_loop;
}
+ /* Execute actions. */
+ execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
+ acts->n_actions, GFP_ATOMIC);
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
+
+ /* Check whether sub-actions looped too much. */
+ if (unlikely(loop->looping))
+ suppress_loop(dp, acts);
+
+out_loop:
+ /* Decrement the loop counter; when the outermost invocation exits,
+ * clear the looping flag for the next packet. */
+ if (!--loop->count)
+ loop->looping = false;
+ put_cpu_var(dp_loop_counters);
+
out:
+ /* Update datapath statistics. */
local_bh_disable();
stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
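+ /* local_bh_disable() serializes writers on this CPU; the seqcount
+ * lets the stats-query path read a consistent snapshot from another
+ * CPU without taking a lock. */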
+ write_seqcount_begin(&stats->seqlock);
(*(u64 *)((u8 *)stats + stats_counter_off))++;
+ write_seqcount_end(&stats->seqlock);
+
local_bh_enable();
}
break;
default:
if (net_ratelimit())
- printk(KERN_ERR "Attempting to checksum a non-"
- "TCP/UDP packet, dropping a protocol"
- " %d packet", iph->protocol);
+ pr_err("Attempting to checksum a non-TCP/UDP packet, "
+ "dropping a protocol %d packet",
+ iph->protocol);
goto out;
}
* be reverified). If we receive a packet with CHECKSUM_HW that really means
* CHECKSUM_PARTIAL, it will be sent with the wrong checksum. However, there
* shouldn't be any devices that do this with bridging. */
-void
-compute_ip_summed(struct sk_buff *skb, bool xmit)
+void compute_ip_summed(struct sk_buff *skb, bool xmit)
{
/* For our convenience these defines change repeatedly between kernel
* versions, so we can't just copy them over... */
break;
#endif
default:
- printk(KERN_ERR "openvswitch: unknown checksum type %d\n",
- skb->ip_summed);
+ pr_err("unknown checksum type %d\n", skb->ip_summed);
/* None seems the safest... */
OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
- }
+ }
#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
/* Xen has a special way of representing CHECKSUM_PARTIAL on older
* is slightly different because we are only concerned with bridging and not
* other types of forwarding and can get away with slightly more optimal
 * behavior. */
-void
-forward_ip_summed(struct sk_buff *skb)
+void forward_ip_summed(struct sk_buff *skb)
{
#ifdef CHECKSUM_HW
if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
/* Append each packet in 'skb' list to 'queue'. There will be only one packet
* unless we broke up a GSO packet. */
-static int
-queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
- int queue_no, u32 arg)
+static int queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
+ int queue_no, u32 arg)
{
struct sk_buff *nskb;
int port_no;
return err;
}
-int
-dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
- u32 arg)
+int dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
+ u32 arg)
{
struct dp_stats_percpu *stats;
struct sk_buff_head *queue;
err:
local_bh_disable();
stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+ write_seqcount_begin(&stats->seqlock);
stats->n_lost++;
+ write_seqcount_end(&stats->seqlock);
+
local_bh_enable();
return err;
for (i = 0; i < actions->n_actions; i++) {
const union odp_action *a = &actions->actions[i];
+ __be16 mask;
+
switch (a->type) {
- case ODPAT_OUTPUT:
- if (a->output.port >= DP_MAX_PORTS)
- return -EINVAL;
+ case ODPAT_CONTROLLER:
+ case ODPAT_STRIP_VLAN:
+ case ODPAT_SET_DL_SRC:
+ case ODPAT_SET_DL_DST:
+ case ODPAT_SET_NW_SRC:
+ case ODPAT_SET_NW_DST:
+ case ODPAT_SET_TP_SRC:
+ case ODPAT_SET_TP_DST:
+ case ODPAT_SET_TUNNEL:
+ case ODPAT_SET_PRIORITY:
+ case ODPAT_POP_PRIORITY:
+ case ODPAT_DROP_SPOOFED_ARP:
+ /* No validation needed. */
break;
- case ODPAT_OUTPUT_GROUP:
- if (a->output_group.group >= DP_MAX_GROUPS)
+ case ODPAT_OUTPUT:
+ if (a->output.port >= DP_MAX_PORTS)
return -EINVAL;
break;
- case ODPAT_SET_VLAN_VID:
- if (a->vlan_vid.vlan_vid & htons(~VLAN_VID_MASK))
+ case ODPAT_SET_DL_TCI:
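+ /* The mask must select the VID, the PCP, or both, and the new
+ * TCI value may not set bits outside the mask. */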
+ mask = a->dl_tci.mask;
+ if (mask != htons(VLAN_VID_MASK) &&
+ mask != htons(VLAN_PCP_MASK) &&
+ mask != htons(VLAN_VID_MASK | VLAN_PCP_MASK))
return -EINVAL;
- break;
-
- case ODPAT_SET_VLAN_PCP:
- if (a->vlan_pcp.vlan_pcp
- & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT))
+ if (a->dl_tci.tci & ~mask)
return -EINVAL;
break;
break;
default:
- if (a->type >= ODPAT_N_ACTIONS)
- return -EOPNOTSUPP;
- break;
+ return -EOPNOTSUPP;
}
}
return ERR_PTR(error);
}
-static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
+static struct timespec get_time_offset(void)
+{
+ struct timespec now_mono, now_jiffies;
+
+ ktime_get_ts(&now_mono);
+ jiffies_to_timespec(jiffies, &now_jiffies);
+ return timespec_sub(now_mono, now_jiffies);
+}
+
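+/* 'flow->used' is now kept in jiffies; adding the monotonic-minus-jiffies
+ * offset converts it to the monotonic timebase reported to userspace. The
+ * offset is computed once per query so that all flows in a batch use a
+ * consistent conversion. */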
+static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats,
+ struct timespec time_offset)
{
- if (flow->used.tv_sec) {
- stats->used_sec = flow->used.tv_sec;
- stats->used_nsec = flow->used.tv_nsec;
+ if (flow->used) {
+ struct timespec flow_ts, used;
+
+ jiffies_to_timespec(flow->used, &flow_ts);
+ set_normalized_timespec(&used, flow_ts.tv_sec + time_offset.tv_sec,
+ flow_ts.tv_nsec + time_offset.tv_nsec);
+
+ stats->used_sec = used.tv_sec;
+ stats->used_nsec = used.tv_nsec;
} else {
stats->used_sec = 0;
stats->used_nsec = 0;
}
+
stats->n_packets = flow->packet_count;
stats->n_bytes = flow->byte_count;
- stats->ip_tos = flow->ip_tos;
+ stats->reserved = 0;
stats->tcp_flags = flow->tcp_flags;
stats->error = 0;
}
static void clear_stats(struct sw_flow *flow)
{
- flow->used.tv_sec = flow->used.tv_nsec = 0;
+ flow->used = 0;
flow->tcp_flags = 0;
- flow->ip_tos = 0;
flow->packet_count = 0;
flow->byte_count = 0;
}
struct tbl *table;
int error;
- memset(uf->flow.key.reserved, 0, sizeof uf->flow.key.reserved);
-
table = rcu_dereference(dp->table);
flow_node = tbl_lookup(table, &uf->flow.key, flow_hash(&uf->flow.key), flow_cmp);
if (!flow_node) {
}
/* Allocate flow. */
- error = -ENOMEM;
- flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
- if (flow == NULL)
+ flow = flow_alloc();
+ if (IS_ERR(flow)) {
+ error = PTR_ERR(flow);
goto error;
+ }
flow->key = uf->flow.key;
- spin_lock_init(&flow->lock);
clear_stats(flow);
/* Obtain actions. */
/* Fetch stats, then clear them if necessary. */
spin_lock_bh(&flow->lock);
- get_stats(flow, stats);
+ get_stats(flow, stats, get_time_offset());
if (uf->flags & ODPPF_ZERO_STATS)
clear_stats(flow);
spin_unlock_bh(&flow->lock);
error_free_flow_acts:
kfree(flow->sf_acts);
error_free_flow:
- kmem_cache_free(flow_cache, flow);
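+ /* Clear sf_acts before dropping the reference so that flow_put()
+ * does not try to free the (already freed or never-set) actions. */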
+ flow->sf_acts = NULL;
+ flow_put(flow);
error:
return error;
}
}
static int do_answer_query(struct sw_flow *flow, u32 query_flags,
+ struct timespec time_offset,
struct odp_flow_stats __user *ustats,
union odp_action __user *actions,
u32 __user *n_actionsp)
u32 n_actions;
spin_lock_bh(&flow->lock);
- get_stats(flow, &stats);
+ get_stats(flow, &stats, time_offset);
if (query_flags & ODPFF_ZERO_TCP_FLAGS)
flow->tcp_flags = 0;
}
static int answer_query(struct sw_flow *flow, u32 query_flags,
+ struct timespec time_offset,
struct odp_flow __user *ufp)
{
union odp_action *actions;
if (get_user(actions, &ufp->actions))
return -EFAULT;
- return do_answer_query(flow, query_flags,
+ return do_answer_query(flow, query_flags, time_offset,
&ufp->stats, actions, &ufp->n_actions);
}
struct tbl_node *flow_node;
int error;
- memset(key->reserved, 0, sizeof key->reserved);
flow_node = tbl_lookup(table, key, flow_hash(key), flow_cmp);
if (!flow_node)
return ERR_PTR(-ENOENT);
if (IS_ERR(flow))
return PTR_ERR(flow);
- error = answer_query(flow, 0, ufp);
+ error = answer_query(flow, 0, get_time_offset(), ufp);
flow_deferred_free(flow);
return error;
}
static int do_query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
struct tbl *table = rcu_dereference(dp->table);
+ struct timespec time_offset;
u32 i;
+ time_offset = get_time_offset();
+
for (i = 0; i < flowvec->n_flows; i++) {
struct odp_flow __user *ufp = &flowvec->flows[i];
struct odp_flow uf;
if (copy_from_user(&uf, ufp, sizeof uf))
return -EFAULT;
- memset(uf.key.reserved, 0, sizeof uf.key.reserved);
flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
if (!flow_node)
error = put_user(ENOENT, &ufp->stats.error);
else
- error = answer_query(flow_cast(flow_node), uf.flags, ufp);
+ error = answer_query(flow_cast(flow_node), uf.flags, time_offset, ufp);
if (error)
return -EFAULT;
}
struct odp_flow __user *uflows;
u32 n_flows;
u32 listed_flows;
+ struct timespec time_offset;
};
static int list_flow(struct tbl_node *node, void *cbdata_)
if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
return -EFAULT;
- error = answer_query(flow, 0, ufp);
+ error = answer_query(flow, 0, cbdata->time_offset, ufp);
if (error)
return error;
cbdata.uflows = flowvec->flows;
cbdata.n_flows = flowvec->n_flows;
cbdata.listed_flows = 0;
+ cbdata.time_offset = get_time_offset();
+
error = tbl_foreach(rcu_dereference(dp->table), list_flow, &cbdata);
return error ? error : cbdata.listed_flows;
}
struct sk_buff *skb;
struct sw_flow_actions *actions;
struct ethhdr *eth;
+ bool is_frag;
int err;
err = -EINVAL;
if (execute->length < ETH_HLEN || execute->length > 65535)
goto error;
- err = -ENOMEM;
actions = flow_actions_alloc(execute->n_actions);
- if (!actions)
+ if (IS_ERR(actions)) {
+ err = PTR_ERR(actions);
goto error;
+ }
err = -EFAULT;
if (copy_from_user(actions->actions, execute->actions,
if (!skb)
goto error_free_actions;
- if (execute->in_port < DP_MAX_PORTS)
- OVS_CB(skb)->dp_port = dp->ports[execute->in_port];
- else
- OVS_CB(skb)->dp_port = NULL;
-
err = -EFAULT;
if (copy_from_user(skb_put(skb, execute->length), execute->data,
execute->length))
else
skb->protocol = htons(ETH_P_802_2);
- flow_extract(skb, execute->in_port, &key);
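+ /* Packets injected from userspace have no real ingress port, so
+ * extract the flow with an out-of-range port number. */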
+ err = flow_extract(skb, -1, &key, &is_frag);
+ if (err)
+ goto error_free_skb;
rcu_read_lock();
err = execute_actions(dp, skb, &key, actions->actions,
stats.max_capacity = TBL_MAX_BUCKETS;
stats.n_ports = dp->n_ports;
stats.max_ports = DP_MAX_PORTS;
- stats.max_groups = DP_MAX_GROUPS;
stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
for_each_possible_cpu(i) {
- const struct dp_stats_percpu *s;
- s = per_cpu_ptr(dp->stats_percpu, i);
- stats.n_frags += s->n_frags;
- stats.n_hit += s->n_hit;
- stats.n_missed += s->n_missed;
- stats.n_lost += s->n_lost;
+ const struct dp_stats_percpu *percpu_stats;
+ struct dp_stats_percpu local_stats;
+ unsigned seqcount;
+
+ percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
+
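+ /* Retry the snapshot until it is consistent, i.e. no writer
+ * updated this CPU's counters mid-copy. */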
+ do {
+ seqcount = read_seqcount_begin(&percpu_stats->seqlock);
+ local_stats = *percpu_stats;
+ } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
+
+ stats.n_frags += local_stats.n_frags;
+ stats.n_hit += local_stats.n_hit;
+ stats.n_missed += local_stats.n_missed;
+ stats.n_lost += local_stats.n_lost;
}
stats.max_miss_queue = DP_MAX_QUEUE_LEN;
stats.max_action_queue = DP_MAX_QUEUE_LEN;
}
}
-static int
-put_port(const struct dp_port *p, struct odp_port __user *uop)
+static int put_port(const struct dp_port *p, struct odp_port __user *uop)
{
struct odp_port op;
return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
}
-static int
-query_port(struct datapath *dp, struct odp_port __user *uport)
+static int query_port(struct datapath *dp, struct odp_port __user *uport)
{
struct odp_port port;
return put_port(dp->ports[port.port], uport);
}
-static int
-do_list_ports(struct datapath *dp, struct odp_port __user *uports, int n_ports)
+static int do_list_ports(struct datapath *dp, struct odp_port __user *uports,
+ int n_ports)
{
int idx = 0;
if (n_ports) {
return idx;
}
-static int
-list_ports(struct datapath *dp, struct odp_portvec __user *upv)
+static int list_ports(struct datapath *dp, struct odp_portvec __user *upv)
{
struct odp_portvec pv;
int retval;
return put_user(retval, &upv->n_ports);
}
-/* RCU callback for freeing a dp_port_group */
-static void free_port_group(struct rcu_head *rcu)
-{
- struct dp_port_group *g = container_of(rcu, struct dp_port_group, rcu);
- kfree(g);
-}
-
-static int
-do_set_port_group(struct datapath *dp, u16 __user *ports, int n_ports, int group)
-{
- struct dp_port_group *new_group, *old_group;
- int error;
-
- error = -EINVAL;
- if (n_ports > DP_MAX_PORTS || group >= DP_MAX_GROUPS)
- goto error;
-
- error = -ENOMEM;
- new_group = kmalloc(sizeof *new_group + sizeof(u16) * n_ports, GFP_KERNEL);
- if (!new_group)
- goto error;
-
- new_group->n_ports = n_ports;
- error = -EFAULT;
- if (copy_from_user(new_group->ports, ports, sizeof(u16) * n_ports))
- goto error_free;
-
- old_group = rcu_dereference(dp->groups[group]);
- rcu_assign_pointer(dp->groups[group], new_group);
- if (old_group)
- call_rcu(&old_group->rcu, free_port_group);
- return 0;
-
-error_free:
- kfree(new_group);
-error:
- return error;
-}
-
-static int
-set_port_group(struct datapath *dp, const struct odp_port_group __user *upg)
-{
- struct odp_port_group pg;
-
- if (copy_from_user(&pg, upg, sizeof pg))
- return -EFAULT;
-
- return do_set_port_group(dp, pg.ports, pg.n_ports, pg.group);
-}
-
-static int
-do_get_port_group(struct datapath *dp,
- u16 __user *ports, int n_ports, int group,
- u16 __user *n_portsp)
-{
- struct dp_port_group *g;
- u16 n_copy;
-
- if (group >= DP_MAX_GROUPS)
- return -EINVAL;
-
- g = dp->groups[group];
- n_copy = g ? min_t(int, g->n_ports, n_ports) : 0;
- if (n_copy && copy_to_user(ports, g->ports, n_copy * sizeof(u16)))
- return -EFAULT;
-
- if (put_user(g ? g->n_ports : 0, n_portsp))
- return -EFAULT;
-
- return 0;
-}
-
-static int get_port_group(struct datapath *dp, struct odp_port_group __user *upg)
-{
- struct odp_port_group pg;
-
- if (copy_from_user(&pg, upg, sizeof pg))
- return -EFAULT;
-
- return do_get_port_group(dp, pg.ports, pg.n_ports, pg.group, &upg->n_ports);
-}
-
static int get_listen_mask(const struct file *f)
{
return (long)f->private_data;
err = list_ports(dp, (struct odp_portvec __user *)argp);
break;
- case ODP_PORT_GROUP_SET:
- err = set_port_group(dp, (struct odp_port_group __user *)argp);
- break;
-
- case ODP_PORT_GROUP_GET:
- err = get_port_group(dp, (struct odp_port_group __user *)argp);
- break;
-
case ODP_FLOW_FLUSH:
err = flush_flows(dp);
break;
return put_user(retval, &upv->n_ports);
}
-static int compat_set_port_group(struct datapath *dp, const struct compat_odp_port_group __user *upg)
-{
- struct compat_odp_port_group pg;
-
- if (copy_from_user(&pg, upg, sizeof pg))
- return -EFAULT;
-
- return do_set_port_group(dp, compat_ptr(pg.ports), pg.n_ports, pg.group);
-}
-
-static int compat_get_port_group(struct datapath *dp, struct compat_odp_port_group __user *upg)
-{
- struct compat_odp_port_group pg;
-
- if (copy_from_user(&pg, upg, sizeof pg))
- return -EFAULT;
-
- return do_get_port_group(dp, compat_ptr(pg.ports), pg.n_ports,
- pg.group, &upg->n_ports);
-}
-
static int compat_get_flow(struct odp_flow *flow, const struct compat_odp_flow __user *compat)
{
compat_uptr_t actions;
}
static int compat_answer_query(struct sw_flow *flow, u32 query_flags,
+ struct timespec time_offset,
struct compat_odp_flow __user *ufp)
{
compat_uptr_t actions;
if (get_user(actions, &ufp->actions))
return -EFAULT;
- return do_answer_query(flow, query_flags, &ufp->stats,
+ return do_answer_query(flow, query_flags, time_offset, &ufp->stats,
compat_ptr(actions), &ufp->n_actions);
}
if (IS_ERR(flow))
return PTR_ERR(flow);
- error = compat_answer_query(flow, 0, ufp);
+ error = compat_answer_query(flow, 0, get_time_offset(), ufp);
flow_deferred_free(flow);
return error;
}
static int compat_query_flows(struct datapath *dp, struct compat_odp_flow *flows, u32 n_flows)
{
struct tbl *table = rcu_dereference(dp->table);
+ struct timespec time_offset;
u32 i;
+ time_offset = get_time_offset();
+
for (i = 0; i < n_flows; i++) {
struct compat_odp_flow __user *ufp = &flows[i];
struct odp_flow uf;
if (!flow_node)
error = put_user(ENOENT, &ufp->stats.error);
else
- error = compat_answer_query(flow_cast(flow_node), uf.flags, ufp);
+ error = compat_answer_query(flow_cast(flow_node), uf.flags, time_offset, ufp);
if (error)
return -EFAULT;
}
struct compat_odp_flow __user *uflows;
u32 n_flows;
u32 listed_flows;
+ struct timespec time_offset;
};
static int compat_list_flow(struct tbl_node *node, void *cbdata_)
if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
return -EFAULT;
- error = compat_answer_query(flow, 0, ufp);
+ error = compat_answer_query(flow, 0, cbdata->time_offset, ufp);
if (error)
return error;
cbdata.uflows = flows;
cbdata.n_flows = n_flows;
cbdata.listed_flows = 0;
+ cbdata.time_offset = get_time_offset();
+
error = tbl_foreach(rcu_dereference(dp->table), compat_list_flow, &cbdata);
return error ? error : cbdata.listed_flows;
}
compat_uptr_t data;
if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
- __get_user(execute.in_port, &uexecute->in_port) ||
__get_user(actions, &uexecute->actions) ||
__get_user(execute.n_actions, &uexecute->n_actions) ||
__get_user(data, &uexecute->data) ||
err = compat_list_ports(dp, compat_ptr(argp));
break;
- case ODP_PORT_GROUP_SET32:
- err = compat_set_port_group(dp, compat_ptr(argp));
- break;
-
- case ODP_PORT_GROUP_GET32:
- err = compat_get_port_group(dp, compat_ptr(argp));
- break;
-
case ODP_FLOW_PUT32:
err = compat_put_flow(dp, compat_ptr(argp));
break;
}
success:
copy_bytes = tot_copy_bytes = min_t(size_t, skb->len, nbytes);
-
+
retval = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (copy_bytes == skb->len) {
__wsum csum = 0;
- int csum_start, csum_offset;
+ unsigned int csum_start, csum_offset;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
- /* Until 2.6.22, the start of the transport header was
- * also the start of data to be checksummed. Linux
- * 2.6.22 introduced the csum_start field for this
- * purpose, but we should point the transport header to
- * it anyway for backward compatibility, as
- * dev_queue_xmit() does even in 2.6.28. */
- skb_set_transport_header(skb, skb->csum_start - skb_headroom(skb));
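+ /* Since 2.6.22, skb->csum_start is an offset from skb->head;
+ * convert it to an offset from skb->data. Earlier kernels (the
+ * #else branch) checksummed starting at the transport header. */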
+ csum_start = skb->csum_start - skb_headroom(skb);
csum_offset = skb->csum_offset;
#else
+ csum_start = skb_transport_header(skb) - skb->data;
csum_offset = skb->csum;
#endif
- csum_start = skb_transport_header(skb) - skb->data;
+ BUG_ON(csum_start >= skb_headlen(skb));
retval = skb_copy_and_csum_datagram(skb, csum_start, buf + csum_start,
copy_bytes - csum_start, &csum);
if (!retval) {
copy_bytes = csum_start;
csump = (__sum16 __user *)(buf + csum_start + csum_offset);
+
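+ /* The folded checksum lands inside the copied data; make sure
+ * its slot fits in the user buffer. */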
+ BUG_ON((char *)csump + sizeof(__sum16) > buf + nbytes);
put_user(csum_fold(csum), csump);
}
} else