X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=datapath%2Fdatapath.c;h=b41110dbcb3fd978db959f3104a48a7cc972869e;hb=093ca5b366899cb187ac0fb70b9308eeb8f452eb;hp=50c7f6ec2064dc956c8c84ffa82054b73399a751;hpb=67c74f7515867fb8b3a4d23af98dacd3d547ebdd;p=sliver-openvswitch.git diff --git a/datapath/datapath.c b/datapath/datapath.c index 50c7f6ec2..b41110dbc 100644 --- a/datapath/datapath.c +++ b/datapath/datapath.c @@ -8,6 +8,8 @@ /* Functions for managing the dp interface/device. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -27,7 +29,6 @@ #include #include #include -#include #include #include #include @@ -38,7 +39,6 @@ #include #include #include -#include #include #include #include @@ -69,8 +69,22 @@ EXPORT_SYMBOL(dp_ioctl_hook); static struct datapath *dps[ODP_MAX]; static DEFINE_MUTEX(dp_mutex); -/* Number of milliseconds between runs of the maintenance thread. */ -#define MAINT_SLEEP_MSECS 1000 +/* We limit the number of times that we pass into dp_process_received_packet() + * to avoid blowing out the stack in the event that we have a loop. */ +struct loop_counter { + int count; /* Count. */ + bool looping; /* Loop detected? */ +}; + +#define DP_MAX_LOOPS 5 + +/* We use a separate counter for each CPU for both interrupt and non-interrupt + * context in order to keep the limit deterministic for a given packet. */ +struct percpu_loop_counters { + struct loop_counter counters[2]; +}; + +static DEFINE_PER_CPU(struct percpu_loop_counters, dp_loop_counters); static int new_dp_port(struct datapath *, struct odp_port *, int port_no); @@ -311,8 +325,6 @@ static void do_destroy_dp(struct datapath *dp) for (i = 0; i < DP_N_QUEUES; i++) skb_queue_purge(&dp->queues[i]); - for (i = 0; i < DP_MAX_GROUPS; i++) - kfree(dp->groups[i]); free_percpu(dp->stats_percpu); kobject_put(&dp->ifobj); module_put(THIS_MODULE); @@ -380,6 +392,7 @@ static int new_dp_port(struct datapath *dp, struct odp_port *odp_port, int port_ p->port_no = port_no; p->dp = dp; + p->vport = vport; atomic_set(&p->sflow_pool, 0); err = vport_attach(vport, p); @@ -513,42 +526,93 @@ out: return err; } +static void suppress_loop(struct datapath *dp, struct sw_flow_actions *actions) +{ + if (net_ratelimit()) + pr_warn("%s: flow looped %d times, dropping\n", + dp_name(dp), DP_MAX_LOOPS); + actions->n_actions = 0; +} + /* Must be called with rcu_read_lock. */ void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb) { struct datapath *dp = p->dp; struct dp_stats_percpu *stats; int stats_counter_off; - struct odp_flow_key key; - struct tbl_node *flow_node; + struct sw_flow_actions *acts; + struct loop_counter *loop; + int error; OVS_CB(skb)->dp_port = p; - if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) { - if (dp->drop_frags) { + if (!OVS_CB(skb)->flow) { + struct odp_flow_key key; + struct tbl_node *flow_node; + bool is_frag; + + /* Extract flow from 'skb' into 'key'. */ + error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key, &is_frag); + if (unlikely(error)) { + kfree_skb(skb); + return; + } + + if (is_frag && dp->drop_frags) { kfree_skb(skb); stats_counter_off = offsetof(struct dp_stats_percpu, n_frags); goto out; } + + /* Look up flow. 
*/ + flow_node = tbl_lookup(rcu_dereference(dp->table), &key, + flow_hash(&key), flow_cmp); + if (unlikely(!flow_node)) { + dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id); + stats_counter_off = offsetof(struct dp_stats_percpu, n_missed); + goto out; + } + + OVS_CB(skb)->flow = flow_cast(flow_node); } - flow_node = tbl_lookup(rcu_dereference(dp->table), &key, flow_hash(&key), flow_cmp); - if (flow_node) { - struct sw_flow *flow = flow_cast(flow_node); - struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts); - flow_used(flow, skb); - execute_actions(dp, skb, &key, acts->actions, acts->n_actions, - GFP_ATOMIC); - stats_counter_off = offsetof(struct dp_stats_percpu, n_hit); - } else { - stats_counter_off = offsetof(struct dp_stats_percpu, n_missed); - dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id); + flow_used(OVS_CB(skb)->flow, skb); + + acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); + + /* Check whether we've looped too much. */ + loop = &get_cpu_var(dp_loop_counters).counters[!!in_interrupt()]; + if (unlikely(++loop->count > DP_MAX_LOOPS)) + loop->looping = true; + if (unlikely(loop->looping)) { + suppress_loop(dp, acts); + goto out_loop; } + /* Execute actions. */ + execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions, + acts->n_actions); + stats_counter_off = offsetof(struct dp_stats_percpu, n_hit); + + /* Check whether sub-actions looped too much. */ + if (unlikely(loop->looping)) + suppress_loop(dp, acts); + +out_loop: + /* Decrement loop counter. */ + if (!--loop->count) + loop->looping = false; + put_cpu_var(dp_loop_counters); + out: + /* Update datapath statistics. */ local_bh_disable(); stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id()); + + write_seqcount_begin(&stats->seqlock); (*(u64 *)((u8 *)stats + stats_counter_off))++; + write_seqcount_end(&stats->seqlock); + local_bh_enable(); } @@ -585,9 +649,9 @@ int vswitch_skb_checksum_setup(struct sk_buff *skb) break; default: if (net_ratelimit()) - printk(KERN_ERR "Attempting to checksum a non-" - "TCP/UDP packet, dropping a protocol" - " %d packet", iph->protocol); + pr_err("Attempting to checksum a non-TCP/UDP packet, " + "dropping a protocol %d packet", + iph->protocol); goto out; } @@ -690,11 +754,10 @@ void compute_ip_summed(struct sk_buff *skb, bool xmit) break; #endif default: - printk(KERN_ERR "openvswitch: unknown checksum type %d\n", - skb->ip_summed); + pr_err("unknown checksum type %d\n", skb->ip_summed); /* None seems the safest... 
*/ OVS_CB(skb)->ip_summed = OVS_CSUM_NONE; - } + } #if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID) /* Xen has a special way of representing CHECKSUM_PARTIAL on older @@ -809,7 +872,11 @@ err_kfree_skb: err: local_bh_disable(); stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id()); + + write_seqcount_begin(&stats->seqlock); stats->n_lost++; + write_seqcount_end(&stats->seqlock); + local_bh_enable(); return err; @@ -837,25 +904,30 @@ static int validate_actions(const struct sw_flow_actions *actions) for (i = 0; i < actions->n_actions; i++) { const union odp_action *a = &actions->actions[i]; - switch (a->type) { - case ODPAT_OUTPUT: - if (a->output.port >= DP_MAX_PORTS) - return -EINVAL; - break; - case ODPAT_OUTPUT_GROUP: - if (a->output_group.group >= DP_MAX_GROUPS) - return -EINVAL; + switch (a->type) { + case ODPAT_CONTROLLER: + case ODPAT_STRIP_VLAN: + case ODPAT_SET_DL_SRC: + case ODPAT_SET_DL_DST: + case ODPAT_SET_NW_SRC: + case ODPAT_SET_NW_DST: + case ODPAT_SET_TP_SRC: + case ODPAT_SET_TP_DST: + case ODPAT_SET_TUNNEL: + case ODPAT_SET_PRIORITY: + case ODPAT_POP_PRIORITY: + case ODPAT_DROP_SPOOFED_ARP: + /* No validation needed. */ break; - case ODPAT_SET_VLAN_VID: - if (a->vlan_vid.vlan_vid & htons(~VLAN_VID_MASK)) + case ODPAT_OUTPUT: + if (a->output.port >= DP_MAX_PORTS) return -EINVAL; break; - case ODPAT_SET_VLAN_PCP: - if (a->vlan_pcp.vlan_pcp - & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT)) + case ODPAT_SET_DL_TCI: + if (a->dl_tci.tci & htons(VLAN_CFI_MASK)) return -EINVAL; break; @@ -865,9 +937,7 @@ static int validate_actions(const struct sw_flow_actions *actions) break; default: - if (a->type >= ODPAT_N_ACTIONS) - return -EOPNOTSUPP; - break; + return -EOPNOTSUPP; } } @@ -900,24 +970,15 @@ error: return ERR_PTR(error); } -static struct timespec get_time_offset(void) -{ - struct timespec now_mono, now_jiffies; - - ktime_get_ts(&now_mono); - jiffies_to_timespec(jiffies, &now_jiffies); - return timespec_sub(now_mono, now_jiffies); -} - -static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats, - struct timespec time_offset) +static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats) { if (flow->used) { - struct timespec flow_ts, used; + struct timespec offset_ts, used, now_mono; - jiffies_to_timespec(flow->used, &flow_ts); - set_normalized_timespec(&used, flow_ts.tv_sec + time_offset.tv_sec, - flow_ts.tv_nsec + time_offset.tv_nsec); + ktime_get_ts(&now_mono); + jiffies_to_timespec(jiffies - flow->used, &offset_ts); + set_normalized_timespec(&used, now_mono.tv_sec - offset_ts.tv_sec, + now_mono.tv_nsec - offset_ts.tv_nsec); stats->used_sec = used.tv_sec; stats->used_nsec = used.tv_nsec; @@ -928,7 +989,7 @@ static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats, stats->n_packets = flow->packet_count; stats->n_bytes = flow->byte_count; - stats->ip_tos = flow->ip_tos; + stats->reserved = 0; stats->tcp_flags = flow->tcp_flags; stats->error = 0; } @@ -937,7 +998,6 @@ static void clear_stats(struct sw_flow *flow) { flow->used = 0; flow->tcp_flags = 0; - flow->ip_tos = 0; flow->packet_count = 0; flow->byte_count = 0; } @@ -965,8 +1025,6 @@ static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf, struct tbl *table; int error; - memset(uf->flow.key.reserved, 0, sizeof uf->flow.key.reserved); - table = rcu_dereference(dp->table); flow_node = tbl_lookup(table, &uf->flow.key, flow_hash(&uf->flow.key), flow_cmp); if (!flow_node) { @@ -986,12 +1044,12 @@ static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf, } /* 
Allocate flow. */ - error = -ENOMEM; - flow = kmem_cache_alloc(flow_cache, GFP_KERNEL); - if (flow == NULL) + flow = flow_alloc(); + if (IS_ERR(flow)) { + error = PTR_ERR(flow); goto error; + } flow->key = uf->flow.key; - spin_lock_init(&flow->lock); clear_stats(flow); /* Obtain actions. */ @@ -1035,7 +1093,7 @@ static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf, /* Fetch stats, then clear them if necessary. */ spin_lock_bh(&flow->lock); - get_stats(flow, stats, get_time_offset()); + get_stats(flow, stats); if (uf->flags & ODPPF_ZERO_STATS) clear_stats(flow); spin_unlock_bh(&flow->lock); @@ -1046,7 +1104,8 @@ static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf, error_free_flow_acts: kfree(flow->sf_acts); error_free_flow: - kmem_cache_free(flow_cache, flow); + flow->sf_acts = NULL; + flow_put(flow); error: return error; } @@ -1072,7 +1131,6 @@ static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp) } static int do_answer_query(struct sw_flow *flow, u32 query_flags, - struct timespec time_offset, struct odp_flow_stats __user *ustats, union odp_action __user *actions, u32 __user *n_actionsp) @@ -1082,7 +1140,7 @@ static int do_answer_query(struct sw_flow *flow, u32 query_flags, u32 n_actions; spin_lock_bh(&flow->lock); - get_stats(flow, &stats, time_offset); + get_stats(flow, &stats); if (query_flags & ODPFF_ZERO_TCP_FLAGS) flow->tcp_flags = 0; @@ -1106,7 +1164,6 @@ static int do_answer_query(struct sw_flow *flow, u32 query_flags, } static int answer_query(struct sw_flow *flow, u32 query_flags, - struct timespec time_offset, struct odp_flow __user *ufp) { union odp_action *actions; @@ -1114,7 +1171,7 @@ static int answer_query(struct sw_flow *flow, u32 query_flags, if (get_user(actions, &ufp->actions)) return -EFAULT; - return do_answer_query(flow, query_flags, time_offset, + return do_answer_query(flow, query_flags, &ufp->stats, actions, &ufp->n_actions); } @@ -1124,7 +1181,6 @@ static struct sw_flow *do_del_flow(struct datapath *dp, struct odp_flow_key *key struct tbl_node *flow_node; int error; - memset(key->reserved, 0, sizeof key->reserved); flow_node = tbl_lookup(table, key, flow_hash(key), flow_cmp); if (!flow_node) return ERR_PTR(-ENOENT); @@ -1153,7 +1209,7 @@ static int del_flow(struct datapath *dp, struct odp_flow __user *ufp) if (IS_ERR(flow)) return PTR_ERR(flow); - error = answer_query(flow, 0, get_time_offset(), ufp); + error = answer_query(flow, 0, ufp); flow_deferred_free(flow); return error; } @@ -1161,11 +1217,8 @@ static int del_flow(struct datapath *dp, struct odp_flow __user *ufp) static int do_query_flows(struct datapath *dp, const struct odp_flowvec *flowvec) { struct tbl *table = rcu_dereference(dp->table); - struct timespec time_offset; u32 i; - time_offset = get_time_offset(); - for (i = 0; i < flowvec->n_flows; i++) { struct odp_flow __user *ufp = &flowvec->flows[i]; struct odp_flow uf; @@ -1174,13 +1227,12 @@ static int do_query_flows(struct datapath *dp, const struct odp_flowvec *flowvec if (copy_from_user(&uf, ufp, sizeof uf)) return -EFAULT; - memset(uf.key.reserved, 0, sizeof uf.key.reserved); flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp); if (!flow_node) error = put_user(ENOENT, &ufp->stats.error); else - error = answer_query(flow_cast(flow_node), uf.flags, time_offset, ufp); + error = answer_query(flow_cast(flow_node), uf.flags, ufp); if (error) return -EFAULT; } @@ -1191,7 +1243,6 @@ struct list_flows_cbdata { struct odp_flow __user *uflows; u32 n_flows; u32 listed_flows; - struct 
timespec time_offset; }; static int list_flow(struct tbl_node *node, void *cbdata_) @@ -1203,7 +1254,7 @@ static int list_flow(struct tbl_node *node, void *cbdata_) if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key)) return -EFAULT; - error = answer_query(flow, 0, cbdata->time_offset, ufp); + error = answer_query(flow, 0, ufp); if (error) return error; @@ -1223,7 +1274,6 @@ static int do_list_flows(struct datapath *dp, const struct odp_flowvec *flowvec) cbdata.uflows = flowvec->flows; cbdata.n_flows = flowvec->n_flows; cbdata.listed_flows = 0; - cbdata.time_offset = get_time_offset(); error = tbl_foreach(rcu_dereference(dp->table), list_flow, &cbdata); return error ? error : cbdata.listed_flows; @@ -1256,16 +1306,18 @@ static int do_execute(struct datapath *dp, const struct odp_execute *execute) struct sk_buff *skb; struct sw_flow_actions *actions; struct ethhdr *eth; + bool is_frag; int err; err = -EINVAL; if (execute->length < ETH_HLEN || execute->length > 65535) goto error; - err = -ENOMEM; actions = flow_actions_alloc(execute->n_actions); - if (!actions) + if (IS_ERR(actions)) { + err = PTR_ERR(actions); goto error; + } err = -EFAULT; if (copy_from_user(actions->actions, execute->actions, @@ -1281,11 +1333,6 @@ static int do_execute(struct datapath *dp, const struct odp_execute *execute) if (!skb) goto error_free_actions; - if (execute->in_port < DP_MAX_PORTS) - OVS_CB(skb)->dp_port = dp->ports[execute->in_port]; - else - OVS_CB(skb)->dp_port = NULL; - err = -EFAULT; if (copy_from_user(skb_put(skb, execute->length), execute->data, execute->length)) @@ -1302,11 +1349,12 @@ static int do_execute(struct datapath *dp, const struct odp_execute *execute) else skb->protocol = htons(ETH_P_802_2); - flow_extract(skb, execute->in_port, &key); + err = flow_extract(skb, -1, &key, &is_frag); + if (err) + goto error_free_skb; rcu_read_lock(); - err = execute_actions(dp, skb, &key, actions->actions, - actions->n_actions, GFP_KERNEL); + err = execute_actions(dp, skb, &key, actions->actions, actions->n_actions); rcu_read_unlock(); kfree(actions); @@ -1341,15 +1389,23 @@ static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp) stats.max_capacity = TBL_MAX_BUCKETS; stats.n_ports = dp->n_ports; stats.max_ports = DP_MAX_PORTS; - stats.max_groups = DP_MAX_GROUPS; stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0; for_each_possible_cpu(i) { - const struct dp_stats_percpu *s; - s = per_cpu_ptr(dp->stats_percpu, i); - stats.n_frags += s->n_frags; - stats.n_hit += s->n_hit; - stats.n_missed += s->n_missed; - stats.n_lost += s->n_lost; + const struct dp_stats_percpu *percpu_stats; + struct dp_stats_percpu local_stats; + unsigned seqcount; + + percpu_stats = per_cpu_ptr(dp->stats_percpu, i); + + do { + seqcount = read_seqcount_begin(&percpu_stats->seqlock); + local_stats = *percpu_stats; + } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount)); + + stats.n_frags += local_stats.n_frags; + stats.n_hit += local_stats.n_hit; + stats.n_missed += local_stats.n_missed; + stats.n_lost += local_stats.n_lost; } stats.max_miss_queue = DP_MAX_QUEUE_LEN; stats.max_action_queue = DP_MAX_QUEUE_LEN; @@ -1492,87 +1548,6 @@ static int list_ports(struct datapath *dp, struct odp_portvec __user *upv) return put_user(retval, &upv->n_ports); } -/* RCU callback for freeing a dp_port_group */ -static void free_port_group(struct rcu_head *rcu) -{ - struct dp_port_group *g = container_of(rcu, struct dp_port_group, rcu); - kfree(g); -} - -static int do_set_port_group(struct datapath *dp, 
u16 __user *ports, - int n_ports, int group) -{ - struct dp_port_group *new_group, *old_group; - int error; - - error = -EINVAL; - if (n_ports > DP_MAX_PORTS || group >= DP_MAX_GROUPS) - goto error; - - error = -ENOMEM; - new_group = kmalloc(sizeof *new_group + sizeof(u16) * n_ports, GFP_KERNEL); - if (!new_group) - goto error; - - new_group->n_ports = n_ports; - error = -EFAULT; - if (copy_from_user(new_group->ports, ports, sizeof(u16) * n_ports)) - goto error_free; - - old_group = rcu_dereference(dp->groups[group]); - rcu_assign_pointer(dp->groups[group], new_group); - if (old_group) - call_rcu(&old_group->rcu, free_port_group); - return 0; - -error_free: - kfree(new_group); -error: - return error; -} - -static int set_port_group(struct datapath *dp, - const struct odp_port_group __user *upg) -{ - struct odp_port_group pg; - - if (copy_from_user(&pg, upg, sizeof pg)) - return -EFAULT; - - return do_set_port_group(dp, pg.ports, pg.n_ports, pg.group); -} - -static int do_get_port_group(struct datapath *dp, - u16 __user *ports, int n_ports, int group, - u16 __user *n_portsp) -{ - struct dp_port_group *g; - u16 n_copy; - - if (group >= DP_MAX_GROUPS) - return -EINVAL; - - g = dp->groups[group]; - n_copy = g ? min_t(int, g->n_ports, n_ports) : 0; - if (n_copy && copy_to_user(ports, g->ports, n_copy * sizeof(u16))) - return -EFAULT; - - if (put_user(g ? g->n_ports : 0, n_portsp)) - return -EFAULT; - - return 0; -} - -static int get_port_group(struct datapath *dp, struct odp_port_group __user *upg) -{ - struct odp_port_group pg; - - if (copy_from_user(&pg, upg, sizeof pg)) - return -EFAULT; - - return do_get_port_group(dp, pg.ports, pg.n_ports, pg.group, &upg->n_ports); -} - static int get_listen_mask(const struct file *f) { return (long)f->private_data; @@ -1707,14 +1682,6 @@ static long openvswitch_ioctl(struct file *f, unsigned int cmd, err = list_ports(dp, (struct odp_portvec __user *)argp); break; - case ODP_PORT_GROUP_SET: - err = set_port_group(dp, (struct odp_port_group __user *)argp); - break; - - case ODP_PORT_GROUP_GET: - err = get_port_group(dp, (struct odp_port_group __user *)argp); - break; - case ODP_FLOW_FLUSH: err = flush_flows(dp); break; @@ -1774,27 +1741,6 @@ static int compat_list_ports(struct datapath *dp, struct compat_odp_portvec __us return put_user(retval, &upv->n_ports); } -static int compat_set_port_group(struct datapath *dp, const struct compat_odp_port_group __user *upg) -{ - struct compat_odp_port_group pg; - - if (copy_from_user(&pg, upg, sizeof pg)) - return -EFAULT; - - return do_set_port_group(dp, compat_ptr(pg.ports), pg.n_ports, pg.group); -} - -static int compat_get_port_group(struct datapath *dp, struct compat_odp_port_group __user *upg) -{ - struct compat_odp_port_group pg; - - if (copy_from_user(&pg, upg, sizeof pg)) - return -EFAULT; - - return do_get_port_group(dp, compat_ptr(pg.ports), pg.n_ports, - pg.group, &upg->n_ports); -} - static int compat_get_flow(struct odp_flow *flow, const struct compat_odp_flow __user *compat) { compat_uptr_t actions; @@ -1833,7 +1779,6 @@ static int compat_put_flow(struct datapath *dp, struct compat_odp_flow_put __use } static int compat_answer_query(struct sw_flow *flow, u32 query_flags, - struct timespec time_offset, struct compat_odp_flow __user *ufp) { compat_uptr_t actions; @@ -1841,7 +1786,7 @@ static int compat_answer_query(struct sw_flow *flow, u32 query_flags, if (get_user(actions, &ufp->actions)) return -EFAULT; - return do_answer_query(flow, query_flags, time_offset, &ufp->stats, + return do_answer_query(flow, 
query_flags, &ufp->stats, compat_ptr(actions), &ufp->n_actions); } @@ -1858,7 +1803,7 @@ static int compat_del_flow(struct datapath *dp, struct compat_odp_flow __user *u if (IS_ERR(flow)) return PTR_ERR(flow); - error = compat_answer_query(flow, 0, get_time_offset(), ufp); + error = compat_answer_query(flow, 0, ufp); flow_deferred_free(flow); return error; } @@ -1866,11 +1811,8 @@ static int compat_del_flow(struct datapath *dp, struct compat_odp_flow __user *u static int compat_query_flows(struct datapath *dp, struct compat_odp_flow *flows, u32 n_flows) { struct tbl *table = rcu_dereference(dp->table); - struct timespec time_offset; u32 i; - time_offset = get_time_offset(); - for (i = 0; i < n_flows; i++) { struct compat_odp_flow __user *ufp = &flows[i]; struct odp_flow uf; @@ -1879,13 +1821,12 @@ static int compat_query_flows(struct datapath *dp, struct compat_odp_flow *flows if (compat_get_flow(&uf, ufp)) return -EFAULT; - memset(uf.key.reserved, 0, sizeof uf.key.reserved); flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp); if (!flow_node) error = put_user(ENOENT, &ufp->stats.error); else - error = compat_answer_query(flow_cast(flow_node), uf.flags, time_offset, ufp); + error = compat_answer_query(flow_cast(flow_node), uf.flags, ufp); if (error) return -EFAULT; } @@ -1896,7 +1837,6 @@ struct compat_list_flows_cbdata { struct compat_odp_flow __user *uflows; u32 n_flows; u32 listed_flows; - struct timespec time_offset; }; static int compat_list_flow(struct tbl_node *node, void *cbdata_) @@ -1908,7 +1848,7 @@ static int compat_list_flow(struct tbl_node *node, void *cbdata_) if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key)) return -EFAULT; - error = compat_answer_query(flow, 0, cbdata->time_offset, ufp); + error = compat_answer_query(flow, 0, ufp); if (error) return error; @@ -1928,7 +1868,6 @@ static int compat_list_flows(struct datapath *dp, struct compat_odp_flow *flows, cbdata.uflows = flows; cbdata.n_flows = n_flows; cbdata.listed_flows = 0; - cbdata.time_offset = get_time_offset(); error = tbl_foreach(rcu_dereference(dp->table), compat_list_flow, &cbdata); return error ? error : cbdata.listed_flows; @@ -1970,7 +1909,6 @@ static int compat_execute(struct datapath *dp, const struct compat_odp_execute _ compat_uptr_t data; if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) || - __get_user(execute.in_port, &uexecute->in_port) || __get_user(actions, &uexecute->actions) || __get_user(execute.n_actions, &uexecute->n_actions) || __get_user(data, &uexecute->data) || @@ -2033,14 +1971,6 @@ static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned err = compat_list_ports(dp, compat_ptr(argp)); break; - case ODP_PORT_GROUP_SET32: - err = compat_set_port_group(dp, compat_ptr(argp)); - break; - - case ODP_PORT_GROUP_GET32: - err = compat_get_port_group(dp, compat_ptr(argp)); - break; - case ODP_FLOW_PUT32: err = compat_put_flow(dp, compat_ptr(argp)); break; @@ -2209,7 +2139,7 @@ ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes, } success: copy_bytes = tot_copy_bytes = min_t(size_t, skb->len, nbytes); - + retval = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { if (copy_bytes == skb->len) {
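The loop_counter machinery added near the top of the patch bounds how many times dp_process_received_packet() can be re-entered on one CPU when a flow's actions feed a packet back into the datapath, keeping one counter per CPU per context (interrupt vs. not) so the limit stays deterministic for a given packet. Below is a minimal stand-alone sketch of that guard; the example_* names are assumptions for illustration only, and the sketch simply drops the packet once the limit trips instead of truncating the flow's action list the way suppress_loop() does.

/* Illustrative sketch of the bounded re-entry guard; not the patch's code. */
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>

#define EXAMPLE_MAX_LOOPS 5

struct example_loop_counter {
	int count;		/* current nesting depth on this CPU */
	bool looping;		/* set once the depth limit is exceeded */
};

struct example_percpu_loop_counters {
	/* [0]: non-interrupt context, [1]: interrupt context. */
	struct example_loop_counter counters[2];
};

static DEFINE_PER_CPU(struct example_percpu_loop_counters, example_loop_counters);

/* Stand-in for action execution; in the real datapath this is the step
 * that may re-enter the receive path and so nest back into the guard below. */
static void example_execute_actions(struct sk_buff *skb)
{
	kfree_skb(skb);
}

static void example_process_packet(struct sk_buff *skb)
{
	struct example_loop_counter *loop;

	/* get_cpu_var() disables preemption, so the counter cannot move
	 * to another CPU while we hold it. */
	loop = &get_cpu_var(example_loop_counters).counters[!!in_interrupt()];

	if (unlikely(++loop->count > EXAMPLE_MAX_LOOPS))
		loop->looping = true;

	if (likely(!loop->looping))
		example_execute_actions(skb);	/* may nest back into us */
	else
		kfree_skb(skb);			/* loop detected: drop */

	/* Unwind; clear the flag only when the outermost call returns. */
	if (!--loop->count)
		loop->looping = false;
	put_cpu_var(example_loop_counters);
}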
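Several hunks make a second recurring change: the per-CPU datapath counters (n_hit, n_missed, n_frags, n_lost) are now bumped inside write_seqcount_begin()/write_seqcount_end() pairs, and get_dp_stats() copies each CPU's block in a read_seqcount_begin()/read_seqcount_retry() loop before summing, giving lock-free, tear-free reads of the 64-bit counters. The fragment below is a minimal sketch of that pattern under assumed names; the example_* structure, its fields, and the init helper are illustrative, not the datapath's actual dp_stats_percpu definition.

/* Illustrative sketch of seqcount-protected per-CPU counters; names and
 * layout are assumptions, not the patch's definitions. */
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/seqlock.h>
#include <linux/smp.h>
#include <linux/types.h>

struct example_stats_percpu {
	u64 n_hit;
	u64 n_missed;
	seqcount_t seqlock;	/* guards the counters above */
};

static struct example_stats_percpu __percpu *example_stats;

static int example_stats_init(void)
{
	int cpu;

	example_stats = alloc_percpu(struct example_stats_percpu);
	if (!example_stats)
		return -ENOMEM;
	for_each_possible_cpu(cpu)
		seqcount_init(&per_cpu_ptr(example_stats, cpu)->seqlock);
	return 0;
}

/* Writer: bump a counter on the local CPU.  Disabling bottom halves keeps
 * a softirq on this CPU from opening a second write section underneath us. */
static void example_count_hit(void)
{
	struct example_stats_percpu *stats;

	local_bh_disable();
	stats = per_cpu_ptr(example_stats, smp_processor_id());
	write_seqcount_begin(&stats->seqlock);
	stats->n_hit++;
	write_seqcount_end(&stats->seqlock);
	local_bh_enable();
}

/* Reader: fold every CPU's counter into one total without locking; any CPU
 * whose writer raced us is simply re-read, so the 64-bit values are never
 * observed half-updated. */
static u64 example_total_hits(void)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct example_stats_percpu *stats = per_cpu_ptr(example_stats, cpu);
		unsigned int seq;
		u64 hit;

		do {
			seq = read_seqcount_begin(&stats->seqlock);
			hit = stats->n_hit;
		} while (read_seqcount_retry(&stats->seqlock, seq));

		total += hit;
	}
	return total;
}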