/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */
/* Functions for managing the dp interface/device. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>
#include <net/genetlink.h>
#include <linux/compat.h>

#include "openvswitch/datapath-protocol.h"
#include "loop_counter.h"
#include "odp-compat.h"
#include "vport-internal_dev.h"
int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);

/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
 * lock first.
 *
 * It is safe to access the datapath and vport structures with just
 * dp_mutex.
 */
static struct datapath __rcu *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);
static struct vport *new_vport(const struct vport_parms *);

/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ODP_MAX)
		return NULL;
	return rcu_dereference_check(dps[dp_idx], rcu_read_lock_held() ||
				     lockdep_is_held(&dp_mutex));
}
EXPORT_SYMBOL_GPL(get_dp);
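/*
 * A minimal sketch of the read-side pairing get_dp() expects
 * (hypothetical caller, not part of this file):
 *
 *	rcu_read_lock();
 *	dp = get_dp(dp_idx);
 *	if (dp)
 *		...read-only use of dp...
 *	rcu_read_unlock();
 *
 * Writers instead hold dp_mutex, which is the other condition accepted
 * by the rcu_dereference_check() above.
 */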
static struct datapath *get_dp_locked(int dp_idx)
{
	struct datapath *dp;

	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp)
		mutex_lock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	return dp;
}

static struct tbl *get_table_protected(struct datapath *dp)
{
	return rcu_dereference_protected(dp->table,
					 lockdep_is_held(&dp->mutex));
}

static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
{
	return rcu_dereference_protected(dp->ports[port_no],
					 lockdep_is_held(&dp->mutex));
}
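/*
 * Note: get_table_protected() and get_vport_protected() are the
 * update-side counterparts of plain rcu_dereference(): they may only be
 * called with dp->mutex held, which lockdep_is_held() verifies when
 * CONFIG_PROVE_RCU is enabled.  A sketch of the intended split
 * (assumed convention, matching the calls in this file):
 *
 *	mutex_lock(&dp->mutex);
 *	table = get_table_protected(dp);	-- writer
 *	...
 *	rcu_read_lock();
 *	table = rcu_dereference(dp->table);	-- reader
 */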
/* Must be called with rcu_read_lock or RTNL lock. */
const char *dp_name(const struct datapath *dp)
{
	return vport_get_name(rcu_dereference_rtnl(dp->ports[ODPP_LOCAL]));
}

static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(1); /* IFLA_OPERSTATE */
}
static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct vport *port,
			  int event, unsigned int flags)
{
	struct datapath *dp = port->dp;
	int ifindex = vport_get_ifindex(port);
	int iflink = vport_get_iflink(port);
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = ARPHRD_ETHER;
	hdr->ifi_index = ifindex;
	hdr->ifi_flags = vport_get_flags(port);
	hdr->ifi_change = 0;

	NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
	NLA_PUT_U32(skb, IFLA_MASTER,
		vport_get_ifindex(get_vport_protected(dp, ODPP_LOCAL)));
	NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   vport_is_running(port)
			? vport_get_operstate(port)
			: IF_OPER_DOWN);
#endif

	NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));

	if (ifindex != iflink)
		NLA_PUT_U32(skb, IFLA_LINK, iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static void dp_ifinfo_notify(int event, struct vport *port)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dp_fill_ifinfo(skb, port, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
}
static void release_dp(struct kobject *kobj)
{
	struct datapath *dp = container_of(kobj, struct datapath, ifobj);

	kfree(dp);
}

static struct kobj_type dp_ktype = {
	.release = release_dp
};
static int create_dp(int dp_idx, const char __user *devnamep)
{
	struct vport_parms parms;
	char devname[IFNAMSIZ];
	struct vport *vport;
	struct datapath *dp;
	int err;
	int i;

	if (devnamep) {
		int retval = strncpy_from_user(devname, devnamep, IFNAMSIZ);
		if (retval < 0) {
			err = -EFAULT;
			goto err;
		} else if (retval >= IFNAMSIZ) {
			err = -ENAMETOOLONG;
			goto err;
		}
	} else {
		snprintf(devname, sizeof(devname), "of%d", dp_idx);
	}

	rtnl_lock();
	mutex_lock(&dp_mutex);
	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock;

	/* Exit early if a datapath with that number already exists.
	 * (We don't use -EEXIST because that's ambiguous with 'devname'
	 * conflicting with an existing network device name.) */
	err = -EBUSY;
	if (get_dp(dp_idx))
		goto err_put_module;

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;
	INIT_LIST_HEAD(&dp->port_list);
	mutex_init(&dp->mutex);
	mutex_lock(&dp->mutex);
	dp->dp_idx = dp_idx;
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
	dp->ifobj.kset = NULL;
	kobject_init(&dp->ifobj, &dp_ktype);

	/* Allocate table. */
	err = -ENOMEM;
	rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
	if (!dp->table)
		goto err_free_dp;

	/* Set up our datapath device. */
	parms.name = devname;
	parms.type = ODP_VPORT_TYPE_INTERNAL;
	parms.options = NULL;
	parms.dp = dp;
	parms.port_no = ODPP_LOCAL;
	vport = new_vport(&parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		if (err == -EBUSY)
			err = -EEXIST;
		goto err_destroy_table;
	}

	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu) {
		err = -ENOMEM;
		goto err_destroy_local_port;
	}

	rcu_assign_pointer(dps[dp_idx], dp);
	dp_sysfs_add_dp(dp);

	mutex_unlock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	rtnl_unlock();

	return 0;

err_destroy_local_port:
	dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
err_destroy_table:
	tbl_destroy(get_table_protected(dp), NULL);
err_free_dp:
	mutex_unlock(&dp->mutex);
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
err:
	return err;
}
static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);
	int i;

	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_purge(&dp->queues[i]);

	tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
	free_percpu(dp->stats_percpu);
	kobject_put(&dp->ifobj);
}
static int destroy_dp(int dp_idx)
{
	struct datapath *dp;
	struct vport *p, *n;
	int err;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	mutex_lock(&dp->mutex);

	list_for_each_entry_safe (p, n, &dp->port_list, node)
		if (p->port_no != ODPP_LOCAL)
			dp_detach_port(p);

	dp_sysfs_del_dp(dp);
	rcu_assign_pointer(dps[dp->dp_idx], NULL);
	dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));

	mutex_unlock(&dp->mutex);
	call_rcu(&dp->rcu, destroy_dp_rcu);
	module_put(THIS_MODULE);

	err = 0;
err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
	return err;
}
/* Called with RTNL lock and dp->mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;

		rcu_assign_pointer(dp->ports[parms->port_no], vport);
		list_add_rcu(&vport->node, &dp->port_list);

		dp_ifinfo_notify(RTM_NEWLINK, vport);
	}

	return vport;
}
int dp_detach_port(struct vport *p)
{
	ASSERT_RTNL();

	if (p->port_no != ODPP_LOCAL)
		dp_sysfs_del_if(p);
	dp_ifinfo_notify(RTM_DELLINK, p);

	/* First drop references to device. */
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);

	/* Then destroy it. */
	return vport_del(p);
}
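/*
 * The order in dp_detach_port() matters for RCU correctness: the vport
 * is first unpublished (list_del_rcu() plus clearing the ports[]
 * pointer), and only then destroyed by vport_del(), which is expected
 * to defer the actual free past a grace period.  Readers that already
 * dereferenced the vport under rcu_read_lock() thus remain safe, while
 * new readers can no longer find it.
 */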
/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	int stats_counter_off;
	struct sw_flow_actions *acts;
	struct loop_counter *loop;
	int error;

	OVS_CB(skb)->vport = p;

	if (!OVS_CB(skb)->flow) {
		struct sw_flow_key key;
		struct tbl_node *flow_node;
		bool is_frag;

		/* Extract flow from 'skb' into 'key'. */
		error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key, &is_frag);
		if (unlikely(error)) {
			kfree_skb(skb);
			return;
		}

		if (is_frag && dp->drop_frags) {
			kfree_skb(skb);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
			goto out;
		}

		/* Look up flow. */
		flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
				       flow_hash(&key), flow_cmp);
		if (unlikely(!flow_node)) {
			struct dp_upcall_info upcall;

			upcall.type = _ODPL_MISS_NR;
			upcall.key = &key;
			upcall.userdata = 0;
			upcall.sample_pool = 0;
			upcall.actions = NULL;
			upcall.actions_len = 0;
			dp_upcall(dp, skb, &upcall);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
			goto out;
		}

		OVS_CB(skb)->flow = flow_cast(flow_node);
	}

	stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
	flow_used(OVS_CB(skb)->flow, skb);

	acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);

	/* Check whether we've looped too much. */
	loop = loop_get_counter();
	if (unlikely(++loop->count > MAX_LOOPS))
		loop->looping = true;
	if (unlikely(loop->looping)) {
		loop_suppress(dp, acts);
		kfree_skb(skb);
		goto out_loop;
	}

	/* Execute actions. */
	execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
			acts->actions_len);

	/* Check whether sub-actions looped too much. */
	if (unlikely(loop->looping))
		loop_suppress(dp, acts);

out_loop:
	/* Decrement loop counter. */
	if (!--loop->count)
		loop->looping = false;
	loop_put_counter();

out:
	/* Update datapath statistics. */
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	(*(u64 *)((u8 *)stats + stats_counter_off))++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();
}
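/*
 * The seqcount above is the write side of a seqlock-style statistics
 * pattern; the matching read side appears in get_dp_stats() below.  A
 * condensed sketch of the reader (assuming the same 'seqlock' field):
 *
 *	do {
 *		seq = read_seqcount_begin(&stats->seqlock);
 *		copy = *stats;
 *	} while (read_seqcount_retry(&stats->seqlock, seq));
 *
 * Because each CPU owns its own dp_stats_percpu, writers never contend
 * with one another and readers never block them.
 */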
static void copy_and_csum_skb(struct sk_buff *skb, void *to)
{
	u16 csum_start, csum_offset;
	__wsum csum;

	get_skb_csum_pointers(skb, &csum_start, &csum_offset);
	csum_start -= skb_headroom(skb);
	BUG_ON(csum_start >= skb_headlen(skb));

	skb_copy_bits(skb, 0, to, csum_start);

	csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
				      skb->len - csum_start, 0);
	*(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
}
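/*
 * Worked example for the CHECKSUM_PARTIAL fixup above (illustrative
 * numbers only): for a TCP packet with a 14-byte Ethernet header and a
 * 20-byte IP header, csum_start points at the TCP header (offset 34)
 * and csum_offset is 16 (the TCP checksum field).  Bytes [0, 34) are
 * copied verbatim; bytes from offset 34 on are copied while being
 * summed, and the folded result lands at offset 34 + 16 = 50.
 */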
/* Append each packet in 'skb' list to 'queue'.  There will be only one packet
 * unless we broke up a GSO packet. */
static int queue_control_packets(struct datapath *dp, struct sk_buff *skb,
				 const struct dp_upcall_info *upcall_info)
{
	struct sk_buff *nskb;
	int port_no;
	int err;

	if (OVS_CB(skb)->vport)
		port_no = OVS_CB(skb)->vport->port_no;
	else
		port_no = ODPP_LOCAL;

	do {
		struct odp_packet *upcall;
		struct sk_buff *user_skb; /* to be queued to userspace */
		struct nlattr *nla;
		unsigned int len;

		nskb = skb->next;
		skb->next = NULL;

		len = sizeof(struct odp_packet);
		len += nla_total_size(4); /* ODP_PACKET_ATTR_TYPE. */
		len += nla_total_size(skb->len);
		len += nla_total_size(FLOW_BUFSIZE);
		if (upcall_info->userdata)
			len += nla_total_size(8);
		if (upcall_info->sample_pool)
			len += nla_total_size(4);
		if (upcall_info->actions_len)
			len += nla_total_size(upcall_info->actions_len);

		err = -ENOMEM;
		user_skb = alloc_skb(len, GFP_ATOMIC);
		if (!user_skb)
			goto err_kfree_skbs;

		upcall = (struct odp_packet *)__skb_put(user_skb, sizeof(*upcall));
		upcall->dp_idx = dp->dp_idx;

		nla_put_u32(user_skb, ODP_PACKET_ATTR_TYPE, upcall_info->type);

		nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_KEY);
		flow_to_nlattrs(upcall_info->key, user_skb);
		nla_nest_end(user_skb, nla);

		if (upcall_info->userdata)
			nla_put_u64(user_skb, ODP_PACKET_ATTR_USERDATA, upcall_info->userdata);
		if (upcall_info->sample_pool)
			nla_put_u32(user_skb, ODP_PACKET_ATTR_SAMPLE_POOL, upcall_info->sample_pool);
		if (upcall_info->actions_len) {
			const struct nlattr *actions = upcall_info->actions;
			u32 actions_len = upcall_info->actions_len;

			nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_ACTIONS);
			memcpy(__skb_put(user_skb, actions_len), actions, actions_len);
			nla_nest_end(user_skb, nla);
		}

		nla = __nla_reserve(user_skb, ODP_PACKET_ATTR_PACKET, skb->len);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			copy_and_csum_skb(skb, nla_data(nla));
		else
			skb_copy_bits(skb, 0, nla_data(nla), skb->len);

		upcall->len = user_skb->len;
		skb_queue_tail(&dp->queues[upcall_info->type], user_skb);

		kfree_skb(skb);
		skb = nskb;
	} while (skb);
	return 0;

err_kfree_skbs:
	kfree_skb(skb);
	while ((skb = nskb) != NULL) {
		nskb = skb->next;
		kfree_skb(skb);
	}
	return err;
}
int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	struct sk_buff_head *queue;
	int err;

	WARN_ON_ONCE(skb_shared(skb));
	BUG_ON(upcall_info->type >= DP_N_QUEUES);

	queue = &dp->queues[upcall_info->type];
	err = -ENOBUFS;
	if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
		goto err_kfree_skb;

	forward_ip_summed(skb);

	err = vswitch_skb_checksum_setup(skb);
	if (err)
		goto err_kfree_skb;

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);

		kfree_skb(skb);
		skb = nskb;
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto err;
		}
	}

	err = queue_control_packets(dp, skb, upcall_info);
	wake_up_interruptible(&dp->waitqueue);
	return err;

err_kfree_skb:
	kfree_skb(skb);
err:
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	stats->n_lost++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();

	return err;
}
static int flush_flows(int dp_idx)
{
	struct tbl *old_table;
	struct tbl *new_table;
	struct datapath *dp;
	int err;

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	old_table = get_table_protected(dp);
	new_table = tbl_create(TBL_MIN_BUCKETS);
	err = -ENOMEM;
	if (!new_table)
		goto exit_unlock;

	rcu_assign_pointer(dp->table, new_table);

	tbl_deferred_destroy(old_table, flow_free_tbl);

	err = 0;
exit_unlock:
	mutex_unlock(&dp->mutex);
exit:
	return err;
}
static int validate_actions(const struct nlattr *actions, u32 actions_len)
{
	const struct nlattr *a;
	int rem;

	nla_for_each_attr(a, actions, actions_len, rem) {
		static const u32 action_lens[ODPAT_MAX + 1] = {
			[ODPAT_OUTPUT] = 4,
			[ODPAT_CONTROLLER] = 8,
			[ODPAT_SET_DL_TCI] = 2,
			[ODPAT_STRIP_VLAN] = 0,
			[ODPAT_SET_DL_SRC] = ETH_ALEN,
			[ODPAT_SET_DL_DST] = ETH_ALEN,
			[ODPAT_SET_NW_SRC] = 4,
			[ODPAT_SET_NW_DST] = 4,
			[ODPAT_SET_NW_TOS] = 1,
			[ODPAT_SET_TP_SRC] = 2,
			[ODPAT_SET_TP_DST] = 2,
			[ODPAT_SET_TUNNEL] = 8,
			[ODPAT_SET_PRIORITY] = 4,
			[ODPAT_POP_PRIORITY] = 0,
			[ODPAT_DROP_SPOOFED_ARP] = 0,
		};
		int type = nla_type(a);

		if (type > ODPAT_MAX || nla_len(a) != action_lens[type])
			return -EINVAL;

		switch (type) {
		case ODPAT_UNSPEC:
			return -EINVAL;

		case ODPAT_CONTROLLER:
		case ODPAT_STRIP_VLAN:
		case ODPAT_SET_DL_SRC:
		case ODPAT_SET_DL_DST:
		case ODPAT_SET_NW_SRC:
		case ODPAT_SET_NW_DST:
		case ODPAT_SET_TP_SRC:
		case ODPAT_SET_TP_DST:
		case ODPAT_SET_TUNNEL:
		case ODPAT_SET_PRIORITY:
		case ODPAT_POP_PRIORITY:
		case ODPAT_DROP_SPOOFED_ARP:
			/* No validation needed. */
			break;

		case ODPAT_OUTPUT:
			if (nla_get_u32(a) >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case ODPAT_SET_DL_TCI:
			if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
				return -EINVAL;
			break;

		case ODPAT_SET_NW_TOS:
			if (nla_get_u8(a) & INET_ECN_MASK)
				return -EINVAL;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	if (rem > 0)
		return -EINVAL;

	return 0;
}
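/*
 * Example of a well-formed actions buffer accepted by the validator
 * above (hypothetical values, shown in netlink attribute layout):
 *
 *	struct nlattr { .nla_len = 8, .nla_type = ODPAT_OUTPUT }
 *		followed by the u32 output port number, then
 *	struct nlattr { .nla_len = 6, .nla_type = ODPAT_SET_DL_TCI }
 *		followed by a be16 TCI with the CFI bit clear (padded
 *		to a 4-byte boundary, as nla_total_size() accounts for).
 */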
static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
{
	struct sw_flow_actions *actions;
	int error;

	actions = flow_actions_alloc(flow->actions_len);
	error = PTR_ERR(actions);
	if (IS_ERR(actions))
		goto error;

	error = -EFAULT;
	if (copy_from_user(actions->actions,
			   (struct nlattr __user __force *)flow->actions,
			   flow->actions_len))
		goto error_free_actions;
	error = validate_actions(actions->actions, actions->actions_len);
	if (error)
		goto error_free_actions;

	return actions;

error_free_actions:
	kfree(actions);
error:
	return ERR_PTR(error);
}
static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
{
	if (flow->used) {
		struct timespec offset_ts, used, now_mono;

		ktime_get_ts(&now_mono);
		jiffies_to_timespec(jiffies - flow->used, &offset_ts);
		set_normalized_timespec(&used, now_mono.tv_sec - offset_ts.tv_sec,
					now_mono.tv_nsec - offset_ts.tv_nsec);

		stats->used_sec = used.tv_sec;
		stats->used_nsec = used.tv_nsec;
	} else {
		stats->used_sec = 0;
		stats->used_nsec = 0;
	}

	stats->n_packets = flow->packet_count;
	stats->n_bytes = flow->byte_count;
	stats->tcp_flags = flow->tcp_flags;
}

static void clear_stats(struct sw_flow *flow)
{
	flow->used = 0;
	flow->tcp_flags = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}
static int expand_table(struct datapath *dp)
{
	struct tbl *old_table = get_table_protected(dp);
	struct tbl *new_table;

	new_table = tbl_expand(old_table);
	if (IS_ERR(new_table))
		return PTR_ERR(new_table);

	rcu_assign_pointer(dp->table, new_table);
	tbl_deferred_destroy(old_table, NULL);

	return 0;
}
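/*
 * This is the standard RCU replace-and-defer idiom: publish the new
 * table with rcu_assign_pointer(), then let tbl_deferred_destroy()
 * free the old one only after a grace period, so concurrent readers
 * that still hold the old pointer from rcu_dereference(dp->table)
 * finish safely.  flush_flows() above uses the same idiom, but with
 * flow_free_tbl as the destructor since it discards the flows too.
 */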
static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
		       struct odp_flow_stats *stats)
{
	struct tbl_node *flow_node;
	struct sw_flow_key key;
	struct sw_flow *flow;
	struct tbl *table;
	struct sw_flow_actions *acts = NULL;
	int error;
	u32 hash;

	error = flow_copy_from_user(&key, (const struct nlattr __force __user *)uf->flow.key,
				    uf->flow.key_len);
	if (error)
		return error;

	hash = flow_hash(&key);
	table = get_table_protected(dp);
	flow_node = tbl_lookup(table, &key, hash, flow_cmp);
	if (!flow_node) {
		/* No such flow. */
		error = -ENOENT;
		if (!(uf->flags & ODPPF_CREATE))
			goto error;

		/* Expand table, if necessary, to make room. */
		if (tbl_count(table) >= tbl_n_buckets(table)) {
			error = expand_table(dp);
			if (error)
				goto error;
			table = get_table_protected(dp);
		}

		/* Allocate flow. */
		flow = flow_alloc();
		error = PTR_ERR(flow);
		if (IS_ERR(flow))
			goto error;
		flow->key = key;
		clear_stats(flow);

		/* Obtain actions. */
		acts = get_actions(&uf->flow);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = tbl_insert(table, &flow->tbl_node, hash);
		if (error)
			goto error_free_flow_acts;

		memset(stats, 0, sizeof(struct odp_flow_stats));
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts, *new_acts;

		flow = flow_cast(flow_node);

		/* Bail out if we're not allowed to modify an existing flow. */
		error = -EEXIST;
		if (!(uf->flags & ODPPF_MODIFY))
			goto error;

		/* Swap actions. */
		new_acts = get_actions(&uf->flow);
		error = PTR_ERR(new_acts);
		if (IS_ERR(new_acts))
			goto error;

		old_acts = rcu_dereference_protected(flow->sf_acts,
						     lockdep_is_held(&dp->mutex));
		if (old_acts->actions_len != new_acts->actions_len ||
		    memcmp(old_acts->actions, new_acts->actions,
			   old_acts->actions_len)) {
			rcu_assign_pointer(flow->sf_acts, new_acts);
			flow_deferred_free_acts(old_acts);
		} else {
			kfree(new_acts);
		}

		/* Fetch stats, then clear them if necessary. */
		spin_lock_bh(&flow->lock);
		get_stats(flow, stats);
		if (uf->flags & ODPPF_ZERO_STATS)
			clear_stats(flow);
		spin_unlock_bh(&flow->lock);
	}

	return 0;

error_free_flow_acts:
	kfree(acts);
error_free_flow:
	flow->sf_acts = NULL;
	flow_put(flow);
error:
	return error;
}
static int put_flow(struct odp_flow_put __user *ufp)
{
	struct odp_flow_stats stats;
	struct odp_flow_put uf;
	struct datapath *dp;
	int error;

	if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
		return -EFAULT;

	dp = get_dp_locked(uf.flow.dp_idx);
	if (!dp)
		return -ENODEV;

	error = do_put_flow(dp, &uf, &stats);
	if (!error) {
		if (copy_to_user(&ufp->flow.stats, &stats,
				 sizeof(struct odp_flow_stats)))
			error = -EFAULT;
	}
	mutex_unlock(&dp->mutex);

	return error;
}
static int do_answer_query(struct datapath *dp, struct sw_flow *flow,
			   struct odp_flow_stats __user *ustats,
			   struct nlattr __user *actions,
			   u32 __user *actions_lenp)
{
	struct sw_flow_actions *sf_acts;
	struct odp_flow_stats stats;
	u32 actions_len;

	spin_lock_bh(&flow->lock);
	get_stats(flow, &stats);
	spin_unlock_bh(&flow->lock);

	if (copy_to_user(ustats, &stats, sizeof(struct odp_flow_stats)) ||
	    get_user(actions_len, actions_lenp))
		return -EFAULT;

	sf_acts = rcu_dereference_protected(flow->sf_acts,
					    lockdep_is_held(&dp->mutex));
	if (put_user(sf_acts->actions_len, actions_lenp) ||
	    (actions && copy_to_user(actions, sf_acts->actions,
				     min(sf_acts->actions_len, actions_len))))
		return -EFAULT;

	return 0;
}

static int answer_query(struct datapath *dp, struct sw_flow *flow,
			struct odp_flow __user *ufp)
{
	struct nlattr __user *actions;

	if (get_user(actions, (struct nlattr __user * __user *)&ufp->actions))
		return -EFAULT;

	return do_answer_query(dp, flow, &ufp->stats, actions, &ufp->actions_len);
}
static struct sw_flow *do_del_flow(struct datapath *dp, const struct nlattr __user *key, u32 key_len)
{
	struct tbl *table = get_table_protected(dp);
	struct tbl_node *flow_node;
	struct sw_flow_key swkey;
	int error;

	error = flow_copy_from_user(&swkey, key, key_len);
	if (error)
		return ERR_PTR(error);

	flow_node = tbl_lookup(table, &swkey, flow_hash(&swkey), flow_cmp);
	if (!flow_node)
		return ERR_PTR(-ENOENT);

	error = tbl_remove(table, flow_node);
	if (error)
		return ERR_PTR(error);

	/* XXX Returned flow_node's statistics might lose a few packets, since
	 * other CPUs can be using this flow.  We used to synchronize_rcu() to
	 * make sure that we get completely accurate stats, but that blows our
	 * performance, badly. */
	return flow_cast(flow_node);
}
static int del_flow(struct odp_flow __user *ufp)
{
	struct sw_flow *flow;
	struct datapath *dp;
	struct odp_flow uf;
	int error;

	if (copy_from_user(&uf, ufp, sizeof(uf)))
		return -EFAULT;

	dp = get_dp_locked(uf.dp_idx);
	if (!dp)
		return -ENODEV;

	flow = do_del_flow(dp, (const struct nlattr __force __user *)uf.key, uf.key_len);
	error = PTR_ERR(flow);
	if (!IS_ERR(flow)) {
		error = answer_query(dp, flow, ufp);
		flow_deferred_free(flow);
	}
	mutex_unlock(&dp->mutex);

	return error;
}
static int query_flow(struct odp_flow __user *uflow)
{
	struct tbl_node *flow_node;
	struct sw_flow_key key;
	struct odp_flow flow;
	struct datapath *dp;
	int error;

	if (copy_from_user(&flow, uflow, sizeof(flow)))
		return -EFAULT;

	dp = get_dp_locked(flow.dp_idx);
	if (!dp)
		return -ENODEV;

	error = flow_copy_from_user(&key, (const struct nlattr __force __user *)flow.key, flow.key_len);
	if (!error) {
		struct tbl *table = get_table_protected(dp);
		flow_node = tbl_lookup(table, &key, flow_hash(&key), flow_cmp);
		if (flow_node)
			error = answer_query(dp, flow_cast(flow_node), uflow);
		else
			error = -ENOENT;
	}
	mutex_unlock(&dp->mutex);

	return error;
}
static int dump_flow(struct odp_flow_dump __user *udump)
{
	struct odp_flow __user *uflow;
	struct nlattr __user *ukey;
	struct tbl_node *tbl_node;
	struct odp_flow_dump dump;
	struct sw_flow *flow;
	struct datapath *dp;
	struct tbl *table;
	u32 key_len;
	int err;

	err = -EFAULT;
	if (copy_from_user(&dump, udump, sizeof(struct odp_flow_dump)))
		goto exit;
	uflow = (struct odp_flow __user __force *)dump.flow;

	dp = get_dp_locked(dump.dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	table = get_table_protected(dp);
	tbl_node = tbl_next(table, &dump.state[0], &dump.state[1]);
	if (!tbl_node) {
		err = put_user(0, &uflow->key_len);
		goto exit_unlock;
	}
	flow = flow_cast(tbl_node);

	err = -EFAULT;
	if (copy_to_user(udump->state, dump.state, 2 * sizeof(uint32_t)) ||
	    get_user(ukey, (struct nlattr __user * __user *)&uflow->key) ||
	    get_user(key_len, &uflow->key_len))
		goto exit_unlock;

	key_len = flow_copy_to_user(ukey, &flow->key, key_len);
	err = -EFAULT;
	if (put_user(key_len, &uflow->key_len))
		goto exit_unlock;

	err = answer_query(dp, flow, uflow);

exit_unlock:
	mutex_unlock(&dp->mutex);
exit:
	return err;
}
static int do_execute(struct datapath *dp, const struct odp_execute *execute)
{
	struct sw_flow_key key;
	struct sk_buff *skb;
	struct sw_flow_actions *actions;
	struct ethhdr *eth;
	bool is_frag;
	int err;

	err = -EINVAL;
	if (execute->length < ETH_HLEN || execute->length > 65535)
		goto error;

	actions = flow_actions_alloc(execute->actions_len);
	if (IS_ERR(actions)) {
		err = PTR_ERR(actions);
		goto error;
	}

	err = -EFAULT;
	if (copy_from_user(actions->actions,
			   (struct nlattr __user __force *)execute->actions, execute->actions_len))
		goto error_free_actions;

	err = validate_actions(actions->actions, execute->actions_len);
	if (err)
		goto error_free_actions;

	err = -ENOMEM;
	skb = alloc_skb(execute->length, GFP_KERNEL);
	if (!skb)
		goto error_free_actions;

	err = -EFAULT;
	if (copy_from_user(skb_put(skb, execute->length),
			   (const void __user __force *)execute->data,
			   execute->length))
		goto error_free_skb;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= 1536)
		skb->protocol = eth->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	err = flow_extract(skb, -1, &key, &is_frag);
	if (err)
		goto error_free_skb;

	rcu_read_lock();
	err = execute_actions(dp, skb, &key, actions->actions, actions->actions_len);
	rcu_read_unlock();

	kfree(actions);
	return err;

error_free_skb:
	kfree_skb(skb);
error_free_actions:
	kfree(actions);
error:
	return err;
}
static int execute_packet(const struct odp_execute __user *executep)
{
	struct odp_execute execute;
	struct datapath *dp;
	int error;

	if (copy_from_user(&execute, executep, sizeof(execute)))
		return -EFAULT;

	dp = get_dp_locked(execute.dp_idx);
	if (!dp)
		return -ENODEV;
	error = do_execute(dp, &execute);
	mutex_unlock(&dp->mutex);

	return error;
}
static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
	struct odp_stats stats;
	int i;

	stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned seqcount;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			seqcount = read_seqcount_begin(&percpu_stats->seqlock);
			local_stats = *percpu_stats;
		} while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));

		stats.n_frags += local_stats.n_frags;
		stats.n_hit += local_stats.n_hit;
		stats.n_missed += local_stats.n_missed;
		stats.n_lost += local_stats.n_lost;
	}
	return copy_to_user(statsp, &stats, sizeof(stats)) ? -EFAULT : 0;
}
/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int dp_min_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu = 0;

	ASSERT_RTNL();

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		int dev_mtu;

		/* Skip any internal ports, since that's what we're trying to
		 * set. */
		if (is_internal_vport(p))
			continue;

		dev_mtu = vport_get_mtu(p);
		if (!mtu || dev_mtu < mtu)
			mtu = dev_mtu;
	}

	return mtu ? mtu : ETH_DATA_LEN;
}

/* Sets the MTU of all datapath devices to the minimum of the ports.  Must
 * be called with RTNL lock. */
void set_internal_devs_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu;

	ASSERT_RTNL();

	mtu = dp_min_mtu(dp);

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		if (is_internal_vport(p))
			vport_set_mtu(p, mtu);
	}
}
static int get_listen_mask(const struct file *f)
{
	return (long)f->private_data;
}

static void set_listen_mask(struct file *f, int listen_mask)
{
	f->private_data = (void *)(long)listen_mask;
}
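/*
 * Stashing the listen mask directly in file->private_data (cast via
 * long) avoids a per-open allocation; only a handful of bits are used.
 * Sketch of a check like those in openvswitch_read()/poll() below,
 * assuming the _ODPL_*_NR bit numbering from datapath-protocol.h:
 *
 *	if (get_listen_mask(f) & (1 << _ODPL_MISS_NR))
 *		...this reader wants flow-miss packets...
 */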
static const struct nla_policy vport_policy[ODP_VPORT_ATTR_MAX + 1] = {
	[ODP_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[ODP_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_STATS] = { .len = sizeof(struct rtnl_link_stats64) },
	[ODP_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
	[ODP_VPORT_ATTR_MTU] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};
static int copy_vport_to_user(void __user *dst, struct vport *vport, uint32_t total_len)
{
	struct odp_vport *odp_vport;
	struct sk_buff *skb;
	struct nlattr *nla;
	int ifindex, iflink;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	err = -ENOMEM;
	if (!skb)
		goto exit;

	odp_vport = (struct odp_vport *)__skb_put(skb, sizeof(struct odp_vport));
	odp_vport->dp_idx = vport->dp->dp_idx;
	odp_vport->total_len = total_len;

	NLA_PUT_U32(skb, ODP_VPORT_ATTR_PORT_NO, vport->port_no);
	NLA_PUT_U32(skb, ODP_VPORT_ATTR_TYPE, vport_get_type(vport));
	NLA_PUT_STRING(skb, ODP_VPORT_ATTR_NAME, vport_get_name(vport));

	nla = nla_reserve(skb, ODP_VPORT_ATTR_STATS, sizeof(struct rtnl_link_stats64));
	if (!nla)
		goto nla_put_failure;
	if (vport_get_stats(vport, nla_data(nla)))
		__skb_trim(skb, skb->len - nla->nla_len);

	NLA_PUT(skb, ODP_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));

	NLA_PUT_U32(skb, ODP_VPORT_ATTR_MTU, vport_get_mtu(vport));

	err = vport_get_options(vport, skb);

	ifindex = vport_get_ifindex(vport);
	if (ifindex > 0)
		NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFINDEX, ifindex);

	iflink = vport_get_iflink(vport);
	if (iflink > 0)
		NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFLINK, iflink);

	err = -EMSGSIZE;
	if (skb->len > total_len)
		goto exit_free;

	odp_vport->len = skb->len;
	err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
	goto exit_free;

nla_put_failure:
	err = -EMSGSIZE;
exit_free:
	kfree_skb(skb);
exit:
	return err;
}
static struct sk_buff *copy_vport_from_user(struct odp_vport __user *uodp_vport,
					    struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
	struct odp_vport *odp_vport;
	struct sk_buff *skb;
	u32 len;
	int err;

	if (get_user(len, &uodp_vport->len))
		return ERR_PTR(-EFAULT);
	if (len < sizeof(struct odp_vport))
		return ERR_PTR(-EINVAL);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	err = -EFAULT;
	if (copy_from_user(__skb_put(skb, len), uodp_vport, len))
		goto error_free_skb;

	odp_vport = (struct odp_vport *)skb->data;
	err = -EINVAL;
	if (odp_vport->len != len)
		goto error_free_skb;

	err = nla_parse(a, ODP_VPORT_ATTR_MAX, (struct nlattr *)(skb->data + sizeof(struct odp_vport)),
			skb->len - sizeof(struct odp_vport), vport_policy);
	if (err)
		goto error_free_skb;

	err = VERIFY_NUL_STRING(a[ODP_VPORT_ATTR_NAME], IFNAMSIZ - 1);
	if (err)
		goto error_free_skb;

	return skb;

error_free_skb:
	kfree_skb(skb);
	return ERR_PTR(err);
}
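/*
 * The vport ioctls frame their arguments as a fixed struct odp_vport
 * header followed by netlink attributes, with a leading length word so
 * the whole blob can be copied in with one copy_from_user() and then
 * parsed in place by nla_parse() against vport_policy[] above.
 */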
/* Called without any locks (or with RTNL lock).
 * Returns holding vport->dp->mutex.
 */
static struct vport *lookup_vport(struct odp_vport *odp_vport,
				  struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[ODP_VPORT_ATTR_NAME]) {
		int dp_idx, port_no;

retry:
		vport = vport_locate(nla_data(a[ODP_VPORT_ATTR_NAME]));
		if (!vport)
			return ERR_PTR(-ENODEV);
		dp_idx = vport->dp->dp_idx;
		port_no = vport->port_no;

		dp = get_dp_locked(dp_idx);
		if (!dp)
			goto retry;

		vport = get_vport_protected(dp, port_no);
		if (!vport ||
		    strcmp(vport_get_name(vport), nla_data(a[ODP_VPORT_ATTR_NAME]))) {
			mutex_unlock(&dp->mutex);
			goto retry;
		}

		return vport;
	} else if (a[ODP_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EINVAL);

		dp = get_dp_locked(odp_vport->dp_idx);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = get_vport_protected(dp, port_no);
		if (!vport) {
			mutex_unlock(&dp->mutex);
			return ERR_PTR(-ENOENT);
		}
		return vport;
	} else
		return ERR_PTR(-EINVAL);
}
static int change_vport(struct vport *vport, struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
	int err = 0;

	if (a[ODP_VPORT_ATTR_STATS])
		err = vport_set_stats(vport, nla_data(a[ODP_VPORT_ATTR_STATS]));
	if (!err && a[ODP_VPORT_ATTR_ADDRESS])
		err = vport_set_addr(vport, nla_data(a[ODP_VPORT_ATTR_ADDRESS]));
	if (!err && a[ODP_VPORT_ATTR_MTU])
		err = vport_set_mtu(vport, nla_get_u32(a[ODP_VPORT_ATTR_MTU]));
	return err;
}
static int attach_vport(struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct odp_vport *odp_vport;
	struct vport_parms parms;
	struct vport *vport;
	struct sk_buff *skb;
	struct datapath *dp;
	u32 port_no;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_vport = (struct odp_vport *)skb->data;

	err = -EINVAL;
	if (!a[ODP_VPORT_ATTR_NAME] || !a[ODP_VPORT_ATTR_TYPE])
		goto exit_kfree_skb;

	rtnl_lock();

	dp = get_dp_locked(odp_vport->dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock_rtnl;

	if (a[ODP_VPORT_ATTR_PORT_NO]) {
		port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

		err = -EFBIG;
		if (port_no >= DP_MAX_PORTS)
			goto exit_unlock_dp;

		vport = get_vport_protected(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock_dp;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock_dp;
			}
			vport = get_vport_protected(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[ODP_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[ODP_VPORT_ATTR_TYPE]);
	parms.options = a[ODP_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_dp;

	set_internal_devs_mtu(dp);
	dp_sysfs_add_if(vport);

	err = change_vport(vport, a);
	if (err) {
		dp_detach_port(vport);
		goto exit_unlock_dp;
	}

	err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);

exit_unlock_dp:
	mutex_unlock(&dp->mutex);
exit_unlock_rtnl:
	rtnl_unlock();
exit_kfree_skb:
	kfree_skb(skb);
exit:
	return err;
}
static int set_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct vport *vport;
	struct sk_buff *skb;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	rtnl_lock();
	vport = lookup_vport((struct odp_vport *)skb->data, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_free;

	err = 0;
	if (a[ODP_VPORT_ATTR_OPTIONS])
		err = vport_set_options(vport, a[ODP_VPORT_ATTR_OPTIONS]);
	if (!err)
		err = change_vport(vport, a);

	mutex_unlock(&vport->dp->mutex);
exit_free:
	kfree_skb(skb);
	rtnl_unlock();
exit:
	return err;
}
static int del_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct datapath *dp;
	struct vport *vport;
	struct sk_buff *skb;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	rtnl_lock();
	vport = lookup_vport((struct odp_vport *)skb->data, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_free;
	dp = vport->dp;

	err = -EINVAL;
	if (vport->port_no == ODPP_LOCAL)
		goto exit_unlock_dp;

	err = dp_detach_port(vport);
exit_unlock_dp:
	mutex_unlock(&dp->mutex);
exit_free:
	kfree_skb(skb);
	rtnl_unlock();
exit:
	return err;
}
static int get_vport(struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct odp_vport *odp_vport;
	struct vport *vport;
	struct sk_buff *skb;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_vport = (struct odp_vport *)skb->data;

	vport = lookup_vport(odp_vport, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_free;

	err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);
	mutex_unlock(&vport->dp->mutex);
exit_free:
	kfree_skb(skb);
exit:
	return err;
}
static int dump_vport(struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct odp_vport *odp_vport;
	struct sk_buff *skb;
	struct datapath *dp;
	u32 port_no;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_vport = (struct odp_vport *)skb->data;

	dp = get_dp_locked(odp_vport->dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit_free;

	port_no = 0;
	if (a[ODP_VPORT_ATTR_PORT_NO])
		port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);
	for (; port_no < DP_MAX_PORTS; port_no++) {
		struct vport *vport = get_vport_protected(dp, port_no);
		if (vport) {
			err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);
			goto exit_unlock_dp;
		}
	}
	err = -ENODEV;

exit_unlock_dp:
	mutex_unlock(&dp->mutex);
exit_free:
	kfree_skb(skb);
exit:
	return err;
}
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int drop_frags, listeners;
	unsigned int sflow_probability;
	int err;

	/* Handle commands with special locking requirements up front. */
	switch (cmd) {
	case ODP_DP_CREATE:
		err = create_dp(dp_idx, (char __user *)argp);
		goto exit;

	case ODP_DP_DESTROY:
		err = destroy_dp(dp_idx);
		goto exit;

	case ODP_VPORT_NEW:
		err = attach_vport((struct odp_vport __user *)argp);
		goto exit;

	case ODP_VPORT_GET:
		err = get_vport((struct odp_vport __user *)argp);
		goto exit;

	case ODP_VPORT_DEL:
		err = del_vport(cmd, (struct odp_vport __user *)argp);
		goto exit;

	case ODP_VPORT_SET:
		err = set_vport(cmd, (struct odp_vport __user *)argp);
		goto exit;

	case ODP_VPORT_DUMP:
		err = dump_vport((struct odp_vport __user *)argp);
		goto exit;

	case ODP_FLOW_FLUSH:
		err = flush_flows(argp);
		goto exit;

	case ODP_FLOW_PUT:
		err = put_flow((struct odp_flow_put __user *)argp);
		goto exit;

	case ODP_FLOW_DEL:
		err = del_flow((struct odp_flow __user *)argp);
		goto exit;

	case ODP_FLOW_GET:
		err = query_flow((struct odp_flow __user *)argp);
		goto exit;

	case ODP_FLOW_DUMP:
		err = dump_flow((struct odp_flow_dump __user *)argp);
		goto exit;

	case ODP_EXECUTE:
		err = execute_packet((struct odp_execute __user *)argp);
		goto exit;
	}

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	switch (cmd) {
	case ODP_DP_STATS:
		err = get_dp_stats(dp, (struct odp_stats __user *)argp);
		break;

	case ODP_GET_DROP_FRAGS:
		err = put_user(dp->drop_frags, (int __user *)argp);
		break;

	case ODP_SET_DROP_FRAGS:
		err = get_user(drop_frags, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (drop_frags != 0 && drop_frags != 1)
			break;
		dp->drop_frags = drop_frags;
		err = 0;
		break;

	case ODP_GET_LISTEN_MASK:
		err = put_user(get_listen_mask(f), (int __user *)argp);
		break;

	case ODP_SET_LISTEN_MASK:
		err = get_user(listeners, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (listeners & ~ODPL_ALL)
			break;
		err = 0;
		set_listen_mask(f, listeners);
		break;

	case ODP_GET_SFLOW_PROBABILITY:
		err = put_user(dp->sflow_probability, (unsigned int __user *)argp);
		break;

	case ODP_SET_SFLOW_PROBABILITY:
		err = get_user(sflow_probability, (unsigned int __user *)argp);
		if (!err)
			dp->sflow_probability = sflow_probability;
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
exit:
	return err;
}
static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
{
	int i;

	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
			return 1;
	}
	return 0;
}
#ifdef CONFIG_COMPAT
static int compat_get_flow(struct odp_flow *flow, const struct compat_odp_flow __user *compat)
{
	compat_uptr_t key, actions;

	if (!access_ok(VERIFY_READ, compat, sizeof(struct compat_odp_flow)) ||
	    __copy_from_user(&flow->stats, &compat->stats, sizeof(struct odp_flow_stats)) ||
	    __get_user(key, &compat->key) ||
	    __get_user(flow->key_len, &compat->key_len) ||
	    __get_user(actions, &compat->actions) ||
	    __get_user(flow->actions_len, &compat->actions_len))
		return -EFAULT;

	flow->key = (struct nlattr __force *)compat_ptr(key);
	flow->actions = (struct nlattr __force *)compat_ptr(actions);
	return 0;
}
static int compat_put_flow(struct compat_odp_flow_put __user *ufp)
{
	struct odp_flow_stats stats;
	struct odp_flow_put uf;
	struct datapath *dp;
	int error;

	if (compat_get_flow(&uf.flow, &ufp->flow) ||
	    get_user(uf.flags, &ufp->flags))
		return -EFAULT;

	dp = get_dp_locked(uf.flow.dp_idx);
	if (!dp)
		return -ENODEV;

	error = do_put_flow(dp, &uf, &stats);
	if (!error) {
		if (copy_to_user(&ufp->flow.stats, &stats,
				 sizeof(struct odp_flow_stats)))
			error = -EFAULT;
	}
	mutex_unlock(&dp->mutex);

	return error;
}
static int compat_answer_query(struct datapath *dp, struct sw_flow *flow,
			       struct compat_odp_flow __user *ufp)
{
	compat_uptr_t actions;

	if (get_user(actions, &ufp->actions))
		return -EFAULT;

	return do_answer_query(dp, flow, &ufp->stats,
			       compat_ptr(actions), &ufp->actions_len);
}
static int compat_del_flow(struct compat_odp_flow __user *ufp)
{
	struct sw_flow *flow;
	struct datapath *dp;
	struct odp_flow uf;
	int error;

	if (compat_get_flow(&uf, ufp))
		return -EFAULT;

	dp = get_dp_locked(uf.dp_idx);
	if (!dp)
		return -ENODEV;

	flow = do_del_flow(dp, (const struct nlattr __force __user *)uf.key, uf.key_len);
	error = PTR_ERR(flow);
	if (!IS_ERR(flow)) {
		error = compat_answer_query(dp, flow, ufp);
		flow_deferred_free(flow);
	}
	mutex_unlock(&dp->mutex);

	return error;
}
static int compat_query_flow(struct compat_odp_flow __user *uflow)
{
	struct tbl_node *flow_node;
	struct sw_flow_key key;
	struct odp_flow flow;
	struct datapath *dp;
	int error;

	if (compat_get_flow(&flow, uflow))
		return -EFAULT;

	dp = get_dp_locked(flow.dp_idx);
	if (!dp)
		return -ENODEV;

	error = flow_copy_from_user(&key, (const struct nlattr __force __user *)flow.key, flow.key_len);
	if (!error) {
		struct tbl *table = get_table_protected(dp);
		flow_node = tbl_lookup(table, &key, flow_hash(&key), flow_cmp);
		if (flow_node)
			error = compat_answer_query(dp, flow_cast(flow_node), uflow);
		else
			error = -ENOENT;
	}
	mutex_unlock(&dp->mutex);

	return error;
}
static int compat_dump_flow(struct compat_odp_flow_dump __user *udump)
{
	struct compat_odp_flow __user *uflow;
	struct nlattr __user *ukey;
	struct tbl_node *tbl_node;
	struct compat_odp_flow_dump dump;
	struct sw_flow *flow;
	compat_uptr_t ukey32;
	struct datapath *dp;
	struct tbl *table;
	u32 key_len;
	int err;

	err = -EFAULT;
	if (copy_from_user(&dump, udump, sizeof(struct compat_odp_flow_dump)))
		goto exit;
	uflow = compat_ptr(dump.flow);

	dp = get_dp_locked(dump.dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	table = get_table_protected(dp);
	tbl_node = tbl_next(table, &dump.state[0], &dump.state[1]);
	if (!tbl_node) {
		err = put_user(0, &uflow->key_len);
		goto exit_unlock;
	}
	flow = flow_cast(tbl_node);

	err = -EFAULT;
	if (copy_to_user(udump->state, dump.state, 2 * sizeof(uint32_t)) ||
	    get_user(ukey32, &uflow->key) ||
	    get_user(key_len, &uflow->key_len))
		goto exit_unlock;
	ukey = compat_ptr(ukey32);

	key_len = flow_copy_to_user(ukey, &flow->key, key_len);
	err = -EFAULT;
	if (put_user(key_len, &uflow->key_len))
		goto exit_unlock;

	err = compat_answer_query(dp, flow, uflow);

exit_unlock:
	mutex_unlock(&dp->mutex);
exit:
	return err;
}
static int compat_execute(const struct compat_odp_execute __user *uexecute)
{
	struct odp_execute execute;
	compat_uptr_t actions;
	compat_uptr_t data;
	struct datapath *dp;
	int error;

	if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
	    __get_user(execute.dp_idx, &uexecute->dp_idx) ||
	    __get_user(actions, &uexecute->actions) ||
	    __get_user(execute.actions_len, &uexecute->actions_len) ||
	    __get_user(data, &uexecute->data) ||
	    __get_user(execute.length, &uexecute->length))
		return -EFAULT;

	execute.actions = (struct nlattr __force *)compat_ptr(actions);
	execute.data = (const void __force *)compat_ptr(data);

	dp = get_dp_locked(execute.dp_idx);
	if (!dp)
		return -ENODEV;
	error = do_execute(dp, &execute);
	mutex_unlock(&dp->mutex);

	return error;
}
static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
{
	switch (cmd) {
	case ODP_DP_DESTROY:
	case ODP_FLOW_FLUSH:
		/* Ioctls that don't need any translation at all. */
		return openvswitch_ioctl(f, cmd, argp);

	case ODP_DP_CREATE:
	case ODP_VPORT_NEW:
	case ODP_VPORT_DEL:
	case ODP_VPORT_GET:
	case ODP_VPORT_SET:
	case ODP_VPORT_DUMP:
	case ODP_DP_STATS:
	case ODP_GET_DROP_FRAGS:
	case ODP_SET_DROP_FRAGS:
	case ODP_SET_LISTEN_MASK:
	case ODP_GET_LISTEN_MASK:
	case ODP_SET_SFLOW_PROBABILITY:
	case ODP_GET_SFLOW_PROBABILITY:
		/* Ioctls that just need their pointer argument extended. */
		return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));

	case ODP_FLOW_PUT32:
		return compat_put_flow(compat_ptr(argp));

	case ODP_FLOW_DEL32:
		return compat_del_flow(compat_ptr(argp));

	case ODP_FLOW_GET32:
		return compat_query_flow(compat_ptr(argp));

	case ODP_FLOW_DUMP32:
		return compat_dump_flow(compat_ptr(argp));

	case ODP_EXECUTE32:
		return compat_execute(compat_ptr(argp));

	default:
		return -ENOIOCTLCMD;
	}
}
#endif
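/*
 * compat_ptr() is what makes the pointer-bearing ioctls above safe for
 * 32-bit userspace on a 64-bit kernel: a 32-bit user pointer must be
 * zero- rather than sign-extended, e.g. (hypothetical value)
 *
 *	compat_uptr_t uptr = 0xbfff0000;
 *	void __user *p = compat_ptr(uptr);	-- valid low-4GB pointer
 *
 * The ODP_*32 commands additionally need struct-layout translation,
 * because struct odp_flow and friends embed pointers whose size differs
 * between the two ABIs.
 */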
static ssize_t openvswitch_read(struct file *f, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	int listeners = get_listen_mask(f);
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp = get_dp_locked(dp_idx);
	struct sk_buff *skb;
	struct iovec iov;
	int retval;
	int i;

	if (!dp)
		return -ENODEV;

	if (nbytes == 0 || !listeners) {
		retval = 0;
		goto error;
	}

	for (;;) {
		for (i = 0; i < DP_N_QUEUES; i++) {
			if (listeners & (1 << i)) {
				skb = skb_dequeue(&dp->queues[i]);
				if (skb)
					goto success;
			}
		}

		if (f->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto error;
		}

		wait_event_interruptible(dp->waitqueue,
					 dp_has_packet_of_interest(dp,
								   listeners));

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto error;
		}
	}
success:
	mutex_unlock(&dp->mutex);

	iov.iov_base = buf;
	iov.iov_len = min_t(size_t, skb->len, nbytes);
	retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
	if (!retval)
		retval = iov.iov_len;
	kfree_skb(skb);
	return retval;

error:
	mutex_unlock(&dp->mutex);
	return retval;
}
static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
	int dp_idx = iminor(file->f_dentry->d_inode);
	struct datapath *dp = get_dp_locked(dp_idx);
	unsigned int mask = 0;

	if (dp) {
		poll_wait(file, &dp->waitqueue, wait);
		if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
			mask |= POLLIN | POLLRDNORM;
		mutex_unlock(&dp->mutex);
	} else {
		mask = POLLIN | POLLRDNORM | POLLHUP;
	}
	return mask;
}
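/*
 * Returning POLLHUP when the minor has no datapath lets select()-based
 * callers wake up and notice that the datapath they were listening to
 * has been destroyed, instead of blocking forever on a dead fd.
 */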
static struct file_operations openvswitch_fops = {
	.owner = THIS_MODULE,
	.read = openvswitch_read,
	.poll = openvswitch_poll,
	.unlocked_ioctl = openvswitch_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = openvswitch_compat_ioctl,
#endif
};

static int major;
static int __init dp_init(void)
{
	struct sk_buff *dummy_skb;
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

	printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

	err = flow_init();
	if (err)
		goto error;

	err = vport_init();
	if (err)
		goto error_flow_exit;

	err = register_netdevice_notifier(&dp_device_notifier);
	if (err)
		goto error_vport_exit;

	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	if (major < 0) {
		err = major;
		goto error_unreg_notifier;
	}

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&dp_device_notifier);
error_vport_exit:
	vport_exit();
error_flow_exit:
	flow_exit();
error:
	return err;
}
static void dp_cleanup(void)
{
	rcu_barrier();
	unregister_chrdev(major, "openvswitch");
	unregister_netdevice_notifier(&dp_device_notifier);
	vport_exit();
	flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");