/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for managing the dp interface/device. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>
#include <net/genetlink.h>
#include <linux/compat.h>
47 #include "openvswitch/datapath-protocol.h"
52 #include "loop_counter.h"
53 #include "odp-compat.h"
55 #include "vport-internal_dev.h"

int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);

/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
 * lock first.
 *
 * It is safe to access the datapath and vport structures with just
 * dp_mutex.
 */
static struct datapath __rcu *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);

static struct vport *new_vport(const struct vport_parms *);

/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ODP_MAX)
		return NULL;

	return rcu_dereference_check(dps[dp_idx], rcu_read_lock_held() ||
				     lockdep_is_held(&dp_mutex));
}
EXPORT_SYMBOL_GPL(get_dp);
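
/* Returns the datapath with number 'dp_idx', with its mutex held, or NULL if
 * no such datapath exists.  dp_mutex is held only long enough to look up and
 * lock the datapath itself. */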
static struct datapath *get_dp_locked(int dp_idx)
{
	struct datapath *dp;

	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp)
		mutex_lock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	return dp;
}

static struct tbl *get_table_protected(struct datapath *dp)
{
	return rcu_dereference_protected(dp->table,
					 lockdep_is_held(&dp->mutex));
}

static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
{
	return rcu_dereference_protected(dp->ports[port_no],
					 lockdep_is_held(&dp->mutex));
}

/* Must be called with rcu_read_lock or RTNL lock. */
const char *dp_name(const struct datapath *dp)
{
	return vport_get_name(rcu_dereference_rtnl(dp->ports[ODPP_LOCAL]));
}

static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(1); /* IFLA_OPERSTATE */
}

static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct vport *port,
			  int event, unsigned int flags)
{
	struct datapath *dp = port->dp;
	int ifindex = vport_get_ifindex(port);
	int iflink = vport_get_iflink(port);
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	if (ifindex < 0)
		return ifindex;

	if (iflink < 0)
		return iflink;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = ARPHRD_ETHER;
	hdr->ifi_index = ifindex;
	hdr->ifi_flags = vport_get_flags(port);
	hdr->ifi_change = 0;

	NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
	NLA_PUT_U32(skb, IFLA_MASTER,
		vport_get_ifindex(get_vport_protected(dp, ODPP_LOCAL)));
	NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   vport_is_running(port)
			? vport_get_operstate(port)
			: IF_OPER_DOWN);
#endif

	NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));

	if (ifindex != iflink)
		NLA_PUT_U32(skb, IFLA_LINK, iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
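
/* Builds an RTM_NEWLINK or RTM_DELLINK message for 'port' and broadcasts it
 * to the RTNLGRP_LINK multicast group, mirroring what the kernel bridge does
 * for its ports. */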
static void dp_ifinfo_notify(int event, struct vport *port)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dp_fill_ifinfo(skb, port, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
	return;
errout:
	rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
}

static void release_dp(struct kobject *kobj)
{
	struct datapath *dp = container_of(kobj, struct datapath, ifobj);

	kfree(dp);
}

static struct kobj_type dp_ktype = {
	.release = release_dp
};
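
/* Creates datapath number 'dp_idx'.  The local port is named after the
 * userspace-supplied string at 'devnamep' if that is nonnull, otherwise
 * "of<dp_idx>".  Takes the RTNL lock, dp_mutex, and the new datapath's own
 * mutex for the duration. */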
static int create_dp(int dp_idx, const char __user *devnamep)
{
	struct vport_parms parms;
	char devname[IFNAMSIZ];
	struct vport *vport;
	struct datapath *dp;
	int err;
	int i;

	if (devnamep) {
		int retval = strncpy_from_user(devname, devnamep, IFNAMSIZ);
		if (retval < 0) {
			err = -EFAULT;
			goto err;
		} else if (retval >= IFNAMSIZ) {
			err = -ENAMETOOLONG;
			goto err;
		}
	} else {
		snprintf(devname, sizeof(devname), "of%d", dp_idx);
	}

	rtnl_lock();
	mutex_lock(&dp_mutex);
	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock;

	/* Exit early if a datapath with that number already exists.
	 * (We don't use -EEXIST because that's ambiguous with 'devname'
	 * conflicting with an existing network device name.) */
	err = -EBUSY;
	if (get_dp(dp_idx))
		goto err_put_module;

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;
	INIT_LIST_HEAD(&dp->port_list);
	mutex_init(&dp->mutex);
	mutex_lock(&dp->mutex);
	dp->dp_idx = dp_idx;
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
	dp->ifobj.kset = NULL;
	kobject_init(&dp->ifobj, &dp_ktype);

	/* Allocate table. */
	err = -ENOMEM;
	rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
	if (!dp->table)
		goto err_free_dp;

	/* Set up our datapath device. */
	parms.name = devname;
	parms.type = ODP_VPORT_TYPE_INTERNAL;
	parms.options = NULL;
	parms.dp = dp;
	parms.port_no = ODPP_LOCAL;
	vport = new_vport(&parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		if (err == -EBUSY)
			err = -EEXIST;

		goto err_destroy_table;
	}

	dp->drop_frags = 0;
	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu) {
		err = -ENOMEM;
		goto err_destroy_local_port;
	}

	rcu_assign_pointer(dps[dp_idx], dp);
	dp_sysfs_add_dp(dp);

	mutex_unlock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	rtnl_unlock();

	return 0;

err_destroy_local_port:
	dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
err_destroy_table:
	tbl_destroy(get_table_protected(dp), NULL);
err_free_dp:
	mutex_unlock(&dp->mutex);
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
err:
	return err;
}

static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);
	int i;

	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_purge(&dp->queues[i]);

	tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
	free_percpu(dp->stats_percpu);
	kobject_put(&dp->ifobj);
}
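
/* Destroys datapath number 'dp_idx': detaches all of its ports, unpublishes
 * it from the dps[] array, and defers freeing of the flow table, queues, and
 * the datapath itself to destroy_dp_rcu() after a grace period. */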
static int destroy_dp(int dp_idx)
{
	struct datapath *dp;
	struct vport *p, *n;
	int err = 0;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out;

	mutex_lock(&dp->mutex);

	list_for_each_entry_safe (p, n, &dp->port_list, node)
		if (p->port_no != ODPP_LOCAL)
			dp_detach_port(p);

	dp_sysfs_del_dp(dp);
	rcu_assign_pointer(dps[dp->dp_idx], NULL);
	dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));

	mutex_unlock(&dp->mutex);
	call_rcu(&dp->rcu, destroy_dp_rcu);
	module_put(THIS_MODULE);
	err = 0;

out:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
	return err;
}

/* Called with RTNL lock and dp->mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;

		rcu_assign_pointer(dp->ports[parms->port_no], vport);
		list_add_rcu(&vport->node, &dp->port_list);
		dp->n_ports++;

		dp_ifinfo_notify(RTM_NEWLINK, vport);
	}

	return vport;
}
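
/* Detaches 'p' from its datapath: sends an RTM_DELLINK notification, unlinks
 * the port from the datapath's port list and table, then destroys the
 * underlying vport.  Called with RTNL lock and dp->mutex. */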
int dp_detach_port(struct vport *p)
{
	int err;

	ASSERT_RTNL();

	if (p->port_no != ODPP_LOCAL)
		dp_sysfs_del_if(p);
	dp_ifinfo_notify(RTM_DELLINK, p);

	/* First drop references to device. */
	p->dp->n_ports--;
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);

	/* Then destroy it. */
	err = vport_del(p);

	return err;
}

/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	int stats_counter_off;
	struct sw_flow_actions *acts;
	struct loop_counter *loop;

	OVS_CB(skb)->vport = p;

	if (!OVS_CB(skb)->flow) {
		struct sw_flow_key key;
		struct tbl_node *flow_node;
		bool is_frag;
		int error;

		/* Extract flow from 'skb' into 'key'. */
		error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key, &is_frag);
		if (unlikely(error)) {
			kfree_skb(skb);
			return;
		}

		if (is_frag && dp->drop_frags) {
			kfree_skb(skb);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
			goto out;
		}

		/* Look up flow. */
		flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
				       flow_hash(&key), flow_cmp);
		if (unlikely(!flow_node)) {
			struct dp_upcall_info upcall;

			upcall.type = _ODPL_MISS_NR;
			upcall.key = &key;
			upcall.userdata = 0;
			upcall.sample_pool = 0;
			upcall.actions = NULL;
			upcall.actions_len = 0;
			dp_upcall(dp, skb, &upcall);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
			goto out;
		}

		OVS_CB(skb)->flow = flow_cast(flow_node);
	}

	stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
	flow_used(OVS_CB(skb)->flow, skb);

	acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);

	/* Check whether we've looped too much. */
	loop = loop_get_counter();
	if (unlikely(++loop->count > MAX_LOOPS))
		loop->looping = true;
	if (unlikely(loop->looping)) {
		loop_suppress(dp, acts);
		kfree_skb(skb);
		goto out_loop;
	}

	/* Execute actions. */
	execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
			acts->actions_len);

	/* Check whether sub-actions looped too much. */
	if (unlikely(loop->looping))
		loop_suppress(dp, acts);

out_loop:
	/* Decrement loop counter. */
	if (!--loop->count)
		loop->looping = false;
	loop_put_counter();

out:
	/* Update datapath statistics. */
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	(*(u64 *)((u8 *)stats + stats_counter_off))++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();
}
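
/* Copies the whole of 'skb' into 'to', completing the partial checksum over
 * the region that begins at csum_start on the way.  Only meaningful for
 * packets with ip_summed == CHECKSUM_PARTIAL. */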
static void copy_and_csum_skb(struct sk_buff *skb, void *to)
{
	u16 csum_start, csum_offset;
	__wsum csum;

	get_skb_csum_pointers(skb, &csum_start, &csum_offset);
	csum_start -= skb_headroom(skb);
	BUG_ON(csum_start >= skb_headlen(skb));

	skb_copy_bits(skb, 0, to, csum_start);

	csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
				      skb->len - csum_start, 0);
	*(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
}

/* Append each packet in 'skb' list to 'queue'.  There will be only one packet
 * unless we broke up a GSO packet. */
static int queue_control_packets(struct datapath *dp, struct sk_buff *skb,
				 const struct dp_upcall_info *upcall_info)
{
	struct sk_buff *nskb;
	int port_no;
	int err;

	if (OVS_CB(skb)->vport)
		port_no = OVS_CB(skb)->vport->port_no;
	else
		port_no = ODPP_LOCAL;

	do {
		struct odp_packet *upcall;
		struct sk_buff *user_skb; /* to be queued to userspace */
		struct nlattr *nla;
		unsigned int len;

		nskb = skb->next;
		skb->next = NULL;

		len = sizeof(struct odp_packet);
		len += nla_total_size(4); /* ODP_PACKET_ATTR_TYPE. */
		len += nla_total_size(skb->len);
		len += nla_total_size(FLOW_BUFSIZE);
		if (upcall_info->userdata)
			len += nla_total_size(8);
		if (upcall_info->sample_pool)
			len += nla_total_size(4);
		if (upcall_info->actions_len)
			len += nla_total_size(upcall_info->actions_len);

		user_skb = alloc_skb(len, GFP_ATOMIC);
		if (!user_skb) {
			err = -ENOMEM;
			goto err_kfree_skbs;
		}

		upcall = (struct odp_packet *)__skb_put(user_skb, sizeof(*upcall));
		upcall->dp_idx = dp->dp_idx;

		nla_put_u32(user_skb, ODP_PACKET_ATTR_TYPE, upcall_info->type);

		nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_KEY);
		flow_to_nlattrs(upcall_info->key, user_skb);
		nla_nest_end(user_skb, nla);

		if (upcall_info->userdata)
			nla_put_u64(user_skb, ODP_PACKET_ATTR_USERDATA, upcall_info->userdata);
		if (upcall_info->sample_pool)
			nla_put_u32(user_skb, ODP_PACKET_ATTR_SAMPLE_POOL, upcall_info->sample_pool);
		if (upcall_info->actions_len) {
			const struct nlattr *actions = upcall_info->actions;
			u32 actions_len = upcall_info->actions_len;

			nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_ACTIONS);
			memcpy(__skb_put(user_skb, actions_len), actions, actions_len);
			nla_nest_end(user_skb, nla);
		}

		nla = __nla_reserve(user_skb, ODP_PACKET_ATTR_PACKET, skb->len);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			copy_and_csum_skb(skb, nla_data(nla));
		else
			skb_copy_bits(skb, 0, nla_data(nla), skb->len);

		upcall->len = user_skb->len;
		skb_queue_tail(&dp->queues[upcall_info->type], user_skb);

		kfree_skb(skb);
		skb = nskb;
	} while (skb);
	return 0;

err_kfree_skbs:
	kfree_skb(skb);
	while ((skb = nskb) != NULL) {
		nskb = skb->next;
		kfree_skb(skb);
	}
	return err;
}
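
/* Sends 'skb' up to userspace on the queue selected by upcall_info->type,
 * segmenting GSO packets first so that userspace never sees a packet larger
 * than the MTU it expects.  Packets dropped on overflow or error are counted
 * in n_lost. */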
int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	struct sk_buff_head *queue;
	int err;

	WARN_ON_ONCE(skb_shared(skb));
	BUG_ON(upcall_info->type >= DP_N_QUEUES);

	queue = &dp->queues[upcall_info->type];
	err = -ENOBUFS;
	if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
		goto err_kfree_skb;

	forward_ip_summed(skb);

	err = vswitch_skb_checksum_setup(skb);
	if (err)
		goto err_kfree_skb;

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);

		kfree_skb(skb);
		skb = nskb;
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto err;
		}
	}

	err = queue_control_packets(dp, skb, upcall_info);
	wake_up_interruptible(&dp->waitqueue);
	return err;

err_kfree_skb:
	kfree_skb(skb);
err:
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	stats->n_lost++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();

	return err;
}
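
/* Replaces the datapath's flow table with a fresh, empty one; the old table
 * and its flows are freed after an RCU grace period. */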
static int flush_flows(struct datapath *dp)
{
	struct tbl *old_table = get_table_protected(dp);
	struct tbl *new_table;

	new_table = tbl_create(TBL_MIN_BUCKETS);
	if (!new_table)
		return -ENOMEM;

	rcu_assign_pointer(dp->table, new_table);

	tbl_deferred_destroy(old_table, flow_free_tbl);

	return 0;
}
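
/* Checks that each action in the 'actions_len' bytes of netlink attributes at
 * 'actions' is a recognized type, carries a payload of the expected length,
 * and has a sane argument.  Returns 0 if so, a negative errno otherwise. */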
static int validate_actions(const struct nlattr *actions, u32 actions_len)
{
	const struct nlattr *a;
	int rem;

	nla_for_each_attr(a, actions, actions_len, rem) {
		static const u32 action_lens[ODPAT_MAX + 1] = {
			[ODPAT_OUTPUT] = 4,
			[ODPAT_CONTROLLER] = 8,
			[ODPAT_SET_DL_TCI] = 2,
			[ODPAT_STRIP_VLAN] = 0,
			[ODPAT_SET_DL_SRC] = ETH_ALEN,
			[ODPAT_SET_DL_DST] = ETH_ALEN,
			[ODPAT_SET_NW_SRC] = 4,
			[ODPAT_SET_NW_DST] = 4,
			[ODPAT_SET_NW_TOS] = 1,
			[ODPAT_SET_TP_SRC] = 2,
			[ODPAT_SET_TP_DST] = 2,
			[ODPAT_SET_TUNNEL] = 8,
			[ODPAT_SET_PRIORITY] = 4,
			[ODPAT_POP_PRIORITY] = 0,
			[ODPAT_DROP_SPOOFED_ARP] = 0,
		};
		int type = nla_type(a);

		if (type > ODPAT_MAX || nla_len(a) != action_lens[type])
			return -EINVAL;

		switch (type) {
		case ODPAT_UNSPEC:
			return -EINVAL;

		case ODPAT_CONTROLLER:
		case ODPAT_STRIP_VLAN:
		case ODPAT_SET_DL_SRC:
		case ODPAT_SET_DL_DST:
		case ODPAT_SET_NW_SRC:
		case ODPAT_SET_NW_DST:
		case ODPAT_SET_TP_SRC:
		case ODPAT_SET_TP_DST:
		case ODPAT_SET_TUNNEL:
		case ODPAT_SET_PRIORITY:
		case ODPAT_POP_PRIORITY:
		case ODPAT_DROP_SPOOFED_ARP:
			/* No validation needed. */
			break;

		case ODPAT_OUTPUT:
			if (nla_get_u32(a) >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case ODPAT_SET_DL_TCI:
			if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
				return -EINVAL;
			break;

		case ODPAT_SET_NW_TOS:
			if (nla_get_u8(a) & INET_ECN_MASK)
				return -EINVAL;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	if (rem > 0)
		return -EINVAL;

	return 0;
}
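
/* Copies the actions referenced by the userspace pointers in 'flow' into a
 * freshly allocated sw_flow_actions, validates them, and returns them (or an
 * ERR_PTR() on failure). */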
static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
{
	struct sw_flow_actions *actions;
	int error;

	actions = flow_actions_alloc(flow->actions_len);
	error = PTR_ERR(actions);
	if (IS_ERR(actions))
		goto error;

	error = -EFAULT;
	if (copy_from_user(actions->actions,
			   (struct nlattr __user __force *)flow->actions,
			   flow->actions_len))
		goto error_free_actions;
	error = validate_actions(actions->actions, actions->actions_len);
	if (error)
		goto error_free_actions;

	return actions;

error_free_actions:
	kfree(actions);
error:
	return ERR_PTR(error);
}
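
/* Fills 'stats' from 'flow', converting the flow's jiffies-based last-used
 * time into seconds and nanoseconds on the monotonic clock. */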
static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
{
	if (flow->used) {
		struct timespec offset_ts, used, now_mono;

		ktime_get_ts(&now_mono);
		jiffies_to_timespec(jiffies - flow->used, &offset_ts);
		set_normalized_timespec(&used, now_mono.tv_sec - offset_ts.tv_sec,
					now_mono.tv_nsec - offset_ts.tv_nsec);

		stats->used_sec = used.tv_sec;
		stats->used_nsec = used.tv_nsec;
	} else {
		stats->used_sec = 0;
		stats->used_nsec = 0;
	}

	stats->n_packets = flow->packet_count;
	stats->n_bytes = flow->byte_count;
	stats->tcp_flags = flow->tcp_flags;
}

static void clear_stats(struct sw_flow *flow)
{
	flow->used = 0;
	flow->tcp_flags = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}
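
/* Replaces the datapath's flow table with a larger copy of itself, freeing
 * the old table after an RCU grace period. */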
static int expand_table(struct datapath *dp)
{
	struct tbl *old_table = get_table_protected(dp);
	struct tbl *new_table;

	new_table = tbl_expand(old_table);
	if (IS_ERR(new_table))
		return PTR_ERR(new_table);

	rcu_assign_pointer(dp->table, new_table);
	tbl_deferred_destroy(old_table, NULL);

	return 0;
}

static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
		       struct odp_flow_stats *stats)
{
	struct tbl_node *flow_node;
	struct sw_flow_key key;
	struct sw_flow *flow;
	struct tbl *table;
	struct sw_flow_actions *acts = NULL;
	int error;
	u32 hash;

	error = flow_copy_from_user(&key, (const struct nlattr __force __user *)uf->flow.key,
				    uf->flow.key_len);
	if (error)
		return error;

	hash = flow_hash(&key);
	table = get_table_protected(dp);
	flow_node = tbl_lookup(table, &key, hash, flow_cmp);
	if (!flow_node) {
		/* No such flow. */
		error = -ENOENT;
		if (!(uf->flags & ODPPF_CREATE))
			goto error;

		/* Expand table, if necessary, to make room. */
		if (tbl_count(table) >= tbl_n_buckets(table)) {
			error = expand_table(dp);
			if (error)
				goto error;
			table = get_table_protected(dp);
		}

		/* Allocate flow. */
		flow = flow_alloc();
		if (IS_ERR(flow)) {
			error = PTR_ERR(flow);
			goto error;
		}
		flow->key = key;
		clear_stats(flow);

		/* Obtain actions. */
		acts = get_actions(&uf->flow);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = tbl_insert(table, &flow->tbl_node, hash);
		if (error)
			goto error_free_flow_acts;

		memset(stats, 0, sizeof(struct odp_flow_stats));
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts, *new_acts;

		flow = flow_cast(flow_node);

		/* Bail out if we're not allowed to modify an existing flow. */
		error = -EEXIST;
		if (!(uf->flags & ODPPF_MODIFY))
			goto error;

		/* Swap actions. */
		new_acts = get_actions(&uf->flow);
		error = PTR_ERR(new_acts);
		if (IS_ERR(new_acts))
			goto error;

		old_acts = rcu_dereference_protected(flow->sf_acts,
						     lockdep_is_held(&dp->mutex));
		if (old_acts->actions_len != new_acts->actions_len ||
		    memcmp(old_acts->actions, new_acts->actions,
			   old_acts->actions_len)) {
			rcu_assign_pointer(flow->sf_acts, new_acts);
			flow_deferred_free_acts(old_acts);
		} else {
			kfree(new_acts);
		}

		/* Fetch stats, then clear them if necessary. */
		spin_lock_bh(&flow->lock);
		get_stats(flow, stats);
		if (uf->flags & ODPPF_ZERO_STATS)
			clear_stats(flow);
		spin_unlock_bh(&flow->lock);
	}

	return 0;

error_free_flow_acts:
	kfree(acts);
error_free_flow:
	flow->sf_acts = NULL;
	flow_put(flow);
error:
	return error;
}
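
/* Copies an odp_flow_put request in from userspace, applies it with
 * do_put_flow(), and copies the resulting flow stats back out. */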
static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
{
	struct odp_flow_stats stats;
	struct odp_flow_put uf;
	int error;

	if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
		return -EFAULT;

	error = do_put_flow(dp, &uf, &stats);
	if (error)
		return error;

	if (copy_to_user(&ufp->flow.stats, &stats,
			 sizeof(struct odp_flow_stats)))
		return -EFAULT;

	return 0;
}

static int do_answer_query(struct datapath *dp, struct sw_flow *flow,
			   u32 query_flags,
			   struct odp_flow_stats __user *ustats,
			   struct nlattr __user *actions,
			   u32 __user *actions_lenp)
{
	struct sw_flow_actions *sf_acts;
	struct odp_flow_stats stats;
	u32 actions_len;

	spin_lock_bh(&flow->lock);
	get_stats(flow, &stats);
	if (query_flags & ODPFF_ZERO_TCP_FLAGS)
		flow->tcp_flags = 0;

	spin_unlock_bh(&flow->lock);

	if (copy_to_user(ustats, &stats, sizeof(struct odp_flow_stats)) ||
	    get_user(actions_len, actions_lenp))
		return -EFAULT;

	if (!actions_len)
		return 0;

	sf_acts = rcu_dereference_protected(flow->sf_acts,
					    lockdep_is_held(&dp->mutex));
	if (put_user(sf_acts->actions_len, actions_lenp) ||
	    (actions && copy_to_user(actions, sf_acts->actions,
				     min(sf_acts->actions_len, actions_len))))
		return -EFAULT;

	return 0;
}

static int answer_query(struct datapath *dp, struct sw_flow *flow,
			u32 query_flags, struct odp_flow __user *ufp)
{
	struct nlattr __user *actions;

	if (get_user(actions, (struct nlattr __user * __user *)&ufp->actions))
		return -EFAULT;

	return do_answer_query(dp, flow, query_flags,
			       &ufp->stats, actions, &ufp->actions_len);
}
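
/* Removes the flow matching the userspace-provided 'key' from the flow table
 * and returns it.  The caller owns the returned flow and must free it once
 * concurrent readers can no longer hold a reference. */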
static struct sw_flow *do_del_flow(struct datapath *dp, const struct nlattr __user *key, u32 key_len)
{
	struct tbl *table = get_table_protected(dp);
	struct tbl_node *flow_node;
	struct sw_flow_key swkey;
	int error;

	error = flow_copy_from_user(&swkey, key, key_len);
	if (error)
		return ERR_PTR(error);

	flow_node = tbl_lookup(table, &swkey, flow_hash(&swkey), flow_cmp);
	if (!flow_node)
		return ERR_PTR(-ENOENT);

	error = tbl_remove(table, flow_node);
	if (error)
		return ERR_PTR(error);

	/* XXX Returned flow_node's statistics might lose a few packets, since
	 * other CPUs can be using this flow.  We used to synchronize_rcu() to
	 * make sure that we get completely accurate stats, but that blows our
	 * performance, badly. */
	return flow_cast(flow_node);
}

static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
{
	struct sw_flow *flow;
	struct odp_flow uf;
	int error;

	if (copy_from_user(&uf, ufp, sizeof(uf)))
		return -EFAULT;

	flow = do_del_flow(dp, (const struct nlattr __force __user *)uf.key, uf.key_len);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	error = answer_query(dp, flow, 0, ufp);
	flow_deferred_free(flow);

	return error;
}

static int do_query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct tbl *table = get_table_protected(dp);
	u32 i;

	for (i = 0; i < flowvec->n_flows; i++) {
		struct odp_flow __user *ufp = (struct odp_flow __user __force *)&flowvec->flows[i];
		struct sw_flow_key key;
		struct odp_flow uf;
		struct tbl_node *flow_node;
		int error;

		if (copy_from_user(&uf, ufp, sizeof(uf)))
			return -EFAULT;

		error = flow_copy_from_user(&key, (const struct nlattr __force __user *)uf.key, uf.key_len);
		if (error)
			return error;

		flow_node = tbl_lookup(table, &key, flow_hash(&key), flow_cmp);
		if (!flow_node)
			error = put_user(ENOENT, &ufp->stats.error);
		else
			error = answer_query(dp, flow_cast(flow_node), uf.flags, ufp);
		if (error)
			return -EFAULT;
	}
	return flowvec->n_flows;
}

static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
			    int (*function)(struct datapath *,
					    const struct odp_flowvec *))
{
	struct odp_flowvec __user *uflowvec;
	struct odp_flowvec flowvec;
	int retval;

	uflowvec = (struct odp_flowvec __user *)argp;
	if (copy_from_user(&flowvec, uflowvec, sizeof(flowvec)))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
		return -EINVAL;

	retval = function(dp, &flowvec);
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: put_user(retval, &uflowvec->n_flows));
}
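
/* Returns the flow that follows the position recorded in the two-element
 * userspace cursor 'state', advancing the cursor, or NULL when the table is
 * exhausted. */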
static struct sw_flow *do_dump_flow(struct datapath *dp, u32 __user *state)
{
	struct tbl *table = get_table_protected(dp);
	struct tbl_node *tbl_node;
	u32 bucket, obj;

	if (get_user(bucket, &state[0]) || get_user(obj, &state[1]))
		return ERR_PTR(-EFAULT);

	tbl_node = tbl_next(table, &bucket, &obj);

	if (put_user(bucket, &state[0]) || put_user(obj, &state[1]))
		return ERR_PTR(-EFAULT);

	return tbl_node ? flow_cast(tbl_node) : NULL;
}

static int dump_flow(struct datapath *dp, struct odp_flow_dump __user *udumpp)
{
	struct odp_flow __user *uflowp;
	struct nlattr __user *ukey;
	struct sw_flow *flow;
	int key_len;

	flow = do_dump_flow(dp, udumpp->state);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (get_user(uflowp, (struct odp_flow __user *__user *)&udumpp->flow))
		return -EFAULT;

	if (!flow)
		return put_user(ODPFF_EOF, &uflowp->flags);

	if (put_user(0, &uflowp->flags) ||
	    get_user(ukey, (struct nlattr __user * __user *)&uflowp->key) ||
	    get_user(key_len, &uflowp->key_len))
		return -EFAULT;

	key_len = flow_copy_to_user(ukey, &flow->key, key_len);
	if (key_len < 0)
		return key_len;
	if (put_user(key_len, &uflowp->key_len))
		return -EFAULT;

	return answer_query(dp, flow, 0, uflowp);
}
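
/* Builds a packet from userspace-supplied data and injects it into the
 * datapath, executing the given (validated) actions on it as if it had been
 * received on a port. */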
static int do_execute(struct datapath *dp, const struct odp_execute *execute)
{
	struct sw_flow_key key;
	struct sk_buff *skb;
	struct sw_flow_actions *actions;
	struct ethhdr *eth;
	bool is_frag;
	int err;

	err = -EINVAL;
	if (execute->length < ETH_HLEN || execute->length > 65535)
		goto error;

	actions = flow_actions_alloc(execute->actions_len);
	if (IS_ERR(actions)) {
		err = PTR_ERR(actions);
		goto error;
	}

	err = -EFAULT;
	if (copy_from_user(actions->actions,
			   (struct nlattr __user __force *)execute->actions, execute->actions_len))
		goto error_free_actions;

	err = validate_actions(actions->actions, execute->actions_len);
	if (err)
		goto error_free_actions;

	err = -ENOMEM;
	skb = alloc_skb(execute->length, GFP_KERNEL);
	if (!skb)
		goto error_free_actions;

	err = -EFAULT;
	if (copy_from_user(skb_put(skb, execute->length),
			   (const void __user __force *)execute->data,
			   execute->length))
		goto error_free_skb;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= 1536)
		skb->protocol = eth->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	err = flow_extract(skb, -1, &key, &is_frag);
	if (err)
		goto error_free_skb;

	rcu_read_lock();
	err = execute_actions(dp, skb, &key, actions->actions, actions->actions_len);
	rcu_read_unlock();

	kfree(actions);
	return err;

error_free_skb:
	kfree_skb(skb);
error_free_actions:
	kfree(actions);
error:
	return err;
}

static int execute_packet(struct datapath *dp, const struct odp_execute __user *executep)
{
	struct odp_execute execute;

	if (copy_from_user(&execute, executep, sizeof(execute)))
		return -EFAULT;

	return do_execute(dp, &execute);
}
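
/* Sums the per-CPU datapath counters into a single odp_stats, reading each
 * CPU's counters under its seqcount so that every per-CPU snapshot is
 * internally consistent. */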
static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
	struct odp_stats stats;
	int i;

	stats.n_ports = dp->n_ports;
	stats.max_ports = DP_MAX_PORTS;
	stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned seqcount;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			seqcount = read_seqcount_begin(&percpu_stats->seqlock);
			local_stats = *percpu_stats;
		} while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));

		stats.n_frags += local_stats.n_frags;
		stats.n_hit += local_stats.n_hit;
		stats.n_missed += local_stats.n_missed;
		stats.n_lost += local_stats.n_lost;
	}
	stats.max_miss_queue = DP_MAX_QUEUE_LEN;
	stats.max_action_queue = DP_MAX_QUEUE_LEN;
	return copy_to_user(statsp, &stats, sizeof(stats)) ? -EFAULT : 0;
}

/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int dp_min_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu = 0;

	ASSERT_RTNL();

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		int dev_mtu;

		/* Skip any internal ports, since that's what we're trying to
		 * set. */
		if (is_internal_vport(p))
			continue;

		dev_mtu = vport_get_mtu(p);
		if (!mtu || dev_mtu < mtu)
			mtu = dev_mtu;
	}

	return mtu ? mtu : ETH_DATA_LEN;
}

/* Sets the MTU of all datapath devices to the minimum of the ports.  Must
 * be called with RTNL lock. */
void set_internal_devs_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu;

	ASSERT_RTNL();

	mtu = dp_min_mtu(dp);

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		if (is_internal_vport(p))
			vport_set_mtu(p, mtu);
	}
}

static int get_listen_mask(const struct file *f)
{
	return (long)f->private_data;
}

static void set_listen_mask(struct file *f, int listen_mask)
{
	f->private_data = (void *)(long)listen_mask;
}
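
/* Netlink attribute policy for the attribute block that follows the fixed
 * odp_vport header exchanged with userspace. */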
static const struct nla_policy vport_policy[ODP_VPORT_ATTR_MAX + 1] = {
	[ODP_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[ODP_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_STATS] = { .len = sizeof(struct rtnl_link_stats64) },
	[ODP_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
	[ODP_VPORT_ATTR_MTU] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

static int copy_vport_to_user(void __user *dst, struct vport *vport, uint32_t total_len)
{
	struct odp_vport *odp_vport;
	struct sk_buff *skb;
	struct nlattr *nla;
	int ifindex, iflink;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	err = -ENOMEM;
	if (!skb)
		goto exit;

	odp_vport = (struct odp_vport *)__skb_put(skb, sizeof(struct odp_vport));
	odp_vport->dp_idx = vport->dp->dp_idx;
	odp_vport->total_len = total_len;

	NLA_PUT_U32(skb, ODP_VPORT_ATTR_PORT_NO, vport->port_no);
	NLA_PUT_U32(skb, ODP_VPORT_ATTR_TYPE, vport_get_type(vport));
	NLA_PUT_STRING(skb, ODP_VPORT_ATTR_NAME, vport_get_name(vport));

	nla = nla_reserve(skb, ODP_VPORT_ATTR_STATS, sizeof(struct rtnl_link_stats64));
	if (!nla)
		goto nla_put_failure;
	if (vport_get_stats(vport, nla_data(nla)))
		__skb_trim(skb, skb->len - nla->nla_len);

	NLA_PUT(skb, ODP_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));

	NLA_PUT_U32(skb, ODP_VPORT_ATTR_MTU, vport_get_mtu(vport));

	err = vport_get_options(vport, skb);

	ifindex = vport_get_ifindex(vport);
	if (ifindex > 0)
		NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFINDEX, ifindex);

	iflink = vport_get_iflink(vport);
	if (iflink > 0)
		NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFLINK, iflink);

	err = -EMSGSIZE;
	if (skb->len > total_len)
		goto exit_free;

	odp_vport->len = skb->len;
	err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
	goto exit_free;

nla_put_failure:
	err = -EMSGSIZE;
exit_free:
	kfree_skb(skb);
exit:
	return err;
}

static struct sk_buff *copy_vport_from_user(struct odp_vport __user *uodp_vport,
					    struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
	struct odp_vport *odp_vport;
	struct sk_buff *skb;
	u32 len;
	int err;

	if (get_user(len, &uodp_vport->len))
		return ERR_PTR(-EFAULT);
	if (len < sizeof(struct odp_vport))
		return ERR_PTR(-EINVAL);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	err = -EFAULT;
	if (copy_from_user(__skb_put(skb, len), uodp_vport, len))
		goto error_free_skb;

	odp_vport = (struct odp_vport *)skb->data;
	err = -EINVAL;
	if (odp_vport->len != len)
		goto error_free_skb;

	err = nla_parse(a, ODP_VPORT_ATTR_MAX, (struct nlattr *)(skb->data + sizeof(struct odp_vport)),
			skb->len - sizeof(struct odp_vport), vport_policy);
	if (err)
		goto error_free_skb;

	err = VERIFY_NUL_STRING(a[ODP_VPORT_ATTR_NAME], IFNAMSIZ - 1);
	if (err)
		goto error_free_skb;

	return skb;

error_free_skb:
	kfree_skb(skb);
	return ERR_PTR(err);
}

/* Called without any locks (or with RTNL lock).
 * Returns holding vport->dp->mutex.
 */
static struct vport *lookup_vport(struct odp_vport *odp_vport,
				  struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[ODP_VPORT_ATTR_NAME]) {
		int dp_idx, port_no;

	retry:
		rcu_read_lock();
		vport = vport_locate(nla_data(a[ODP_VPORT_ATTR_NAME]));
		if (!vport) {
			rcu_read_unlock();
			return ERR_PTR(-ENODEV);
		}
		dp_idx = vport->dp->dp_idx;
		port_no = vport->port_no;
		rcu_read_unlock();

		dp = get_dp_locked(dp_idx);
		if (!dp)
			goto retry;

		vport = get_vport_protected(dp, port_no);
		if (!vport ||
		    strcmp(vport_get_name(vport), nla_data(a[ODP_VPORT_ATTR_NAME]))) {
			mutex_unlock(&dp->mutex);
			goto retry;
		}

		return vport;
	} else if (a[ODP_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EINVAL);

		dp = get_dp_locked(odp_vport->dp_idx);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = get_vport_protected(dp, port_no);
		if (!vport) {
			mutex_unlock(&dp->mutex);
			return ERR_PTR(-ENOENT);
		}

		return vport;
	} else
		return ERR_PTR(-EINVAL);
}
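
/* Applies the optional stats, Ethernet address, and MTU attributes in 'a' to
 * 'vport', stopping at the first error. */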
static int change_vport(struct vport *vport, struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
	int err = 0;

	if (a[ODP_VPORT_ATTR_STATS])
		err = vport_set_stats(vport, nla_data(a[ODP_VPORT_ATTR_STATS]));
	if (!err && a[ODP_VPORT_ATTR_ADDRESS])
		err = vport_set_addr(vport, nla_data(a[ODP_VPORT_ATTR_ADDRESS]));
	if (!err && a[ODP_VPORT_ATTR_MTU])
		err = vport_set_mtu(vport, nla_get_u32(a[ODP_VPORT_ATTR_MTU]));

	return err;
}

static int attach_vport(struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct odp_vport *odp_vport;
	struct vport_parms parms;
	struct vport *vport;
	struct sk_buff *skb;
	struct datapath *dp;
	u32 port_no;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_vport = (struct odp_vport *)skb->data;

	err = -EINVAL;
	if (!a[ODP_VPORT_ATTR_NAME] || !a[ODP_VPORT_ATTR_TYPE])
		goto exit_kfree_skb;

	rtnl_lock();

	dp = get_dp_locked(odp_vport->dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock_rtnl;

	if (a[ODP_VPORT_ATTR_PORT_NO]) {
		port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

		err = -EFBIG;
		if (port_no >= DP_MAX_PORTS)
			goto exit_unlock_dp;

		vport = get_vport_protected(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock_dp;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock_dp;
			}
			vport = get_vport_protected(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[ODP_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[ODP_VPORT_ATTR_TYPE]);
	parms.options = a[ODP_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_dp;

	set_internal_devs_mtu(dp);
	dp_sysfs_add_if(vport);

	err = change_vport(vport, a);
	if (err) {
		dp_detach_port(vport);
		goto exit_unlock_dp;
	}

	err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);

exit_unlock_dp:
	mutex_unlock(&dp->mutex);
exit_unlock_rtnl:
	rtnl_unlock();
exit_kfree_skb:
	kfree_skb(skb);
exit:
	return err;
}

static int set_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct vport *vport;
	struct sk_buff *skb;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	rtnl_lock();
	vport = lookup_vport((struct odp_vport *)skb->data, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_free;

	err = 0;
	if (a[ODP_VPORT_ATTR_OPTIONS])
		err = vport_set_options(vport, a[ODP_VPORT_ATTR_OPTIONS]);
	if (!err)
		err = change_vport(vport, a);

	mutex_unlock(&vport->dp->mutex);
exit_free:
	kfree_skb(skb);
	rtnl_unlock();
exit:
	return err;
}

static int del_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct datapath *dp;
	struct vport *vport;
	struct sk_buff *skb;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	rtnl_lock();
	vport = lookup_vport((struct odp_vport *)skb->data, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_free;
	dp = vport->dp;

	err = -EINVAL;
	if (vport->port_no == ODPP_LOCAL) {
		mutex_unlock(&dp->mutex);
		goto exit_free;
	}

	err = dp_detach_port(vport);
	mutex_unlock(&dp->mutex);
exit_free:
	kfree_skb(skb);
	rtnl_unlock();
exit:
	return err;
}

static int get_vport(struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct odp_vport *odp_vport;
	struct vport *vport;
	struct sk_buff *skb;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_vport = (struct odp_vport *)skb->data;

	vport = lookup_vport(odp_vport, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_free;

	err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);
	mutex_unlock(&vport->dp->mutex);
exit_free:
	kfree_skb(skb);
exit:
	return err;
}

static int dump_vport(struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct odp_vport *odp_vport;
	struct sk_buff *skb;
	struct datapath *dp;
	u32 port_no;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_vport = (struct odp_vport *)skb->data;

	dp = get_dp_locked(odp_vport->dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit_free;

	port_no = 0;
	if (a[ODP_VPORT_ATTR_PORT_NO])
		port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);
	for (; port_no < DP_MAX_PORTS; port_no++) {
		struct vport *vport = get_vport_protected(dp, port_no);
		if (vport) {
			err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);
			goto exit_unlock_dp;
		}
	}
	err = -ENODEV;

exit_unlock_dp:
	mutex_unlock(&dp->mutex);
exit_free:
	kfree_skb(skb);
exit:
	return err;
}
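
/* Main ioctl entry point.  The minor number of the character device selects
 * the datapath.  Commands that create or destroy datapaths and vports do
 * their own locking; the remainder run with the datapath's mutex held. */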
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int drop_frags, listeners;
	unsigned int sflow_probability;
	int err;

	/* Handle commands with special locking requirements up front. */
	switch (cmd) {
	case ODP_DP_CREATE:
		err = create_dp(dp_idx, (char __user *)argp);
		goto exit;

	case ODP_DP_DESTROY:
		err = destroy_dp(dp_idx);
		goto exit;

	case ODP_VPORT_NEW:
		err = attach_vport((struct odp_vport __user *)argp);
		goto exit;

	case ODP_VPORT_GET:
		err = get_vport((struct odp_vport __user *)argp);
		goto exit;

	case ODP_VPORT_DEL:
		err = del_vport(cmd, (struct odp_vport __user *)argp);
		goto exit;

	case ODP_VPORT_SET:
		err = set_vport(cmd, (struct odp_vport __user *)argp);
		goto exit;

	case ODP_VPORT_DUMP:
		err = dump_vport((struct odp_vport __user *)argp);
		goto exit;
	}

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	switch (cmd) {
	case ODP_DP_STATS:
		err = get_dp_stats(dp, (struct odp_stats __user *)argp);
		break;

	case ODP_GET_DROP_FRAGS:
		err = put_user(dp->drop_frags, (int __user *)argp);
		break;

	case ODP_SET_DROP_FRAGS:
		err = get_user(drop_frags, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (drop_frags != 0 && drop_frags != 1)
			break;
		dp->drop_frags = drop_frags;
		err = 0;
		break;

	case ODP_GET_LISTEN_MASK:
		err = put_user(get_listen_mask(f), (int __user *)argp);
		break;

	case ODP_SET_LISTEN_MASK:
		err = get_user(listeners, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (listeners & ~ODPL_ALL)
			break;
		err = 0;
		set_listen_mask(f, listeners);
		break;

	case ODP_GET_SFLOW_PROBABILITY:
		err = put_user(dp->sflow_probability, (unsigned int __user *)argp);
		break;

	case ODP_SET_SFLOW_PROBABILITY:
		err = get_user(sflow_probability, (unsigned int __user *)argp);
		if (!err)
			dp->sflow_probability = sflow_probability;
		break;

	case ODP_FLOW_FLUSH:
		err = flush_flows(dp);
		break;

	case ODP_FLOW_PUT:
		err = put_flow(dp, (struct odp_flow_put __user *)argp);
		break;

	case ODP_FLOW_DEL:
		err = del_flow(dp, (struct odp_flow __user *)argp);
		break;

	case ODP_FLOW_GET:
		err = do_flowvec_ioctl(dp, argp, do_query_flows);
		break;

	case ODP_FLOW_DUMP:
		err = dump_flow(dp, (struct odp_flow_dump __user *)argp);
		break;

	case ODP_EXECUTE:
		err = execute_packet(dp, (struct odp_execute __user *)argp);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
exit:
	return err;
}

static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
{
	int i;

	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
			return 1;
	}
	return 0;
}

#ifdef CONFIG_COMPAT
static int compat_get_flow(struct odp_flow *flow, const struct compat_odp_flow __user *compat)
{
	compat_uptr_t key, actions;

	if (!access_ok(VERIFY_READ, compat, sizeof(struct compat_odp_flow)) ||
	    __copy_from_user(&flow->stats, &compat->stats, sizeof(struct odp_flow_stats)) ||
	    __get_user(key, &compat->key) ||
	    __get_user(flow->key_len, &compat->key_len) ||
	    __get_user(actions, &compat->actions) ||
	    __get_user(flow->actions_len, &compat->actions_len) ||
	    __get_user(flow->flags, &compat->flags))
		return -EFAULT;

	flow->key = (struct nlattr __force *)compat_ptr(key);
	flow->actions = (struct nlattr __force *)compat_ptr(actions);
	return 0;
}

static int compat_put_flow(struct datapath *dp, struct compat_odp_flow_put __user *ufp)
{
	struct odp_flow_stats stats;
	struct odp_flow_put fp;
	int error;

	if (compat_get_flow(&fp.flow, &ufp->flow) ||
	    get_user(fp.flags, &ufp->flags))
		return -EFAULT;

	error = do_put_flow(dp, &fp, &stats);
	if (error)
		return error;

	if (copy_to_user(&ufp->flow.stats, &stats,
			 sizeof(struct odp_flow_stats)))
		return -EFAULT;

	return 0;
}

static int compat_answer_query(struct datapath *dp, struct sw_flow *flow,
			       u32 query_flags,
			       struct compat_odp_flow __user *ufp)
{
	compat_uptr_t actions;

	if (get_user(actions, &ufp->actions))
		return -EFAULT;

	return do_answer_query(dp, flow, query_flags, &ufp->stats,
			       compat_ptr(actions), &ufp->actions_len);
}

static int compat_del_flow(struct datapath *dp, struct compat_odp_flow __user *ufp)
{
	struct sw_flow *flow;
	struct odp_flow uf;
	int error;

	if (compat_get_flow(&uf, ufp))
		return -EFAULT;

	flow = do_del_flow(dp, (const struct nlattr __force __user *)uf.key, uf.key_len);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	error = compat_answer_query(dp, flow, 0, ufp);
	flow_deferred_free(flow);

	return error;
}

static int compat_query_flows(struct datapath *dp,
			      struct compat_odp_flow __user *flows,
			      u32 n_flows)
{
	struct tbl *table = get_table_protected(dp);
	u32 i;

	for (i = 0; i < n_flows; i++) {
		struct compat_odp_flow __user *ufp = &flows[i];
		struct odp_flow uf;
		struct tbl_node *flow_node;
		struct sw_flow_key key;
		int error;

		if (compat_get_flow(&uf, ufp))
			return -EFAULT;

		error = flow_copy_from_user(&key, (const struct nlattr __force __user *)uf.key, uf.key_len);
		if (error)
			return error;

		flow_node = tbl_lookup(table, &key, flow_hash(&key), flow_cmp);
		if (!flow_node)
			error = put_user(ENOENT, &ufp->stats.error);
		else
			error = compat_answer_query(dp, flow_cast(flow_node),
						    uf.flags, ufp);
		if (error)
			return -EFAULT;
	}
	return n_flows;
}

static int compat_dump_flow(struct datapath *dp, struct compat_odp_flow_dump __user *udumpp)
{
	struct compat_odp_flow __user *uflowp;
	compat_uptr_t compat_ufp;
	struct sw_flow *flow;
	compat_uptr_t ukey;
	int key_len;

	flow = do_dump_flow(dp, udumpp->state);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (get_user(compat_ufp, &udumpp->flow))
		return -EFAULT;
	uflowp = compat_ptr(compat_ufp);

	if (!flow)
		return put_user(ODPFF_EOF, &uflowp->flags);

	if (put_user(0, &uflowp->flags) ||
	    get_user(ukey, &uflowp->key) ||
	    get_user(key_len, &uflowp->key_len))
		return -EFAULT;

	key_len = flow_copy_to_user(compat_ptr(ukey), &flow->key, key_len);
	if (key_len < 0)
		return key_len;
	if (put_user(key_len, &uflowp->key_len))
		return -EFAULT;

	return compat_answer_query(dp, flow, 0, uflowp);
}

static int compat_flowvec_ioctl(struct datapath *dp, unsigned long argp,
				int (*function)(struct datapath *,
						struct compat_odp_flow __user *,
						u32 n_flows))
{
	struct compat_odp_flowvec __user *uflowvec;
	struct compat_odp_flow __user *flows;
	struct compat_odp_flowvec flowvec;
	int retval;

	uflowvec = compat_ptr(argp);
	if (!access_ok(VERIFY_WRITE, uflowvec, sizeof(*uflowvec)) ||
	    copy_from_user(&flowvec, uflowvec, sizeof(flowvec)))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct compat_odp_flow))
		return -EINVAL;

	flows = compat_ptr(flowvec.flows);
	if (!access_ok(VERIFY_WRITE, flows,
		       flowvec.n_flows * sizeof(struct compat_odp_flow)))
		return -EFAULT;

	retval = function(dp, flows, flowvec.n_flows);
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: put_user(retval, &uflowvec->n_flows));
}

static int compat_execute(struct datapath *dp, const struct compat_odp_execute __user *uexecute)
{
	struct odp_execute execute;
	compat_uptr_t actions;
	compat_uptr_t data;

	if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
	    __get_user(actions, &uexecute->actions) ||
	    __get_user(execute.actions_len, &uexecute->actions_len) ||
	    __get_user(data, &uexecute->data) ||
	    __get_user(execute.length, &uexecute->length))
		return -EFAULT;

	execute.actions = (struct nlattr __force *)compat_ptr(actions);
	execute.data = (const void __force *)compat_ptr(data);

	return do_execute(dp, &execute);
}

static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int err;

	switch (cmd) {
	case ODP_DP_DESTROY:
	case ODP_FLOW_FLUSH:
		/* Ioctls that don't need any translation at all. */
		return openvswitch_ioctl(f, cmd, argp);

	case ODP_DP_CREATE:
	case ODP_VPORT_NEW:
	case ODP_VPORT_DEL:
	case ODP_VPORT_GET:
	case ODP_VPORT_SET:
	case ODP_VPORT_DUMP:
	case ODP_DP_STATS:
	case ODP_GET_DROP_FRAGS:
	case ODP_SET_DROP_FRAGS:
	case ODP_SET_LISTEN_MASK:
	case ODP_GET_LISTEN_MASK:
	case ODP_SET_SFLOW_PROBABILITY:
	case ODP_GET_SFLOW_PROBABILITY:
		/* Ioctls that just need their pointer argument extended. */
		return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
	}

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	switch (cmd) {
	case ODP_FLOW_PUT32:
		err = compat_put_flow(dp, compat_ptr(argp));
		break;

	case ODP_FLOW_DEL32:
		err = compat_del_flow(dp, compat_ptr(argp));
		break;

	case ODP_FLOW_GET32:
		err = compat_flowvec_ioctl(dp, argp, compat_query_flows);
		break;

	case ODP_FLOW_DUMP32:
		err = compat_dump_flow(dp, compat_ptr(argp));
		break;

	case ODP_EXECUTE32:
		err = compat_execute(dp, compat_ptr(argp));
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
exit:
	return err;
}
#endif

static ssize_t openvswitch_read(struct file *f, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	int listeners = get_listen_mask(f);
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp = get_dp_locked(dp_idx);
	struct sk_buff *skb;
	struct iovec iov;
	int retval;

	if (!dp)
		return -ENODEV;

	if (nbytes == 0 || !listeners) {
		retval = 0;
		goto error;
	}

	for (;;) {
		int i;

		for (i = 0; i < DP_N_QUEUES; i++) {
			if (listeners & (1 << i)) {
				skb = skb_dequeue(&dp->queues[i]);
				if (skb)
					goto success;
			}
		}

		if (f->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto error;
		}

		wait_event_interruptible(dp->waitqueue,
					 dp_has_packet_of_interest(dp,
								   listeners));

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto error;
		}
	}
success:
	mutex_unlock(&dp->mutex);

	iov.iov_base = buf;
	iov.iov_len = min_t(size_t, skb->len, nbytes);
	retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
	if (!retval)
		retval = skb->len;

	kfree_skb(skb);
	return retval;

error:
	mutex_unlock(&dp->mutex);
	return retval;
}

static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
	int dp_idx = iminor(file->f_dentry->d_inode);
	struct datapath *dp = get_dp_locked(dp_idx);
	unsigned int mask;

	if (dp) {
		mask = 0;
		poll_wait(file, &dp->waitqueue, wait);
		if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
			mask |= POLLIN | POLLRDNORM;
		mutex_unlock(&dp->mutex);
	} else {
		mask = POLLIN | POLLRDNORM | POLLHUP;
	}
	return mask;
}

static struct file_operations openvswitch_fops = {
	.owner = THIS_MODULE,
	.read = openvswitch_read,
	.poll = openvswitch_poll,
	.unlocked_ioctl = openvswitch_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = openvswitch_compat_ioctl,
#endif
};

static int major;

static int __init dp_init(void)
{
	struct sk_buff *dummy_skb;
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

	printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

	err = flow_init();
	if (err)
		goto error;

	err = vport_init();
	if (err)
		goto error_flow_exit;

	err = register_netdevice_notifier(&dp_device_notifier);
	if (err)
		goto error_vport_exit;

	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	if (major < 0) {
		err = major;
		goto error_unreg_notifier;
	}

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&dp_device_notifier);
error_vport_exit:
	vport_exit();
error_flow_exit:
	flow_exit();
error:
	return err;
}

static void dp_cleanup(void)
{
	rcu_barrier();
	unregister_chrdev(major, "openvswitch");
	unregister_netdevice_notifier(&dp_device_notifier);
	vport_exit();
	flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");