/*
 * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for managing the dp interface/device. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>
#include <linux/compat.h>

#include "openvswitch/datapath-protocol.h"
#include "checksum.h"
#include "datapath.h"
#include "actions.h"
#include "flow.h"
#include "loop_counter.h"
#include "odp-compat.h"
#include "table.h"
#include "vport-internal_dev.h"

int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);

/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
 * lock first.
 *
 * It is safe to access the datapath and vport structures with just
 * dp_mutex.
 */
static struct datapath __rcu *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);

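/* Example of the documented lock ordering (an illustrative sketch, not part
 * of the original file): code that needs both locks must take the RTNL lock
 * first and release in the reverse order:
 *
 *	rtnl_lock();
 *	mutex_lock(&dp_mutex);
 *	...modify dps[] or a datapath...
 *	mutex_unlock(&dp_mutex);
 *	rtnl_unlock();
 */
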
static int new_vport(struct datapath *, struct odp_port *, int port_no);

/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ODP_MAX)
		return NULL;
	return rcu_dereference_check(dps[dp_idx], rcu_read_lock_held() ||
					 lockdep_is_held(&dp_mutex));
}
EXPORT_SYMBOL_GPL(get_dp);

static struct datapath *get_dp_locked(int dp_idx)
{
	struct datapath *dp;

	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp)
		mutex_lock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	return dp;
}

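/* Usage sketch for get_dp_locked() (illustrative, not from the original
 * file): on success the datapath's own mutex is held, so the caller must
 * drop it when finished:
 *
 *	struct datapath *dp = get_dp_locked(dp_idx);
 *	if (!dp)
 *		return -ENODEV;
 *	...operate on dp...
 *	mutex_unlock(&dp->mutex);
 */
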
static struct tbl *get_table_protected(struct datapath *dp)
{
	return rcu_dereference_protected(dp->table, lockdep_is_held(&dp->mutex));
}

/* Must be called with rcu_read_lock or RTNL lock. */
const char *dp_name(const struct datapath *dp)
{
	return vport_get_name(rcu_dereference_rtnl(dp->ports[ODPP_LOCAL]));
}

static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(1); /* IFLA_OPERSTATE */
}

static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct vport *port,
			  int event, unsigned int flags)
{
	const struct datapath *dp = port->dp;
	int ifindex = vport_get_ifindex(port);
	int iflink = vport_get_iflink(port);
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	if (ifindex < 0)
		return ifindex;

	if (iflink < 0)
		return iflink;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = ARPHRD_ETHER;
	hdr->ifi_index = ifindex;
	hdr->ifi_flags = vport_get_flags(port);
	hdr->ifi_change = 0;

	NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
	NLA_PUT_U32(skb, IFLA_MASTER,
		vport_get_ifindex(rtnl_dereference(dp->ports[ODPP_LOCAL])));
	NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   vport_is_running(port)
			? vport_get_operstate(port)
			: IF_OPER_DOWN);
#endif

	NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));

	if (ifindex != iflink)
		NLA_PUT_U32(skb, IFLA_LINK, iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

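/* Note: the NLA_PUT* macros above jump to the nla_put_failure label when the
 * skb runs out of tailroom; each one is roughly shorthand for the following
 * (illustrative expansion):
 *
 *	if (nla_put(skb, IFLA_MTU, sizeof(u32), &mtu) < 0)
 *		goto nla_put_failure;
 *
 * which is why br_nlmsg_size() must account for every attribute emitted
 * here. */
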
static void dp_ifinfo_notify(int event, struct vport *port)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dp_fill_ifinfo(skb, port, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
}

static void release_dp(struct kobject *kobj)
{
	struct datapath *dp = container_of(kobj, struct datapath, ifobj);

	kfree(dp);
}

static struct kobj_type dp_ktype = {
	.release = release_dp
};

static int create_dp(int dp_idx, const char __user *devnamep)
{
	struct odp_port internal_dev_port;
	char devname[IFNAMSIZ];
	struct datapath *dp;
	int err;
	int i;

	if (devnamep) {
		int retval = strncpy_from_user(devname, devnamep, IFNAMSIZ);
		if (retval < 0) {
			err = -EFAULT;
			goto err;
		} else if (retval >= IFNAMSIZ) {
			err = -ENAMETOOLONG;
			goto err;
		}
	} else {
		snprintf(devname, sizeof devname, "of%d", dp_idx);
	}

	rtnl_lock();
	mutex_lock(&dp_mutex);
	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock;

	/* Exit early if a datapath with that number already exists.
	 * (We don't use -EEXIST because that's ambiguous with 'devname'
	 * conflicting with an existing network device name.) */
	err = -EBUSY;
	if (get_dp(dp_idx))
		goto err_put_module;

	err = -ENOMEM;
	dp = kzalloc(sizeof *dp, GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;
	INIT_LIST_HEAD(&dp->port_list);
	mutex_init(&dp->mutex);
	dp->dp_idx = dp_idx;
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
	dp->ifobj.kset = NULL;
	kobject_init(&dp->ifobj, &dp_ktype);

	/* Allocate table. */
	err = -ENOMEM;
	rcu_assign_pointer(dp->table, tbl_create(0));
	if (!dp->table)
		goto err_free_dp;

	/* Set up our datapath device. */
	BUILD_BUG_ON(sizeof(internal_dev_port.devname) != sizeof(devname));
	strcpy(internal_dev_port.devname, devname);
	strcpy(internal_dev_port.type, "internal");
	err = new_vport(dp, &internal_dev_port, ODPP_LOCAL);
	if (err) {
		if (err == -EBUSY)
			err = -EEXIST;
		goto err_destroy_table;
	}

	err = -ENOMEM;
	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu)
		goto err_destroy_local_port;

	rcu_assign_pointer(dps[dp_idx], dp);
	dp_sysfs_add_dp(dp);

	mutex_unlock(&dp_mutex);
	rtnl_unlock();

	return 0;

err_destroy_local_port:
	dp_detach_port(dp->ports[ODPP_LOCAL]);
err_destroy_table:
	tbl_destroy(dp->table, NULL);
err_free_dp:
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
err:
	return err;
}

static void do_destroy_dp(struct datapath *dp)
{
	struct vport *p, *n;
	int i;

	list_for_each_entry_safe (p, n, &dp->port_list, node)
		if (p->port_no != ODPP_LOCAL)
			dp_detach_port(p);

	dp_sysfs_del_dp(dp);

	rcu_assign_pointer(dps[dp->dp_idx], NULL);

	dp_detach_port(dp->ports[ODPP_LOCAL]);

	tbl_destroy(dp->table, flow_free_tbl);

	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_purge(&dp->queues[i]);
	free_percpu(dp->stats_percpu);
	kobject_put(&dp->ifobj);
	module_put(THIS_MODULE);
}

static int destroy_dp(int dp_idx)
{
	struct datapath *dp;
	int err;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	do_destroy_dp(dp);
	err = 0;

err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
	return err;
}

/* Called with RTNL lock and dp_mutex. */
static int new_vport(struct datapath *dp, struct odp_port *odp_port, int port_no)
{
	struct vport_parms parms;
	struct vport *vport;

	parms.name = odp_port->devname;
	parms.type = odp_port->type;
	parms.config = odp_port->config;
	parms.dp = dp;
	parms.port_no = port_no;

	vport_lock();
	vport = vport_add(&parms);
	vport_unlock();

	if (IS_ERR(vport))
		return PTR_ERR(vport);

	rcu_assign_pointer(dp->ports[port_no], vport);
	list_add_rcu(&vport->node, &dp->port_list);
	dp->n_ports++;

	dp_ifinfo_notify(RTM_NEWLINK, vport);

	return 0;
}

static int attach_port(int dp_idx, struct odp_port __user *portp)
{
	struct datapath *dp;
	struct odp_port port;
	int port_no;
	int err;

	err = -EFAULT;
	if (copy_from_user(&port, portp, sizeof port))
		goto out;
	port.devname[IFNAMSIZ - 1] = '\0';
	port.type[VPORT_TYPE_SIZE - 1] = '\0';

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
		if (!dp->ports[port_no])
			goto got_port_no;
	err = -EFBIG;
	goto out_unlock_dp;

got_port_no:
	err = new_vport(dp, &port, port_no);
	if (err)
		goto out_unlock_dp;

	set_internal_devs_mtu(dp);
	dp_sysfs_add_if(dp->ports[port_no]);

	err = put_user(port_no, &portp->port);

out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}

int dp_detach_port(struct vport *p)
{
	int err;

	ASSERT_RTNL();

	if (p->port_no != ODPP_LOCAL)
		dp_sysfs_del_if(p);
	dp_ifinfo_notify(RTM_DELLINK, p);

	/* First drop references to device. */
	p->dp->n_ports--;
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);

	/* Then destroy it. */
	vport_lock();
	err = vport_del(p);
	vport_unlock();

	return err;
}

static int detach_port(int dp_idx, int port_no)
{
	struct vport *p;
	struct datapath *dp;
	int err;

	err = -EINVAL;
	if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)
		goto out;

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	p = dp->ports[port_no];
	err = -ENOENT;
	if (!p)
		goto out_unlock_dp;

	err = dp_detach_port(p);

out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}

/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	int stats_counter_off;
	struct sw_flow_actions *acts;
	struct loop_counter *loop;
	int error;

	OVS_CB(skb)->vport = p;

	if (!OVS_CB(skb)->flow) {
		struct odp_flow_key key;
		struct tbl_node *flow_node;
		bool is_frag;

		/* Extract flow from 'skb' into 'key'. */
		error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key, &is_frag);
		if (unlikely(error)) {
			kfree_skb(skb);
			return;
		}

		if (is_frag && dp->drop_frags) {
			kfree_skb(skb);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
			goto out;
		}

		/* Look up flow. */
		flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
				       flow_hash(&key), flow_cmp);
		if (unlikely(!flow_node)) {
			dp_output_control(dp, skb, _ODPL_MISS_NR,
					  (__force u64)OVS_CB(skb)->tun_id);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
			goto out;
		}

		OVS_CB(skb)->flow = flow_cast(flow_node);
	}

	stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
	flow_used(OVS_CB(skb)->flow, skb);

	acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);

	/* Check whether we've looped too much. */
	loop = loop_get_counter();
	if (unlikely(++loop->count > MAX_LOOPS))
		loop->looping = true;
	if (unlikely(loop->looping)) {
		loop_suppress(dp, acts);
		kfree_skb(skb);
		goto out_loop;
	}

	/* Execute actions. */
	execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
			acts->actions_len);

	/* Check whether sub-actions looped too much. */
	if (unlikely(loop->looping))
		loop_suppress(dp, acts);

out_loop:
	/* Decrement loop counter. */
	if (!--loop->count)
		loop->looping = false;
	loop_put_counter();

out:
	/* Update datapath statistics. */
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	(*(u64 *)((u8 *)stats + stats_counter_off))++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();
}

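/* The write_seqcount pair above is the writer side of the per-CPU stats
 * protocol; get_dp_stats() is the reader.  The counters are 64-bit and may be
 * bumped in softirq context, so a reader on another CPU retries until it
 * observes a consistent snapshot (illustrative reader sketch):
 *
 *	do {
 *		seq = read_seqcount_begin(&stats->seqlock);
 *		local_stats = *stats;
 *	} while (read_seqcount_retry(&stats->seqlock, seq));
 */
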
/* Append each packet in 'skb' list to 'queue'.  There will be only one packet
 * unless we broke up a GSO packet. */
static int queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
				 int queue_no, u64 arg)
{
	struct sk_buff *nskb;
	int port_no;
	int err;

	if (OVS_CB(skb)->vport)
		port_no = OVS_CB(skb)->vport->port_no;
	else
		port_no = ODPP_LOCAL;

	do {
		struct odp_msg *header;

		nskb = skb->next;
		skb->next = NULL;

		err = skb_cow(skb, sizeof *header);
		if (err)
			goto err_kfree_skbs;

		header = (struct odp_msg *)__skb_push(skb, sizeof *header);
		header->type = queue_no;
		header->length = skb->len;
		header->port = port_no;
		header->arg = arg;
		skb_queue_tail(queue, skb);

		skb = nskb;
	} while (skb);
	return 0;

err_kfree_skbs:
	while ((skb = nskb) != NULL) {
		nskb = skb->next;
		kfree_skb(skb);
	}
	return err;
}

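/* Each queued skb thus begins with a struct odp_msg header directly followed
 * by the (possibly GSO-segmented) packet data, so userspace can walk a read
 * buffer message by message (illustrative sketch):
 *
 *	struct odp_msg *msg = (struct odp_msg *)buf;
 *	size_t packet_len = msg->length - sizeof *msg;
 *	char *packet = (char *)(msg + 1);
 */
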
int dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
		      u64 arg)
{
	struct dp_stats_percpu *stats;
	struct sk_buff_head *queue;
	int err;

	WARN_ON_ONCE(skb_shared(skb));
	BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR && queue_no != _ODPL_SFLOW_NR);
	queue = &dp->queues[queue_no];
	err = -ENOBUFS;
	if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
		goto err_kfree_skb;

	forward_ip_summed(skb);

	err = vswitch_skb_checksum_setup(skb);
	if (err)
		goto err_kfree_skb;

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);

		kfree_skb(skb);
		skb = nskb;
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto err;
		}
	}

	err = queue_control_packets(skb, queue, queue_no, arg);
	wake_up_interruptible(&dp->waitqueue);
	return err;

err_kfree_skb:
	kfree_skb(skb);
err:
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	stats->n_lost++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();

	return err;
}

static int flush_flows(struct datapath *dp)
{
	struct tbl *old_table = get_table_protected(dp);
	struct tbl *new_table;

	new_table = tbl_create(0);
	if (!new_table)
		return -ENOMEM;

	rcu_assign_pointer(dp->table, new_table);

	tbl_deferred_destroy(old_table, flow_free_tbl);

	return 0;
}

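/* Readers may still be walking the old table under rcu_read_lock, so the
 * table is swapped with rcu_assign_pointer() and only destroyed after a
 * grace period by tbl_deferred_destroy(); it is never freed in place. */
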
static int validate_actions(const struct nlattr *actions, u32 actions_len)
{
	const struct nlattr *a;
	int rem;

	nla_for_each_attr(a, actions, actions_len, rem) {
		static const u32 action_lens[ODPAT_MAX + 1] = {
			[ODPAT_OUTPUT] = 4,
			[ODPAT_CONTROLLER] = 8,
			[ODPAT_SET_DL_TCI] = 2,
			[ODPAT_STRIP_VLAN] = 0,
			[ODPAT_SET_DL_SRC] = ETH_ALEN,
			[ODPAT_SET_DL_DST] = ETH_ALEN,
			[ODPAT_SET_NW_SRC] = 4,
			[ODPAT_SET_NW_DST] = 4,
			[ODPAT_SET_NW_TOS] = 1,
			[ODPAT_SET_TP_SRC] = 2,
			[ODPAT_SET_TP_DST] = 2,
			[ODPAT_SET_TUNNEL] = 8,
			[ODPAT_SET_PRIORITY] = 4,
			[ODPAT_POP_PRIORITY] = 0,
			[ODPAT_DROP_SPOOFED_ARP] = 0,
		};
		int type = nla_type(a);

		if (type > ODPAT_MAX || nla_len(a) != action_lens[type])
			return -EINVAL;

		switch (type) {
		case ODPAT_CONTROLLER:
		case ODPAT_STRIP_VLAN:
		case ODPAT_SET_DL_SRC:
		case ODPAT_SET_DL_DST:
		case ODPAT_SET_NW_SRC:
		case ODPAT_SET_NW_DST:
		case ODPAT_SET_TP_SRC:
		case ODPAT_SET_TP_DST:
		case ODPAT_SET_TUNNEL:
		case ODPAT_SET_PRIORITY:
		case ODPAT_POP_PRIORITY:
		case ODPAT_DROP_SPOOFED_ARP:
			/* No validation needed. */
			break;

		case ODPAT_OUTPUT:
			if (nla_get_u32(a) >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case ODPAT_SET_DL_TCI:
			if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
				return -EINVAL;
			break;

		case ODPAT_SET_NW_TOS:
			if (nla_get_u8(a) & INET_ECN_MASK)
				return -EINVAL;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	if (rem > 0)
		return -EINVAL;

	return 0;
}

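/* A valid action list is a packed sequence of netlink attributes.  For
 * example (an illustrative sketch, not from the original file), a single
 * 4-byte "output to port 2" action would be encoded as:
 *
 *	struct nlattr *a = (struct nlattr *)buf;
 *	a->nla_type = ODPAT_OUTPUT;
 *	a->nla_len = NLA_HDRLEN + 4;
 *	*(u32 *)nla_data(a) = 2;
 */
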
static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
{
	struct sw_flow_actions *actions;
	int error;

	actions = flow_actions_alloc(flow->actions_len);
	error = PTR_ERR(actions);
	if (IS_ERR(actions))
		goto error;

	error = -EFAULT;
	if (copy_from_user(actions->actions,
			   (struct nlattr __user *)flow->actions,
			   flow->actions_len))
		goto error_free_actions;
	error = validate_actions(actions->actions, actions->actions_len);
	if (error)
		goto error_free_actions;

	return actions;

error_free_actions:
	kfree(actions);
error:
	return ERR_PTR(error);
}

static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
{
	if (flow->used) {
		struct timespec offset_ts, used, now_mono;

		ktime_get_ts(&now_mono);
		jiffies_to_timespec(jiffies - flow->used, &offset_ts);
		set_normalized_timespec(&used, now_mono.tv_sec - offset_ts.tv_sec,
					now_mono.tv_nsec - offset_ts.tv_nsec);

		stats->used_sec = used.tv_sec;
		stats->used_nsec = used.tv_nsec;
	} else {
		stats->used_sec = 0;
		stats->used_nsec = 0;
	}

	stats->n_packets = flow->packet_count;
	stats->n_bytes = flow->byte_count;
	stats->reserved = 0;
	stats->tcp_flags = flow->tcp_flags;
	stats->error = 0;
}

static void clear_stats(struct sw_flow *flow)
{
	flow->used = 0;
	flow->tcp_flags = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}

static int expand_table(struct datapath *dp)
{
	struct tbl *old_table = get_table_protected(dp);
	struct tbl *new_table;

	new_table = tbl_expand(old_table);
	if (IS_ERR(new_table))
		return PTR_ERR(new_table);

	rcu_assign_pointer(dp->table, new_table);
	tbl_deferred_destroy(old_table, NULL);

	return 0;
}

static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
		       struct odp_flow_stats *stats)
{
	struct tbl_node *flow_node;
	struct sw_flow *flow;
	struct tbl *table;
	int error;

	table = get_table_protected(dp);
	flow_node = tbl_lookup(table, &uf->flow.key, flow_hash(&uf->flow.key), flow_cmp);
	if (!flow_node) {
		/* No such flow. */
		struct sw_flow_actions *acts;

		error = -ENOENT;
		if (!(uf->flags & ODPPF_CREATE))
			goto error;

		/* Expand table, if necessary, to make room. */
		if (tbl_count(table) >= tbl_n_buckets(table)) {
			error = expand_table(dp);
			if (error)
				goto error;
			table = get_table_protected(dp);
		}

		/* Allocate flow. */
		flow = flow_alloc();
		if (IS_ERR(flow)) {
			error = PTR_ERR(flow);
			goto error;
		}
		flow->key = uf->flow.key;
		clear_stats(flow);

		/* Obtain actions. */
		acts = get_actions(&uf->flow);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = tbl_insert(table, &flow->tbl_node, flow_hash(&flow->key));
		if (error)
			goto error_free_flow_acts;

		memset(stats, 0, sizeof(struct odp_flow_stats));
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts, *new_acts;

		flow = flow_cast(flow_node);

		/* Bail out if we're not allowed to modify an existing flow. */
		error = -EEXIST;
		if (!(uf->flags & ODPPF_MODIFY))
			goto error;

		/* Swap actions. */
		new_acts = get_actions(&uf->flow);
		error = PTR_ERR(new_acts);
		if (IS_ERR(new_acts))
			goto error;

		old_acts = rcu_dereference_protected(flow->sf_acts,
						     lockdep_is_held(&dp->mutex));
		if (old_acts->actions_len != new_acts->actions_len ||
		    memcmp(old_acts->actions, new_acts->actions,
			   old_acts->actions_len)) {
			rcu_assign_pointer(flow->sf_acts, new_acts);
			flow_deferred_free_acts(old_acts);
		} else {
			kfree(new_acts);
		}

		/* Fetch stats, then clear them if necessary. */
		spin_lock_bh(&flow->lock);
		get_stats(flow, stats);
		if (uf->flags & ODPPF_ZERO_STATS)
			clear_stats(flow);
		spin_unlock_bh(&flow->lock);
	}

	return 0;

error_free_flow_acts:
	kfree(flow->sf_acts);
error_free_flow:
	flow->sf_acts = NULL;
	flow_put(flow);
error:
	return error;
}

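/* ODPPF_CREATE and ODPPF_MODIFY may be combined: both together give "upsert"
 * semantics (create the flow if absent, otherwise replace its actions), while
 * passing exactly one of them makes the opposite case fail with -ENOENT or
 * -EEXIST as above. */
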
static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
{
	struct odp_flow_stats stats;
	struct odp_flow_put uf;
	int error;

	if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
		return -EFAULT;

	error = do_put_flow(dp, &uf, &stats);
	if (error)
		return error;

	if (copy_to_user(&ufp->flow.stats, &stats,
			 sizeof(struct odp_flow_stats)))
		return -EFAULT;

	return 0;
}

static int do_answer_query(struct datapath *dp, struct sw_flow *flow,
			   u32 query_flags,
			   struct odp_flow_stats __user *ustats,
			   struct nlattr __user *actions,
			   u32 __user *actions_lenp)
{
	struct sw_flow_actions *sf_acts;
	struct odp_flow_stats stats;
	u32 actions_len;

	spin_lock_bh(&flow->lock);
	get_stats(flow, &stats);
	if (query_flags & ODPFF_ZERO_TCP_FLAGS)
		flow->tcp_flags = 0;

	spin_unlock_bh(&flow->lock);

	if (copy_to_user(ustats, &stats, sizeof(struct odp_flow_stats)) ||
	    get_user(actions_len, actions_lenp))
		return -EFAULT;

	if (!actions_len)
		return 0;

	sf_acts = rcu_dereference_protected(flow->sf_acts,
					    lockdep_is_held(&dp->mutex));
	if (put_user(sf_acts->actions_len, actions_lenp) ||
	    (actions && copy_to_user(actions, sf_acts->actions,
				     min(sf_acts->actions_len, actions_len))))
		return -EFAULT;

	return 0;
}

static int answer_query(struct datapath *dp, struct sw_flow *flow,
			u32 query_flags, struct odp_flow __user *ufp)
{
	struct nlattr __user *actions;

	if (get_user(actions, (struct nlattr __user * __user *)&ufp->actions))
		return -EFAULT;

	return do_answer_query(dp, flow, query_flags,
			       &ufp->stats, actions, &ufp->actions_len);
}

static struct sw_flow *do_del_flow(struct datapath *dp, struct odp_flow_key *key)
{
	struct tbl *table = get_table_protected(dp);
	struct tbl_node *flow_node;
	int error;

	flow_node = tbl_lookup(table, key, flow_hash(key), flow_cmp);
	if (!flow_node)
		return ERR_PTR(-ENOENT);

	error = tbl_remove(table, flow_node);
	if (error)
		return ERR_PTR(error);

	/* XXX Returned flow_node's statistics might lose a few packets, since
	 * other CPUs can be using this flow.  We used to synchronize_rcu() to
	 * make sure that we get completely accurate stats, but that blows our
	 * performance, badly. */
	return flow_cast(flow_node);
}

static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
{
	struct sw_flow *flow;
	struct odp_flow uf;
	int error;

	if (copy_from_user(&uf, ufp, sizeof uf))
		return -EFAULT;

	flow = do_del_flow(dp, &uf.key);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	error = answer_query(dp, flow, 0, ufp);
	flow_deferred_free(flow);
	return error;
}

static int do_query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct tbl *table = get_table_protected(dp);
	u32 i;

	for (i = 0; i < flowvec->n_flows; i++) {
		struct odp_flow __user *ufp = (struct odp_flow __user *)&flowvec->flows[i];
		struct odp_flow uf;
		struct tbl_node *flow_node;
		int error;

		if (copy_from_user(&uf, ufp, sizeof uf))
			return -EFAULT;

		flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
		if (!flow_node)
			error = put_user(ENOENT, &ufp->stats.error);
		else
			error = answer_query(dp, flow_cast(flow_node), uf.flags, ufp);
		if (error)
			return error;
	}
	return flowvec->n_flows;
}

struct list_flows_cbdata {
	struct datapath *dp;
	struct odp_flow __user *uflows;
	u32 n_flows;
	u32 listed_flows;
};

static int list_flow(struct tbl_node *node, void *cbdata_)
{
	struct sw_flow *flow = flow_cast(node);
	struct list_flows_cbdata *cbdata = cbdata_;
	struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
	int error;

	if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
		return -EFAULT;
	error = answer_query(cbdata->dp, flow, 0, ufp);
	if (error)
		return error;

	if (cbdata->listed_flows >= cbdata->n_flows)
		return cbdata->listed_flows;
	return 0;
}

static int do_list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct list_flows_cbdata cbdata;
	int error;

	if (!flowvec->n_flows)
		return 0;

	cbdata.dp = dp;
	cbdata.uflows = (struct odp_flow __user *)flowvec->flows;
	cbdata.n_flows = flowvec->n_flows;
	cbdata.listed_flows = 0;

	error = tbl_foreach(get_table_protected(dp), list_flow, &cbdata);
	return error ? error : cbdata.listed_flows;
}

static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
			    int (*function)(struct datapath *,
					    const struct odp_flowvec *))
{
	struct odp_flowvec __user *uflowvec;
	struct odp_flowvec flowvec;
	int retval;

	uflowvec = (struct odp_flowvec __user *)argp;
	if (copy_from_user(&flowvec, uflowvec, sizeof flowvec))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
		return -EINVAL;

	retval = function(dp, &flowvec);
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: put_user(retval, &uflowvec->n_flows));
}

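/* The return convention here mirrors the userspace ABI: a negative value is
 * an errno, a result equal to n_flows means the vector was processed in full,
 * and a short count is written back to uflowvec->n_flows so the caller can
 * tell how many entries are valid. */
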
static int do_execute(struct datapath *dp, const struct odp_execute *execute)
{
	struct odp_flow_key key;
	struct sk_buff *skb;
	struct sw_flow_actions *actions;
	struct ethhdr *eth;
	bool is_frag;
	int err;

	err = -EINVAL;
	if (execute->length < ETH_HLEN || execute->length > 65535)
		goto error;

	actions = flow_actions_alloc(execute->actions_len);
	if (IS_ERR(actions)) {
		err = PTR_ERR(actions);
		goto error;
	}

	err = -EFAULT;
	if (copy_from_user(actions->actions,
			   (struct nlattr __user *)execute->actions, execute->actions_len))
		goto error_free_actions;

	err = validate_actions(actions->actions, execute->actions_len);
	if (err)
		goto error_free_actions;

	err = -ENOMEM;
	skb = alloc_skb(execute->length, GFP_KERNEL);
	if (!skb)
		goto error_free_actions;

	err = -EFAULT;
	if (copy_from_user(skb_put(skb, execute->length),
			   (const void __user *)execute->data,
			   execute->length))
		goto error_free_skb;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= 1536)
		skb->protocol = eth->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	err = flow_extract(skb, -1, &key, &is_frag);
	if (err)
		goto error_free_skb;

	rcu_read_lock();
	err = execute_actions(dp, skb, &key, actions->actions, actions->actions_len);
	rcu_read_unlock();

	kfree(actions);
	return err;

error_free_skb:
	kfree_skb(skb);
error_free_actions:
	kfree(actions);
error:
	return err;
}

static int execute_packet(struct datapath *dp, const struct odp_execute __user *executep)
{
	struct odp_execute execute;

	if (copy_from_user(&execute, executep, sizeof execute))
		return -EFAULT;

	return do_execute(dp, &execute);
}

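/* Userspace drives this path with the ODP_EXECUTE ioctl by filling in a
 * struct odp_execute (an illustrative sketch of the fields do_execute()
 * consumes; the frame and action buffers are caller-supplied):
 *
 *	struct odp_execute ex;
 *	ex.data = frame;		(raw Ethernet frame to inject)
 *	ex.length = frame_len;		(ETH_HLEN..65535)
 *	ex.actions = acts;		(netlink-formatted action list)
 *	ex.actions_len = acts_len;
 *	ioctl(fd, ODP_EXECUTE, &ex);
 */
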
static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
	struct tbl *table = get_table_protected(dp);
	struct odp_stats stats;
	int i;

	stats.n_flows = tbl_count(table);
	stats.cur_capacity = tbl_n_buckets(table);
	stats.max_capacity = TBL_MAX_BUCKETS;
	stats.n_ports = dp->n_ports;
	stats.max_ports = DP_MAX_PORTS;
	stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned seqcount;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			seqcount = read_seqcount_begin(&percpu_stats->seqlock);
			local_stats = *percpu_stats;
		} while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));

		stats.n_frags += local_stats.n_frags;
		stats.n_hit += local_stats.n_hit;
		stats.n_missed += local_stats.n_missed;
		stats.n_lost += local_stats.n_lost;
	}
	stats.max_miss_queue = DP_MAX_QUEUE_LEN;
	stats.max_action_queue = DP_MAX_QUEUE_LEN;
	return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
}

/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int dp_min_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu = 0;

	ASSERT_RTNL();

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		int dev_mtu;

		/* Skip any internal ports, since that's what we're trying to
		 * set. */
		if (is_internal_vport(p))
			continue;

		dev_mtu = vport_get_mtu(p);
		if (!mtu || dev_mtu < mtu)
			mtu = dev_mtu;
	}

	return mtu ? mtu : ETH_DATA_LEN;
}

/* Sets the MTU of all datapath devices to the minimum of the ports.  Must
 * be called with RTNL lock. */
void set_internal_devs_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu;

	ASSERT_RTNL();

	mtu = dp_min_mtu(dp);

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		if (is_internal_vport(p))
			vport_set_mtu(p, mtu);
	}
}

static int put_port(const struct vport *p, struct odp_port __user *uop)
{
	struct odp_port op;

	memset(&op, 0, sizeof op);

	rcu_read_lock();
	strncpy(op.devname, vport_get_name(p), sizeof op.devname);
	strncpy(op.type, vport_get_type(p), sizeof op.type);
	rcu_read_unlock();

	op.port = p->port_no;

	return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
}

static int query_port(struct datapath *dp, struct odp_port __user *uport)
{
	struct odp_port port;

	if (copy_from_user(&port, uport, sizeof port))
		return -EFAULT;

	if (port.devname[0]) {
		struct vport *vport;
		int err = 0;

		port.devname[IFNAMSIZ - 1] = '\0';

		vport_lock();

		vport = vport_locate(port.devname);
		if (!vport) {
			err = -ENODEV;
			goto error_unlock;
		}
		if (vport->dp != dp) {
			err = -ENOENT;
			goto error_unlock;
		}

		port.port = vport->port_no;

error_unlock:
		vport_unlock();
		if (err)
			return err;
	} else {
		if (port.port >= DP_MAX_PORTS)
			return -EINVAL;
		if (!dp->ports[port.port])
			return -ENOENT;
	}

	return put_port(dp->ports[port.port], uport);
}

static int do_list_ports(struct datapath *dp, struct odp_port __user *uports,
			 int n_ports)
{
	struct vport *p;
	int idx = 0;

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		if (put_port(p, &uports[idx]))
			return -EFAULT;
		if (idx++ >= n_ports)
			break;
	}

	return idx;
}

static int list_ports(struct datapath *dp, struct odp_portvec __user *upv)
{
	struct odp_portvec pv;
	int retval;

	if (copy_from_user(&pv, upv, sizeof pv))
		return -EFAULT;

	retval = do_list_ports(dp, (struct odp_port __user *)pv.ports,
			       pv.n_ports);
	if (retval < 0)
		return retval;

	return put_user(retval, &upv->n_ports);
}

static int get_listen_mask(const struct file *f)
{
	return (long)f->private_data;
}

static void set_listen_mask(struct file *f, int listen_mask)
{
	f->private_data = (void *)(long)listen_mask;
}

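/* The listen mask is small enough to be stored directly in the
 * file->private_data pointer via integer casts, so each open file descriptor
 * carries its own subscription mask with no per-open allocation. */
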
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int drop_frags, listeners, port_no;
	unsigned int sflow_probability;
	int err;

	/* Handle commands with special locking requirements up front. */
	switch (cmd) {
	case ODP_DP_CREATE:
		err = create_dp(dp_idx, (char __user *)argp);
		goto exit;

	case ODP_DP_DESTROY:
		err = destroy_dp(dp_idx);
		goto exit;

	case ODP_VPORT_ATTACH:
		err = attach_port(dp_idx, (struct odp_port __user *)argp);
		goto exit;

	case ODP_VPORT_DETACH:
		err = get_user(port_no, (int __user *)argp);
		if (!err)
			err = detach_port(dp_idx, port_no);
		goto exit;

	case ODP_VPORT_MOD:
		err = vport_user_mod((struct odp_port __user *)argp);
		goto exit;

	case ODP_VPORT_STATS_GET:
		err = vport_user_stats_get((struct odp_vport_stats_req __user *)argp);
		goto exit;

	case ODP_VPORT_STATS_SET:
		err = vport_user_stats_set((struct odp_vport_stats_req __user *)argp);
		goto exit;

	case ODP_VPORT_ETHER_GET:
		err = vport_user_ether_get((struct odp_vport_ether __user *)argp);
		goto exit;

	case ODP_VPORT_ETHER_SET:
		err = vport_user_ether_set((struct odp_vport_ether __user *)argp);
		goto exit;

	case ODP_VPORT_MTU_GET:
		err = vport_user_mtu_get((struct odp_vport_mtu __user *)argp);
		goto exit;

	case ODP_VPORT_MTU_SET:
		err = vport_user_mtu_set((struct odp_vport_mtu __user *)argp);
		goto exit;
	}

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	switch (cmd) {
	case ODP_DP_STATS:
		err = get_dp_stats(dp, (struct odp_stats __user *)argp);
		break;

	case ODP_GET_DROP_FRAGS:
		err = put_user(dp->drop_frags, (int __user *)argp);
		break;

	case ODP_SET_DROP_FRAGS:
		err = get_user(drop_frags, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (drop_frags != 0 && drop_frags != 1)
			break;
		dp->drop_frags = drop_frags;
		err = 0;
		break;

	case ODP_GET_LISTEN_MASK:
		err = put_user(get_listen_mask(f), (int __user *)argp);
		break;

	case ODP_SET_LISTEN_MASK:
		err = get_user(listeners, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (listeners & ~ODPL_ALL)
			break;
		err = 0;
		set_listen_mask(f, listeners);
		break;

	case ODP_GET_SFLOW_PROBABILITY:
		err = put_user(dp->sflow_probability, (unsigned int __user *)argp);
		break;

	case ODP_SET_SFLOW_PROBABILITY:
		err = get_user(sflow_probability, (unsigned int __user *)argp);
		if (!err)
			dp->sflow_probability = sflow_probability;
		break;

	case ODP_VPORT_QUERY:
		err = query_port(dp, (struct odp_port __user *)argp);
		break;

	case ODP_VPORT_LIST:
		err = list_ports(dp, (struct odp_portvec __user *)argp);
		break;

	case ODP_FLOW_FLUSH:
		err = flush_flows(dp);
		break;

	case ODP_FLOW_PUT:
		err = put_flow(dp, (struct odp_flow_put __user *)argp);
		break;

	case ODP_FLOW_DEL:
		err = del_flow(dp, (struct odp_flow __user *)argp);
		break;

	case ODP_FLOW_GET:
		err = do_flowvec_ioctl(dp, argp, do_query_flows);
		break;

	case ODP_FLOW_LIST:
		err = do_flowvec_ioctl(dp, argp, do_list_flows);
		break;

	case ODP_EXECUTE:
		err = execute_packet(dp, (struct odp_execute __user *)argp);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
exit:
	return err;
}

static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
{
	int i;

	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
			return 1;
	}
	return 0;
}

#ifdef CONFIG_COMPAT
static int compat_list_ports(struct datapath *dp, struct compat_odp_portvec __user *upv)
{
	struct compat_odp_portvec pv;
	int retval;

	if (copy_from_user(&pv, upv, sizeof pv))
		return -EFAULT;

	retval = do_list_ports(dp, compat_ptr(pv.ports), pv.n_ports);
	if (retval < 0)
		return retval;

	return put_user(retval, &upv->n_ports);
}

static int compat_get_flow(struct odp_flow *flow, const struct compat_odp_flow __user *compat)
{
	compat_uptr_t actions;

	if (!access_ok(VERIFY_READ, compat, sizeof(struct compat_odp_flow)) ||
	    __copy_from_user(&flow->stats, &compat->stats, sizeof(struct odp_flow_stats)) ||
	    __copy_from_user(&flow->key, &compat->key, sizeof(struct odp_flow_key)) ||
	    __get_user(actions, &compat->actions) ||
	    __get_user(flow->actions_len, &compat->actions_len) ||
	    __get_user(flow->flags, &compat->flags))
		return -EFAULT;

	flow->actions = (struct nlattr __force *)compat_ptr(actions);
	return 0;
}

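/* compat_ptr() widens the 32-bit user pointer held in a compat_uptr_t into a
 * native kernel pointer; that is the only translation an odp_flow needs here,
 * since the remaining fields have the same layout in the 32- and 64-bit
 * ABIs. */
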
static int compat_put_flow(struct datapath *dp, struct compat_odp_flow_put __user *ufp)
{
	struct odp_flow_stats stats;
	struct odp_flow_put fp;
	int error;

	if (compat_get_flow(&fp.flow, &ufp->flow) ||
	    get_user(fp.flags, &ufp->flags))
		return -EFAULT;

	error = do_put_flow(dp, &fp, &stats);
	if (error)
		return error;

	if (copy_to_user(&ufp->flow.stats, &stats,
			 sizeof(struct odp_flow_stats)))
		return -EFAULT;

	return 0;
}

static int compat_answer_query(struct datapath *dp, struct sw_flow *flow,
			       u32 query_flags,
			       struct compat_odp_flow __user *ufp)
{
	compat_uptr_t actions;

	if (get_user(actions, &ufp->actions))
		return -EFAULT;

	return do_answer_query(dp, flow, query_flags, &ufp->stats,
			       compat_ptr(actions), &ufp->actions_len);
}

static int compat_del_flow(struct datapath *dp, struct compat_odp_flow __user *ufp)
{
	struct sw_flow *flow;
	struct odp_flow uf;
	int error;

	if (compat_get_flow(&uf, ufp))
		return -EFAULT;

	flow = do_del_flow(dp, &uf.key);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	error = compat_answer_query(dp, flow, 0, ufp);
	flow_deferred_free(flow);
	return error;
}

static int compat_query_flows(struct datapath *dp,
			      struct compat_odp_flow __user *flows,
			      u32 n_flows)
{
	struct tbl *table = get_table_protected(dp);
	u32 i;

	for (i = 0; i < n_flows; i++) {
		struct compat_odp_flow __user *ufp = &flows[i];
		struct odp_flow uf;
		struct tbl_node *flow_node;
		int error;

		if (compat_get_flow(&uf, ufp))
			return -EFAULT;

		flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
		if (!flow_node)
			error = put_user(ENOENT, &ufp->stats.error);
		else
			error = compat_answer_query(dp, flow_cast(flow_node),
						    uf.flags, ufp);
		if (error)
			return error;
	}
	return n_flows;
}

struct compat_list_flows_cbdata {
	struct datapath *dp;
	struct compat_odp_flow __user *uflows;
	u32 n_flows;
	u32 listed_flows;
};

static int compat_list_flow(struct tbl_node *node, void *cbdata_)
{
	struct sw_flow *flow = flow_cast(node);
	struct compat_list_flows_cbdata *cbdata = cbdata_;
	struct compat_odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
	int error;

	if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
		return -EFAULT;
	error = compat_answer_query(cbdata->dp, flow, 0, ufp);
	if (error)
		return error;

	if (cbdata->listed_flows >= cbdata->n_flows)
		return cbdata->listed_flows;
	return 0;
}

static int compat_list_flows(struct datapath *dp,
			     struct compat_odp_flow __user *flows, u32 n_flows)
{
	struct compat_list_flows_cbdata cbdata;
	int error;

	if (!n_flows)
		return 0;

	cbdata.dp = dp;
	cbdata.uflows = flows;
	cbdata.n_flows = n_flows;
	cbdata.listed_flows = 0;

	error = tbl_foreach(get_table_protected(dp), compat_list_flow, &cbdata);
	return error ? error : cbdata.listed_flows;
}

static int compat_flowvec_ioctl(struct datapath *dp, unsigned long argp,
				int (*function)(struct datapath *,
						struct compat_odp_flow __user *,
						u32 n_flows))
{
	struct compat_odp_flowvec __user *uflowvec;
	struct compat_odp_flow __user *flows;
	struct compat_odp_flowvec flowvec;
	int retval;

	uflowvec = compat_ptr(argp);
	if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
	    copy_from_user(&flowvec, uflowvec, sizeof flowvec))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct compat_odp_flow))
		return -EINVAL;

	flows = compat_ptr(flowvec.flows);
	if (!access_ok(VERIFY_WRITE, flows,
		       flowvec.n_flows * sizeof(struct compat_odp_flow)))
		return -EFAULT;

	retval = function(dp, flows, flowvec.n_flows);
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: put_user(retval, &uflowvec->n_flows));
}

static int compat_execute(struct datapath *dp, const struct compat_odp_execute __user *uexecute)
{
	struct odp_execute execute;
	compat_uptr_t actions;
	compat_uptr_t data;

	if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
	    __get_user(actions, &uexecute->actions) ||
	    __get_user(execute.actions_len, &uexecute->actions_len) ||
	    __get_user(data, &uexecute->data) ||
	    __get_user(execute.length, &uexecute->length))
		return -EFAULT;

	execute.actions = (struct nlattr __force *)compat_ptr(actions);
	execute.data = (const void __force *)compat_ptr(data);

	return do_execute(dp, &execute);
}

static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int err;

	switch (cmd) {
	case ODP_DP_DESTROY:
	case ODP_FLOW_FLUSH:
		/* Ioctls that don't need any translation at all. */
		return openvswitch_ioctl(f, cmd, argp);

	case ODP_DP_CREATE:
	case ODP_VPORT_ATTACH:
	case ODP_VPORT_DETACH:
	case ODP_VPORT_MOD:
	case ODP_VPORT_MTU_SET:
	case ODP_VPORT_MTU_GET:
	case ODP_VPORT_ETHER_SET:
	case ODP_VPORT_ETHER_GET:
	case ODP_VPORT_STATS_SET:
	case ODP_VPORT_STATS_GET:
	case ODP_DP_STATS:
	case ODP_GET_DROP_FRAGS:
	case ODP_SET_DROP_FRAGS:
	case ODP_SET_LISTEN_MASK:
	case ODP_GET_LISTEN_MASK:
	case ODP_SET_SFLOW_PROBABILITY:
	case ODP_GET_SFLOW_PROBABILITY:
	case ODP_VPORT_QUERY:
		/* Ioctls that just need their pointer argument extended. */
		return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
	}

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	switch (cmd) {
	case ODP_VPORT_LIST32:
		err = compat_list_ports(dp, compat_ptr(argp));
		break;

	case ODP_FLOW_PUT32:
		err = compat_put_flow(dp, compat_ptr(argp));
		break;

	case ODP_FLOW_DEL32:
		err = compat_del_flow(dp, compat_ptr(argp));
		break;

	case ODP_FLOW_GET32:
		err = compat_flowvec_ioctl(dp, argp, compat_query_flows);
		break;

	case ODP_FLOW_LIST32:
		err = compat_flowvec_ioctl(dp, argp, compat_list_flows);
		break;

	case ODP_EXECUTE32:
		err = compat_execute(dp, compat_ptr(argp));
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
exit:
	return err;
}
#endif

/* Unfortunately this function is not exported so this is a verbatim copy
 * from net/core/datagram.c in 2.6.30. */
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      u8 __user *to, int len,
				      __wsum *csump)
{
	int start = skb_headlen(skb);
	int pos = 0;
	int i, copy = start - offset;

	/* Copy header. */
	if (copy > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_user(skb->data + offset, to, copy,
					       *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			int err = 0;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr +
							frag->page_offset +
							offset - start,
						      to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			WARN_ON(start > offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				__wsum csum2 = 0;
				if (copy > len)
					copy = len;
				if (skb_copy_and_csum_datagram(list,
							       offset - start,
							       to, copy,
							       &csum2))
					goto fault;
				*csump = csum_block_add(*csump, csum2, pos);
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
				pos += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

static ssize_t openvswitch_read(struct file *f, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	int listeners = get_listen_mask(f);
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp = get_dp_locked(dp_idx);
	struct sk_buff *skb;
	size_t copy_bytes, tot_copy_bytes;
	int retval;

	if (!dp)
		return -ENODEV;

	retval = 0;
	if (nbytes == 0 || !listeners)
		goto error;

	for (;;) {
		int i;

		for (i = 0; i < DP_N_QUEUES; i++) {
			if (listeners & (1 << i)) {
				skb = skb_dequeue(&dp->queues[i]);
				if (skb)
					goto success;
			}
		}

		if (f->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto error;
		}

		wait_event_interruptible(dp->waitqueue,
					 dp_has_packet_of_interest(dp,
								   listeners));

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto error;
		}
	}
success:
	mutex_unlock(&dp->mutex);

	copy_bytes = tot_copy_bytes = min_t(size_t, skb->len, nbytes);

	retval = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (copy_bytes == skb->len) {
			__wsum csum = 0;
			u16 csum_start, csum_offset;

			get_skb_csum_pointers(skb, &csum_start, &csum_offset);
			BUG_ON(csum_start >= skb_headlen(skb));
			retval = skb_copy_and_csum_datagram(skb, csum_start, buf + csum_start,
							    copy_bytes - csum_start, &csum);
			if (!retval) {
				__sum16 __user *csump;

				copy_bytes = csum_start;
				csump = (__sum16 __user *)(buf + csum_start + csum_offset);

				BUG_ON((char __user *)csump + sizeof(__sum16) >
				       buf + nbytes);
				put_user(csum_fold(csum), csump);
			}
		} else
			retval = skb_checksum_help(skb);
	}

	if (!retval) {
		struct iovec iov;

		iov.iov_base = buf;
		iov.iov_len = copy_bytes;
		retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
	}

	if (!retval)
		retval = tot_copy_bytes;

	kfree_skb(skb);
	return retval;

error:
	mutex_unlock(&dp->mutex);
	return retval;
}

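/* Rationale for the CHECKSUM_PARTIAL branch above: when the whole packet is
 * copied, the checksum is computed on the fly and the folded result patched
 * into the user buffer at csum_start + csum_offset; when the read truncates
 * the packet, that offset may never be copied, so the checksum is instead
 * completed inside the skb first via skb_checksum_help(). */
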
static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
	int dp_idx = iminor(file->f_dentry->d_inode);
	struct datapath *dp = get_dp_locked(dp_idx);
	unsigned int mask;

	if (dp) {
		mask = 0;
		poll_wait(file, &dp->waitqueue, wait);
		if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
			mask |= POLLIN | POLLRDNORM;
		mutex_unlock(&dp->mutex);
	} else {
		mask = POLLIN | POLLRDNORM | POLLHUP;
	}
	return mask;
}

static struct file_operations openvswitch_fops = {
	.read = openvswitch_read,
	.poll = openvswitch_poll,
	.unlocked_ioctl = openvswitch_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = openvswitch_compat_ioctl,
#endif
};

static int major;

static int __init dp_init(void)
{
	struct sk_buff *dummy_skb;
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

	printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

	err = flow_init();
	if (err)
		goto error;

	err = vport_init();
	if (err)
		goto error_flow_exit;

	err = register_netdevice_notifier(&dp_device_notifier);
	if (err)
		goto error_vport_exit;

	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	if (major < 0) {
		err = major;
		goto error_unreg_notifier;
	}

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&dp_device_notifier);
error_vport_exit:
	vport_exit();
error_flow_exit:
	flow_exit();
error:
	return err;
}

static void dp_cleanup(void)
{
	rcu_barrier();
	unregister_chrdev(major, "openvswitch");
	unregister_netdevice_notifier(&dp_device_notifier);
	vport_exit();
	flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");