2 * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
9 /* Functions for managing the dp interface/device. */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/init.h>
14 #include <linux/module.h>
16 #include <linux/if_arp.h>
17 #include <linux/if_vlan.h>
20 #include <linux/delay.h>
21 #include <linux/time.h>
22 #include <linux/etherdevice.h>
23 #include <linux/kernel.h>
24 #include <linux/kthread.h>
25 #include <linux/mutex.h>
26 #include <linux/percpu.h>
27 #include <linux/rcupdate.h>
28 #include <linux/tcp.h>
29 #include <linux/udp.h>
30 #include <linux/version.h>
31 #include <linux/ethtool.h>
32 #include <linux/wait.h>
33 #include <asm/system.h>
34 #include <asm/div64.h>
36 #include <linux/highmem.h>
37 #include <linux/netfilter_bridge.h>
38 #include <linux/netfilter_ipv4.h>
39 #include <linux/inetdevice.h>
40 #include <linux/list.h>
41 #include <linux/rculist.h>
42 #include <linux/dmi.h>
43 #include <net/inet_ecn.h>
44 #include <linux/compat.h>
46 #include "openvswitch/xflow.h"
50 #include "xflow-compat.h"
52 #include "vport-internal_dev.h"
57 int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
58 EXPORT_SYMBOL(dp_ioctl_hook);
60 /* Datapaths. Protected on the read side by rcu_read_lock, on the write side
63 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL lock first.
66 * It is safe to access the datapath and dp_port structures with just dp_mutex.
69 static struct datapath *dps[XFLOW_MAX];
70 static DEFINE_MUTEX(dp_mutex);
72 /* We limit the number of times that we pass into dp_process_received_packet()
73 * to avoid blowing out the stack in the event that we have a loop. */
75 int count; /* Count. */
76 bool looping; /* Loop detected? */
79 #define DP_MAX_LOOPS 5
81 /* We use a separate counter for each CPU for both interrupt and non-interrupt
82 * context in order to keep the limit deterministic for a given packet. */
83 struct percpu_loop_counters {
84 struct loop_counter counters[2];
87 static DEFINE_PER_CPU(struct percpu_loop_counters, dp_loop_counters);
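/* Index 0 of 'counters' is used in process context and index 1 in interrupt
 * context (selected with !!in_interrupt() in dp_process_received_packet()),
 * so packets handled in interrupt context cannot consume the loop budget of
 * packets handled in process context on the same CPU. */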
89 static int new_dp_port(struct datapath *, struct xflow_port *, int port_no);
91 /* Must be called with rcu_read_lock or dp_mutex. */
92 struct datapath *get_dp(int dp_idx)
94 if (dp_idx < 0 || dp_idx >= XFLOW_MAX)
96 return rcu_dereference(dps[dp_idx]);
98 EXPORT_SYMBOL_GPL(get_dp);
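/* Illustrative sketch only (not part of the original datapath): the lock
 * ordering required by the locking comment above when both the RTNL lock and
 * dp_mutex are needed.  The function name is hypothetical. */
static void example_with_rtnl_and_dp_mutex(int dp_idx)
{
	struct datapath *dp;

	rtnl_lock();			/* outer lock, always taken first */
	mutex_lock(&dp_mutex);		/* nests inside the RTNL lock */
	dp = get_dp(dp_idx);		/* safe: dp_mutex is held */
	if (dp) {
		/* ... operate on 'dp' while both locks are held ... */
	}
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
}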
100 static struct datapath *get_dp_locked(int dp_idx)
104 mutex_lock(&dp_mutex);
107 mutex_lock(&dp->mutex);
108 mutex_unlock(&dp_mutex);
112 /* Must be called with rcu_read_lock or RTNL lock. */
113 const char *dp_name(const struct datapath *dp)
115 return vport_get_name(dp->ports[XFLOWP_LOCAL]->vport);
118 static inline size_t br_nlmsg_size(void)
120 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
121 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
122 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
123 + nla_total_size(4) /* IFLA_MASTER */
124 + nla_total_size(4) /* IFLA_MTU */
125 + nla_total_size(4) /* IFLA_LINK */
126 + nla_total_size(1); /* IFLA_OPERSTATE */
129 static int dp_fill_ifinfo(struct sk_buff *skb,
130 const struct dp_port *port,
131 int event, unsigned int flags)
133 const struct datapath *dp = port->dp;
134 int ifindex = vport_get_ifindex(port->vport);
135 int iflink = vport_get_iflink(port->vport);
136 struct ifinfomsg *hdr;
137 struct nlmsghdr *nlh;
145 nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
149 hdr = nlmsg_data(nlh);
150 hdr->ifi_family = AF_BRIDGE;
152 hdr->ifi_type = ARPHRD_ETHER;
153 hdr->ifi_index = ifindex;
154 hdr->ifi_flags = vport_get_flags(port->vport);
157 NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port->vport));
158 NLA_PUT_U32(skb, IFLA_MASTER, vport_get_ifindex(dp->ports[XFLOWP_LOCAL]->vport));
159 NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port->vport));
160 #ifdef IFLA_OPERSTATE
161 NLA_PUT_U8(skb, IFLA_OPERSTATE,
162 vport_is_running(port->vport)
163 ? vport_get_operstate(port->vport)
167 NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN,
168 vport_get_addr(port->vport));
170 if (ifindex != iflink)
171 NLA_PUT_U32(skb, IFLA_LINK, iflink);
173 return nlmsg_end(skb, nlh);
176 nlmsg_cancel(skb, nlh);
180 static void dp_ifinfo_notify(int event, struct dp_port *port)
185 skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
189 err = dp_fill_ifinfo(skb, port, event, 0);
191 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
192 WARN_ON(err == -EMSGSIZE);
196 rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
200 rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
203 static void release_dp(struct kobject *kobj)
205 struct datapath *dp = container_of(kobj, struct datapath, ifobj);
209 static struct kobj_type dp_ktype = {
210 .release = release_dp
213 static int create_dp(int dp_idx, const char __user *devnamep)
215 struct xflow_port internal_dev_port;
216 char devname[IFNAMSIZ];
222 int retval = strncpy_from_user(devname, devnamep, IFNAMSIZ);
226 } else if (retval >= IFNAMSIZ) {
231 snprintf(devname, sizeof devname, "of%d", dp_idx);
235 mutex_lock(&dp_mutex);
237 if (!try_module_get(THIS_MODULE))
240 /* Exit early if a datapath with that number already exists.
241 * (We don't use -EEXIST because that's ambiguous with 'devname'
242 * conflicting with an existing network device name.) */
248 dp = kzalloc(sizeof *dp, GFP_KERNEL);
251 INIT_LIST_HEAD(&dp->port_list);
252 mutex_init(&dp->mutex);
254 for (i = 0; i < DP_N_QUEUES; i++)
255 skb_queue_head_init(&dp->queues[i]);
256 init_waitqueue_head(&dp->waitqueue);
258 /* Initialize kobject for bridge. This will be added as
259 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
260 dp->ifobj.kset = NULL;
261 kobject_init(&dp->ifobj, &dp_ktype);
263 /* Allocate table. */
265 rcu_assign_pointer(dp->table, tbl_create(0));
269 /* Set up our datapath device. */
270 BUILD_BUG_ON(sizeof(internal_dev_port.devname) != sizeof(devname));
271 strcpy(internal_dev_port.devname, devname);
272 internal_dev_port.flags = XFLOW_PORT_INTERNAL;
273 err = new_dp_port(dp, &internal_dev_port, XFLOWP_LOCAL);
278 goto err_destroy_table;
282 dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
283 if (!dp->stats_percpu)
284 goto err_destroy_local_port;
286 rcu_assign_pointer(dps[dp_idx], dp);
287 mutex_unlock(&dp_mutex);
294 err_destroy_local_port:
295 dp_detach_port(dp->ports[XFLOWP_LOCAL], 1);
297 tbl_destroy(dp->table, NULL);
301 module_put(THIS_MODULE);
303 mutex_unlock(&dp_mutex);
309 static void do_destroy_dp(struct datapath *dp)
311 struct dp_port *p, *n;
314 list_for_each_entry_safe (p, n, &dp->port_list, node)
315 if (p->port_no != XFLOWP_LOCAL)
316 dp_detach_port(p, 1);
320 rcu_assign_pointer(dps[dp->dp_idx], NULL);
322 dp_detach_port(dp->ports[XFLOWP_LOCAL], 1);
324 tbl_destroy(dp->table, flow_free_tbl);
326 for (i = 0; i < DP_N_QUEUES; i++)
327 skb_queue_purge(&dp->queues[i]);
328 for (i = 0; i < DP_MAX_GROUPS; i++)
329 kfree(dp->groups[i]);
330 free_percpu(dp->stats_percpu);
331 kobject_put(&dp->ifobj);
332 module_put(THIS_MODULE);
335 static int destroy_dp(int dp_idx)
341 mutex_lock(&dp_mutex);
351 mutex_unlock(&dp_mutex);
356 static void release_dp_port(struct kobject *kobj)
358 struct dp_port *p = container_of(kobj, struct dp_port, kobj);
362 static struct kobj_type brport_ktype = {
364 .sysfs_ops = &brport_sysfs_ops,
366 .release = release_dp_port
369 /* Called with RTNL lock and dp_mutex. */
370 static int new_dp_port(struct datapath *dp, struct xflow_port *xflow_port, int port_no)
376 vport = vport_locate(xflow_port->devname);
380 if (xflow_port->flags & XFLOW_PORT_INTERNAL)
381 vport = vport_add(xflow_port->devname, "internal", NULL);
383 vport = vport_add(xflow_port->devname, "netdev", NULL);
388 return PTR_ERR(vport);
391 p = kzalloc(sizeof(*p), GFP_KERNEL);
395 p->port_no = port_no;
398 atomic_set(&p->sflow_pool, 0);
400 err = vport_attach(vport, p);
406 rcu_assign_pointer(dp->ports[port_no], p);
407 list_add_rcu(&p->node, &dp->port_list);
410 /* Initialize kobject for bridge. This will be added as
411 * /sys/class/net/<devname>/brport later, if sysfs is enabled. */
413 kobject_init(&p->kobj, &brport_ktype);
415 dp_ifinfo_notify(RTM_NEWLINK, p);
420 static int attach_port(int dp_idx, struct xflow_port __user *portp)
423 struct xflow_port port;
428 if (copy_from_user(&port, portp, sizeof port))
430 port.devname[IFNAMSIZ - 1] = '\0';
433 dp = get_dp_locked(dp_idx);
436 goto out_unlock_rtnl;
438 for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
439 if (!dp->ports[port_no])
445 err = new_dp_port(dp, &port, port_no);
449 set_internal_devs_mtu(dp);
450 dp_sysfs_add_if(dp->ports[port_no]);
452 err = put_user(port_no, &portp->port);
455 mutex_unlock(&dp->mutex);
462 int dp_detach_port(struct dp_port *p, int may_delete)
464 struct vport *vport = p->vport;
469 if (p->port_no != XFLOWP_LOCAL)
471 dp_ifinfo_notify(RTM_DELLINK, p);
473 /* First drop references to device. */
475 list_del_rcu(&p->node);
476 rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
478 err = vport_detach(vport);
482 /* Then wait until no one is still using it, and destroy it. */
486 const char *port_type = vport_get_type(vport);
488 if (!strcmp(port_type, "netdev") || !strcmp(port_type, "internal")) {
495 kobject_put(&p->kobj);
500 static int detach_port(int dp_idx, int port_no)
507 if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == XFLOWP_LOCAL)
511 dp = get_dp_locked(dp_idx);
514 goto out_unlock_rtnl;
516 p = dp->ports[port_no];
521 err = dp_detach_port(p, 1);
524 mutex_unlock(&dp->mutex);
531 static void suppress_loop(struct datapath *dp, struct sw_flow_actions *actions)
534 pr_warn("%s: flow looped %d times, dropping\n",
535 dp_name(dp), DP_MAX_LOOPS);
536 actions->n_actions = 0;
539 /* Must be called with rcu_read_lock. */
540 void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb)
542 struct datapath *dp = p->dp;
543 struct dp_stats_percpu *stats;
544 int stats_counter_off;
545 struct sw_flow_actions *acts;
546 struct loop_counter *loop;
549 OVS_CB(skb)->dp_port = p;
551 if (!OVS_CB(skb)->flow) {
552 struct xflow_key key;
553 struct tbl_node *flow_node;
556 /* Extract flow from 'skb' into 'key'. */
557 error = flow_extract(skb, p ? p->port_no : XFLOWP_NONE, &key, &is_frag);
558 if (unlikely(error)) {
563 if (is_frag && dp->drop_frags) {
565 stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
570 flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
571 flow_hash(&key), flow_cmp);
572 if (unlikely(!flow_node)) {
573 dp_output_control(dp, skb, _XFLOWL_MISS_NR, OVS_CB(skb)->tun_id);
574 stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
578 OVS_CB(skb)->flow = flow_cast(flow_node);
581 flow_used(OVS_CB(skb)->flow, skb);
583 acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
585 /* Check whether we've looped too much. */
586 loop = &get_cpu_var(dp_loop_counters).counters[!!in_interrupt()];
587 if (unlikely(++loop->count > DP_MAX_LOOPS))
588 loop->looping = true;
589 if (unlikely(loop->looping)) {
590 suppress_loop(dp, acts);
594 /* Execute actions. */
595 execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
596 acts->n_actions, GFP_ATOMIC);
597 stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
599 /* Check whether sub-actions looped too much. */
600 if (unlikely(loop->looping))
601 suppress_loop(dp, acts);
604 /* Decrement loop counter. */
606 loop->looping = false;
607 put_cpu_var(dp_loop_counters);
610 /* Update datapath statistics. */
612 stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
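/* 'stats_counter_off' was set above to the offset of either n_frags,
 * n_missed or n_hit; bump that u64 under the seqcount so readers in
 * get_dp_stats() can detect a torn update of the 64-bit counter. */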
614 write_seqcount_begin(&stats->seqlock);
615 (*(u64 *)((u8 *)stats + stats_counter_off))++;
616 write_seqcount_end(&stats->seqlock);
621 #if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
622 /* This code is based on skb_checksum_setup() from Xen's net/dev/core.c. We
623 * can't call this function directly because it isn't exported in all versions.
625 int vswitch_skb_checksum_setup(struct sk_buff *skb)
630 __u16 csum_start, csum_offset;
632 if (!skb->proto_csum_blank)
635 if (skb->protocol != htons(ETH_P_IP))
638 if (!pskb_may_pull(skb, skb_network_header(skb) + sizeof(struct iphdr) - skb->data))
642 th = skb_network_header(skb) + 4 * iph->ihl;
644 csum_start = th - skb->head;
645 switch (iph->protocol) {
647 csum_offset = offsetof(struct tcphdr, check);
650 csum_offset = offsetof(struct udphdr, check);
654 pr_err("Attempting to checksum a non-TCP/UDP packet, "
655 "dropping a protocol %d packet",
660 if (!pskb_may_pull(skb, th + csum_offset + 2 - skb->data))
663 skb->ip_summed = CHECKSUM_PARTIAL;
664 skb->proto_csum_blank = 0;
666 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
667 skb->csum_start = csum_start;
668 skb->csum_offset = csum_offset;
670 skb_set_transport_header(skb, csum_start - skb_headroom(skb));
671 skb->csum = csum_offset;
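/* On kernels before 2.6.22 there are no csum_start/csum_offset fields; the
 * checksum start is conveyed via the transport header offset and the offset
 * within that header via skb->csum, as set above. */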
679 #endif /* CONFIG_XEN && HAVE_PROTO_DATA_VALID */
681 /* Types of checksums that we can receive (these all refer to L4 checksums):
682 * 1. CHECKSUM_NONE: Device that did not compute checksum, contains full
683 * (though not verified) checksum in packet but not in skb->csum. Packets
684 * from the bridge local port will also have this type.
685 * 2. CHECKSUM_COMPLETE (CHECKSUM_HW): Good device that computes checksums,
686 * also the GRE module. This is the same as CHECKSUM_NONE, except it has
687 * a valid skb->csum. Importantly, both contain a full checksum (not
688 * verified) in the packet itself. The only difference is that if the
689 * packet gets to L4 processing on this machine (not in DomU) we won't
690 * have to recompute the checksum to verify. Most hardware devices do not
691 * produce packets with this type, even if they support receive checksum
692 * offloading (they produce type #5).
693 * 3. CHECKSUM_PARTIAL (CHECKSUM_HW): Packet without a full checksum; one needs to
694 * be computed if it is sent off box. Unfortunately on earlier kernels,
695 * this case is impossible to distinguish from #2, despite having opposite
696 * meanings. Xen adds an extra field on earlier kernels (see #4) in order
697 * to distinguish the different states.
698 * 4. CHECKSUM_UNNECESSARY (with proto_csum_blank true): This packet was
699 * generated locally by a Xen DomU and has a partial checksum. If it is
700 * handled on this machine (Dom0 or DomU), then the checksum will not be
701 * computed. If it goes off box, the checksum in the packet needs to be
702 * completed. Calling skb_checksum_setup converts this to CHECKSUM_HW
703 * (CHECKSUM_PARTIAL) so that the checksum can be completed. In later
704 * kernels, this combination is replaced with CHECKSUM_PARTIAL.
705 * 5. CHECKSUM_UNNECESSARY (with proto_csum_blank false): Packet with a correct
706 * full checksum or using a protocol without a checksum. skb->csum is
707 * undefined. This is common from devices with receive checksum
708 * offloading. This is somewhat similar to CHECKSUM_NONE, except that
709 * nobody will try to verify the checksum with CHECKSUM_UNNECESSARY.
711 * Note that on earlier kernels, CHECKSUM_COMPLETE and CHECKSUM_PARTIAL are
712 * both defined as CHECKSUM_HW. Normally the meaning of CHECKSUM_HW is clear
713 * based on whether it is on the transmit or receive path. After the datapath
714 * it will be interpreted as CHECKSUM_PARTIAL. If the packet already has a
715 * checksum, we will panic. Since we can receive packets with checksums, we
716 * assume that all CHECKSUM_HW packets have checksums and map them to
717 * CHECKSUM_NONE, which has a similar meaning (it is only different if the
718 * packet is processed by the local IP stack, in which case it will need to
719 * be reverified). If we receive a packet with CHECKSUM_HW that really means
720 * CHECKSUM_PARTIAL, it will be sent with the wrong checksum. However, there
721 * shouldn't be any devices that do this with bridging. */
722 void compute_ip_summed(struct sk_buff *skb, bool xmit)
724 /* For our convenience these defines change repeatedly between kernel
725 * versions, so we can't just copy them over... */
726 switch (skb->ip_summed) {
728 OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
730 case CHECKSUM_UNNECESSARY:
731 OVS_CB(skb)->ip_summed = OVS_CSUM_UNNECESSARY;
734 /* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE.
735 * However, on the receive side we should only get CHECKSUM_PARTIAL
736 * packets from Xen, which uses some special fields to represent this
737 * (see below). Since we can only make one type work, pick the one
738 * that actually happens in practice.
740 * On the transmit side (basically after skb_checksum_setup()
741 * has been run or on internal dev transmit), packets with
742 * CHECKSUM_COMPLETE aren't generated, so assume CHECKSUM_PARTIAL. */
745 OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
747 OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
751 case CHECKSUM_COMPLETE:
752 OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
754 case CHECKSUM_PARTIAL:
755 OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
759 pr_err("unknown checksum type %d\n", skb->ip_summed);
760 /* None seems the safest... */
761 OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
764 #if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
765 /* Xen has a special way of representing CHECKSUM_PARTIAL on older
766 * kernels. It should not be set on the transmit path though. */
767 if (skb->proto_csum_blank)
768 OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
770 WARN_ON_ONCE(skb->proto_csum_blank && xmit);
774 /* This function closely resembles skb_forward_csum() used by the bridge. It
775 * is slightly different because we are only concerned with bridging and not
776 * other types of forwarding and can get away with slightly more optimal behavior. */
778 void forward_ip_summed(struct sk_buff *skb)
781 if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
782 skb->ip_summed = CHECKSUM_NONE;
786 /* Append each packet in 'skb' list to 'queue'. There will be only one packet
787 * unless we broke up a GSO packet. */
788 static int queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
789 int queue_no, u32 arg)
791 struct sk_buff *nskb;
795 if (OVS_CB(skb)->dp_port)
796 port_no = OVS_CB(skb)->dp_port->port_no;
798 port_no = XFLOWP_LOCAL;
801 struct xflow_msg *header;
806 err = skb_cow(skb, sizeof *header);
810 header = (struct xflow_msg*)__skb_push(skb, sizeof *header);
811 header->type = queue_no;
812 header->length = skb->len;
813 header->port = port_no;
814 header->reserved = 0;
816 skb_queue_tail(queue, skb);
824 while ((skb = nskb) != NULL) {
831 int dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
834 struct dp_stats_percpu *stats;
835 struct sk_buff_head *queue;
838 WARN_ON_ONCE(skb_shared(skb));
839 BUG_ON(queue_no != _XFLOWL_MISS_NR && queue_no != _XFLOWL_ACTION_NR && queue_no != _XFLOWL_SFLOW_NR);
840 queue = &dp->queues[queue_no];
842 if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
845 forward_ip_summed(skb);
847 err = vswitch_skb_checksum_setup(skb);
851 /* Break apart GSO packets into their component pieces. Otherwise
852 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
853 if (skb_is_gso(skb)) {
854 struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
858 if (unlikely(IS_ERR(skb))) {
863 /* XXX This case might not be possible. It's hard to
864 * tell from the skb_gso_segment() code and comment. */
868 err = queue_control_packets(skb, queue, queue_no, arg);
869 wake_up_interruptible(&dp->waitqueue);
876 stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
878 write_seqcount_begin(&stats->seqlock);
880 write_seqcount_end(&stats->seqlock);
887 static int flush_flows(struct datapath *dp)
889 struct tbl *old_table = rcu_dereference(dp->table);
890 struct tbl *new_table;
892 new_table = tbl_create(0);
896 rcu_assign_pointer(dp->table, new_table);
898 tbl_deferred_destroy(old_table, flow_free_tbl);
903 static int validate_actions(const struct sw_flow_actions *actions)
907 for (i = 0; i < actions->n_actions; i++) {
908 const union xflow_action *a = &actions->actions[i];
913 if (a->output.port >= DP_MAX_PORTS)
917 case XFLOWAT_OUTPUT_GROUP:
918 if (a->output_group.group >= DP_MAX_GROUPS)
922 case XFLOWAT_SET_DL_TCI:
923 mask = a->dl_tci.mask;
924 if (mask != htons(VLAN_VID_MASK) &&
925 mask != htons(VLAN_PCP_MASK) &&
926 mask != htons(VLAN_VID_MASK | VLAN_PCP_MASK))
928 if (a->dl_tci.tci & ~mask)
932 case XFLOWAT_SET_NW_TOS:
933 if (a->nw_tos.nw_tos & INET_ECN_MASK)
938 if (a->type >= XFLOWAT_N_ACTIONS)
947 static struct sw_flow_actions *get_actions(const struct xflow_flow *flow)
949 struct sw_flow_actions *actions;
952 actions = flow_actions_alloc(flow->n_actions);
953 error = PTR_ERR(actions);
958 if (copy_from_user(actions->actions, flow->actions,
959 flow->n_actions * sizeof(union xflow_action)))
960 goto error_free_actions;
961 error = validate_actions(actions);
963 goto error_free_actions;
970 return ERR_PTR(error);
973 static struct timespec get_time_offset(void)
975 struct timespec now_mono, now_jiffies;
977 ktime_get_ts(&now_mono);
978 jiffies_to_timespec(jiffies, &now_jiffies);
979 return timespec_sub(now_mono, now_jiffies);
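/* The offset computed above is added to a flow's jiffies-based 'used' time in
 * get_stats() so that the time reported in xflow_flow_stats is expressed on
 * the monotonic clock rather than in raw jiffies. */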
982 static void get_stats(struct sw_flow *flow, struct xflow_flow_stats *stats,
983 struct timespec time_offset)
986 struct timespec flow_ts, used;
988 jiffies_to_timespec(flow->used, &flow_ts);
989 set_normalized_timespec(&used, flow_ts.tv_sec + time_offset.tv_sec,
990 flow_ts.tv_nsec + time_offset.tv_nsec);
992 stats->used_sec = used.tv_sec;
993 stats->used_nsec = used.tv_nsec;
996 stats->used_nsec = 0;
999 stats->n_packets = flow->packet_count;
1000 stats->n_bytes = flow->byte_count;
1001 stats->reserved = 0;
1002 stats->tcp_flags = flow->tcp_flags;
1006 static void clear_stats(struct sw_flow *flow)
1009 flow->tcp_flags = 0;
1010 flow->packet_count = 0;
1011 flow->byte_count = 0;
1014 static int expand_table(struct datapath *dp)
1016 struct tbl *old_table = rcu_dereference(dp->table);
1017 struct tbl *new_table;
1019 new_table = tbl_expand(old_table);
1020 if (IS_ERR(new_table))
1021 return PTR_ERR(new_table);
1023 rcu_assign_pointer(dp->table, new_table);
1024 tbl_deferred_destroy(old_table, NULL);
1029 static int do_put_flow(struct datapath *dp, struct xflow_flow_put *uf,
1030 struct xflow_flow_stats *stats)
1032 struct tbl_node *flow_node;
1033 struct sw_flow *flow;
1037 table = rcu_dereference(dp->table);
1038 flow_node = tbl_lookup(table, &uf->flow.key, flow_hash(&uf->flow.key), flow_cmp);
1041 struct sw_flow_actions *acts;
1044 if (!(uf->flags & XFLOWPF_CREATE))
1047 /* Expand table, if necessary, to make room. */
1048 if (tbl_count(table) >= tbl_n_buckets(table)) {
1049 error = expand_table(dp);
1052 table = rcu_dereference(dp->table);
1055 /* Allocate flow. */
1056 flow = flow_alloc();
1058 error = PTR_ERR(flow);
1061 flow->key = uf->flow.key;
1064 /* Obtain actions. */
1065 acts = get_actions(&uf->flow);
1066 error = PTR_ERR(acts);
1068 goto error_free_flow;
1069 rcu_assign_pointer(flow->sf_acts, acts);
1071 /* Put flow in bucket. */
1072 error = tbl_insert(table, &flow->tbl_node, flow_hash(&flow->key));
1074 goto error_free_flow_acts;
1076 memset(stats, 0, sizeof(struct xflow_flow_stats));
1078 /* We found a matching flow. */
1079 struct sw_flow_actions *old_acts, *new_acts;
1081 flow = flow_cast(flow_node);
1083 /* Bail out if we're not allowed to modify an existing flow. */
1085 if (!(uf->flags & XFLOWPF_MODIFY))
1089 new_acts = get_actions(&uf->flow);
1090 error = PTR_ERR(new_acts);
1091 if (IS_ERR(new_acts))
1093 old_acts = rcu_dereference(flow->sf_acts);
1094 if (old_acts->n_actions != new_acts->n_actions ||
1095 memcmp(old_acts->actions, new_acts->actions,
1096 sizeof(union xflow_action) * old_acts->n_actions)) {
1097 rcu_assign_pointer(flow->sf_acts, new_acts);
1098 flow_deferred_free_acts(old_acts);
1103 /* Fetch stats, then clear them if necessary. */
1104 spin_lock_bh(&flow->lock);
1105 get_stats(flow, stats, get_time_offset());
1106 if (uf->flags & XFLOWPF_ZERO_STATS)
1108 spin_unlock_bh(&flow->lock);
1113 error_free_flow_acts:
1114 kfree(flow->sf_acts);
1116 flow->sf_acts = NULL;
1122 static int put_flow(struct datapath *dp, struct xflow_flow_put __user *ufp)
1124 struct xflow_flow_stats stats;
1125 struct xflow_flow_put uf;
1128 if (copy_from_user(&uf, ufp, sizeof(struct xflow_flow_put)))
1131 error = do_put_flow(dp, &uf, &stats);
1135 if (copy_to_user(&ufp->flow.stats, &stats,
1136 sizeof(struct xflow_flow_stats)))
1142 static int do_answer_query(struct sw_flow *flow, u32 query_flags,
1143 struct timespec time_offset,
1144 struct xflow_flow_stats __user *ustats,
1145 union xflow_action __user *actions,
1146 u32 __user *n_actionsp)
1148 struct sw_flow_actions *sf_acts;
1149 struct xflow_flow_stats stats;
1152 spin_lock_bh(&flow->lock);
1153 get_stats(flow, &stats, time_offset);
1154 if (query_flags & XFLOWFF_ZERO_TCP_FLAGS)
1155 flow->tcp_flags = 0;
1157 spin_unlock_bh(&flow->lock);
1159 if (copy_to_user(ustats, &stats, sizeof(struct xflow_flow_stats)) ||
1160 get_user(n_actions, n_actionsp))
1166 sf_acts = rcu_dereference(flow->sf_acts);
1167 if (put_user(sf_acts->n_actions, n_actionsp) ||
1168 (actions && copy_to_user(actions, sf_acts->actions,
1169 sizeof(union xflow_action) *
1170 min(sf_acts->n_actions, n_actions))))
1176 static int answer_query(struct sw_flow *flow, u32 query_flags,
1177 struct timespec time_offset,
1178 struct xflow_flow __user *ufp)
1180 union xflow_action *actions;
1182 if (get_user(actions, &ufp->actions))
1185 return do_answer_query(flow, query_flags, time_offset,
1186 &ufp->stats, actions, &ufp->n_actions);
1189 static struct sw_flow *do_del_flow(struct datapath *dp, struct xflow_key *key)
1191 struct tbl *table = rcu_dereference(dp->table);
1192 struct tbl_node *flow_node;
1195 flow_node = tbl_lookup(table, key, flow_hash(key), flow_cmp);
1197 return ERR_PTR(-ENOENT);
1199 error = tbl_remove(table, flow_node);
1201 return ERR_PTR(error);
1203 /* XXX Returned flow_node's statistics might lose a few packets, since
1204 * other CPUs can be using this flow. We used to synchronize_rcu() to
1205 * make sure that we get completely accurate stats, but that blows our
1206 * performance, badly. */
1207 return flow_cast(flow_node);
1210 static int del_flow(struct datapath *dp, struct xflow_flow __user *ufp)
1212 struct sw_flow *flow;
1213 struct xflow_flow uf;
1216 if (copy_from_user(&uf, ufp, sizeof uf))
1219 flow = do_del_flow(dp, &uf.key);
1221 return PTR_ERR(flow);
1223 error = answer_query(flow, 0, get_time_offset(), ufp);
1224 flow_deferred_free(flow);
1228 static int do_query_flows(struct datapath *dp, const struct xflow_flowvec *flowvec)
1230 struct tbl *table = rcu_dereference(dp->table);
1231 struct timespec time_offset;
1234 time_offset = get_time_offset();
1236 for (i = 0; i < flowvec->n_flows; i++) {
1237 struct xflow_flow __user *ufp = &flowvec->flows[i];
1238 struct xflow_flow uf;
1239 struct tbl_node *flow_node;
1242 if (copy_from_user(&uf, ufp, sizeof uf))
1245 flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
1247 error = put_user(ENOENT, &ufp->stats.error);
1249 error = answer_query(flow_cast(flow_node), uf.flags, time_offset, ufp);
1253 return flowvec->n_flows;
1256 struct list_flows_cbdata {
1257 struct xflow_flow __user *uflows;
1260 struct timespec time_offset;
1263 static int list_flow(struct tbl_node *node, void *cbdata_)
1265 struct sw_flow *flow = flow_cast(node);
1266 struct list_flows_cbdata *cbdata = cbdata_;
1267 struct xflow_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
1270 if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
1272 error = answer_query(flow, 0, cbdata->time_offset, ufp);
1276 if (cbdata->listed_flows >= cbdata->n_flows)
1277 return cbdata->listed_flows;
1281 static int do_list_flows(struct datapath *dp, const struct xflow_flowvec *flowvec)
1283 struct list_flows_cbdata cbdata;
1286 if (!flowvec->n_flows)
1289 cbdata.uflows = flowvec->flows;
1290 cbdata.n_flows = flowvec->n_flows;
1291 cbdata.listed_flows = 0;
1292 cbdata.time_offset = get_time_offset();
1294 error = tbl_foreach(rcu_dereference(dp->table), list_flow, &cbdata);
1295 return error ? error : cbdata.listed_flows;
1298 static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
1299 int (*function)(struct datapath *,
1300 const struct xflow_flowvec *))
1302 struct xflow_flowvec __user *uflowvec;
1303 struct xflow_flowvec flowvec;
1306 uflowvec = (struct xflow_flowvec __user *)argp;
1307 if (copy_from_user(&flowvec, uflowvec, sizeof flowvec))
1310 if (flowvec.n_flows > INT_MAX / sizeof(struct xflow_flow))
1313 retval = function(dp, &flowvec);
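/* Return convention below: a negative value is an error, handling exactly
 * n_flows means success, and a smaller count is written back to userspace so
 * the caller knows how many entries were filled in. */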
1314 return (retval < 0 ? retval
1315 : retval == flowvec.n_flows ? 0
1316 : put_user(retval, &uflowvec->n_flows));
1319 static int do_execute(struct datapath *dp, const struct xflow_execute *execute)
1321 struct xflow_key key;
1322 struct sk_buff *skb;
1323 struct sw_flow_actions *actions;
1329 if (execute->length < ETH_HLEN || execute->length > 65535)
1332 actions = flow_actions_alloc(execute->n_actions);
1333 if (IS_ERR(actions)) {
1334 err = PTR_ERR(actions);
1339 if (copy_from_user(actions->actions, execute->actions,
1340 execute->n_actions * sizeof *execute->actions))
1341 goto error_free_actions;
1343 err = validate_actions(actions);
1345 goto error_free_actions;
1348 skb = alloc_skb(execute->length, GFP_KERNEL);
1350 goto error_free_actions;
1352 if (execute->in_port < DP_MAX_PORTS)
1353 OVS_CB(skb)->dp_port = dp->ports[execute->in_port];
1355 OVS_CB(skb)->dp_port = NULL;
1358 if (copy_from_user(skb_put(skb, execute->length), execute->data,
1360 goto error_free_skb;
1362 skb_reset_mac_header(skb);
1365 /* Normally, setting the skb 'protocol' field would be handled by a
1366 * call to eth_type_trans(), but it assumes there's a sending
1367 * device, which we may not have. */
1368 if (ntohs(eth->h_proto) >= 1536)
1369 skb->protocol = eth->h_proto;
1371 skb->protocol = htons(ETH_P_802_2);
1373 err = flow_extract(skb, execute->in_port, &key, &is_frag);
1375 goto error_free_skb;
1378 err = execute_actions(dp, skb, &key, actions->actions,
1379 actions->n_actions, GFP_KERNEL);
1393 static int execute_packet(struct datapath *dp, const struct xflow_execute __user *executep)
1395 struct xflow_execute execute;
1397 if (copy_from_user(&execute, executep, sizeof execute))
1400 return do_execute(dp, &execute);
1403 static int get_dp_stats(struct datapath *dp, struct xflow_stats __user *statsp)
1405 struct tbl *table = rcu_dereference(dp->table);
1406 struct xflow_stats stats;
1409 stats.n_flows = tbl_count(table);
1410 stats.cur_capacity = tbl_n_buckets(table);
1411 stats.max_capacity = TBL_MAX_BUCKETS;
1412 stats.n_ports = dp->n_ports;
1413 stats.max_ports = DP_MAX_PORTS;
1414 stats.max_groups = DP_MAX_GROUPS;
1415 stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
1416 for_each_possible_cpu(i) {
1417 const struct dp_stats_percpu *percpu_stats;
1418 struct dp_stats_percpu local_stats;
1421 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
1424 seqcount = read_seqcount_begin(&percpu_stats->seqlock);
1425 local_stats = *percpu_stats;
1426 } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
1428 stats.n_frags += local_stats.n_frags;
1429 stats.n_hit += local_stats.n_hit;
1430 stats.n_missed += local_stats.n_missed;
1431 stats.n_lost += local_stats.n_lost;
1433 stats.max_miss_queue = DP_MAX_QUEUE_LEN;
1434 stats.max_action_queue = DP_MAX_QUEUE_LEN;
1435 return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
1438 /* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
1439 int dp_min_mtu(const struct datapath *dp)
1446 list_for_each_entry_rcu (p, &dp->port_list, node) {
1449 /* Skip any internal ports, since that's what we're trying to set. */
1451 if (is_internal_vport(p->vport))
1454 dev_mtu = vport_get_mtu(p->vport);
1455 if (!mtu || dev_mtu < mtu)
1459 return mtu ? mtu : ETH_DATA_LEN;
1462 /* Sets the MTU of all datapath devices to the minimum of the ports. Must
1463 * be called with RTNL lock. */
1464 void set_internal_devs_mtu(const struct datapath *dp)
1471 mtu = dp_min_mtu(dp);
1473 list_for_each_entry_rcu (p, &dp->port_list, node) {
1474 if (is_internal_vport(p->vport))
1475 vport_set_mtu(p->vport, mtu);
1479 static int put_port(const struct dp_port *p, struct xflow_port __user *uop)
1481 struct xflow_port op;
1483 memset(&op, 0, sizeof op);
1486 strncpy(op.devname, vport_get_name(p->vport), sizeof op.devname);
1489 op.port = p->port_no;
1490 op.flags = is_internal_vport(p->vport) ? XFLOW_PORT_INTERNAL : 0;
1492 return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
1495 static int query_port(struct datapath *dp, struct xflow_port __user *uport)
1497 struct xflow_port port;
1499 if (copy_from_user(&port, uport, sizeof port))
1502 if (port.devname[0]) {
1503 struct vport *vport;
1504 struct dp_port *dp_port;
1507 port.devname[IFNAMSIZ - 1] = '\0';
1512 vport = vport_locate(port.devname);
1518 dp_port = vport_get_dp_port(vport);
1519 if (!dp_port || dp_port->dp != dp) {
1524 port.port = dp_port->port_no;
1533 if (port.port >= DP_MAX_PORTS)
1535 if (!dp->ports[port.port])
1539 return put_port(dp->ports[port.port], uport);
1542 static int do_list_ports(struct datapath *dp, struct xflow_port __user *uports,
1549 list_for_each_entry_rcu (p, &dp->port_list, node) {
1550 if (put_port(p, &uports[idx]))
1552 if (idx++ >= n_ports)
1559 static int list_ports(struct datapath *dp, struct xflow_portvec __user *upv)
1561 struct xflow_portvec pv;
1564 if (copy_from_user(&pv, upv, sizeof pv))
1567 retval = do_list_ports(dp, pv.ports, pv.n_ports);
1571 return put_user(retval, &upv->n_ports);
1574 /* RCU callback for freeing a dp_port_group */
1575 static void free_port_group(struct rcu_head *rcu)
1577 struct dp_port_group *g = container_of(rcu, struct dp_port_group, rcu);
1581 static int do_set_port_group(struct datapath *dp, u16 __user *ports,
1582 int n_ports, int group)
1584 struct dp_port_group *new_group, *old_group;
1588 if (n_ports > DP_MAX_PORTS || group >= DP_MAX_GROUPS)
1592 new_group = kmalloc(sizeof *new_group + sizeof(u16) * n_ports, GFP_KERNEL);
1596 new_group->n_ports = n_ports;
1598 if (copy_from_user(new_group->ports, ports, sizeof(u16) * n_ports))
1601 old_group = rcu_dereference(dp->groups[group]);
1602 rcu_assign_pointer(dp->groups[group], new_group);
1604 call_rcu(&old_group->rcu, free_port_group);
1613 static int set_port_group(struct datapath *dp,
1614 const struct xflow_port_group __user *upg)
1616 struct xflow_port_group pg;
1618 if (copy_from_user(&pg, upg, sizeof pg))
1621 return do_set_port_group(dp, pg.ports, pg.n_ports, pg.group);
1624 static int do_get_port_group(struct datapath *dp,
1625 u16 __user *ports, int n_ports, int group,
1626 u16 __user *n_portsp)
1628 struct dp_port_group *g;
1631 if (group >= DP_MAX_GROUPS)
1634 g = dp->groups[group];
1635 n_copy = g ? min_t(int, g->n_ports, n_ports) : 0;
1636 if (n_copy && copy_to_user(ports, g->ports, n_copy * sizeof(u16)))
1639 if (put_user(g ? g->n_ports : 0, n_portsp))
1645 static int get_port_group(struct datapath *dp, struct xflow_port_group __user *upg)
1647 struct xflow_port_group pg;
1649 if (copy_from_user(&pg, upg, sizeof pg))
1652 return do_get_port_group(dp, pg.ports, pg.n_ports, pg.group, &upg->n_ports);
1655 static int get_listen_mask(const struct file *f)
1657 return (long)f->private_data;
1660 static void set_listen_mask(struct file *f, int listen_mask)
1662 f->private_data = (void*)(long)listen_mask;
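/* Both helpers above stash the listen mask directly in file->private_data as
 * an integer cast to a pointer, so no per-open allocation or locking is
 * needed to track which queues a given file descriptor listens to. */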
1665 static long openvswitch_ioctl(struct file *f, unsigned int cmd,
1668 int dp_idx = iminor(f->f_dentry->d_inode);
1669 struct datapath *dp;
1670 int drop_frags, listeners, port_no;
1671 unsigned int sflow_probability;
1674 /* Handle commands with special locking requirements up front. */
1676 case XFLOW_DP_CREATE:
1677 err = create_dp(dp_idx, (char __user *)argp);
1680 case XFLOW_DP_DESTROY:
1681 err = destroy_dp(dp_idx);
1684 case XFLOW_PORT_ATTACH:
1685 err = attach_port(dp_idx, (struct xflow_port __user *)argp);
1688 case XFLOW_PORT_DETACH:
1689 err = get_user(port_no, (int __user *)argp);
1691 err = detach_port(dp_idx, port_no);
1694 case XFLOW_VPORT_ADD:
1695 err = vport_user_add((struct xflow_vport_add __user *)argp);
1698 case XFLOW_VPORT_MOD:
1699 err = vport_user_mod((struct xflow_vport_mod __user *)argp);
1702 case XFLOW_VPORT_DEL:
1703 err = vport_user_del((char __user *)argp);
1706 case XFLOW_VPORT_STATS_GET:
1707 err = vport_user_stats_get((struct xflow_vport_stats_req __user *)argp);
1710 case XFLOW_VPORT_STATS_SET:
1711 err = vport_user_stats_set((struct xflow_vport_stats_req __user *)argp);
1714 case XFLOW_VPORT_ETHER_GET:
1715 err = vport_user_ether_get((struct xflow_vport_ether __user *)argp);
1718 case XFLOW_VPORT_ETHER_SET:
1719 err = vport_user_ether_set((struct xflow_vport_ether __user *)argp);
1722 case XFLOW_VPORT_MTU_GET:
1723 err = vport_user_mtu_get((struct xflow_vport_mtu __user *)argp);
1726 case XFLOW_VPORT_MTU_SET:
1727 err = vport_user_mtu_set((struct xflow_vport_mtu __user *)argp);
1731 dp = get_dp_locked(dp_idx);
1737 case XFLOW_DP_STATS:
1738 err = get_dp_stats(dp, (struct xflow_stats __user *)argp);
1741 case XFLOW_GET_DROP_FRAGS:
1742 err = put_user(dp->drop_frags, (int __user *)argp);
1745 case XFLOW_SET_DROP_FRAGS:
1746 err = get_user(drop_frags, (int __user *)argp);
1750 if (drop_frags != 0 && drop_frags != 1)
1752 dp->drop_frags = drop_frags;
1756 case XFLOW_GET_LISTEN_MASK:
1757 err = put_user(get_listen_mask(f), (int __user *)argp);
1760 case XFLOW_SET_LISTEN_MASK:
1761 err = get_user(listeners, (int __user *)argp);
1765 if (listeners & ~XFLOWL_ALL)
1768 set_listen_mask(f, listeners);
1771 case XFLOW_GET_SFLOW_PROBABILITY:
1772 err = put_user(dp->sflow_probability, (unsigned int __user *)argp);
1775 case XFLOW_SET_SFLOW_PROBABILITY:
1776 err = get_user(sflow_probability, (unsigned int __user *)argp);
1778 dp->sflow_probability = sflow_probability;
1781 case XFLOW_PORT_QUERY:
1782 err = query_port(dp, (struct xflow_port __user *)argp);
1785 case XFLOW_PORT_LIST:
1786 err = list_ports(dp, (struct xflow_portvec __user *)argp);
1789 case XFLOW_PORT_GROUP_SET:
1790 err = set_port_group(dp, (struct xflow_port_group __user *)argp);
1793 case XFLOW_PORT_GROUP_GET:
1794 err = get_port_group(dp, (struct xflow_port_group __user *)argp);
1797 case XFLOW_FLOW_FLUSH:
1798 err = flush_flows(dp);
1801 case XFLOW_FLOW_PUT:
1802 err = put_flow(dp, (struct xflow_flow_put __user *)argp);
1805 case XFLOW_FLOW_DEL:
1806 err = del_flow(dp, (struct xflow_flow __user *)argp);
1809 case XFLOW_FLOW_GET:
1810 err = do_flowvec_ioctl(dp, argp, do_query_flows);
1813 case XFLOW_FLOW_LIST:
1814 err = do_flowvec_ioctl(dp, argp, do_list_flows);
1818 err = execute_packet(dp, (struct xflow_execute __user *)argp);
1825 mutex_unlock(&dp->mutex);
1830 static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
1833 for (i = 0; i < DP_N_QUEUES; i++) {
1834 if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
1840 #ifdef CONFIG_COMPAT
1841 static int compat_list_ports(struct datapath *dp, struct compat_xflow_portvec __user *upv)
1843 struct compat_xflow_portvec pv;
1846 if (copy_from_user(&pv, upv, sizeof pv))
1849 retval = do_list_ports(dp, compat_ptr(pv.ports), pv.n_ports);
1853 return put_user(retval, &upv->n_ports);
1856 static int compat_set_port_group(struct datapath *dp, const struct compat_xflow_port_group __user *upg)
1858 struct compat_xflow_port_group pg;
1860 if (copy_from_user(&pg, upg, sizeof pg))
1863 return do_set_port_group(dp, compat_ptr(pg.ports), pg.n_ports, pg.group);
1866 static int compat_get_port_group(struct datapath *dp, struct compat_xflow_port_group __user *upg)
1868 struct compat_xflow_port_group pg;
1870 if (copy_from_user(&pg, upg, sizeof pg))
1873 return do_get_port_group(dp, compat_ptr(pg.ports), pg.n_ports,
1874 pg.group, &upg->n_ports);
1877 static int compat_get_flow(struct xflow_flow *flow, const struct compat_xflow_flow __user *compat)
1879 compat_uptr_t actions;
1881 if (!access_ok(VERIFY_READ, compat, sizeof(struct compat_xflow_flow)) ||
1882 __copy_from_user(&flow->stats, &compat->stats, sizeof(struct xflow_flow_stats)) ||
1883 __copy_from_user(&flow->key, &compat->key, sizeof(struct xflow_key)) ||
1884 __get_user(actions, &compat->actions) ||
1885 __get_user(flow->n_actions, &compat->n_actions) ||
1886 __get_user(flow->flags, &compat->flags))
1889 flow->actions = compat_ptr(actions);
1893 static int compat_put_flow(struct datapath *dp, struct compat_xflow_flow_put __user *ufp)
1895 struct xflow_flow_stats stats;
1896 struct xflow_flow_put fp;
1899 if (compat_get_flow(&fp.flow, &ufp->flow) ||
1900 get_user(fp.flags, &ufp->flags))
1903 error = do_put_flow(dp, &fp, &stats);
1907 if (copy_to_user(&ufp->flow.stats, &stats,
1908 sizeof(struct xflow_flow_stats)))
1914 static int compat_answer_query(struct sw_flow *flow, u32 query_flags,
1915 struct timespec time_offset,
1916 struct compat_xflow_flow __user *ufp)
1918 compat_uptr_t actions;
1920 if (get_user(actions, &ufp->actions))
1923 return do_answer_query(flow, query_flags, time_offset, &ufp->stats,
1924 compat_ptr(actions), &ufp->n_actions);
1927 static int compat_del_flow(struct datapath *dp, struct compat_xflow_flow __user *ufp)
1929 struct sw_flow *flow;
1930 struct xflow_flow uf;
1933 if (compat_get_flow(&uf, ufp))
1936 flow = do_del_flow(dp, &uf.key);
1938 return PTR_ERR(flow);
1940 error = compat_answer_query(flow, 0, get_time_offset(), ufp);
1941 flow_deferred_free(flow);
1945 static int compat_query_flows(struct datapath *dp, struct compat_xflow_flow *flows, u32 n_flows)
1947 struct tbl *table = rcu_dereference(dp->table);
1948 struct timespec time_offset;
1951 time_offset = get_time_offset();
1953 for (i = 0; i < n_flows; i++) {
1954 struct compat_xflow_flow __user *ufp = &flows[i];
1955 struct xflow_flow uf;
1956 struct tbl_node *flow_node;
1959 if (compat_get_flow(&uf, ufp))
1962 flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
1964 error = put_user(ENOENT, &ufp->stats.error);
1966 error = compat_answer_query(flow_cast(flow_node), uf.flags, time_offset, ufp);
1973 struct compat_list_flows_cbdata {
1974 struct compat_xflow_flow __user *uflows;
1977 struct timespec time_offset;
1980 static int compat_list_flow(struct tbl_node *node, void *cbdata_)
1982 struct sw_flow *flow = flow_cast(node);
1983 struct compat_list_flows_cbdata *cbdata = cbdata_;
1984 struct compat_xflow_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
1987 if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
1989 error = compat_answer_query(flow, 0, cbdata->time_offset, ufp);
1993 if (cbdata->listed_flows >= cbdata->n_flows)
1994 return cbdata->listed_flows;
1998 static int compat_list_flows(struct datapath *dp, struct compat_xflow_flow *flows, u32 n_flows)
2000 struct compat_list_flows_cbdata cbdata;
2006 cbdata.uflows = flows;
2007 cbdata.n_flows = n_flows;
2008 cbdata.listed_flows = 0;
2009 cbdata.time_offset = get_time_offset();
2011 error = tbl_foreach(rcu_dereference(dp->table), compat_list_flow, &cbdata);
2012 return error ? error : cbdata.listed_flows;
2015 static int compat_flowvec_ioctl(struct datapath *dp, unsigned long argp,
2016 int (*function)(struct datapath *,
2017 struct compat_xflow_flow *,
2020 struct compat_xflow_flowvec __user *uflowvec;
2021 struct compat_xflow_flow __user *flows;
2022 struct compat_xflow_flowvec flowvec;
2025 uflowvec = compat_ptr(argp);
2026 if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
2027 copy_from_user(&flowvec, uflowvec, sizeof flowvec))
2030 if (flowvec.n_flows > INT_MAX / sizeof(struct compat_xflow_flow))
2033 flows = compat_ptr(flowvec.flows);
2034 if (!access_ok(VERIFY_WRITE, flows,
2035 flowvec.n_flows * sizeof(struct compat_xflow_flow)))
2038 retval = function(dp, flows, flowvec.n_flows);
2039 return (retval < 0 ? retval
2040 : retval == flowvec.n_flows ? 0
2041 : put_user(retval, &uflowvec->n_flows));
2044 static int compat_execute(struct datapath *dp, const struct compat_xflow_execute __user *uexecute)
2046 struct xflow_execute execute;
2047 compat_uptr_t actions;
2050 if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_xflow_execute)) ||
2051 __get_user(execute.in_port, &uexecute->in_port) ||
2052 __get_user(actions, &uexecute->actions) ||
2053 __get_user(execute.n_actions, &uexecute->n_actions) ||
2054 __get_user(data, &uexecute->data) ||
2055 __get_user(execute.length, &uexecute->length))
2058 execute.actions = compat_ptr(actions);
2059 execute.data = compat_ptr(data);
2061 return do_execute(dp, &execute);
2064 static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
2066 int dp_idx = iminor(f->f_dentry->d_inode);
2067 struct datapath *dp;
2071 case XFLOW_DP_DESTROY:
2072 case XFLOW_FLOW_FLUSH:
2073 /* Ioctls that don't need any translation at all. */
2074 return openvswitch_ioctl(f, cmd, argp);
2076 case XFLOW_DP_CREATE:
2077 case XFLOW_PORT_ATTACH:
2078 case XFLOW_PORT_DETACH:
2079 case XFLOW_VPORT_DEL:
2080 case XFLOW_VPORT_MTU_SET:
2081 case XFLOW_VPORT_MTU_GET:
2082 case XFLOW_VPORT_ETHER_SET:
2083 case XFLOW_VPORT_ETHER_GET:
2084 case XFLOW_VPORT_STATS_SET:
2085 case XFLOW_VPORT_STATS_GET:
2086 case XFLOW_DP_STATS:
2087 case XFLOW_GET_DROP_FRAGS:
2088 case XFLOW_SET_DROP_FRAGS:
2089 case XFLOW_SET_LISTEN_MASK:
2090 case XFLOW_GET_LISTEN_MASK:
2091 case XFLOW_SET_SFLOW_PROBABILITY:
2092 case XFLOW_GET_SFLOW_PROBABILITY:
2093 case XFLOW_PORT_QUERY:
2094 /* Ioctls that just need their pointer argument extended. */
2095 return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
2097 case XFLOW_VPORT_ADD32:
2098 return compat_vport_user_add(compat_ptr(argp));
2100 case XFLOW_VPORT_MOD32:
2101 return compat_vport_user_mod(compat_ptr(argp));
2104 dp = get_dp_locked(dp_idx);
2110 case XFLOW_PORT_LIST32:
2111 err = compat_list_ports(dp, compat_ptr(argp));
2114 case XFLOW_PORT_GROUP_SET32:
2115 err = compat_set_port_group(dp, compat_ptr(argp));
2118 case XFLOW_PORT_GROUP_GET32:
2119 err = compat_get_port_group(dp, compat_ptr(argp));
2122 case XFLOW_FLOW_PUT32:
2123 err = compat_put_flow(dp, compat_ptr(argp));
2126 case XFLOW_FLOW_DEL32:
2127 err = compat_del_flow(dp, compat_ptr(argp));
2130 case XFLOW_FLOW_GET32:
2131 err = compat_flowvec_ioctl(dp, argp, compat_query_flows);
2134 case XFLOW_FLOW_LIST32:
2135 err = compat_flowvec_ioctl(dp, argp, compat_list_flows);
2138 case XFLOW_EXECUTE32:
2139 err = compat_execute(dp, compat_ptr(argp));
2146 mutex_unlock(&dp->mutex);
2152 /* Unfortunately this function is not exported so this is a verbatim copy
2153 * from net/core/datagram.c in 2.6.30. */
2154 static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
2155 u8 __user *to, int len,
2158 int start = skb_headlen(skb);
2160 int i, copy = start - offset;
2167 *csump = csum_and_copy_to_user(skb->data + offset, to, copy,
2171 if ((len -= copy) == 0)
2178 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2181 WARN_ON(start > offset + len);
2183 end = start + skb_shinfo(skb)->frags[i].size;
2184 if ((copy = end - offset) > 0) {
2188 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2189 struct page *page = frag->page;
2194 csum2 = csum_and_copy_to_user(vaddr +
2201 *csump = csum_block_add(*csump, csum2, pos);
2211 if (skb_shinfo(skb)->frag_list) {
2212 struct sk_buff *list = skb_shinfo(skb)->frag_list;
2214 for (; list; list=list->next) {
2217 WARN_ON(start > offset + len);
2219 end = start + list->len;
2220 if ((copy = end - offset) > 0) {
2224 if (skb_copy_and_csum_datagram(list,
2229 *csump = csum_block_add(*csump, csum2, pos);
2230 if ((len -= copy) == 0)
2246 ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes,
2249 /* XXX is there sufficient synchronization here? */
2250 int listeners = get_listen_mask(f);
2251 int dp_idx = iminor(f->f_dentry->d_inode);
2252 struct datapath *dp = get_dp(dp_idx);
2253 struct sk_buff *skb;
2254 size_t copy_bytes, tot_copy_bytes;
2260 if (nbytes == 0 || !listeners)
2266 for (i = 0; i < DP_N_QUEUES; i++) {
2267 if (listeners & (1 << i)) {
2268 skb = skb_dequeue(&dp->queues[i]);
2274 if (f->f_flags & O_NONBLOCK) {
2279 wait_event_interruptible(dp->waitqueue,
2280 dp_has_packet_of_interest(dp,
2283 if (signal_pending(current)) {
2284 retval = -ERESTARTSYS;
2289 copy_bytes = tot_copy_bytes = min_t(size_t, skb->len, nbytes);
2292 if (skb->ip_summed == CHECKSUM_PARTIAL) {
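/* The packet still carries only a partial L4 checksum.  If the whole packet
 * is being read, compute the checksum while copying the bytes from csum_start
 * onward and patch the folded result in at csum_offset; the leading headers
 * are then copied by the generic path below.  Otherwise fall back to
 * completing the checksum inside the skb itself. */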
2293 if (copy_bytes == skb->len) {
2295 unsigned int csum_start, csum_offset;
2297 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
2298 csum_start = skb->csum_start - skb_headroom(skb);
2299 csum_offset = skb->csum_offset;
2301 csum_start = skb_transport_header(skb) - skb->data;
2302 csum_offset = skb->csum;
2304 BUG_ON(csum_start >= skb_headlen(skb));
2305 retval = skb_copy_and_csum_datagram(skb, csum_start, buf + csum_start,
2306 copy_bytes - csum_start, &csum);
2308 __sum16 __user *csump;
2310 copy_bytes = csum_start;
2311 csump = (__sum16 __user *)(buf + csum_start + csum_offset);
2313 BUG_ON((char *)csump + sizeof(__sum16) > buf + nbytes);
2314 put_user(csum_fold(csum), csump);
2317 retval = skb_checksum_help(skb);
2321 struct iovec __user iov;
2324 iov.iov_len = copy_bytes;
2325 retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
2329 retval = tot_copy_bytes;
2337 static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
2339 /* XXX is there sufficient synchronization here? */
2340 int dp_idx = iminor(file->f_dentry->d_inode);
2341 struct datapath *dp = get_dp(dp_idx);
2346 poll_wait(file, &dp->waitqueue, wait);
2347 if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
2348 mask |= POLLIN | POLLRDNORM;
2350 mask = POLLIN | POLLRDNORM | POLLHUP;
2355 struct file_operations openvswitch_fops = {
2356 /* XXX .aio_read = openvswitch_aio_read, */
2357 .read = openvswitch_read,
2358 .poll = openvswitch_poll,
2359 .unlocked_ioctl = openvswitch_ioctl,
2360 #ifdef CONFIG_COMPAT
2361 .compat_ioctl = openvswitch_compat_ioctl,
2363 /* XXX .fasync = openvswitch_fasync, */
2368 static int __init dp_init(void)
2370 struct sk_buff *dummy_skb;
2373 BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
2375 printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
2383 goto error_flow_exit;
2385 err = register_netdevice_notifier(&dp_device_notifier);
2387 goto error_vport_exit;
2389 major = register_chrdev(0, "openvswitch", &openvswitch_fops);
2391 goto error_unreg_notifier;
2395 error_unreg_notifier:
2396 unregister_netdevice_notifier(&dp_device_notifier);
2405 static void dp_cleanup(void)
2408 unregister_chrdev(major, "openvswitch");
2409 unregister_netdevice_notifier(&dp_device_notifier);
2414 module_init(dp_init);
2415 module_exit(dp_cleanup);
2417 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2418 MODULE_LICENSE("GPL");