/*
 * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for managing the dp interface/device. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/llc.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/random.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>

#include "openvswitch/datapath-protocol.h"
int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);
/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
 * lock first.
 *
 * It is safe to access the datapath and net_bridge_port structures with just
 * dp_mutex.
 */
static struct datapath *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);
/* Number of milliseconds between runs of the maintenance thread. */
#define MAINT_SLEEP_MSECS 1000

static int new_nbp(struct datapath *, struct net_device *, int port_no);
/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ODP_MAX)
		return NULL;
	return rcu_dereference(dps[dp_idx]);
}
EXPORT_SYMBOL_GPL(get_dp);
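/* Typical read-side use of get_dp() (illustrative sketch only; callers in
 * this file follow the same pattern):
 *
 *	rcu_read_lock();
 *	dp = get_dp(dp_idx);
 *	if (dp)
 *		... use 'dp' without sleeping ...
 *	rcu_read_unlock();
 *
 * The returned pointer is only guaranteed to stay valid while rcu_read_lock
 * (or, for writers, dp_mutex) is held. */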
static struct datapath *get_dp_locked(int dp_idx)
{
	struct datapath *dp;

	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp)
		mutex_lock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	return dp;
}
static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(1); /* IFLA_OPERSTATE */
}
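/* Note: br_nlmsg_size() must account for every attribute that dp_fill_ifinfo()
 * below can emit.  If it under-estimates, nlmsg_new() allocates too small a
 * buffer and dp_fill_ifinfo() fails with -EMSGSIZE, which dp_ifinfo_notify()
 * treats as a bug (see the WARN_ON there). */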
static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  int event, unsigned int flags)
{
	const struct datapath *dp = port->dp;
	const struct net_device *dev = port->dev;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;

	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);

	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
	NLA_PUT_U32(skb, IFLA_MASTER, dp->ports[ODPP_LOCAL]->dev->ifindex);
	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
#endif

	NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);

	if (dev->ifindex != dev->iflink)
		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static void dp_ifinfo_notify(int event, struct net_bridge_port *port)
{
	struct net *net = dev_net(port->dev);

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);

	err = dp_fill_ifinfo(skb, port, event, 0);

	/* -EMSGSIZE implies BUG in br_nlmsg_size() */
	WARN_ON(err == -EMSGSIZE);

	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);

	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
static void release_dp(struct kobject *kobj)
{
	struct datapath *dp = container_of(kobj, struct datapath, ifobj);
	kfree(dp);
}

static struct kobj_type dp_ktype = {
	.release = release_dp
};
static int create_dp(int dp_idx, const char __user *devnamep)
{
	struct net_device *dp_dev;
	char devname[IFNAMSIZ];

	if (strncpy_from_user(devname, devnamep, IFNAMSIZ - 1) < 0)

	devname[IFNAMSIZ - 1] = '\0';

	snprintf(devname, sizeof devname, "of%d", dp_idx);

	mutex_lock(&dp_mutex);

	if (!try_module_get(THIS_MODULE))

	/* Exit early if a datapath with that number already exists.
	 * (We don't use -EEXIST because that's ambiguous with 'devname'
	 * conflicting with an existing network device name.) */

	dp = kzalloc(sizeof *dp, GFP_KERNEL);

	INIT_LIST_HEAD(&dp->port_list);
	mutex_init(&dp->mutex);

	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
	dp->ifobj.kset = NULL;
	kobject_init(&dp->ifobj, &dp_ktype);

	/* Allocate table. */
	rcu_assign_pointer(dp->table, dp_table_create(DP_L1_SIZE));

	/* Set up our datapath device. */
	dp_dev = dp_dev_create(dp, devname, ODPP_LOCAL);
	err = PTR_ERR(dp_dev);

		goto err_destroy_table;

	err = new_nbp(dp, dp_dev, ODPP_LOCAL);

		dp_dev_destroy(dp_dev);
		goto err_destroy_table;

	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu)
		goto err_destroy_local_port;

	rcu_assign_pointer(dps[dp_idx], dp);
	mutex_unlock(&dp_mutex);

err_destroy_local_port:
	dp_del_port(dp->ports[ODPP_LOCAL]);

	dp_table_destroy(dp->table, 0);

	module_put(THIS_MODULE);

	mutex_unlock(&dp_mutex);
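/* Error handling in create_dp() unwinds in reverse order of construction:
 * the local port is deleted, then the flow table is destroyed, and finally
 * the module reference is dropped and dp_mutex released, so a failure at any
 * stage leaves no partially constructed datapath behind. */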
static void do_destroy_dp(struct datapath *dp)
{
	struct net_bridge_port *p, *n;
	int i;

	list_for_each_entry_safe (p, n, &dp->port_list, node)
		if (p->port_no != ODPP_LOCAL)
			dp_del_port(p);

	rcu_assign_pointer(dps[dp->dp_idx], NULL);

	dp_del_port(dp->ports[ODPP_LOCAL]);

	dp_table_destroy(dp->table, 1);

	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_purge(&dp->queues[i]);
	for (i = 0; i < DP_MAX_GROUPS; i++)
		kfree(dp->groups[i]);
	free_percpu(dp->stats_percpu);
	kobject_put(&dp->ifobj);
	module_put(THIS_MODULE);
}

static int destroy_dp(int dp_idx)
{
	mutex_lock(&dp_mutex);

	mutex_unlock(&dp_mutex);
}

static void release_nbp(struct kobject *kobj)
{
	struct net_bridge_port *p = container_of(kobj, struct net_bridge_port, kobj);
	kfree(p);
}

static struct kobj_type brport_ktype = {
	.sysfs_ops = &brport_sysfs_ops,
	.release = release_nbp
};
/* Called with RTNL lock and dp_mutex. */
static int new_nbp(struct datapath *dp, struct net_device *dev, int port_no)
{
	struct net_bridge_port *p;

	if (dev->br_port != NULL)
		return -EBUSY;

	p = kzalloc(sizeof(*p), GFP_KERNEL);

	dev_set_promiscuity(dev, 1);
	p->port_no = port_no;
	atomic_set(&p->sflow_pool, 0);
	if (!is_dp_dev(dev))
		rcu_assign_pointer(dev->br_port, p);
	else {
		/* It would make sense to assign dev->br_port here too, but
		 * that causes packets received on internal ports to get caught
		 * in dp_frame_hook().  In turn dp_frame_hook() can reject them
		 * back to the network stack, but that's a waste of time. */
	}
	dev_disable_lro(dev);
	rcu_assign_pointer(dp->ports[port_no], p);
	list_add_rcu(&p->node, &dp->port_list);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brport later, if sysfs is enabled. */
	kobject_init(&p->kobj, &brport_ktype);

	dp_ifinfo_notify(RTM_NEWLINK, p);

	return 0;
}
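/* Note on ordering: the new port is fully initialized before it is published
 * with rcu_assign_pointer() into dev->br_port and dp->ports[], so readers
 * that traverse those pointers under rcu_read_lock (e.g. the receive path in
 * dp_frame_hook()/dp_process_received_packet()) never observe a half-built
 * net_bridge_port. */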
static int add_port(int dp_idx, struct odp_port __user *portp)
{
	struct net_device *dev;
	struct odp_port port;

	if (copy_from_user(&port, portp, sizeof port))

	port.devname[IFNAMSIZ - 1] = '\0';

	dp = get_dp_locked(dp_idx);

		goto out_unlock_rtnl;

	for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
		if (!dp->ports[port_no])

	if (!(port.flags & ODP_PORT_INTERNAL)) {

		dev = dev_get_by_name(&init_net, port.devname);

		if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER ||

		dev = dp_dev_create(dp, port.devname, port_no);

	err = new_nbp(dp, dev, port_no);

	set_dp_devs_mtu(dp, dev);
	dp_sysfs_add_if(dp->ports[port_no]);

	err = __put_user(port_no, &portp->port);

	mutex_unlock(&dp->mutex);
int dp_del_port(struct net_bridge_port *p)
{
	if (p->port_no != ODPP_LOCAL)

	dp_ifinfo_notify(RTM_DELLINK, p);

	if (is_dp_dev(p->dev)) {
		/* Make sure that no packets arrive from now on, since
		 * dp_dev_xmit() will try to find itself through
		 * p->dp->ports[], and we're about to set that to null. */
		netif_tx_disable(p->dev);
	}

	/* First drop references to device. */
	dev_set_promiscuity(p->dev, -1);
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
	rcu_assign_pointer(p->dev->br_port, NULL);

	/* Then wait until no one is still using it, and destroy it. */

	if (is_dp_dev(p->dev))
		dp_dev_destroy(p->dev);

	kobject_put(&p->kobj);
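/* Teardown order above matters: the port is first unpublished from
 * dp->ports[] and dev->br_port so new RCU readers can no longer find it, and
 * only after the "wait until no one is still using it" step (typically an RCU
 * grace period such as synchronize_rcu()) is the underlying device destroyed
 * and the kobject reference dropped. */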
static int del_port(int dp_idx, int port_no)
{
	struct net_bridge_port *p;

	if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)

	dp = get_dp_locked(dp_idx);

		goto out_unlock_rtnl;

	p = dp->ports[port_no];

	err = dp_del_port(p);

	mutex_unlock(&dp->mutex);
/* Must be called with rcu_read_lock. */
static void do_port_input(struct net_bridge_port *p, struct sk_buff *skb)
{
	/* LRO isn't suitable for bridging.  We turn it off but make sure
	 * that it wasn't reactivated. */
	if (skb_warn_if_lro(skb))
		return;

	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 * (No one comes after us, since we tell handle_bridge() that we took
	 * the packet.) */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return;

	/* Push the Ethernet header back on. */
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	dp_process_received_packet(skb, p);
}
/* Must be called with rcu_read_lock and with bottom-halves disabled. */
void dp_process_received_packet(struct sk_buff *skb, struct net_bridge_port *p)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	struct odp_flow_key key;
	struct sw_flow *flow;

	WARN_ON_ONCE(skb_shared(skb));

	compute_ip_summed(skb, false);
	OVS_CB(skb)->tun_id = 0;

	/* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
	stats = percpu_ptr(dp->stats_percpu, smp_processor_id());

	if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
		if (dp->drop_frags) {

	flow = dp_table_lookup(rcu_dereference(dp->table), &key);

		struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts);
		flow_used(flow, skb);
		execute_actions(dp, skb, &key, acts->actions, acts->n_actions,

		dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
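/* Receive fast path summary: compute_ip_summed() records the checksum state,
 * flow_extract() builds the flow key from the packet headers, and
 * dp_table_lookup() searches the flow table.  On a hit the cached actions are
 * run via execute_actions(); on a miss the packet is queued to userspace as a
 * _ODPL_MISS_NR message through dp_output_control(). */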
/* Used as br_handle_frame_hook.  (Cannot run the bridge at the same time,
 * even on a different set of devices!) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
/* Called with rcu_read_lock and bottom-halves disabled. */
static struct sk_buff *dp_frame_hook(struct net_bridge_port *p,
				     struct sk_buff *skb)
{
	do_port_input(p, skb);
	return NULL;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
/* Called with rcu_read_lock and bottom-halves disabled. */
static int dp_frame_hook(struct net_bridge_port *p, struct sk_buff **pskb)
{
	do_port_input(p, *pskb);
	return 1;
}
#endif
#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
/* This code is based on the skb_checksum_setup() in net/core/dev.c from a
 * combination of Lenny's 2.6.26 Xen kernel and Xen's
 * linux-2.6.18-92.1.10.el5.xs5.0.0.394.644.  We can't call this function
 * directly because it isn't exported in all versions. */
static int skb_pull_up_to(struct sk_buff *skb, void *ptr)
{
	if (ptr < (void *)skb->tail)
		return 1;
	if (__pskb_pull_tail(skb,
			     ptr - (void *)skb->data - skb_headlen(skb))) {

int vswitch_skb_checksum_setup(struct sk_buff *skb)
{
	__u16 csum_start, csum_offset;

	if (!skb->proto_csum_blank)

	if (skb->protocol != htons(ETH_P_IP))

	if (!skb_pull_up_to(skb, skb_network_header(skb) + sizeof(struct iphdr)))

	th = skb_network_header(skb) + 4 * iph->ihl;

	csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		csum_offset = offsetof(struct udphdr, check);
		break;
	default:
		printk(KERN_ERR "Attempting to checksum a non-"
		       "TCP/UDP packet, dropping a protocol"
		       " %d packet", iph->protocol);
	}

	if (!skb_pull_up_to(skb, th + csum_offset + 2))

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->proto_csum_blank = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
	skb->csum_start = csum_start;
	skb->csum_offset = csum_offset;
#else
	skb_set_transport_header(skb, csum_start - skb_headroom(skb));
	skb->csum = csum_offset;
#endif
#endif /* CONFIG_XEN && HAVE_PROTO_DATA_VALID */
/* Types of checksums that we can receive (these all refer to L4 checksums):
 * 1. CHECKSUM_NONE: Device that did not compute checksum, contains full
 *    (though not verified) checksum in packet but not in skb->csum.  Packets
 *    from the bridge local port will also have this type.
 * 2. CHECKSUM_COMPLETE (CHECKSUM_HW): Good device that computes checksums,
 *    also the GRE module.  This is the same as CHECKSUM_NONE, except it has
 *    a valid skb->csum.  Importantly, both contain a full checksum (not
 *    verified) in the packet itself.  The only difference is that if the
 *    packet gets to L4 processing on this machine (not in DomU) we won't
 *    have to recompute the checksum to verify.  Most hardware devices do not
 *    produce packets with this type, even if they support receive checksum
 *    offloading (they produce type #5).
 * 3. CHECKSUM_PARTIAL (CHECKSUM_HW): Packet without full checksum and needs to
 *    be computed if it is sent off box.  Unfortunately on earlier kernels,
 *    this case is impossible to distinguish from #2, despite having opposite
 *    meanings.  Xen adds an extra field on earlier kernels (see #4) in order
 *    to distinguish the different states.  The only real user of this type
 *    with bridging is Xen (on later kernels).
 * 4. CHECKSUM_UNNECESSARY (with proto_csum_blank true): This packet was
 *    generated locally by a Xen DomU and has a partial checksum.  If it is
 *    handled on this machine (Dom0 or DomU), then the checksum will not be
 *    computed.  If it goes off box, the checksum in the packet needs to be
 *    completed.  Calling skb_checksum_setup converts this to CHECKSUM_HW
 *    (CHECKSUM_PARTIAL) so that the checksum can be completed.  In later
 *    kernels, this combination is replaced with CHECKSUM_PARTIAL.
 * 5. CHECKSUM_UNNECESSARY (with proto_csum_blank false): Packet with a correct
 *    full checksum or using a protocol without a checksum.  skb->csum is
 *    undefined.  This is common from devices with receive checksum
 *    offloading.  This is somewhat similar to CHECKSUM_NONE, except that
 *    nobody will try to verify the checksum with CHECKSUM_UNNECESSARY.
 *
 * Note that on earlier kernels, CHECKSUM_COMPLETE and CHECKSUM_PARTIAL are
 * both defined as CHECKSUM_HW.  Normally the meaning of CHECKSUM_HW is clear
 * based on whether it is on the transmit or receive path.  After the datapath
 * it will be interpreted as CHECKSUM_PARTIAL.  If the packet already has a
 * checksum, we will panic.  Since we can receive packets with checksums, we
 * assume that all CHECKSUM_HW packets have checksums and map them to
 * CHECKSUM_NONE, which has a similar meaning (it is only different if the
 * packet is processed by the local IP stack, in which case it will need to
 * be reverified).  If we receive a packet with CHECKSUM_HW that really means
 * CHECKSUM_PARTIAL, it will be sent with the wrong checksum.  However, there
 * shouldn't be any devices that do this with bridging.
 *
 * The bridge has similar behavior and this function closely resembles
 * skb_forward_csum().  It is slightly different because we are only concerned
 * with bridging and not other types of forwarding and can get away with
 * slightly more optimal behavior. */
void compute_ip_summed(struct sk_buff *skb, bool xmit)
{
	/* For our convenience these defines change repeatedly between kernel
	 * versions, so we can't just copy them over... */
	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
		break;
	case CHECKSUM_UNNECESSARY:
		OVS_CB(skb)->ip_summed = OVS_CSUM_UNNECESSARY;
		break;

	/* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE.
	 * However, we should only get CHECKSUM_PARTIAL packets from Xen, which
	 * uses some special fields to represent this (see below).  Since we
	 * can only make one type work, pick the one that actually happens in
	 * practice.
	 *
	 * The one exception to this is if we are on the transmit path
	 * (basically after skb_checksum_setup() has been run) the type has
	 * already been converted, so we should stay with that. */

		OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;

		OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;

	case CHECKSUM_COMPLETE:
		OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
		break;
	case CHECKSUM_PARTIAL:
		OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
		break;

	default:
		printk(KERN_ERR "openvswitch: unknown checksum type %d\n",
		       skb->ip_summed);
		/* None seems the safest... */
		OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
	}

#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
	/* Xen has a special way of representing CHECKSUM_PARTIAL on older
	 * kernels.  It should not be set on the transmit path though. */
	if (skb->proto_csum_blank)
		OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;

	WARN_ON_ONCE(skb->proto_csum_blank && xmit);
#endif
}
void forward_ip_summed(struct sk_buff *skb)
{
	if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}
/* Append each packet in 'skb' list to 'queue'.  There will be only one packet
 * unless we broke up a GSO packet. */
static int queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
				 int queue_no, u32 arg)
{
	struct sk_buff *nskb;

	port_no = ODPP_LOCAL;

	if (skb->dev->br_port)
		port_no = skb->dev->br_port->port_no;
	else if (is_dp_dev(skb->dev))
		port_no = dp_dev_priv(skb->dev)->port_no;

		struct odp_msg *header;

		/* If a checksum-deferred packet is forwarded to the
		 * controller, correct the pointers and checksum.  This happens
		 * on a regular basis only on Xen, on which VMs can pass up
		 * packets that do not have their checksum computed.
		 */
		err = vswitch_skb_checksum_setup(skb);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
			/* Until 2.6.22, the start of the transport header was
			 * also the start of data to be checksummed.  Linux
			 * 2.6.22 introduced the csum_start field for this
			 * purpose, but we should point the transport header to
			 * it anyway for backward compatibility, as
			 * dev_queue_xmit() does even in 2.6.28. */
			skb_set_transport_header(skb, skb->csum_start -
						      skb_headroom(skb));

			err = skb_checksum_help(skb);

			if (skb->ip_summed == CHECKSUM_HW) {
				err = skb_checksum_help(skb, 0);

		err = skb_cow(skb, sizeof *header);

		header = (struct odp_msg *)__skb_push(skb, sizeof *header);
		header->type = queue_no;
		header->length = skb->len;
		header->port = port_no;
		header->reserved = 0;

		skb_queue_tail(queue, skb);

	while ((skb = nskb) != NULL) {
int
dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
		  u32 arg)
{
	struct dp_stats_percpu *stats;
	struct sk_buff_head *queue;

	WARN_ON_ONCE(skb_shared(skb));
	BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR && queue_no != _ODPL_SFLOW_NR);
	queue = &dp->queues[queue_no];

	if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)

	forward_ip_summed(skb);

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, 0);

		if (unlikely(IS_ERR(skb))) {

		/* XXX This case might not be possible.  It's hard to
		 * tell from the skb_gso_segment() code and comment. */

	err = queue_control_packets(skb, queue, queue_no, arg);
	wake_up_interruptible(&dp->waitqueue);

	stats = percpu_ptr(dp->stats_percpu, get_cpu());
static int flush_flows(struct datapath *dp)
{
	return dp_table_flush(dp);
}
static int validate_actions(const struct sw_flow_actions *actions)
{
	unsigned int i;

	for (i = 0; i < actions->n_actions; i++) {
		const union odp_action *a = &actions->actions[i];

		switch (a->type) {
		case ODPAT_OUTPUT:
			if (a->output.port >= DP_MAX_PORTS)
				return -EINVAL;
			break;
		case ODPAT_OUTPUT_GROUP:
			if (a->output_group.group >= DP_MAX_GROUPS)
				return -EINVAL;
			break;
		case ODPAT_SET_VLAN_VID:
			if (a->vlan_vid.vlan_vid & htons(~VLAN_VID_MASK))
				return -EINVAL;
			break;
		case ODPAT_SET_VLAN_PCP:
			if (a->vlan_pcp.vlan_pcp
			    & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT))
				return -EINVAL;
			break;
		case ODPAT_SET_NW_TOS:
			if (a->nw_tos.nw_tos & INET_ECN_MASK)
				return -EINVAL;
			break;
		default:
			if (a->type >= ODPAT_N_ACTIONS)
				return -EINVAL;
		}
	}

	return 0;
}
static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
{
	struct sw_flow_actions *actions;

	actions = flow_actions_alloc(flow->n_actions);
	error = PTR_ERR(actions);

	if (copy_from_user(actions->actions, flow->actions,
			   flow->n_actions * sizeof(union odp_action)))
		goto error_free_actions;
	error = validate_actions(actions);

		goto error_free_actions;

	return ERR_PTR(error);
static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
{
	if (flow->used.tv_sec) {
		stats->used_sec = flow->used.tv_sec;
		stats->used_nsec = flow->used.tv_nsec;
	} else {
		stats->used_sec = 0;
		stats->used_nsec = 0;
	}
	stats->n_packets = flow->packet_count;
	stats->n_bytes = flow->byte_count;
	stats->ip_tos = flow->ip_tos;
	stats->tcp_flags = flow->tcp_flags;
}
static void clear_stats(struct sw_flow *flow)
{
	flow->used.tv_sec = flow->used.tv_nsec = 0;
	flow->ip_tos = 0;
	flow->tcp_flags = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}
static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
{
	struct odp_flow_put uf;
	struct sw_flow *flow;
	struct dp_table *table;
	struct odp_flow_stats stats;

	if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))

	memset(uf.flow.key.reserved, 0, sizeof uf.flow.key.reserved);

	table = rcu_dereference(dp->table);
	flow = dp_table_lookup(table, &uf.flow.key);
	if (!flow) {
		struct sw_flow_actions *acts;

		if (!(uf.flags & ODPPF_CREATE))

		/* Expand table, if necessary, to make room. */
		if (dp->n_flows >= table->n_buckets) {
			if (table->n_buckets >= DP_MAX_BUCKETS)

			error = dp_table_expand(dp);

			table = rcu_dereference(dp->table);
		}

		/* Allocate flow. */
		flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);

		flow->key = uf.flow.key;
		spin_lock_init(&flow->lock);

		/* Obtain actions. */
		acts = get_actions(&uf.flow);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = dp_table_insert(table, flow);
		if (error)
			goto error_free_flow_acts;

		memset(&stats, 0, sizeof(struct odp_flow_stats));
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts, *new_acts;
		unsigned long int flags;

		/* Bail out if we're not allowed to modify an existing flow. */
		if (!(uf.flags & ODPPF_MODIFY))

		new_acts = get_actions(&uf.flow);
		error = PTR_ERR(new_acts);
		if (IS_ERR(new_acts))

		old_acts = rcu_dereference(flow->sf_acts);
		if (old_acts->n_actions != new_acts->n_actions ||
		    memcmp(old_acts->actions, new_acts->actions,
			   sizeof(union odp_action) * old_acts->n_actions)) {
			rcu_assign_pointer(flow->sf_acts, new_acts);
			flow_deferred_free_acts(old_acts);
		}

		/* Fetch stats, then clear them if necessary. */
		spin_lock_irqsave(&flow->lock, flags);
		get_stats(flow, &stats);
		if (uf.flags & ODPPF_ZERO_STATS)
			clear_stats(flow);
		spin_unlock_irqrestore(&flow->lock, flags);
	}

	/* Copy stats to userspace. */
	if (__copy_to_user(&ufp->flow.stats, &stats,
			   sizeof(struct odp_flow_stats)))

error_free_flow_acts:
	kfree(flow->sf_acts);
error_free_flow:
	kmem_cache_free(flow_cache, flow);
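/* Note on the ODPPF_MODIFY path above: a flow's actions are replaced by
 * publishing the new sw_flow_actions with rcu_assign_pointer() and freeing
 * the old ones only after a grace period (flow_deferred_free_acts()), so
 * packets concurrently executing the old actions under rcu_read_lock are
 * never left with a dangling pointer. */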
static int put_actions(const struct sw_flow *flow, struct odp_flow __user *ufp)
{
	union odp_action __user *actions;
	struct sw_flow_actions *sf_acts;
	u32 n_actions;

	if (__get_user(actions, &ufp->actions) ||
	    __get_user(n_actions, &ufp->n_actions))
		return -EFAULT;

	sf_acts = rcu_dereference(flow->sf_acts);
	if (__put_user(sf_acts->n_actions, &ufp->n_actions) ||
	    (actions && copy_to_user(actions, sf_acts->actions,
				     sizeof(union odp_action) *
				     min(sf_acts->n_actions, n_actions))))
		return -EFAULT;

	return 0;
}
static int answer_query(struct sw_flow *flow, u32 query_flags,
			struct odp_flow __user *ufp)
{
	struct odp_flow_stats stats;
	unsigned long int flags;

	spin_lock_irqsave(&flow->lock, flags);
	get_stats(flow, &stats);

	if (query_flags & ODPFF_ZERO_TCP_FLAGS) {
		flow->tcp_flags = 0;
	}
	spin_unlock_irqrestore(&flow->lock, flags);

	if (__copy_to_user(&ufp->stats, &stats, sizeof(struct odp_flow_stats)))
		return -EFAULT;

	return put_actions(flow, ufp);
}
static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
{
	struct dp_table *table = rcu_dereference(dp->table);
	struct odp_flow uf;
	struct sw_flow *flow;
	int error;

	if (copy_from_user(&uf, ufp, sizeof uf))
		return -EFAULT;
	memset(uf.key.reserved, 0, sizeof uf.key.reserved);

	flow = dp_table_lookup(table, &uf.key);

	/* XXX redundant lookup */
	error = dp_table_delete(table, flow);

	/* XXX These statistics might lose a few packets, since other CPUs can
	 * be using this flow.  We used to synchronize_rcu() to make sure that
	 * we get completely accurate stats, but that blows our performance,
	 * badly. */
	error = answer_query(flow, 0, ufp);
	flow_deferred_free(flow);
static int query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct dp_table *table = rcu_dereference(dp->table);
	int i;

	for (i = 0; i < flowvec->n_flows; i++) {
		struct odp_flow __user *ufp = &flowvec->flows[i];
		struct odp_flow uf;
		struct sw_flow *flow;
		int error;

		if (__copy_from_user(&uf, ufp, sizeof uf))
			return -EFAULT;
		memset(uf.key.reserved, 0, sizeof uf.key.reserved);

		flow = dp_table_lookup(table, &uf.key);
		if (!flow)
			error = __put_user(ENOENT, &ufp->stats.error);
		else
			error = answer_query(flow, uf.flags, ufp);
	}
	return flowvec->n_flows;
}
struct list_flows_cbdata {
	struct odp_flow __user *uflows;
	int n_flows;
	int listed_flows;
};

static int list_flow(struct sw_flow *flow, void *cbdata_)
{
	struct list_flows_cbdata *cbdata = cbdata_;
	struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
	int error;

	if (__copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
		return -EFAULT;
	error = answer_query(flow, 0, ufp);

	if (cbdata->listed_flows >= cbdata->n_flows)
		return cbdata->listed_flows;
	return 0;
}
static int list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct list_flows_cbdata cbdata;
	int error;

	if (!flowvec->n_flows)
		return 0;

	cbdata.uflows = flowvec->flows;
	cbdata.n_flows = flowvec->n_flows;
	cbdata.listed_flows = 0;
	error = dp_table_foreach(rcu_dereference(dp->table),
				 list_flow, &cbdata);
	return error ? error : cbdata.listed_flows;
}
static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
			    int (*function)(struct datapath *,
					    const struct odp_flowvec *))
{
	struct odp_flowvec __user *uflowvec;
	struct odp_flowvec flowvec;
	int retval;

	uflowvec = (struct odp_flowvec __user *)argp;
	if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
	    copy_from_user(&flowvec, uflowvec, sizeof flowvec))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, flowvec.flows,
		       flowvec.n_flows * sizeof(struct odp_flow)))
		return -EFAULT;

	retval = function(dp, &flowvec);
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: __put_user(retval, &uflowvec->n_flows));
}
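/* do_flowvec_ioctl() return convention: a negative value is an error, 0 means
 * the callback consumed exactly flowvec.n_flows entries, and otherwise the
 * actual count is written back to uflowvec->n_flows.  An illustrative sketch
 * of a userspace caller (hypothetical names; assumes the flow-listing command
 * ODP_FLOW_LIST and an open datapath fd 'dp_fd'):
 *
 *	struct odp_flowvec fv = { .flows = flows, .n_flows = n };
 *	if (ioctl(dp_fd, ODP_FLOW_LIST, &fv) == 0)
 *		... fv.n_flows is an upper bound on the flows listed ...
 */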
static int do_execute(struct datapath *dp, const struct odp_execute *executep)
{
	struct odp_execute execute;
	struct odp_flow_key key;
	struct sk_buff *skb;
	struct sw_flow_actions *actions;

	if (copy_from_user(&execute, executep, sizeof execute))

	if (execute.length < ETH_HLEN || execute.length > 65535)

	actions = flow_actions_alloc(execute.n_actions);

	if (copy_from_user(actions->actions, execute.actions,
			   execute.n_actions * sizeof *execute.actions))
		goto error_free_actions;

	err = validate_actions(actions);
	if (err)
		goto error_free_actions;

	skb = alloc_skb(execute.length, GFP_KERNEL);
	if (!skb)
		goto error_free_actions;

	if (execute.in_port < DP_MAX_PORTS) {
		struct net_bridge_port *p = dp->ports[execute.in_port];

	if (copy_from_user(skb_put(skb, execute.length), execute.data,
			   execute.length))
		goto error_free_skb;

	skb_reset_mac_header(skb);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= 1536)
		skb->protocol = eth->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	flow_extract(skb, execute.in_port, &key);
	err = execute_actions(dp, skb, &key, actions->actions,
			      actions->n_actions, GFP_KERNEL);
static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
	struct odp_stats stats;
	int i;

	stats.n_flows = dp->n_flows;
	stats.cur_capacity = rcu_dereference(dp->table)->n_buckets;
	stats.max_capacity = DP_MAX_BUCKETS;
	stats.n_ports = dp->n_ports;
	stats.max_ports = DP_MAX_PORTS;
	stats.max_groups = DP_MAX_GROUPS;
	stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *s;
		s = percpu_ptr(dp->stats_percpu, i);
		stats.n_frags += s->n_frags;
		stats.n_hit += s->n_hit;
		stats.n_missed += s->n_missed;
		stats.n_lost += s->n_lost;
	}
	stats.max_miss_queue = DP_MAX_QUEUE_LEN;
	stats.max_action_queue = DP_MAX_QUEUE_LEN;
	return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
}
/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int dp_min_mtu(const struct datapath *dp)
{
	struct net_bridge_port *p;
	int mtu = 0;

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		struct net_device *dev = p->dev;

		/* Skip any internal ports, since that's what we're trying to
		 * set. */

		if (!mtu || dev->mtu < mtu)
			mtu = dev->mtu;
	}

	return mtu ? mtu : ETH_DATA_LEN;
}
/* Sets the MTU of all datapath devices to the minimum of the ports.  'dev'
 * is the device whose MTU may have changed.  Must be called with RTNL lock
 * held. */
void set_dp_devs_mtu(const struct datapath *dp, struct net_device *dev)
{
	struct net_bridge_port *p;
	int mtu;

	mtu = dp_min_mtu(dp);

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		struct net_device *br_dev = p->dev;

		if (is_dp_dev(br_dev))
			dev_set_mtu(br_dev, mtu);
	}
}
static int
put_port(const struct net_bridge_port *p, struct odp_port __user *uop)
{
	struct odp_port op;

	memset(&op, 0, sizeof op);
	strncpy(op.devname, p->dev->name, sizeof op.devname);
	op.port = p->port_no;
	op.flags = is_dp_dev(p->dev) ? ODP_PORT_INTERNAL : 0;

	return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
}
static int
query_port(struct datapath *dp, struct odp_port __user *uport)
{
	struct odp_port port;

	if (copy_from_user(&port, uport, sizeof port))
		return -EFAULT;

	if (port.devname[0]) {
		struct net_bridge_port *p;
		struct net_device *dev;
		int err;

		port.devname[IFNAMSIZ - 1] = '\0';

		dev = dev_get_by_name(&init_net, port.devname);

		if (!p && is_dp_dev(dev)) {
			struct dp_dev *dp_dev = dp_dev_priv(dev);
			if (dp_dev->dp == dp)
				p = dp->ports[dp_dev->port_no];
		}
		err = p && p->dp == dp ? put_port(p, uport) : -ENOENT;
	} else {
		if (port.port >= DP_MAX_PORTS)
			return -EINVAL;
		if (!dp->ports[port.port])
			return -ENOENT;
		return put_port(dp->ports[port.port], uport);
	}
}
static int
list_ports(struct datapath *dp, struct odp_portvec __user *pvp)
{
	struct odp_portvec pv;
	struct net_bridge_port *p;
	int idx;

	if (copy_from_user(&pv, pvp, sizeof pv))
		return -EFAULT;

	idx = 0;
	list_for_each_entry_rcu (p, &dp->port_list, node) {
		if (put_port(p, &pv.ports[idx]))
			return -EFAULT;
		if (idx++ >= pv.n_ports)
			break;
	}

	return put_user(dp->n_ports, &pvp->n_ports);
}
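/* Note: list_ports() always writes the datapath's total port count back to
 * userspace, even when the supplied vector was smaller; a caller can compare
 * the returned n_ports with the size it passed in to detect truncation and
 * retry with a larger buffer. */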
/* RCU callback for freeing a dp_port_group */
static void free_port_group(struct rcu_head *rcu)
{
	struct dp_port_group *g = container_of(rcu, struct dp_port_group, rcu);
	kfree(g);
}

static int
set_port_group(struct datapath *dp, const struct odp_port_group __user *upg)
{
	struct odp_port_group pg;
	struct dp_port_group *new_group, *old_group;

	if (copy_from_user(&pg, upg, sizeof pg))
		return -EFAULT;

	if (pg.n_ports > DP_MAX_PORTS || pg.group >= DP_MAX_GROUPS)
		return -EINVAL;

	new_group = kmalloc(sizeof *new_group + sizeof(u16) * pg.n_ports,
			    GFP_KERNEL);

	new_group->n_ports = pg.n_ports;

	if (copy_from_user(new_group->ports, pg.ports,
			   sizeof(u16) * pg.n_ports))

	old_group = rcu_dereference(dp->groups[pg.group]);
	rcu_assign_pointer(dp->groups[pg.group], new_group);

	call_rcu(&old_group->rcu, free_port_group);
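/* Port groups are replaced read-copy-update style: the new group is built and
 * populated from userspace first, rcu_assign_pointer() swaps it into
 * dp->groups[], and the old group is freed via call_rcu() (free_port_group)
 * only once readers that may still hold it have finished. */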
static int
get_port_group(struct datapath *dp, struct odp_port_group *upg)
{
	struct odp_port_group pg;
	struct dp_port_group *g;
	int n_copy;

	if (copy_from_user(&pg, upg, sizeof pg))
		return -EFAULT;

	if (pg.group >= DP_MAX_GROUPS)
		return -EINVAL;

	g = dp->groups[pg.group];
	n_copy = g ? min_t(int, g->n_ports, pg.n_ports) : 0;
	if (n_copy && copy_to_user(pg.ports, g->ports, n_copy * sizeof(u16)))
		return -EFAULT;

	if (put_user(g ? g->n_ports : 0, &upg->n_ports))
		return -EFAULT;

	return 0;
}
static int get_listen_mask(const struct file *f)
{
	return (long)f->private_data;
}

static void set_listen_mask(struct file *f, int listen_mask)
{
	f->private_data = (void *)(long)listen_mask;
}
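/* The listen mask is small enough to be stored directly in file->private_data
 * by casting through 'long' (no allocation needed), so each open file handle
 * independently selects which _ODPL_* queues openvswitch_read() and
 * openvswitch_poll() will look at. */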
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int drop_frags, listeners, port_no;
	unsigned int sflow_probability;

	/* Handle commands with special locking requirements up front. */

		err = create_dp(dp_idx, (char __user *)argp);

	case ODP_DP_DESTROY:
		err = destroy_dp(dp_idx);

		err = add_port(dp_idx, (struct odp_port __user *)argp);

		err = get_user(port_no, (int __user *)argp);

		err = del_port(dp_idx, port_no);

	dp = get_dp_locked(dp_idx);

		err = get_dp_stats(dp, (struct odp_stats __user *)argp);

	case ODP_GET_DROP_FRAGS:
		err = put_user(dp->drop_frags, (int __user *)argp);

	case ODP_SET_DROP_FRAGS:
		err = get_user(drop_frags, (int __user *)argp);

		if (drop_frags != 0 && drop_frags != 1)

		dp->drop_frags = drop_frags;

	case ODP_GET_LISTEN_MASK:
		err = put_user(get_listen_mask(f), (int __user *)argp);

	case ODP_SET_LISTEN_MASK:
		err = get_user(listeners, (int __user *)argp);

		if (listeners & ~ODPL_ALL)

		set_listen_mask(f, listeners);

	case ODP_GET_SFLOW_PROBABILITY:
		err = put_user(dp->sflow_probability, (unsigned int __user *)argp);

	case ODP_SET_SFLOW_PROBABILITY:
		err = get_user(sflow_probability, (unsigned int __user *)argp);

		dp->sflow_probability = sflow_probability;

	case ODP_PORT_QUERY:
		err = query_port(dp, (struct odp_port __user *)argp);

		err = list_ports(dp, (struct odp_portvec __user *)argp);

	case ODP_PORT_GROUP_SET:
		err = set_port_group(dp, (struct odp_port_group __user *)argp);

	case ODP_PORT_GROUP_GET:
		err = get_port_group(dp, (struct odp_port_group __user *)argp);

	case ODP_FLOW_FLUSH:
		err = flush_flows(dp);

		err = put_flow(dp, (struct odp_flow_put __user *)argp);

		err = del_flow(dp, (struct odp_flow __user *)argp);

		err = do_flowvec_ioctl(dp, argp, query_flows);

		err = do_flowvec_ioctl(dp, argp, list_flows);

		err = do_execute(dp, (struct odp_execute __user *)argp);

	mutex_unlock(&dp->mutex);
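/* Locking structure of openvswitch_ioctl(): the datapath create/destroy and
 * port add/delete commands are dispatched before the common path because they
 * have special locking requirements; everything after that point runs with
 * the per-datapath dp->mutex held via get_dp_locked() and released at the
 * end. */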
static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
{
	int i;

	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
			return 1;
	}
	return 0;
}
ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes,
			 loff_t *ppos)
{
	/* XXX is there sufficient synchronization here? */
	int listeners = get_listen_mask(f);
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp = get_dp(dp_idx);
	struct sk_buff *skb;
	struct iovec __user iov;

	if (nbytes == 0 || !listeners)
		return 0;

	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i)) {
			skb = skb_dequeue(&dp->queues[i]);

	if (f->f_flags & O_NONBLOCK) {

	wait_event_interruptible(dp->waitqueue,
				 dp_has_packet_of_interest(dp,
							   listeners));

	if (signal_pending(current)) {
		retval = -ERESTARTSYS;

	copy_bytes = min_t(size_t, skb->len, nbytes);

	iov.iov_len = copy_bytes;
	retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);

	retval = copy_bytes;
static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
	/* XXX is there sufficient synchronization here? */
	int dp_idx = iminor(file->f_dentry->d_inode);
	struct datapath *dp = get_dp(dp_idx);
	unsigned int mask;

	if (dp) {
		mask = 0;
		poll_wait(file, &dp->waitqueue, wait);
		if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
			mask |= POLLIN | POLLRDNORM;
	} else {
		mask = POLLIN | POLLRDNORM | POLLHUP;
	}
	return mask;
}
struct file_operations openvswitch_fops = {
	/* XXX .aio_read = openvswitch_aio_read, */
	.read = openvswitch_read,
	.poll = openvswitch_poll,
	.unlocked_ioctl = openvswitch_ioctl,
	/* XXX .fasync = openvswitch_fasync, */
};
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
static struct llc_sap *dp_stp_sap;

static int dp_stp_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	/* We don't really care about STP packets, we just listen for them for
	 * mutual exclusion with the bridge module, so this just discards
	 * them. */
	kfree_skb(skb);
	return 0;
}

static int dp_avoid_bridge_init(void)
{
	/* Register to receive STP packets because the bridge module also
	 * attempts to do so.  Since there can only be a single listener for a
	 * given protocol, this provides mutual exclusion against the bridge
	 * module, preventing both of them from being loaded at the same
	 * time. */
	dp_stp_sap = llc_sap_open(LLC_SAP_BSPAN, dp_stp_rcv);
	if (dp_stp_sap == NULL) {
		printk(KERN_ERR "openvswitch: can't register sap for STP (probably the bridge module is loaded)\n");
		return -EADDRINUSE;
	}
	return 0;
}

static void dp_avoid_bridge_exit(void)
{
	llc_sap_put(dp_stp_sap);
}
#else  /* Linux 2.6.27 or later. */
static int dp_avoid_bridge_init(void)
{
	/* Linux 2.6.27 introduces a way for multiple clients to register for
	 * STP packets, which interferes with what we try to do above.
	 * Instead, just check whether there's a bridge hook defined.  This is
	 * not as safe--the bridge module is willing to load over the top of
	 * us--but it provides a little bit of protection. */
	if (br_handle_frame_hook) {
		printk(KERN_ERR "openvswitch: bridge module is loaded, cannot load over it\n");
		return -EEXIST;
	}
	return 0;
}

static void dp_avoid_bridge_exit(void)
{
	/* Nothing to do. */
}
#endif	/* Linux 2.6.27 or later */
static int __init dp_init(void)
{
	int err;

	printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

	err = dp_avoid_bridge_init();

	err = register_netdevice_notifier(&dp_device_notifier);
	if (err)
		goto error_flow_exit;

	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	if (major < 0)
		goto error_unreg_notifier;

	/* Hook into callback used by the bridge to intercept packets.
	 * Parasites we are. */
	br_handle_frame_hook = dp_frame_hook;

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&dp_device_notifier);
static void dp_cleanup(void)
{
	unregister_chrdev(major, "openvswitch");
	unregister_netdevice_notifier(&dp_device_notifier);

	br_handle_frame_hook = NULL;
	dp_avoid_bridge_exit();
}
module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");