/*
 * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for managing the dp interface/device. */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/random.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>

#include "openvswitch/datapath-protocol.h"
#include "actions.h"
#include "datapath.h"
#include "flow.h"
#include "vport-internal_dev.h"

int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);

/* Major number of the openvswitch character device, assigned in dp_init(). */
static int major;

/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
 * lock first.
 *
 * It is safe to access the datapath and dp_port structures with just
 * dp_mutex.
 */
static struct datapath *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);

/* Number of milliseconds between runs of the maintenance thread. */
#define MAINT_SLEEP_MSECS 1000

static int new_dp_port(struct datapath *, struct odp_port *, int port_no);

/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ODP_MAX)
		return NULL;
	return rcu_dereference(dps[dp_idx]);
}
EXPORT_SYMBOL_GPL(get_dp);

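/*
 * Editor's note: a minimal usage sketch (not part of the original file)
 * illustrating the locking discipline documented above.  The caller must hold
 * rcu_read_lock() or dp_mutex for as long as the returned pointer is used;
 * the function and its body below are illustrative only.
 */
#if 0
static void example_get_dp_usage(int dp_idx)
{
	struct datapath *dp;

	rcu_read_lock();
	dp = get_dp(dp_idx);
	if (dp) {
		/* 'dp' may only be used inside this RCU read-side critical
		 * section (or, alternatively, while holding dp_mutex). */
	}
	rcu_read_unlock();
}
#endif
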
static struct datapath *get_dp_locked(int dp_idx)
{
	struct datapath *dp;

	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp)
		mutex_lock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	return dp;
}

/* Must be called with rcu_read_lock or RTNL lock. */
const char *dp_name(const struct datapath *dp)
{
	return vport_get_name(dp->ports[ODPP_LOCAL]->vport);
}

static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(1); /* IFLA_OPERSTATE */
}

static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct dp_port *port,
			  int event, unsigned int flags)
{
	const struct datapath *dp = port->dp;
	int ifindex = vport_get_ifindex(port->vport);
	int iflink = vport_get_iflink(port->vport);
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	if (ifindex < 0)
		return ifindex;

	if (iflink < 0)
		return iflink;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = ARPHRD_ETHER;
	hdr->ifi_index = ifindex;
	hdr->ifi_flags = vport_get_flags(port->vport);
	hdr->ifi_change = 0;

	NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port->vport));
	NLA_PUT_U32(skb, IFLA_MASTER, vport_get_ifindex(dp->ports[ODPP_LOCAL]->vport));
	NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port->vport));
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   vport_is_running(port->vport)
			? vport_get_operstate(port->vport)
			: IF_OPER_DOWN);
#endif

	NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN,
		vport_get_addr(port->vport));

	if (ifindex != iflink)
		NLA_PUT_U32(skb, IFLA_LINK, iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void dp_ifinfo_notify(int event, struct dp_port *port)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dp_fill_ifinfo(skb, port, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
}

static void release_dp(struct kobject *kobj)
{
	struct datapath *dp = container_of(kobj, struct datapath, ifobj);
	kfree(dp);
}

static struct kobj_type dp_ktype = {
	.release = release_dp
};

static int create_dp(int dp_idx, const char __user *devnamep)
{
	struct odp_port internal_dev_port;
	char devname[IFNAMSIZ];
	struct datapath *dp;
	int err;
	int i;

	if (devnamep) {
		err = -EFAULT;
		if (strncpy_from_user(devname, devnamep, IFNAMSIZ - 1) < 0)
			goto err;
		devname[IFNAMSIZ - 1] = '\0';
	} else {
		snprintf(devname, sizeof devname, "of%d", dp_idx);
	}

	rtnl_lock();
	mutex_lock(&dp_mutex);
	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock;

	/* Exit early if a datapath with that number already exists.
	 * (We don't use -EEXIST because that's ambiguous with 'devname'
	 * conflicting with an existing network device name.) */
	err = -EBUSY;
	if (get_dp(dp_idx))
		goto err_put_module;

	err = -ENOMEM;
	dp = kzalloc(sizeof *dp, GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;
	INIT_LIST_HEAD(&dp->port_list);
	mutex_init(&dp->mutex);
	dp->dp_idx = dp_idx;
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
	dp->ifobj.kset = NULL;
	kobject_init(&dp->ifobj, &dp_ktype);

	/* Allocate table. */
	err = -ENOMEM;
	rcu_assign_pointer(dp->table, dp_table_create(DP_L1_SIZE));
	if (!dp->table)
		goto err_free_dp;

	/* Set up our datapath device. */
	strncpy(internal_dev_port.devname, devname, IFNAMSIZ - 1);
	internal_dev_port.flags = ODP_PORT_INTERNAL;
	err = new_dp_port(dp, &internal_dev_port, ODPP_LOCAL);
	if (err) {
		if (err == -EBUSY)
			err = -EEXIST;
		goto err_destroy_table;
	}

	dp->drop_frags = 0;
	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu)
		goto err_destroy_local_port;

	rcu_assign_pointer(dps[dp_idx], dp);
	mutex_unlock(&dp_mutex);
	rtnl_unlock();

	dp_sysfs_add_dp(dp);

	return 0;

err_destroy_local_port:
	dp_detach_port(dp->ports[ODPP_LOCAL], 1);
err_destroy_table:
	dp_table_destroy(dp->table, 0);
err_free_dp:
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
err:
	return err;
}

static void do_destroy_dp(struct datapath *dp)
{
	struct dp_port *p, *n;
	int i;

	list_for_each_entry_safe (p, n, &dp->port_list, node)
		if (p->port_no != ODPP_LOCAL)
			dp_detach_port(p, 1);

	dp_sysfs_del_dp(dp);

	rcu_assign_pointer(dps[dp->dp_idx], NULL);

	dp_detach_port(dp->ports[ODPP_LOCAL], 1);

	dp_table_destroy(dp->table, 1);

	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_purge(&dp->queues[i]);
	for (i = 0; i < DP_MAX_GROUPS; i++)
		kfree(dp->groups[i]);
	free_percpu(dp->stats_percpu);
	kobject_put(&dp->ifobj);
	module_put(THIS_MODULE);
}

static int destroy_dp(int dp_idx)
{
	struct datapath *dp;
	int err;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	do_destroy_dp(dp);
	err = 0;

err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
	return err;
}

static void release_dp_port(struct kobject *kobj)
{
	struct dp_port *p = container_of(kobj, struct dp_port, kobj);
	kfree(p);
}

static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
	.sysfs_ops = &brport_sysfs_ops,
#endif
	.release = release_dp_port
};

/* Called with RTNL lock and dp_mutex. */
static int new_dp_port(struct datapath *dp, struct odp_port *odp_port, int port_no)
{
	struct vport *vport;
	struct dp_port *p;
	int err;

	vport = vport_locate(odp_port->devname);
	if (!vport) {
		vport_lock();

		if (odp_port->flags & ODP_PORT_INTERNAL)
			vport = __vport_add(odp_port->devname, "internal", NULL);
		else
			vport = __vport_add(odp_port->devname, "netdev", NULL);

		vport_unlock();

		if (IS_ERR(vport))
			return PTR_ERR(vport);
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->port_no = port_no;
	p->dp = dp;
	atomic_set(&p->sflow_pool, 0);

	err = vport_attach(vport, p);
	if (err) {
		kfree(p);
		return err;
	}

	rcu_assign_pointer(dp->ports[port_no], p);
	list_add_rcu(&p->node, &dp->port_list);
	dp->n_ports++;

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brport later, if sysfs is enabled. */
	p->kobj.kset = NULL;
	kobject_init(&p->kobj, &brport_ktype);

	dp_ifinfo_notify(RTM_NEWLINK, p);

	return 0;
}

static int attach_port(int dp_idx, struct odp_port __user *portp)
{
	struct datapath *dp;
	struct odp_port port;
	int port_no;
	int err;

	err = -EFAULT;
	if (copy_from_user(&port, portp, sizeof port))
		goto out;
	port.devname[IFNAMSIZ - 1] = '\0';

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
		if (!dp->ports[port_no])
			goto got_port_no;
	err = -EFBIG;
	goto out_unlock_dp;

got_port_no:
	err = new_dp_port(dp, &port, port_no);
	if (err)
		goto out_unlock_dp;

	if (!(port.flags & ODP_PORT_INTERNAL))
		set_internal_devs_mtu(dp);
	dp_sysfs_add_if(dp->ports[port_no]);

	err = __put_user(port_no, &portp->port);

out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}

int dp_detach_port(struct dp_port *p, int may_delete)
{
	struct vport *vport = p->vport;
	int err;

	ASSERT_RTNL();

	if (p->port_no != ODPP_LOCAL)
		dp_sysfs_del_if(p);
	dp_ifinfo_notify(RTM_DELLINK, p);

	/* First drop references to device. */
	p->dp->n_ports--;
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);

	err = vport_detach(vport);
	if (err)
		return err;

	/* Then wait until no one is still using it, and destroy it. */
	synchronize_rcu();

	if (may_delete) {
		const char *port_type = vport_get_type(vport);

		if (!strcmp(port_type, "netdev") || !strcmp(port_type, "internal")) {
			vport_lock();
			__vport_del(vport);
			vport_unlock();
		}
	}

	kobject_put(&p->kobj);

	return 0;
}

static int detach_port(int dp_idx, int port_no)
{
	struct dp_port *p;
	struct datapath *dp;
	int err;

	err = -EINVAL;
	if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)
		goto out;

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	p = dp->ports[port_no];
	err = -ENOENT;
	if (!p)
		goto out_unlock_dp;

	err = dp_detach_port(p, 1);

out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}

/* Must be called with rcu_read_lock and with bottom-halves disabled. */
void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	struct odp_flow_key key;
	struct sw_flow *flow;

	WARN_ON_ONCE(skb_shared(skb));
	skb_warn_if_lro(skb);

	OVS_CB(skb)->dp_port = p;
	compute_ip_summed(skb, false);

	/* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
	stats = percpu_ptr(dp->stats_percpu, smp_processor_id());

	if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
		if (dp->drop_frags) {
			kfree_skb(skb);
			stats->n_frags++;
			return;
		}
	}

	flow = dp_table_lookup(rcu_dereference(dp->table), &key);
	if (flow) {
		struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts);
		flow_used(flow, skb);
		execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
				GFP_ATOMIC);
		stats->n_hit++;
	} else {
		stats->n_missed++;
		dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
	}
}

#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
/* This code is based on skb_checksum_setup() from net/core/dev.c of a
 * combination of Lenny's 2.6.26 Xen kernel and Xen's
 * linux-2.6.18-92.1.10.el5.xs5.0.0.394.644.  We can't call this function
 * directly because it isn't exported in all versions. */
static int skb_pull_up_to(struct sk_buff *skb, void *ptr)
{
	if (ptr < (void *)skb->tail)
		return 1;
	if (__pskb_pull_tail(skb,
			     ptr - (void *)skb->data - skb_headlen(skb))) {
		return 1;
	} else {
		return 0;
	}
}

int vswitch_skb_checksum_setup(struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;
	__u16 csum_start, csum_offset;

	if (!skb->proto_csum_blank)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	if (!skb_pull_up_to(skb, skb_network_header(skb) + sizeof(struct iphdr)))
		goto out;

	iph = ip_hdr(skb);
	th = skb_network_header(skb) + 4 * iph->ihl;

	csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		csum_offset = offsetof(struct udphdr, check);
		break;
	default:
		if (net_ratelimit())
			printk(KERN_ERR "Attempting to checksum a non-"
			       "TCP/UDP packet, dropping a protocol"
			       " %d packet", iph->protocol);
		goto out;
	}

	if (!skb_pull_up_to(skb, th + csum_offset + 2))
		goto out;

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->proto_csum_blank = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
	skb->csum_start = csum_start;
	skb->csum_offset = csum_offset;
#else
	skb_set_transport_header(skb, csum_start - skb_headroom(skb));
	skb->csum = csum_offset;
#endif

	err = 0;

out:
	return err;
}
#endif /* CONFIG_XEN && HAVE_PROTO_DATA_VALID */

/* Types of checksums that we can receive (these all refer to L4 checksums):
 * 1. CHECKSUM_NONE: Device that did not compute checksum, contains full
 *    (though not verified) checksum in packet but not in skb->csum.  Packets
 *    from the bridge local port will also have this type.
 * 2. CHECKSUM_COMPLETE (CHECKSUM_HW): Good device that computes checksums,
 *    also the GRE module.  This is the same as CHECKSUM_NONE, except it has
 *    a valid skb->csum.  Importantly, both contain a full checksum (not
 *    verified) in the packet itself.  The only difference is that if the
 *    packet gets to L4 processing on this machine (not in DomU) we won't
 *    have to recompute the checksum to verify.  Most hardware devices do not
 *    produce packets with this type, even if they support receive checksum
 *    offloading (they produce type #5).
 * 3. CHECKSUM_PARTIAL (CHECKSUM_HW): Packet without a full checksum; one must
 *    be computed if the packet is sent off box.  Unfortunately on earlier
 *    kernels, this case is impossible to distinguish from #2, despite having
 *    opposite meanings.  Xen adds an extra field on earlier kernels (see #4)
 *    in order to distinguish the different states.  The only real user of
 *    this type with bridging is Xen (on later kernels).
 * 4. CHECKSUM_UNNECESSARY (with proto_csum_blank true): This packet was
 *    generated locally by a Xen DomU and has a partial checksum.  If it is
 *    handled on this machine (Dom0 or DomU), then the checksum will not be
 *    computed.  If it goes off box, the checksum in the packet needs to be
 *    completed.  Calling skb_checksum_setup converts this to CHECKSUM_HW
 *    (CHECKSUM_PARTIAL) so that the checksum can be completed.  In later
 *    kernels, this combination is replaced with CHECKSUM_PARTIAL.
 * 5. CHECKSUM_UNNECESSARY (with proto_csum_blank false): Packet with a correct
 *    full checksum or using a protocol without a checksum.  skb->csum is
 *    undefined.  This is common from devices with receive checksum
 *    offloading.  This is somewhat similar to CHECKSUM_NONE, except that
 *    nobody will try to verify the checksum with CHECKSUM_UNNECESSARY.
 *
 * Note that on earlier kernels, CHECKSUM_COMPLETE and CHECKSUM_PARTIAL are
 * both defined as CHECKSUM_HW.  Normally the meaning of CHECKSUM_HW is clear
 * based on whether it is on the transmit or receive path.  After the datapath
 * it will be interpreted as CHECKSUM_PARTIAL.  If the packet already has a
 * checksum, we will panic.  Since we can receive packets with checksums, we
 * assume that all CHECKSUM_HW packets have checksums and map them to
 * CHECKSUM_NONE, which has a similar meaning (it is only different if the
 * packet is processed by the local IP stack, in which case it will need to
 * be reverified).  If we receive a packet with CHECKSUM_HW that really means
 * CHECKSUM_PARTIAL, it will be sent with the wrong checksum.  However, there
 * shouldn't be any devices that do this with bridging.
 *
 * The bridge has similar behavior and this function closely resembles
 * skb_forward_csum().  It is slightly different because we are only concerned
 * with bridging and not other types of forwarding and can get away with
 * slightly more optimal behavior. */

void compute_ip_summed(struct sk_buff *skb, bool xmit)
{
	/* For our convenience these defines change repeatedly between kernel
	 * versions, so we can't just copy them over... */
	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
		break;
	case CHECKSUM_UNNECESSARY:
		OVS_CB(skb)->ip_summed = OVS_CSUM_UNNECESSARY;
		break;
#ifdef CHECKSUM_HW
	/* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE.
	 * However, we should only get CHECKSUM_PARTIAL packets from Xen, which
	 * uses some special fields to represent this (see below).  Since we
	 * can only make one type work, pick the one that actually happens in
	 * practice.
	 *
	 * The one exception is the transmit path (basically after
	 * skb_checksum_setup() has been run): there the type has already been
	 * converted, so we should stay with that. */
	case CHECKSUM_HW:
		if (!xmit)
			OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
		else
			OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
		break;
#else
	case CHECKSUM_COMPLETE:
		OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
		break;
	case CHECKSUM_PARTIAL:
		OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
		break;
#endif
	default:
		printk(KERN_ERR "openvswitch: unknown checksum type %d\n",
		       skb->ip_summed);
		/* None seems the safest... */
		OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
	}

#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
	/* Xen has a special way of representing CHECKSUM_PARTIAL on older
	 * kernels.  It should not be set on the transmit path though. */
	if (skb->proto_csum_blank)
		OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;

	WARN_ON_ONCE(skb->proto_csum_blank && xmit);
#endif
}

void forward_ip_summed(struct sk_buff *skb)
{
#ifdef CHECKSUM_HW
	if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
#endif
}

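/*
 * Editor's note: a minimal sketch (not part of the original file) of how the
 * two helpers above pair up across the datapath, per the long comment before
 * compute_ip_summed(): checksum state is captured into the private OVS_CB()
 * area on receive and translated back before the packet reaches another
 * device.  The wrapper function itself is illustrative only.
 */
#if 0
static void example_checksum_flow(struct sk_buff *skb)
{
	/* On receive: record the kernel's checksum state in OVS_CB(skb). */
	compute_ip_summed(skb, false);

	/* ... switching decisions happen here ... */

	/* Before transmit: OVS_CSUM_COMPLETE is meaningless to the next
	 * device, so it is downgraded to CHECKSUM_NONE. */
	forward_ip_summed(skb);
}
#endif
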
/* Append each packet in 'skb' list to 'queue'.  There will be only one packet
 * unless we broke up a GSO packet. */
static int queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
				 int queue_no, u32 arg)
{
	struct sk_buff *nskb;
	int port_no;
	int err;

	if (OVS_CB(skb)->dp_port)
		port_no = OVS_CB(skb)->dp_port->port_no;
	else
		port_no = ODPP_LOCAL;

	do {
		struct odp_msg *header;

		nskb = skb->next;
		skb->next = NULL;

		/* If a checksum-deferred packet is forwarded to the
		 * controller, correct the pointers and checksum.  This happens
		 * on a regular basis only on Xen, on which VMs can pass up
		 * packets that do not have their checksum computed.
		 */
		err = vswitch_skb_checksum_setup(skb);
		if (err)
			goto err_kfree_skbs;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
			/* Until 2.6.22, the start of the transport header was
			 * also the start of data to be checksummed.  Linux
			 * 2.6.22 introduced the csum_start field for this
			 * purpose, but we should point the transport header to
			 * it anyway for backward compatibility, as
			 * dev_queue_xmit() does even in 2.6.28. */
			skb_set_transport_header(skb, skb->csum_start -
						 skb_headroom(skb));
#endif
			err = skb_checksum_help(skb);
			if (err)
				goto err_kfree_skbs;
		}
#else
		if (skb->ip_summed == CHECKSUM_HW) {
			err = skb_checksum_help(skb, 0);
			if (err)
				goto err_kfree_skbs;
		}
#endif

		err = skb_cow(skb, sizeof *header);
		if (err)
			goto err_kfree_skbs;

		header = (struct odp_msg*)__skb_push(skb, sizeof *header);
		header->type = queue_no;
		header->length = skb->len;
		header->port = port_no;
		header->reserved = 0;
		header->arg = arg;
		skb_queue_tail(queue, skb);

		skb = nskb;
	} while (skb);
	return 0;

err_kfree_skbs:
	kfree_skb(skb);
	while ((skb = nskb) != NULL) {
		nskb = skb->next;
		kfree_skb(skb);
	}
	return err;
}

int dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
		      u32 arg)
{
	struct dp_stats_percpu *stats;
	struct sk_buff_head *queue;
	int err;

	WARN_ON_ONCE(skb_shared(skb));
	BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR && queue_no != _ODPL_SFLOW_NR);
	queue = &dp->queues[queue_no];
	err = -ENOBUFS;
	if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
		goto err_kfree_skb;

	forward_ip_summed(skb);

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, 0);

		kfree_skb(skb);
		skb = nskb;
		if (unlikely(IS_ERR(skb))) {
			err = PTR_ERR(skb);
			goto err;
		} else if (!skb) {
			/* XXX This case might not be possible.  It's hard to
			 * tell from the skb_gso_segment() code and comment. */
			err = -EINVAL;
			goto err;
		}
	}

	err = queue_control_packets(skb, queue, queue_no, arg);
	wake_up_interruptible(&dp->waitqueue);
	return err;

err_kfree_skb:
	kfree_skb(skb);
err:
	stats = percpu_ptr(dp->stats_percpu, get_cpu());
	stats->n_lost++;
	put_cpu();

	return err;
}

static int flush_flows(struct datapath *dp)
{
	dp->n_flows = 0;
	return dp_table_flush(dp);
}

static int validate_actions(const struct sw_flow_actions *actions)
{
	unsigned int i;

	for (i = 0; i < actions->n_actions; i++) {
		const union odp_action *a = &actions->actions[i];
		switch (a->type) {
		case ODPAT_OUTPUT:
			if (a->output.port >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case ODPAT_OUTPUT_GROUP:
			if (a->output_group.group >= DP_MAX_GROUPS)
				return -EINVAL;
			break;

		case ODPAT_SET_VLAN_VID:
			if (a->vlan_vid.vlan_vid & htons(~VLAN_VID_MASK))
				return -EINVAL;
			break;

		case ODPAT_SET_VLAN_PCP:
			if (a->vlan_pcp.vlan_pcp
			    & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT))
				return -EINVAL;
			break;

		case ODPAT_SET_NW_TOS:
			if (a->nw_tos.nw_tos & INET_ECN_MASK)
				return -EINVAL;
			break;

		default:
			if (a->type >= ODPAT_N_ACTIONS)
				return -EOPNOTSUPP;
			break;
		}
	}

	return 0;
}

static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
{
	struct sw_flow_actions *actions;
	int error;

	actions = flow_actions_alloc(flow->n_actions);
	error = PTR_ERR(actions);
	if (IS_ERR(actions))
		goto error;

	error = -EFAULT;
	if (copy_from_user(actions->actions, flow->actions,
			   flow->n_actions * sizeof(union odp_action)))
		goto error_free_actions;
	error = validate_actions(actions);
	if (error)
		goto error_free_actions;

	return actions;

error_free_actions:
	kfree(actions);
error:
	return ERR_PTR(error);
}

static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
{
	if (flow->used.tv_sec) {
		stats->used_sec = flow->used.tv_sec;
		stats->used_nsec = flow->used.tv_nsec;
	} else {
		stats->used_sec = 0;
		stats->used_nsec = 0;
	}
	stats->n_packets = flow->packet_count;
	stats->n_bytes = flow->byte_count;
	stats->ip_tos = flow->ip_tos;
	stats->tcp_flags = flow->tcp_flags;
	stats->error = 0;
}

static void clear_stats(struct sw_flow *flow)
{
	flow->used.tv_sec = flow->used.tv_nsec = 0;
	flow->tcp_flags = 0;
	flow->ip_tos = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}

static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
{
	struct odp_flow_put uf;
	struct sw_flow *flow;
	struct dp_table *table;
	struct odp_flow_stats stats;
	int error;

	error = -EFAULT;
	if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
		goto error;
	memset(uf.flow.key.reserved, 0, sizeof uf.flow.key.reserved);

	table = rcu_dereference(dp->table);
	flow = dp_table_lookup(table, &uf.flow.key);
	if (!flow) {
		/* No such flow. */
		struct sw_flow_actions *acts;

		error = -ENOENT;
		if (!(uf.flags & ODPPF_CREATE))
			goto error;

		/* Expand table, if necessary, to make room. */
		if (dp->n_flows >= table->n_buckets) {
			error = -ENOSPC;
			if (table->n_buckets >= DP_MAX_BUCKETS)
				goto error;

			error = dp_table_expand(dp);
			if (error)
				goto error;
			table = rcu_dereference(dp->table);
		}

		/* Allocate flow. */
		error = -ENOMEM;
		flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
		if (flow == NULL)
			goto error;
		flow->key = uf.flow.key;
		spin_lock_init(&flow->lock);
		clear_stats(flow);

		/* Obtain actions. */
		acts = get_actions(&uf.flow);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = dp_table_insert(table, flow);
		if (error)
			goto error_free_flow_acts;
		dp->n_flows++;
		memset(&stats, 0, sizeof(struct odp_flow_stats));
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts, *new_acts;
		unsigned long int flags;

		/* Bail out if we're not allowed to modify an existing flow. */
		error = -EEXIST;
		if (!(uf.flags & ODPPF_MODIFY))
			goto error;

		/* Swap actions. */
		new_acts = get_actions(&uf.flow);
		error = PTR_ERR(new_acts);
		if (IS_ERR(new_acts))
			goto error;
		old_acts = rcu_dereference(flow->sf_acts);
		if (old_acts->n_actions != new_acts->n_actions ||
		    memcmp(old_acts->actions, new_acts->actions,
			   sizeof(union odp_action) * old_acts->n_actions)) {
			rcu_assign_pointer(flow->sf_acts, new_acts);
			flow_deferred_free_acts(old_acts);
		} else {
			kfree(new_acts);
		}

		/* Fetch stats, then clear them if necessary. */
		spin_lock_irqsave(&flow->lock, flags);
		get_stats(flow, &stats);
		if (uf.flags & ODPPF_ZERO_STATS)
			clear_stats(flow);
		spin_unlock_irqrestore(&flow->lock, flags);
	}

	/* Copy stats to userspace. */
	if (__copy_to_user(&ufp->flow.stats, &stats,
			   sizeof(struct odp_flow_stats)))
		return -EFAULT;
	return 0;

error_free_flow_acts:
	kfree(flow->sf_acts);
error_free_flow:
	kmem_cache_free(flow_cache, flow);
error:
	return error;
}

static int put_actions(const struct sw_flow *flow, struct odp_flow __user *ufp)
{
	union odp_action __user *actions;
	struct sw_flow_actions *sf_acts;
	u32 n_actions;

	if (__get_user(actions, &ufp->actions) ||
	    __get_user(n_actions, &ufp->n_actions))
		return -EFAULT;

	if (!n_actions)
		return 0;

	sf_acts = rcu_dereference(flow->sf_acts);
	if (__put_user(sf_acts->n_actions, &ufp->n_actions) ||
	    (actions && copy_to_user(actions, sf_acts->actions,
				     sizeof(union odp_action) *
				     min(sf_acts->n_actions, n_actions))))
		return -EFAULT;

	return 0;
}

static int answer_query(struct sw_flow *flow, u32 query_flags,
			struct odp_flow __user *ufp)
{
	struct odp_flow_stats stats;
	unsigned long int flags;

	spin_lock_irqsave(&flow->lock, flags);
	get_stats(flow, &stats);

	if (query_flags & ODPFF_ZERO_TCP_FLAGS) {
		flow->tcp_flags = 0;
	}
	spin_unlock_irqrestore(&flow->lock, flags);

	if (__copy_to_user(&ufp->stats, &stats, sizeof(struct odp_flow_stats)))
		return -EFAULT;
	return put_actions(flow, ufp);
}

static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
{
	struct dp_table *table = rcu_dereference(dp->table);
	struct odp_flow uf;
	struct sw_flow *flow;
	int error;

	error = -EFAULT;
	if (copy_from_user(&uf, ufp, sizeof uf))
		goto error;
	memset(uf.key.reserved, 0, sizeof uf.key.reserved);

	flow = dp_table_lookup(table, &uf.key);
	error = -ENOENT;
	if (!flow)
		goto error;

	/* XXX redundant lookup */
	error = dp_table_delete(table, flow);
	if (error)
		goto error;

	/* XXX These statistics might lose a few packets, since other CPUs can
	 * be using this flow.  We used to synchronize_rcu() to make sure that
	 * we get completely accurate stats, but that blows our performance,
	 * badly. */
	dp->n_flows--;
	error = answer_query(flow, 0, ufp);
	flow_deferred_free(flow);

error:
	return error;
}

static int query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct dp_table *table = rcu_dereference(dp->table);
	u32 i;

	for (i = 0; i < flowvec->n_flows; i++) {
		struct odp_flow __user *ufp = &flowvec->flows[i];
		struct odp_flow uf;
		struct sw_flow *flow;
		int error;

		if (__copy_from_user(&uf, ufp, sizeof uf))
			return -EFAULT;
		memset(uf.key.reserved, 0, sizeof uf.key.reserved);

		flow = dp_table_lookup(table, &uf.key);
		if (!flow)
			error = __put_user(ENOENT, &ufp->stats.error);
		else
			error = answer_query(flow, uf.flags, ufp);
		if (error)
			return error;
	}
	return flowvec->n_flows;
}

struct list_flows_cbdata {
	struct odp_flow __user *uflows;
	u32 n_flows;
	u32 listed_flows;
};

static int list_flow(struct sw_flow *flow, void *cbdata_)
{
	struct list_flows_cbdata *cbdata = cbdata_;
	struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
	int error;

	if (__copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
		return -EFAULT;
	error = answer_query(flow, 0, ufp);
	if (error)
		return error;

	if (cbdata->listed_flows >= cbdata->n_flows)
		return cbdata->listed_flows;
	return 0;
}

static int list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct list_flows_cbdata cbdata;
	int error;

	if (!flowvec->n_flows)
		return 0;

	cbdata.uflows = flowvec->flows;
	cbdata.n_flows = flowvec->n_flows;
	cbdata.listed_flows = 0;
	error = dp_table_foreach(rcu_dereference(dp->table),
				 list_flow, &cbdata);
	return error ? error : cbdata.listed_flows;
}

static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
			    int (*function)(struct datapath *,
					    const struct odp_flowvec *))
{
	struct odp_flowvec __user *uflowvec;
	struct odp_flowvec flowvec;
	int retval;

	uflowvec = (struct odp_flowvec __user *)argp;
	if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
	    copy_from_user(&flowvec, uflowvec, sizeof flowvec))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, flowvec.flows,
		       flowvec.n_flows * sizeof(struct odp_flow)))
		return -EFAULT;

	retval = function(dp, &flowvec);
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: __put_user(retval, &uflowvec->n_flows));
}

static int do_execute(struct datapath *dp, const struct odp_execute *executep)
{
	struct odp_execute execute;
	struct odp_flow_key key;
	struct sk_buff *skb;
	struct sw_flow_actions *actions;
	struct ethhdr *eth;
	int err;

	err = -EFAULT;
	if (copy_from_user(&execute, executep, sizeof execute))
		goto error;

	err = -EINVAL;
	if (execute.length < ETH_HLEN || execute.length > 65535)
		goto error;

	err = -ENOMEM;
	actions = flow_actions_alloc(execute.n_actions);
	if (!actions)
		goto error;

	err = -EFAULT;
	if (copy_from_user(actions->actions, execute.actions,
			   execute.n_actions * sizeof *execute.actions))
		goto error_free_actions;

	err = validate_actions(actions);
	if (err)
		goto error_free_actions;

	err = -ENOMEM;
	skb = alloc_skb(execute.length, GFP_KERNEL);
	if (!skb)
		goto error_free_actions;

	if (execute.in_port < DP_MAX_PORTS)
		OVS_CB(skb)->dp_port = dp->ports[execute.in_port];
	else
		OVS_CB(skb)->dp_port = NULL;

	err = -EFAULT;
	if (copy_from_user(skb_put(skb, execute.length), execute.data,
			   execute.length))
		goto error_free_skb;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= 1536)
		skb->protocol = eth->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	flow_extract(skb, execute.in_port, &key);
	err = execute_actions(dp, skb, &key, actions->actions,
			      actions->n_actions, GFP_KERNEL);
	kfree(actions);
	return err;

error_free_skb:
	kfree_skb(skb);
error_free_actions:
	kfree(actions);
error:
	return err;
}

static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
	struct odp_stats stats;
	int i;

	stats.n_flows = dp->n_flows;
	stats.cur_capacity = rcu_dereference(dp->table)->n_buckets;
	stats.max_capacity = DP_MAX_BUCKETS;
	stats.n_ports = dp->n_ports;
	stats.max_ports = DP_MAX_PORTS;
	stats.max_groups = DP_MAX_GROUPS;
	stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *s;
		s = percpu_ptr(dp->stats_percpu, i);
		stats.n_frags += s->n_frags;
		stats.n_hit += s->n_hit;
		stats.n_missed += s->n_missed;
		stats.n_lost += s->n_lost;
	}
	stats.max_miss_queue = DP_MAX_QUEUE_LEN;
	stats.max_action_queue = DP_MAX_QUEUE_LEN;
	return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
}

/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int dp_min_mtu(const struct datapath *dp)
{
	struct dp_port *p;
	int mtu = 0;

	ASSERT_RTNL();

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		int dev_mtu;

		/* Skip any internal ports, since that's what we're trying to
		 * set. */
		if (is_internal_vport(p->vport))
			continue;

		dev_mtu = vport_get_mtu(p->vport);
		if (!mtu || dev_mtu < mtu)
			mtu = dev_mtu;
	}

	return mtu ? mtu : ETH_DATA_LEN;
}

/* Sets the MTU of all datapath devices to the minimum of the ports.  Must
 * be called with RTNL lock and dp_mutex. */
void set_internal_devs_mtu(const struct datapath *dp)
{
	struct dp_port *p;
	int mtu;

	ASSERT_RTNL();

	mtu = dp_min_mtu(dp);

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		if (is_internal_vport(p->vport))
			vport_set_mtu(p->vport, mtu);
	}
}

static int put_port(const struct dp_port *p, struct odp_port __user *uop)
{
	struct odp_port op;

	memset(&op, 0, sizeof op);

	rcu_read_lock();
	strncpy(op.devname, vport_get_name(p->vport), sizeof op.devname);
	rcu_read_unlock();

	op.port = p->port_no;
	op.flags = is_internal_vport(p->vport) ? ODP_PORT_INTERNAL : 0;

	return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
}

static int query_port(struct datapath *dp, struct odp_port __user *uport)
{
	struct odp_port port;

	if (copy_from_user(&port, uport, sizeof port))
		return -EFAULT;

	if (port.devname[0]) {
		struct vport *vport;
		struct dp_port *dp_port;
		int err = 0;

		port.devname[IFNAMSIZ - 1] = '\0';

		vport_lock();

		vport = vport_locate(port.devname);
		if (!vport) {
			err = -ENODEV;
			goto error_unlock;
		}

		dp_port = vport_get_dp_port(vport);
		if (!dp_port || dp_port->dp != dp) {
			err = -ENOENT;
			goto error_unlock;
		}

		port.port = dp_port->port_no;

error_unlock:
		vport_unlock();

		if (err)
			return err;
	} else {
		if (port.port >= DP_MAX_PORTS)
			return -EINVAL;
		if (!dp->ports[port.port])
			return -ENOENT;
	}

	return put_port(dp->ports[port.port], uport);
}

static int list_ports(struct datapath *dp, struct odp_portvec __user *pvp)
{
	struct odp_portvec pv;
	struct dp_port *p;
	int idx;

	if (copy_from_user(&pv, pvp, sizeof pv))
		return -EFAULT;

	idx = 0;
	if (pv.n_ports) {
		list_for_each_entry_rcu (p, &dp->port_list, node) {
			if (put_port(p, &pv.ports[idx]))
				return -EFAULT;
			if (idx++ >= pv.n_ports)
				break;
		}
	}
	return put_user(dp->n_ports, &pvp->n_ports);
}

/* RCU callback for freeing a dp_port_group */
static void free_port_group(struct rcu_head *rcu)
{
	struct dp_port_group *g = container_of(rcu, struct dp_port_group, rcu);
	kfree(g);
}

static int set_port_group(struct datapath *dp, const struct odp_port_group __user *upg)
{
	struct odp_port_group pg;
	struct dp_port_group *new_group, *old_group;
	int error;

	error = -EFAULT;
	if (copy_from_user(&pg, upg, sizeof pg))
		goto error;

	error = -EINVAL;
	if (pg.n_ports > DP_MAX_PORTS || pg.group >= DP_MAX_GROUPS)
		goto error;

	error = -ENOMEM;
	new_group = kmalloc(sizeof *new_group + sizeof(u16) * pg.n_ports,
			    GFP_KERNEL);
	if (!new_group)
		goto error;

	new_group->n_ports = pg.n_ports;
	error = -EFAULT;
	if (copy_from_user(new_group->ports, pg.ports,
			   sizeof(u16) * pg.n_ports))
		goto error_free;

	old_group = rcu_dereference(dp->groups[pg.group]);
	rcu_assign_pointer(dp->groups[pg.group], new_group);
	if (old_group)
		call_rcu(&old_group->rcu, free_port_group);
	return 0;

error_free:
	kfree(new_group);
error:
	return error;
}

static int get_port_group(struct datapath *dp, struct odp_port_group __user *upg)
{
	struct odp_port_group pg;
	struct dp_port_group *g;
	u16 n_copy;

	if (copy_from_user(&pg, upg, sizeof pg))
		return -EFAULT;

	if (pg.group >= DP_MAX_GROUPS)
		return -EINVAL;

	g = dp->groups[pg.group];
	n_copy = g ? min_t(int, g->n_ports, pg.n_ports) : 0;
	if (n_copy && copy_to_user(pg.ports, g->ports, n_copy * sizeof(u16)))
		return -EFAULT;

	if (put_user(g ? g->n_ports : 0, &upg->n_ports))
		return -EFAULT;

	return 0;
}

static int get_listen_mask(const struct file *f)
{
	return (long)f->private_data;
}

static void set_listen_mask(struct file *f, int listen_mask)
{
	f->private_data = (void*)(long)listen_mask;
}

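/*
 * Editor's note (not part of the original file): the listen mask stashed in
 * file->private_data above is a bitmap over the DP_N_QUEUES upcall queues,
 * indexed by the same _ODPL_*_NR constants used by dp_output_control() and
 * dp_has_packet_of_interest().  A sketch of the encoding:
 *
 *	mask = (1 << _ODPL_MISS_NR)	-- flow-table misses
 *	     | (1 << _ODPL_ACTION_NR)	-- packets sent to userspace by an action
 *	     | (1 << _ODPL_SFLOW_NR);	-- sFlow samples
 */
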
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int drop_frags, listeners, port_no;
	unsigned int sflow_probability;
	int err;

	/* Handle commands with special locking requirements up front. */
	switch (cmd) {
	case ODP_DP_CREATE:
		err = create_dp(dp_idx, (char __user *)argp);
		goto exit;

	case ODP_DP_DESTROY:
		err = destroy_dp(dp_idx);
		goto exit;

	case ODP_PORT_ATTACH:
		err = attach_port(dp_idx, (struct odp_port __user *)argp);
		goto exit;

	case ODP_PORT_DETACH:
		err = get_user(port_no, (int __user *)argp);
		if (!err)
			err = detach_port(dp_idx, port_no);
		goto exit;

	case ODP_VPORT_ADD:
		err = vport_add((struct odp_vport_add __user *)argp);
		goto exit;

	case ODP_VPORT_MOD:
		err = vport_mod((struct odp_vport_mod __user *)argp);
		goto exit;

	case ODP_VPORT_DEL:
		err = vport_del((char __user *)argp);
		goto exit;

	case ODP_VPORT_STATS_GET:
		err = vport_stats_get((struct odp_vport_stats_req __user *)argp);
		goto exit;

	case ODP_VPORT_ETHER_GET:
		err = vport_ether_get((struct odp_vport_ether __user *)argp);
		goto exit;

	case ODP_VPORT_ETHER_SET:
		err = vport_ether_set((struct odp_vport_ether __user *)argp);
		goto exit;

	case ODP_VPORT_MTU_GET:
		err = vport_mtu_get((struct odp_vport_mtu __user *)argp);
		goto exit;

	case ODP_VPORT_MTU_SET:
		err = vport_mtu_set((struct odp_vport_mtu __user *)argp);
		goto exit;
	}

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	switch (cmd) {
	case ODP_DP_STATS:
		err = get_dp_stats(dp, (struct odp_stats __user *)argp);
		break;

	case ODP_GET_DROP_FRAGS:
		err = put_user(dp->drop_frags, (int __user *)argp);
		break;

	case ODP_SET_DROP_FRAGS:
		err = get_user(drop_frags, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (drop_frags != 0 && drop_frags != 1)
			break;
		dp->drop_frags = drop_frags;
		err = 0;
		break;

	case ODP_GET_LISTEN_MASK:
		err = put_user(get_listen_mask(f), (int __user *)argp);
		break;

	case ODP_SET_LISTEN_MASK:
		err = get_user(listeners, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (listeners & ~ODPL_ALL)
			break;
		err = 0;
		set_listen_mask(f, listeners);
		break;

	case ODP_GET_SFLOW_PROBABILITY:
		err = put_user(dp->sflow_probability, (unsigned int __user *)argp);
		break;

	case ODP_SET_SFLOW_PROBABILITY:
		err = get_user(sflow_probability, (unsigned int __user *)argp);
		if (!err)
			dp->sflow_probability = sflow_probability;
		break;

	case ODP_PORT_QUERY:
		err = query_port(dp, (struct odp_port __user *)argp);
		break;

	case ODP_PORT_LIST:
		err = list_ports(dp, (struct odp_portvec __user *)argp);
		break;

	case ODP_PORT_GROUP_SET:
		err = set_port_group(dp, (struct odp_port_group __user *)argp);
		break;

	case ODP_PORT_GROUP_GET:
		err = get_port_group(dp, (struct odp_port_group __user *)argp);
		break;

	case ODP_FLOW_FLUSH:
		err = flush_flows(dp);
		break;

	case ODP_FLOW_PUT:
		err = put_flow(dp, (struct odp_flow_put __user *)argp);
		break;

	case ODP_FLOW_DEL:
		err = del_flow(dp, (struct odp_flow __user *)argp);
		break;

	case ODP_FLOW_GET:
		err = do_flowvec_ioctl(dp, argp, query_flows);
		break;

	case ODP_FLOW_LIST:
		err = do_flowvec_ioctl(dp, argp, list_flows);
		break;

	case ODP_EXECUTE:
		err = do_execute(dp, (struct odp_execute __user *)argp);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
exit:
	return err;
}

static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
{
	int i;

	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
			return 1;
	}
	return 0;
}

ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes,
			 loff_t *ppos)
{
	/* XXX is there sufficient synchronization here? */
	int listeners = get_listen_mask(f);
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp = get_dp(dp_idx);
	struct sk_buff *skb;
	struct iovec __user iov;
	size_t copy_bytes;
	int retval;

	if (!dp)
		return -ENODEV;

	if (nbytes == 0 || !listeners)
		return 0;

	for (;;) {
		int i;

		for (i = 0; i < DP_N_QUEUES; i++) {
			if (listeners & (1 << i)) {
				skb = skb_dequeue(&dp->queues[i]);
				if (skb)
					goto success;
			}
		}

		if (f->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto error;
		}

		wait_event_interruptible(dp->waitqueue,
					 dp_has_packet_of_interest(dp,
								   listeners));

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto error;
		}
	}
success:
	copy_bytes = min_t(size_t, skb->len, nbytes);
	iov.iov_base = buf;
	iov.iov_len = copy_bytes;
	retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
	if (!retval)
		retval = copy_bytes;
	kfree_skb(skb);

error:
	return retval;
}

static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
	/* XXX is there sufficient synchronization here? */
	int dp_idx = iminor(file->f_dentry->d_inode);
	struct datapath *dp = get_dp(dp_idx);
	unsigned int mask;

	if (dp) {
		mask = 0;
		poll_wait(file, &dp->waitqueue, wait);
		if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
			mask |= POLLIN | POLLRDNORM;
	} else {
		mask = POLLIN | POLLRDNORM | POLLHUP;
	}
	return mask;
}

struct file_operations openvswitch_fops = {
	/* XXX .aio_read = openvswitch_aio_read, */
	.read = openvswitch_read,
	.poll = openvswitch_poll,
	.unlocked_ioctl = openvswitch_ioctl,
	/* XXX .fasync = openvswitch_fasync, */
};

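/*
 * Editor's note: a hedged userspace sketch (not part of the original file) of
 * how a controller process is expected to drive this character device: open
 * the per-datapath minor, subscribe to upcall queues with
 * ODP_SET_LISTEN_MASK, then poll() and read() one odp_msg-prefixed packet per
 * call.  The device path, function name, and error handling are illustrative
 * only; standard userspace headers (fcntl.h, poll.h, sys/ioctl.h, unistd.h)
 * are omitted for brevity.
 */
#if 0
/* Userspace code, not part of the kernel module: */
int example_read_upcalls(void)
{
	int fd = open("/dev/net/dp0", O_RDONLY);	/* hypothetical node */
	int mask = ODPL_MISS | ODPL_ACTION;
	char buf[2048];

	if (fd < 0)
		return -1;
	if (ioctl(fd, ODP_SET_LISTEN_MASK, &mask) < 0)
		return -1;
	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLIN };

		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
			/* Each read() returns one packet, prefixed by a
			 * struct odp_msg header (type, length, port, arg). */
			ssize_t n = read(fd, buf, sizeof buf);
			if (n < 0)
				break;
		}
	}
	close(fd);
	return 0;
}
#endif
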
static int __init dp_init(void)
{
	struct sk_buff *dummy_skb;
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

	printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

	err = flow_init();
	if (err)
		goto error;

	err = vport_init();
	if (err)
		goto error_flow_exit;

	err = register_netdevice_notifier(&dp_device_notifier);
	if (err)
		goto error_vport_exit;

	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	if (major < 0) {
		err = major;
		goto error_unreg_notifier;
	}

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&dp_device_notifier);
error_vport_exit:
	vport_exit();
error_flow_exit:
	flow_exit();
error:
	return err;
}

static void dp_cleanup(void)
{
	rcu_barrier();
	unregister_chrdev(major, "openvswitch");
	unregister_netdevice_notifier(&dp_device_notifier);
	vport_exit();
	flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");