X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=datapath%2Fdatapath.c;h=79a1f63251f7ece61613e81f6738603af7e7db86;hb=54adbf40ff717f05e081967102083889143447a7;hp=926f278f952509abb201ab8cdbce8da31f75f2a1;hpb=b2f460c72dc738302adf44b988e9dc4b44c4c621;p=sliver-openvswitch.git diff --git a/datapath/datapath.c b/datapath/datapath.c index 926f278f9..79a1f6325 100644 --- a/datapath/datapath.c +++ b/datapath/datapath.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2008, 2009 Nicira Networks. + * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks. * Distributed under the terms of the GNU GPL version 2. * * Significant portions of this file may be copied from parts of the Linux @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -21,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -41,13 +39,14 @@ #include #include #include -#include +#include #include "openvswitch/datapath-protocol.h" #include "datapath.h" #include "actions.h" -#include "dp_dev.h" #include "flow.h" +#include "table.h" +#include "vport-internal_dev.h" #include "compat.h" @@ -55,27 +54,13 @@ int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd); EXPORT_SYMBOL(dp_ioctl_hook); -int (*dp_add_dp_hook)(struct datapath *dp); -EXPORT_SYMBOL(dp_add_dp_hook); - -int (*dp_del_dp_hook)(struct datapath *dp); -EXPORT_SYMBOL(dp_del_dp_hook); - -int (*dp_add_if_hook)(struct net_bridge_port *p); -EXPORT_SYMBOL(dp_add_if_hook); - -int (*dp_del_if_hook)(struct net_bridge_port *p); -EXPORT_SYMBOL(dp_del_if_hook); - /* Datapaths. Protected on the read side by rcu_read_lock, on the write side - * by dp_mutex. dp_mutex is almost completely redundant with genl_mutex - * maintained by the Generic Netlink code, but the timeout path needs mutual - * exclusion too. + * by dp_mutex. * * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL * lock first. * - * It is safe to access the datapath and net_bridge_port structures with just + * It is safe to access the datapath and dp_port structures with just * dp_mutex. */ static struct datapath *dps[ODP_MAX]; @@ -84,7 +69,7 @@ static DEFINE_MUTEX(dp_mutex); /* Number of milliseconds between runs of the maintenance thread. */ #define MAINT_SLEEP_MSECS 1000 -static int new_nbp(struct datapath *, struct net_device *, int port_no); +static int new_dp_port(struct datapath *, struct odp_port *, int port_no); /* Must be called with rcu_read_lock or dp_mutex. */ struct datapath *get_dp(int dp_idx) @@ -95,7 +80,7 @@ struct datapath *get_dp(int dp_idx) } EXPORT_SYMBOL_GPL(get_dp); -struct datapath *get_dp_locked(int dp_idx) +static struct datapath *get_dp_locked(int dp_idx) { struct datapath *dp; @@ -107,6 +92,12 @@ struct datapath *get_dp_locked(int dp_idx) return dp; } +/* Must be called with rcu_read_lock or RTNL lock. 
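+ * It dereferences the RCU-protected dp->ports[ODPP_LOCAL] pointer, so a
+ * typical caller looks roughly like this (a sketch; it assumes the caller
+ * already holds a valid reference on 'dp'):
+ *
+ *	rcu_read_lock();
+ *	printk(KERN_DEBUG "datapath %s\n", dp_name(dp));
+ *	rcu_read_unlock();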
*/ +const char *dp_name(const struct datapath *dp) +{ + return vport_get_name(dp->ports[ODPP_LOCAL]->vport); +} + static inline size_t br_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct ifinfomsg)) @@ -119,14 +110,21 @@ static inline size_t br_nlmsg_size(void) } static int dp_fill_ifinfo(struct sk_buff *skb, - const struct net_bridge_port *port, + const struct dp_port *port, int event, unsigned int flags) { const struct datapath *dp = port->dp; - const struct net_device *dev = port->dev; + int ifindex = vport_get_ifindex(port->vport); + int iflink = vport_get_iflink(port->vport); struct ifinfomsg *hdr; struct nlmsghdr *nlh; + if (ifindex < 0) + return ifindex; + + if (iflink < 0) + return iflink; + nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags); if (nlh == NULL) return -EMSGSIZE; @@ -134,24 +132,26 @@ static int dp_fill_ifinfo(struct sk_buff *skb, hdr = nlmsg_data(nlh); hdr->ifi_family = AF_BRIDGE; hdr->__ifi_pad = 0; - hdr->ifi_type = dev->type; - hdr->ifi_index = dev->ifindex; - hdr->ifi_flags = dev_get_flags(dev); + hdr->ifi_type = ARPHRD_ETHER; + hdr->ifi_index = ifindex; + hdr->ifi_flags = vport_get_flags(port->vport); hdr->ifi_change = 0; - NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); - NLA_PUT_U32(skb, IFLA_MASTER, dp->ports[ODPP_LOCAL]->dev->ifindex); - NLA_PUT_U32(skb, IFLA_MTU, dev->mtu); + NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port->vport)); + NLA_PUT_U32(skb, IFLA_MASTER, vport_get_ifindex(dp->ports[ODPP_LOCAL]->vport)); + NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port->vport)); #ifdef IFLA_OPERSTATE NLA_PUT_U8(skb, IFLA_OPERSTATE, - netif_running(dev) ? dev->operstate : IF_OPER_DOWN); + vport_is_running(port->vport) + ? vport_get_operstate(port->vport) + : IF_OPER_DOWN); #endif - if (dev->addr_len) - NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); + NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, + vport_get_addr(port->vport)); - if (dev->ifindex != dev->iflink) - NLA_PUT_U32(skb, IFLA_LINK, dev->iflink); + if (ifindex != iflink) + NLA_PUT_U32(skb, IFLA_LINK,iflink); return nlmsg_end(skb, nlh); @@ -160,9 +160,8 @@ nla_put_failure: return -EMSGSIZE; } -static void dp_ifinfo_notify(int event, struct net_bridge_port *port) +static void dp_ifinfo_notify(int event, struct dp_port *port) { - struct net *net = dev_net(port->dev); struct sk_buff *skb; int err = -ENOBUFS; @@ -177,26 +176,40 @@ static void dp_ifinfo_notify(int event, struct net_bridge_port *port) kfree_skb(skb); goto errout; } - rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL); + rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL); return; errout: if (err < 0) - rtnl_set_sk_err(net, RTNLGRP_LINK, err); + rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err); } +static void release_dp(struct kobject *kobj) +{ + struct datapath *dp = container_of(kobj, struct datapath, ifobj); + kfree(dp); +} + +static struct kobj_type dp_ktype = { + .release = release_dp +}; + static int create_dp(int dp_idx, const char __user *devnamep) { - struct net_device *dp_dev; + struct odp_port internal_dev_port; char devname[IFNAMSIZ]; struct datapath *dp; int err; int i; if (devnamep) { - err = -EFAULT; - if (strncpy_from_user(devname, devnamep, IFNAMSIZ - 1) < 0) + int retval = strncpy_from_user(devname, devnamep, IFNAMSIZ); + if (retval < 0) { + err = -EFAULT; goto err; - devname[IFNAMSIZ - 1] = '\0'; + } else if (retval >= IFNAMSIZ) { + err = -ENAMETOOLONG; + goto err; + } } else { snprintf(devname, sizeof devname, "of%d", dp_idx); } @@ -225,21 +238,26 @@ static int create_dp(int dp_idx, const char __user 
*devnamep) skb_queue_head_init(&dp->queues[i]); init_waitqueue_head(&dp->waitqueue); + /* Initialize kobject for bridge. This will be added as + * /sys/class/net//brif later, if sysfs is enabled. */ + dp->ifobj.kset = NULL; + kobject_init(&dp->ifobj, &dp_ktype); + /* Allocate table. */ err = -ENOMEM; - rcu_assign_pointer(dp->table, dp_table_create(DP_L1_SIZE)); + rcu_assign_pointer(dp->table, tbl_create(0)); if (!dp->table) goto err_free_dp; - /* Setup our datapath device */ - dp_dev = dp_dev_create(dp, devname, ODPP_LOCAL); - err = PTR_ERR(dp_dev); - if (IS_ERR(dp_dev)) - goto err_destroy_table; - - err = new_nbp(dp, dp_dev, ODPP_LOCAL); + /* Set up our datapath device. */ + BUILD_BUG_ON(sizeof(internal_dev_port.devname) != sizeof(devname)); + strcpy(internal_dev_port.devname, devname); + internal_dev_port.flags = ODP_PORT_INTERNAL; + err = new_dp_port(dp, &internal_dev_port, ODPP_LOCAL); if (err) { - dp_dev_destroy(dp_dev); + if (err == -EBUSY) + err = -EEXIST; + goto err_destroy_table; } @@ -252,15 +270,14 @@ static int create_dp(int dp_idx, const char __user *devnamep) mutex_unlock(&dp_mutex); rtnl_unlock(); - if (dp_add_dp_hook) - dp_add_dp_hook(dp); + dp_sysfs_add_dp(dp); return 0; err_destroy_local_port: - dp_del_port(dp->ports[ODPP_LOCAL]); + dp_detach_port(dp->ports[ODPP_LOCAL], 1); err_destroy_table: - dp_table_destroy(dp->table, 0); + tbl_destroy(dp->table, NULL); err_free_dp: kfree(dp); err_put_module: @@ -274,28 +291,27 @@ err: static void do_destroy_dp(struct datapath *dp) { - struct net_bridge_port *p, *n; + struct dp_port *p, *n; int i; list_for_each_entry_safe (p, n, &dp->port_list, node) if (p->port_no != ODPP_LOCAL) - dp_del_port(p); + dp_detach_port(p, 1); - if (dp_del_dp_hook) - dp_del_dp_hook(dp); + dp_sysfs_del_dp(dp); rcu_assign_pointer(dps[dp->dp_idx], NULL); - dp_del_port(dp->ports[ODPP_LOCAL]); + dp_detach_port(dp->ports[ODPP_LOCAL], 1); - dp_table_destroy(dp->table, 1); + tbl_destroy(dp->table, flow_free_tbl); for (i = 0; i < DP_N_QUEUES; i++) skb_queue_purge(&dp->queues[i]); for (i = 0; i < DP_MAX_GROUPS; i++) kfree(dp->groups[i]); free_percpu(dp->stats_percpu); - kfree(dp); + kobject_put(&dp->ifobj); module_put(THIS_MODULE); } @@ -320,43 +336,71 @@ err_unlock: return err; } +static void release_dp_port(struct kobject *kobj) +{ + struct dp_port *p = container_of(kobj, struct dp_port, kobj); + kfree(p); +} + +static struct kobj_type brport_ktype = { +#ifdef CONFIG_SYSFS + .sysfs_ops = &brport_sysfs_ops, +#endif + .release = release_dp_port +}; + /* Called with RTNL lock and dp_mutex. */ -static int new_nbp(struct datapath *dp, struct net_device *dev, int port_no) +static int new_dp_port(struct datapath *dp, struct odp_port *odp_port, int port_no) { - struct net_bridge_port *p; + struct vport *vport; + struct dp_port *p; + int err; + + vport = vport_locate(odp_port->devname); + if (!vport) { + vport_lock(); - if (dev->br_port != NULL) - return -EBUSY; + if (odp_port->flags & ODP_PORT_INTERNAL) + vport = __vport_add(odp_port->devname, "internal", NULL); + else + vport = __vport_add(odp_port->devname, "netdev", NULL); + + vport_unlock(); + + if (IS_ERR(vport)) + return PTR_ERR(vport); + } p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return -ENOMEM; - dev_set_promiscuity(dev, 1); - dev_hold(dev); p->port_no = port_no; p->dp = dp; - p->dev = dev; - if (!is_dp_dev(dev)) - rcu_assign_pointer(dev->br_port, p); - else { - /* It would make sense to assign dev->br_port here too, but - * that causes packets received on internal ports to get caught - * in dp_frame_hook(). 
In turn dp_frame_hook() can reject them - * back to network stack, but that's a waste of time. */ + atomic_set(&p->sflow_pool, 0); + + err = vport_attach(vport, p); + if (err) { + kfree(p); + return err; } + rcu_assign_pointer(dp->ports[port_no], p); list_add_rcu(&p->node, &dp->port_list); dp->n_ports++; + /* Initialize kobject for bridge. This will be added as + * /sys/class/net//brport later, if sysfs is enabled. */ + p->kobj.kset = NULL; + kobject_init(&p->kobj, &brport_ktype); + dp_ifinfo_notify(RTM_NEWLINK, p); return 0; } -static int add_port(int dp_idx, struct odp_port __user *portp) +static int attach_port(int dp_idx, struct odp_port __user *portp) { - struct net_device *dev; struct datapath *dp; struct odp_port port; int port_no; @@ -376,39 +420,19 @@ static int add_port(int dp_idx, struct odp_port __user *portp) for (port_no = 1; port_no < DP_MAX_PORTS; port_no++) if (!dp->ports[port_no]) goto got_port_no; - err = -EXFULL; + err = -EFBIG; goto out_unlock_dp; got_port_no: - if (!(port.flags & ODP_PORT_INTERNAL)) { - err = -ENODEV; - dev = dev_get_by_name(&init_net, port.devname); - if (!dev) - goto out_unlock_dp; - - err = -EINVAL; - if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER || - is_dp_dev(dev)) - goto out_put; - } else { - dev = dp_dev_create(dp, port.devname, port_no); - err = PTR_ERR(dev); - if (IS_ERR(dev)) - goto out_unlock_dp; - dev_hold(dev); - } - - err = new_nbp(dp, dev, port_no); + err = new_dp_port(dp, &port, port_no); if (err) - goto out_put; + goto out_unlock_dp; - if (dp_add_if_hook) - dp_add_if_hook(dp->ports[port_no]); + set_internal_devs_mtu(dp); + dp_sysfs_add_if(dp->ports[port_no]); - err = __put_user(port_no, &port.port); + err = __put_user(port_no, &portp->port); -out_put: - dev_put(dev); out_unlock_dp: mutex_unlock(&dp->mutex); out_unlock_rtnl: @@ -417,55 +441,48 @@ out: return err; } -int dp_del_port(struct net_bridge_port *p) +int dp_detach_port(struct dp_port *p, int may_delete) { + struct vport *vport = p->vport; + int err; + ASSERT_RTNL(); - if (p->port_no != ODPP_LOCAL && dp_del_if_hook) { -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) - sysfs_remove_link(&p->dp->ifobj, p->dev->name); -#else - sysfs_remove_link(p->dp->ifobj, p->dev->name); -#endif - } + if (p->port_no != ODPP_LOCAL) + dp_sysfs_del_if(p); dp_ifinfo_notify(RTM_DELLINK, p); - p->dp->n_ports--; - - if (is_dp_dev(p->dev)) { - /* Make sure that no packets arrive from now on, since - * dp_dev_xmit() will try to find itself through - * p->dp->ports[], and we're about to set that to null. */ - netif_tx_disable(p->dev); - } - /* First drop references to device. */ - dev_set_promiscuity(p->dev, -1); + p->dp->n_ports--; list_del_rcu(&p->node); rcu_assign_pointer(p->dp->ports[p->port_no], NULL); - rcu_assign_pointer(p->dev->br_port, NULL); + + err = vport_detach(vport); + if (err) + return err; /* Then wait until no one is still using it, and destroy it. 
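  * The teardown follows the usual RCU sequence: the port was unpublished
  * above (list_del_rcu() and rcu_assign_pointer(..., NULL)), the
  * synchronize_rcu() below waits out any remaining readers, and only then
  * is the last reference dropped.  Schematically:
  *
  *	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
  *	synchronize_rcu();
  *	kobject_put(&p->kobj);	-- release_dp_port() frees p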
*/ synchronize_rcu(); - if (is_dp_dev(p->dev)) { - dp_dev_destroy(p->dev); - } - if (p->port_no != ODPP_LOCAL && dp_del_if_hook) { - dp_del_if_hook(p); - } else { - dev_put(p->dev); - kfree(p); + if (may_delete) { + const char *port_type = vport_get_type(vport); + + if (!strcmp(port_type, "netdev") || !strcmp(port_type, "internal")) { + vport_lock(); + __vport_del(vport); + vport_unlock(); + } } + kobject_put(&p->kobj); + return 0; } -static int del_port(int dp_idx, int port_no) +static int detach_port(int dp_idx, int port_no) { - struct net_bridge_port *p; + struct dp_port *p; struct datapath *dp; - LIST_HEAD(dp_devs); int err; err = -EINVAL; @@ -483,7 +500,7 @@ static int del_port(int dp_idx, int port_no) if (!p) goto out_unlock_dp; - err = dp_del_port(p); + err = dp_detach_port(p, 1); out_unlock_dp: mutex_unlock(&dp->mutex); @@ -493,33 +510,18 @@ out: return err; } -/* Must be called with rcu_read_lock. */ -static void -do_port_input(struct net_bridge_port *p, struct sk_buff *skb) -{ - /* Make our own copy of the packet. Otherwise we will mangle the - * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). - * (No one comes after us, since we tell handle_bridge() that we took - * the packet.) */ - skb = skb_share_check(skb, GFP_ATOMIC); - if (!skb) - return; - - /* Push the Ethernet header back on. */ - skb_push(skb, ETH_HLEN); - skb_reset_mac_header(skb); - dp_process_received_packet(skb, p); -} - /* Must be called with rcu_read_lock and with bottom-halves disabled. */ -void dp_process_received_packet(struct sk_buff *skb, struct net_bridge_port *p) +void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb) { struct datapath *dp = p->dp; struct dp_stats_percpu *stats; struct odp_flow_key key; - struct sw_flow *flow; + struct tbl_node *flow_node; WARN_ON_ONCE(skb_shared(skb)); + skb_warn_if_lro(skb); + + OVS_CB(skb)->dp_port = p; /* BHs are off so we don't have to use get_cpu()/put_cpu() here. */ stats = percpu_ptr(dp->stats_percpu, smp_processor_id()); @@ -532,8 +534,9 @@ void dp_process_received_packet(struct sk_buff *skb, struct net_bridge_port *p) } } - flow = dp_table_lookup(rcu_dereference(dp->table), &key); - if (flow) { + flow_node = tbl_lookup(rcu_dereference(dp->table), &key, flow_hash(&key), flow_cmp); + if (flow_node) { + struct sw_flow *flow = flow_cast(flow_node); struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts); flow_used(flow, skb); execute_actions(dp, skb, &key, acts->actions, acts->n_actions, @@ -541,83 +544,248 @@ void dp_process_received_packet(struct sk_buff *skb, struct net_bridge_port *p) stats->n_hit++; } else { stats->n_missed++; - dp_output_control(dp, skb, _ODPL_MISS_NR, 0); + dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id); } } -/* - * Used as br_handle_frame_hook. (Cannot run bridge at the same time, even on - * different set of devices!) - */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) -/* Called with rcu_read_lock and bottom-halves disabled. */ -static struct sk_buff *dp_frame_hook(struct net_bridge_port *p, - struct sk_buff *skb) +#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID) +/* This code is based on skb_checksum_setup() from Xen's net/dev/core.c. We + * can't call this function directly because it isn't exported in all + * versions. 
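+ * It rewrites a Xen proto_csum_blank skb into an ordinary
+ * CHECKSUM_PARTIAL one.  Callers in this file use roughly this pattern
+ * (sketch; see queue_control_packets() below):
+ *
+ *	err = vswitch_skb_checksum_setup(skb);
+ *	if (err)
+ *		goto err_kfree_skbs;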
*/ +int vswitch_skb_checksum_setup(struct sk_buff *skb) { - do_port_input(p, skb); - return NULL; + struct iphdr *iph; + unsigned char *th; + int err = -EPROTO; + __u16 csum_start, csum_offset; + + if (!skb->proto_csum_blank) + return 0; + + if (skb->protocol != htons(ETH_P_IP)) + goto out; + + if (!pskb_may_pull(skb, skb_network_header(skb) + sizeof(struct iphdr) - skb->data)) + goto out; + + iph = ip_hdr(skb); + th = skb_network_header(skb) + 4 * iph->ihl; + + csum_start = th - skb->head; + switch (iph->protocol) { + case IPPROTO_TCP: + csum_offset = offsetof(struct tcphdr, check); + break; + case IPPROTO_UDP: + csum_offset = offsetof(struct udphdr, check); + break; + default: + if (net_ratelimit()) + printk(KERN_ERR "Attempting to checksum a non-" + "TCP/UDP packet, dropping a protocol" + " %d packet", iph->protocol); + goto out; + } + + if (!pskb_may_pull(skb, th + csum_offset + 2 - skb->data)) + goto out; + + skb->ip_summed = CHECKSUM_PARTIAL; + skb->proto_csum_blank = 0; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) + skb->csum_start = csum_start; + skb->csum_offset = csum_offset; +#else + skb_set_transport_header(skb, csum_start - skb_headroom(skb)); + skb->csum = csum_offset; +#endif + + err = 0; + +out: + return err; } -#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) -/* Called with rcu_read_lock and bottom-halves disabled. */ -static int dp_frame_hook(struct net_bridge_port *p, struct sk_buff **pskb) +#endif /* CONFIG_XEN && HAVE_PROTO_DATA_VALID */ + + /* Types of checksums that we can receive (these all refer to L4 checksums): + * 1. CHECKSUM_NONE: Device that did not compute checksum, contains full + * (though not verified) checksum in packet but not in skb->csum. Packets + * from the bridge local port will also have this type. + * 2. CHECKSUM_COMPLETE (CHECKSUM_HW): Good device that computes checksums, + * also the GRE module. This is the same as CHECKSUM_NONE, except it has + * a valid skb->csum. Importantly, both contain a full checksum (not + * verified) in the packet itself. The only difference is that if the + * packet gets to L4 processing on this machine (not in DomU) we won't + * have to recompute the checksum to verify. Most hardware devices do not + * produce packets with this type, even if they support receive checksum + * offloading (they produce type #5). + * 3. CHECKSUM_PARTIAL (CHECKSUM_HW): Packet without full checksum and needs to + * be computed if it is sent off box. Unfortunately on earlier kernels, + * this case is impossible to distinguish from #2, despite having opposite + * meanings. Xen adds an extra field on earlier kernels (see #4) in order + * to distinguish the different states. + * 4. CHECKSUM_UNNECESSARY (with proto_csum_blank true): This packet was + * generated locally by a Xen DomU and has a partial checksum. If it is + * handled on this machine (Dom0 or DomU), then the checksum will not be + * computed. If it goes off box, the checksum in the packet needs to be + * completed. Calling skb_checksum_setup converts this to CHECKSUM_HW + * (CHECKSUM_PARTIAL) so that the checksum can be completed. In later + * kernels, this combination is replaced with CHECKSUM_PARTIAL. + * 5. CHECKSUM_UNNECESSARY (with proto_csum_blank false): Packet with a correct + * full checksum or using a protocol without a checksum. skb->csum is + * undefined. This is common from devices with receive checksum + * offloading. This is somewhat similar to CHECKSUM_NONE, except that + * nobody will try to verify the checksum with CHECKSUM_UNNECESSARY. 
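+ *
+ * As a quick reference, compute_ip_summed() below records these as
+ * follows (older kernels fold both COMPLETE and PARTIAL into
+ * CHECKSUM_HW, split there by receive vs. transmit):
+ *
+ *	CHECKSUM_NONE          -> OVS_CSUM_NONE
+ *	CHECKSUM_UNNECESSARY   -> OVS_CSUM_UNNECESSARY
+ *	CHECKSUM_COMPLETE      -> OVS_CSUM_COMPLETE
+ *	CHECKSUM_PARTIAL       -> OVS_CSUM_PARTIAL
+ *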
+ * + * Note that on earlier kernels, CHECKSUM_COMPLETE and CHECKSUM_PARTIAL are + * both defined as CHECKSUM_HW. Normally the meaning of CHECKSUM_HW is clear + * based on whether it is on the transmit or receive path. After the datapath + * it will be intepreted as CHECKSUM_PARTIAL. If the packet already has a + * checksum, we will panic. Since we can receive packets with checksums, we + * assume that all CHECKSUM_HW packets have checksums and map them to + * CHECKSUM_NONE, which has a similar meaning (the it is only different if the + * packet is processed by the local IP stack, in which case it will need to + * be reverified). If we receive a packet with CHECKSUM_HW that really means + * CHECKSUM_PARTIAL, it will be sent with the wrong checksum. However, there + * shouldn't be any devices that do this with bridging. */ +void +compute_ip_summed(struct sk_buff *skb, bool xmit) { - do_port_input(p, *pskb); - return 1; -} + /* For our convenience these defines change repeatedly between kernel + * versions, so we can't just copy them over... */ + switch (skb->ip_summed) { + case CHECKSUM_NONE: + OVS_CB(skb)->ip_summed = OVS_CSUM_NONE; + break; + case CHECKSUM_UNNECESSARY: + OVS_CB(skb)->ip_summed = OVS_CSUM_UNNECESSARY; + break; +#ifdef CHECKSUM_HW + /* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE. + * However, on the receive side we should only get CHECKSUM_PARTIAL + * packets from Xen, which uses some special fields to represent this + * (see below). Since we can only make one type work, pick the one + * that actually happens in practice. + * + * On the transmit side (basically after skb_checksum_setup() + * has been run or on internal dev transmit), packets with + * CHECKSUM_COMPLETE aren't generated, so assume CHECKSUM_PARTIAL. */ + case CHECKSUM_HW: + if (!xmit) + OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE; + else + OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL; + + break; #else -#error + case CHECKSUM_COMPLETE: + OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE; + break; + case CHECKSUM_PARTIAL: + OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL; + break; #endif + default: + printk(KERN_ERR "openvswitch: unknown checksum type %d\n", + skb->ip_summed); + /* None seems the safest... */ + OVS_CB(skb)->ip_summed = OVS_CSUM_NONE; + } + +#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID) + /* Xen has a special way of representing CHECKSUM_PARTIAL on older + * kernels. It should not be set on the transmit path though. */ + if (skb->proto_csum_blank) + OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL; + + WARN_ON_ONCE(skb->proto_csum_blank && xmit); +#endif +} -#if defined(CONFIG_XEN) && LINUX_VERSION_CODE == KERNEL_VERSION(2,6,18) -/* This code is copied verbatim from net/dev/core.c in Xen's - * linux-2.6.18-92.1.10.el5.xs5.0.0.394.644. We can't call those functions - * directly because they aren't exported. */ -static int skb_pull_up_to(struct sk_buff *skb, void *ptr) +/* This function closely resembles skb_forward_csum() used by the bridge. 
It + * is slightly different because we are only concerned with bridging and not + * other types of forwarding and can get away with slightly more optimal + * behavior.*/ +void +forward_ip_summed(struct sk_buff *skb) { - if (ptr < (void *)skb->tail) - return 1; - if (__pskb_pull_tail(skb, - ptr - (void *)skb->data - skb_headlen(skb))) { - return 1; - } else { - return 0; - } +#ifdef CHECKSUM_HW + if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; +#endif } -int vswitch_skb_checksum_setup(struct sk_buff *skb) +/* Append each packet in 'skb' list to 'queue'. There will be only one packet + * unless we broke up a GSO packet. */ +static int +queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue, + int queue_no, u32 arg) { - if (skb->proto_csum_blank) { - if (skb->protocol != htons(ETH_P_IP)) - goto out; - if (!skb_pull_up_to(skb, skb->nh.iph + 1)) - goto out; - skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl; - switch (skb->nh.iph->protocol) { - case IPPROTO_TCP: - skb->csum = offsetof(struct tcphdr, check); - break; - case IPPROTO_UDP: - skb->csum = offsetof(struct udphdr, check); - break; - default: - if (net_ratelimit()) - printk(KERN_ERR "Attempting to checksum a non-" - "TCP/UDP packet, dropping a protocol" - " %d packet", skb->nh.iph->protocol); - goto out; + struct sk_buff *nskb; + int port_no; + int err; + + if (OVS_CB(skb)->dp_port) + port_no = OVS_CB(skb)->dp_port->port_no; + else + port_no = ODPP_LOCAL; + + do { + struct odp_msg *header; + + nskb = skb->next; + skb->next = NULL; + + /* If a checksum-deferred packet is forwarded to the + * controller, correct the pointers and checksum. + */ + err = vswitch_skb_checksum_setup(skb); + if (err) + goto err_kfree_skbs; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) + /* Until 2.6.22, the start of the transport header was + * also the start of data to be checksummed. Linux + * 2.6.22 introduced the csum_start field for this + * purpose, but we should point the transport header to + * it anyway for backward compatibility, as + * dev_queue_xmit() does even in 2.6.28. 
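+ * The invariant being restored is that
+ *
+ *	skb->head + skb->csum_start == skb_transport_header(skb)
+ *
+ * so older helpers that checksum starting from the transport
+ * header keep working.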
*/ + skb_set_transport_header(skb, skb->csum_start - + skb_headroom(skb)); +#endif + + err = skb_checksum_help(skb); + if (err) + goto err_kfree_skbs; } - if (!skb_pull_up_to(skb, skb->h.raw + skb->csum + 2)) - goto out; - skb->ip_summed = CHECKSUM_HW; - skb->proto_csum_blank = 0; - } + + err = skb_cow(skb, sizeof *header); + if (err) + goto err_kfree_skbs; + + header = (struct odp_msg*)__skb_push(skb, sizeof *header); + header->type = queue_no; + header->length = skb->len; + header->port = port_no; + header->reserved = 0; + header->arg = arg; + skb_queue_tail(queue, skb); + + skb = nskb; + } while (skb); return 0; -out: - return -EPROTO; + +err_kfree_skbs: + kfree_skb(skb); + while ((skb = nskb) != NULL) { + nskb = skb->next; + kfree_skb(skb); + } + return err; } -#else -int vswitch_skb_checksum_setup(struct sk_buff *skb) { return 0; } -#endif /* CONFIG_XEN && linux == 2.6.18 */ int dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no, @@ -625,50 +793,16 @@ dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no, { struct dp_stats_percpu *stats; struct sk_buff_head *queue; - int port_no; int err; WARN_ON_ONCE(skb_shared(skb)); - BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR); - + BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR && queue_no != _ODPL_SFLOW_NR); queue = &dp->queues[queue_no]; err = -ENOBUFS; if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN) goto err_kfree_skb; - /* If a checksum-deferred packet is forwarded to the controller, - * correct the pointers and checksum. This happens on a regular basis - * only on Xen (the CHECKSUM_HW case), on which VMs can pass up packets - * that do not have their checksum computed. We also implement it for - * the non-Xen case, but it is difficult to trigger or test this case - * there, hence the WARN_ON_ONCE(). - */ - err = vswitch_skb_checksum_setup(skb); - if (err) - goto err_kfree_skb; -#ifndef CHECKSUM_HW - if (skb->ip_summed == CHECKSUM_PARTIAL) { - WARN_ON_ONCE(1); -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) - /* Until 2.6.22, the start of the transport header was also the - * start of data to be checksummed. Linux 2.6.22 introduced - * the csum_start field for this purpose, but we should point - * the transport header to it anyway for backward - * compatibility, as dev_queue_xmit() does even in 2.6.28. */ - skb_set_transport_header(skb, skb->csum_start - - skb_headroom(skb)); -#endif - err = skb_checksum_help(skb); - if (err) - goto err_kfree_skb; - } -#else - if (skb->ip_summed == CHECKSUM_HW) { - err = skb_checksum_help(skb, 0); - if (err) - goto err_kfree_skb; - } -#endif + forward_ip_summed(skb); /* Break apart GSO packets into their component pieces. Otherwise * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */ @@ -687,45 +821,9 @@ dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no, } } - /* Figure out port number. */ - port_no = ODPP_LOCAL; - if (skb->dev) { - if (skb->dev->br_port) - port_no = skb->dev->br_port->port_no; - else if (is_dp_dev(skb->dev)) - port_no = dp_dev_priv(skb->dev)->port_no; - } - - /* Append each packet to queue. There will be only one packet unless - * we broke up a GSO packet above. 
*/ - do { - struct odp_msg *header; - struct sk_buff *nskb = skb->next; - skb->next = NULL; - - err = skb_cow(skb, sizeof *header); - if (err) { - while (nskb) { - kfree_skb(skb); - skb = nskb; - nskb = skb->next; - } - goto err_kfree_skb; - } - - header = (struct odp_msg*)__skb_push(skb, sizeof *header); - header->type = queue_no; - header->length = skb->len; - header->port = port_no; - header->reserved = 0; - header->arg = arg; - skb_queue_tail(queue, skb); - - skb = nskb; - } while (skb); - + err = queue_control_packets(skb, queue, queue_no, arg); wake_up_interruptible(&dp->waitqueue); - return 0; + return err; err_kfree_skb: kfree_skb(skb); @@ -739,8 +837,18 @@ err: static int flush_flows(struct datapath *dp) { - dp->n_flows = 0; - return dp_table_flush(dp); + struct tbl *old_table = rcu_dereference(dp->table); + struct tbl *new_table; + + new_table = tbl_create(0); + if (!new_table) + return -ENOMEM; + + rcu_assign_pointer(dp->table, new_table); + + tbl_deferred_destroy(old_table, flow_free_tbl); + + return 0; } static int validate_actions(const struct sw_flow_actions *actions) @@ -766,7 +874,13 @@ static int validate_actions(const struct sw_flow_actions *actions) break; case ODPAT_SET_VLAN_PCP: - if (a->vlan_pcp.vlan_pcp & ~VLAN_PCP_MASK) + if (a->vlan_pcp.vlan_pcp + & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT)) + return -EINVAL; + break; + + case ODPAT_SET_NW_TOS: + if (a->nw_tos.nw_tos & INET_ECN_MASK) return -EINVAL; break; @@ -831,28 +945,39 @@ static void clear_stats(struct sw_flow *flow) flow->byte_count = 0; } +static int expand_table(struct datapath *dp) +{ + struct tbl *old_table = rcu_dereference(dp->table); + struct tbl *new_table; + + new_table = tbl_expand(old_table); + if (IS_ERR(new_table)) + return PTR_ERR(new_table); + + rcu_assign_pointer(dp->table, new_table); + tbl_deferred_destroy(old_table, NULL); + + return 0; +} + static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp) { struct odp_flow_put uf; - struct sw_flow *flow, **bucket; - struct dp_table *table; + struct tbl_node *flow_node; + struct sw_flow *flow; + struct tbl *table; struct odp_flow_stats stats; int error; error = -EFAULT; if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put))) goto error; - uf.flow.key.reserved = 0; + memset(uf.flow.key.reserved, 0, sizeof uf.flow.key.reserved); -retry: table = rcu_dereference(dp->table); - bucket = dp_table_lookup_for_insert(table, &uf.flow.key); - if (!bucket) { - /* No such flow, and the slots where it could go are full. */ - error = uf.flags & ODPPF_CREATE ? -EXFULL : -ENOENT; - goto error; - } else if (!*bucket) { - /* No such flow, but we found an available slot for it. */ + flow_node = tbl_lookup(table, &uf.flow.key, flow_hash(&uf.flow.key), flow_cmp); + if (!flow_node) { + /* No such flow. */ struct sw_flow_actions *acts; error = -ENOENT; @@ -860,14 +985,11 @@ retry: goto error; /* Expand table, if necessary, to make room. */ - if (dp->n_flows * 4 >= table->n_buckets && - table->n_buckets < DP_MAX_BUCKETS) { - error = dp_table_expand(dp); + if (tbl_count(table) >= tbl_n_buckets(table)) { + error = expand_table(dp); if (error) goto error; - - /* The bucket's location has changed. Try again. */ - goto retry; + table = rcu_dereference(dp->table); } /* Allocate flow. */ @@ -887,15 +1009,18 @@ retry: rcu_assign_pointer(flow->sf_acts, acts); /* Put flow in bucket. 
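  * (With the tbl_* API the insert itself can fail -- e.g. if a new
  * bucket cannot be allocated -- so the error path below unwinds via
  * error_free_flow_acts.  The hash passed to tbl_insert() must match
  * the one used for tbl_lookup() above, which is why both use
  * flow_hash().)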
*/ - rcu_assign_pointer(*bucket, flow); - dp->n_flows++; + error = tbl_insert(table, &flow->tbl_node, flow_hash(&flow->key)); + if (error) + goto error_free_flow_acts; + memset(&stats, 0, sizeof(struct odp_flow_stats)); } else { /* We found a matching flow. */ - struct sw_flow *flow = *rcu_dereference(bucket); struct sw_flow_actions *old_acts, *new_acts; unsigned long int flags; + flow = flow_cast(flow_node); + /* Bail out if we're not allowed to modify an existing flow. */ error = -EEXIST; if (!(uf.flags & ODPPF_MODIFY)) @@ -930,6 +1055,8 @@ retry: return -EFAULT; return 0; +error_free_flow_acts: + kfree(flow->sf_acts); error_free_flow: kmem_cache_free(flow_cache, flow); error: @@ -959,13 +1086,18 @@ static int put_actions(const struct sw_flow *flow, struct odp_flow __user *ufp) return 0; } -static int answer_query(struct sw_flow *flow, struct odp_flow __user *ufp) +static int answer_query(struct sw_flow *flow, u32 query_flags, + struct odp_flow __user *ufp) { struct odp_flow_stats stats; unsigned long int flags; spin_lock_irqsave(&flow->lock, flags); get_stats(flow, &stats); + + if (query_flags & ODPFF_ZERO_TCP_FLAGS) { + flow->tcp_flags = 0; + } spin_unlock_irqrestore(&flow->lock, flags); if (__copy_to_user(&ufp->stats, &stats, sizeof(struct odp_flow_stats))) @@ -975,23 +1107,23 @@ static int answer_query(struct sw_flow *flow, struct odp_flow __user *ufp) static int del_flow(struct datapath *dp, struct odp_flow __user *ufp) { - struct dp_table *table = rcu_dereference(dp->table); + struct tbl *table = rcu_dereference(dp->table); struct odp_flow uf; + struct tbl_node *flow_node; struct sw_flow *flow; int error; error = -EFAULT; if (copy_from_user(&uf, ufp, sizeof uf)) goto error; - uf.key.reserved = 0; + memset(uf.key.reserved, 0, sizeof uf.key.reserved); - flow = dp_table_lookup(table, &uf.key); + flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp); error = -ENOENT; - if (!flow) + if (!flow_node) goto error; - /* XXX redundant lookup */ - error = dp_table_delete(table, flow); + error = tbl_remove(table, flow_node); if (error) goto error; @@ -999,8 +1131,9 @@ static int del_flow(struct datapath *dp, struct odp_flow __user *ufp) * be using this flow. We used to synchronize_rcu() to make sure that * we get completely accurate stats, but that blows our performance, * badly. 
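  * Instead the flow is unhooked with tbl_remove() and handed to
  * flow_deferred_free(), which (presumably via call_rcu()) delays the
  * actual free until readers are done; the stats snapshot taken by
  * answer_query() below may therefore miss a final in-flight update.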
*/ - dp->n_flows--; - error = answer_query(flow, ufp); + + flow = flow_cast(flow_node); + error = answer_query(flow, 0, ufp); flow_deferred_free(flow); error: @@ -1009,23 +1142,23 @@ error: static int query_flows(struct datapath *dp, const struct odp_flowvec *flowvec) { - struct dp_table *table = rcu_dereference(dp->table); + struct tbl *table = rcu_dereference(dp->table); int i; for (i = 0; i < flowvec->n_flows; i++) { struct __user odp_flow *ufp = &flowvec->flows[i]; struct odp_flow uf; - struct sw_flow *flow; + struct tbl_node *flow_node; int error; if (__copy_from_user(&uf, ufp, sizeof uf)) return -EFAULT; - uf.key.reserved = 0; + memset(uf.key.reserved, 0, sizeof uf.key.reserved); - flow = dp_table_lookup(table, &uf.key); - if (!flow) + flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp); + if (!flow_node) error = __put_user(ENOENT, &ufp->stats.error); else - error = answer_query(flow, ufp); + error = answer_query(flow_cast(flow_node), uf.flags, ufp); if (error) return -EFAULT; } @@ -1038,15 +1171,16 @@ struct list_flows_cbdata { int listed_flows; }; -static int list_flow(struct sw_flow *flow, void *cbdata_) +static int list_flow(struct tbl_node *node, void *cbdata_) { + struct sw_flow *flow = flow_cast(node); struct list_flows_cbdata *cbdata = cbdata_; struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++]; int error; if (__copy_to_user(&ufp->key, &flow->key, sizeof flow->key)) return -EFAULT; - error = answer_query(flow, ufp); + error = answer_query(flow, 0, ufp); if (error) return error; @@ -1066,8 +1200,7 @@ static int list_flows(struct datapath *dp, const struct odp_flowvec *flowvec) cbdata.uflows = flowvec->flows; cbdata.n_flows = flowvec->n_flows; cbdata.listed_flows = 0; - error = dp_table_foreach(rcu_dereference(dp->table), - list_flow, &cbdata); + error = tbl_foreach(rcu_dereference(dp->table), list_flow, &cbdata); return error ? error : cbdata.listed_flows; } @@ -1103,6 +1236,7 @@ static int do_execute(struct datapath *dp, const struct odp_execute *executep) struct odp_flow_key key; struct sk_buff *skb; struct sw_flow_actions *actions; + struct ethhdr *eth; int err; err = -EFAULT; @@ -1131,17 +1265,28 @@ static int do_execute(struct datapath *dp, const struct odp_execute *executep) skb = alloc_skb(execute.length, GFP_KERNEL); if (!skb) goto error_free_actions; - if (execute.in_port < DP_MAX_PORTS) { - struct net_bridge_port *p = dp->ports[execute.in_port]; - if (p) - skb->dev = p->dev; - } + + if (execute.in_port < DP_MAX_PORTS) + OVS_CB(skb)->dp_port = dp->ports[execute.in_port]; + else + OVS_CB(skb)->dp_port = NULL; err = -EFAULT; if (copy_from_user(skb_put(skb, execute.length), execute.data, execute.length)) goto error_free_skb; + skb_reset_mac_header(skb); + eth = eth_hdr(skb); + + /* Normally, setting the skb 'protocol' field would be handled by a + * call to eth_type_trans(), but it assumes there's a sending + * device, which we may not have. 
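+ * The 1536 (0x0600) cut-off is the standard Ethernet convention:
+ * h_proto values >= 1536 are EtherTypes, while smaller values are
+ * 802.3 length fields, which we treat as ETH_P_802_2 just as
+ * eth_type_trans() would.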
*/ + if (ntohs(eth->h_proto) >= 1536) + skb->protocol = eth->h_proto; + else + skb->protocol = htons(ETH_P_802_2); + flow_extract(skb, execute.in_port, &key); err = execute_actions(dp, skb, &key, actions->actions, actions->n_actions, GFP_KERNEL); @@ -1158,12 +1303,13 @@ error: static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp) { + struct tbl *table = rcu_dereference(dp->table); struct odp_stats stats; int i; - stats.n_flows = dp->n_flows; - stats.cur_capacity = rcu_dereference(dp->table)->n_buckets * 2; - stats.max_capacity = DP_MAX_BUCKETS * 2; + stats.n_flows = tbl_count(table); + stats.cur_capacity = tbl_n_buckets(table); + stats.max_capacity = TBL_MAX_BUCKETS; stats.n_ports = dp->n_ports; stats.max_ports = DP_MAX_PORTS; stats.max_groups = DP_MAX_GROUPS; @@ -1181,14 +1327,61 @@ static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp) return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0; } +/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */ +int dp_min_mtu(const struct datapath *dp) +{ + struct dp_port *p; + int mtu = 0; + + ASSERT_RTNL(); + + list_for_each_entry_rcu (p, &dp->port_list, node) { + int dev_mtu; + + /* Skip any internal ports, since that's what we're trying to + * set. */ + if (is_internal_vport(p->vport)) + continue; + + dev_mtu = vport_get_mtu(p->vport); + if (!mtu || dev_mtu < mtu) + mtu = dev_mtu; + } + + return mtu ? mtu : ETH_DATA_LEN; +} + +/* Sets the MTU of all datapath devices to the minimum of the ports. Must + * be called with RTNL lock. */ +void set_internal_devs_mtu(const struct datapath *dp) +{ + struct dp_port *p; + int mtu; + + ASSERT_RTNL(); + + mtu = dp_min_mtu(dp); + + list_for_each_entry_rcu (p, &dp->port_list, node) { + if (is_internal_vport(p->vport)) + vport_set_mtu(p->vport, mtu); + } +} + static int -put_port(const struct net_bridge_port *p, struct odp_port __user *uop) +put_port(const struct dp_port *p, struct odp_port __user *uop) { struct odp_port op; + memset(&op, 0, sizeof op); - strncpy(op.devname, p->dev->name, sizeof op.devname); + + rcu_read_lock(); + strncpy(op.devname, vport_get_name(p->vport), sizeof op.devname); + rcu_read_unlock(); + op.port = p->port_no; - op.flags = is_dp_dev(p->dev) ? ODP_PORT_INTERNAL : 0; + op.flags = is_internal_vport(p->vport) ? ODP_PORT_INTERNAL : 0; + return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0; } @@ -1199,41 +1392,52 @@ query_port(struct datapath *dp, struct odp_port __user *uport) if (copy_from_user(&port, uport, sizeof port)) return -EFAULT; + if (port.devname[0]) { - struct net_bridge_port *p; - struct net_device *dev; - int err; + struct vport *vport; + struct dp_port *dp_port; + int err = 0; port.devname[IFNAMSIZ - 1] = '\0'; - dev = dev_get_by_name(&init_net, port.devname); - if (!dev) - return -ENODEV; + vport_lock(); + rcu_read_lock(); - p = dev->br_port; - if (!p && is_dp_dev(dev)) { - struct dp_dev *dp_dev = dp_dev_priv(dev); - if (dp_dev->dp == dp) - p = dp->ports[dp_dev->port_no]; + vport = vport_locate(port.devname); + if (!vport) { + err = -ENODEV; + goto error_unlock; } - err = p && p->dp == dp ? 
put_port(p, uport) : -ENOENT; - dev_put(dev); - return err; + dp_port = vport_get_dp_port(vport); + if (!dp_port || dp_port->dp != dp) { + err = -ENOENT; + goto error_unlock; + } + + port.port = dp_port->port_no; + +error_unlock: + rcu_read_unlock(); + vport_unlock(); + + if (err) + return err; } else { if (port.port >= DP_MAX_PORTS) return -EINVAL; if (!dp->ports[port.port]) return -ENOENT; - return put_port(dp->ports[port.port], uport); } + + return put_port(dp->ports[port.port], uport); } static int list_ports(struct datapath *dp, struct odp_portvec __user *pvp) { struct odp_portvec pv; - struct net_bridge_port *p; + struct dp_port *p; int idx; if (copy_from_user(&pv, pvp, sizeof pv)) @@ -1321,12 +1525,23 @@ get_port_group(struct datapath *dp, struct odp_port_group *upg) return 0; } +static int get_listen_mask(const struct file *f) +{ + return (long)f->private_data; +} + +static void set_listen_mask(struct file *f, int listen_mask) +{ + f->private_data = (void*)(long)listen_mask; +} + static long openvswitch_ioctl(struct file *f, unsigned int cmd, unsigned long argp) { int dp_idx = iminor(f->f_dentry->d_inode); struct datapath *dp; int drop_frags, listeners, port_no; + unsigned int sflow_probability; int err; /* Handle commands with special locking requirements up front. */ @@ -1339,14 +1554,46 @@ static long openvswitch_ioctl(struct file *f, unsigned int cmd, err = destroy_dp(dp_idx); goto exit; - case ODP_PORT_ADD: - err = add_port(dp_idx, (struct odp_port __user *)argp); + case ODP_PORT_ATTACH: + err = attach_port(dp_idx, (struct odp_port __user *)argp); goto exit; - case ODP_PORT_DEL: + case ODP_PORT_DETACH: err = get_user(port_no, (int __user *)argp); if (!err) - err = del_port(dp_idx, port_no); + err = detach_port(dp_idx, port_no); + goto exit; + + case ODP_VPORT_ADD: + err = vport_add((struct odp_vport_add __user *)argp); + goto exit; + + case ODP_VPORT_MOD: + err = vport_mod((struct odp_vport_mod __user *)argp); + goto exit; + + case ODP_VPORT_DEL: + err = vport_del((char __user *)argp); + goto exit; + + case ODP_VPORT_STATS_GET: + err = vport_stats_get((struct odp_vport_stats_req __user *)argp); + goto exit; + + case ODP_VPORT_ETHER_GET: + err = vport_ether_get((struct odp_vport_ether __user *)argp); + goto exit; + + case ODP_VPORT_ETHER_SET: + err = vport_ether_set((struct odp_vport_ether __user *)argp); + goto exit; + + case ODP_VPORT_MTU_GET: + err = vport_mtu_get((struct odp_vport_mtu __user *)argp); + goto exit; + + case ODP_VPORT_MTU_SET: + err = vport_mtu_set((struct odp_vport_mtu __user *)argp); goto exit; } @@ -1376,7 +1623,7 @@ static long openvswitch_ioctl(struct file *f, unsigned int cmd, break; case ODP_GET_LISTEN_MASK: - err = put_user((int)f->private_data, (int __user *)argp); + err = put_user(get_listen_mask(f), (int __user *)argp); break; case ODP_SET_LISTEN_MASK: @@ -1387,7 +1634,17 @@ static long openvswitch_ioctl(struct file *f, unsigned int cmd, if (listeners & ~ODPL_ALL) break; err = 0; - f->private_data = (void*)listeners; + set_listen_mask(f, listeners); + break; + + case ODP_GET_SFLOW_PROBABILITY: + err = put_user(dp->sflow_probability, (unsigned int __user *)argp); + break; + + case ODP_SET_SFLOW_PROBABILITY: + err = get_user(sflow_probability, (unsigned int __user *)argp); + if (!err) + dp->sflow_probability = sflow_probability; break; case ODP_PORT_QUERY: @@ -1453,7 +1710,7 @@ ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes, loff_t *ppos) { /* XXX is there sufficient synchronization here? 
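  * (The mask is a single word in f->private_data, written by
  * ODP_SET_LISTEN_MASK and re-read on every call through
  * get_listen_mask() below, so at worst a concurrent update is
  * observed one read late.)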
*/ - int listeners = (int) f->private_data; + int listeners = get_listen_mask(f); int dp_idx = iminor(f->f_dentry->d_inode); struct datapath *dp = get_dp(dp_idx); struct sk_buff *skb; @@ -1493,7 +1750,7 @@ ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes, } } success: - copy_bytes = min(skb->len, nbytes); + copy_bytes = min_t(size_t, skb->len, nbytes); iov.iov_base = buf; iov.iov_len = copy_bytes; retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len); @@ -1515,7 +1772,7 @@ static unsigned int openvswitch_poll(struct file *file, poll_table *wait) if (dp) { mask = 0; poll_wait(file, &dp->waitqueue, wait); - if (dp_has_packet_of_interest(dp, (int)file->private_data)) + if (dp_has_packet_of_interest(dp, get_listen_mask(file))) mask |= POLLIN | POLLRDNORM; } else { mask = POLLIN | POLLRDNORM | POLLHUP; @@ -1532,55 +1789,38 @@ struct file_operations openvswitch_fops = { }; static int major; -static struct llc_sap *dp_stp_sap; - -static int dp_stp_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, struct net_device *orig_dev) -{ - /* We don't really care about STP packets, we just listen for them for - * mutual exclusion with the bridge module, so this just discards - * them. */ - kfree_skb(skb); - return 0; -} static int __init dp_init(void) { + struct sk_buff *dummy_skb; int err; - printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR); + BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb)); - /* Register to receive STP packets because the bridge module also - * attempts to do so. Since there can only be a single listener for a - * given protocol, this provides mutual exclusion against the bridge - * module, preventing both of them from being loaded at the same - * time. */ - dp_stp_sap = llc_sap_open(LLC_SAP_BSPAN, dp_stp_rcv); - if (!dp_stp_sap) { - printk(KERN_ERR "openvswitch: can't register sap for STP (probably the bridge module is loaded)\n"); - return -EADDRINUSE; - } + printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR); err = flow_init(); if (err) goto error; - err = register_netdevice_notifier(&dp_device_notifier); + err = vport_init(); if (err) goto error_flow_exit; + err = register_netdevice_notifier(&dp_device_notifier); + if (err) + goto error_vport_exit; + major = register_chrdev(0, "openvswitch", &openvswitch_fops); if (err < 0) goto error_unreg_notifier; - /* Hook into callback used by the bridge to intercept packets. - * Parasites we are. */ - br_handle_frame_hook = dp_frame_hook; - return 0; error_unreg_notifier: unregister_netdevice_notifier(&dp_device_notifier); +error_vport_exit: + vport_exit(); error_flow_exit: flow_exit(); error: @@ -1592,9 +1832,8 @@ static void dp_cleanup(void) rcu_barrier(); unregister_chrdev(major, "openvswitch"); unregister_netdevice_notifier(&dp_device_notifier); + vport_exit(); flow_exit(); - br_handle_frame_hook = NULL; - llc_sap_put(dp_stp_sap); } module_init(dp_init);
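As a usage illustration only: a minimal userspace sketch of driving the
character device that dp_init() registers above.  ODP_DP_CREATE and the
devname argument match the create_dp() entry point in this file; the
/dev/dp<N> node name and the convention that the device minor selects the
datapath index are assumptions made for the example, not something this
patch defines.

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "openvswitch/datapath-protocol.h"

	/* Create the datapath selected by dev_node's minor number, with
	 * 'devname' as the name of its local port, and return an fd on
	 * which further ODP_* ioctls can be issued (-1 on error). */
	static int create_datapath(const char *dev_node, const char *devname)
	{
		int fd = open(dev_node, O_RDONLY);	/* e.g. "/dev/dp0" (assumed) */
		if (fd < 0)
			return -1;
		if (ioctl(fd, ODP_DP_CREATE, devname) < 0) {
			close(fd);
			return -1;
		}
		return fd;
	}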