diff --git a/datapath/datapath.c b/datapath/datapath.c
index 1fba23bf7..fcaafd169 100644
--- a/datapath/datapath.c
+++ b/datapath/datapath.c
@@ -1,13 +1,21 @@
 /*
- * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
- * Distributed under the terms of the GNU GPL version 2.
+ * Copyright (c) 2007-2014 Nicira, Inc.
  *
- * Significant portions of this file may be copied from parts of the Linux
- * kernel, by Linus Torvalds and others.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
  */

-/* Functions for managing the dp interface/device. */
-
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

 #include
@@ -31,72 +39,117 @@
 #include
 #include
 #include
-#include
 #include
-#include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
-#include
 #include
+#include
+#include
+#include

-#include "openvswitch/datapath-protocol.h"
-#include "checksum.h"
 #include "datapath.h"
-#include "actions.h"
 #include "flow.h"
+#include "flow_table.h"
+#include "flow_netlink.h"
 #include "vlan.h"
-#include "tunnel.h"
 #include "vport-internal_dev.h"
+#include "vport-netdev.h"

-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
-    LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
-#error Kernels before 2.6.18 or after 3.0 are not supported by this version of Open vSwitch.
-#endif
+int ovs_net_id __read_mostly;

-int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
-EXPORT_SYMBOL(dp_ioctl_hook);
+static struct genl_family dp_packet_genl_family;
+static struct genl_family dp_flow_genl_family;
+static struct genl_family dp_datapath_genl_family;
+
+static struct genl_multicast_group ovs_dp_flow_multicast_group = {
+	.name = OVS_FLOW_MCGROUP
+};
+
+static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
+	.name = OVS_DATAPATH_MCGROUP
+};
+
+struct genl_multicast_group ovs_dp_vport_multicast_group = {
+	.name = OVS_VPORT_MCGROUP
+};
+
+/* Check if need to build a reply message.
+ * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
+static bool ovs_must_notify(struct genl_info *info,
+			    const struct genl_multicast_group *grp)
+{
+	return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
+	       netlink_has_listeners(genl_info_net(info)->genl_sock, GROUP_ID(grp));
+}
+
+static void ovs_notify(struct genl_family *family, struct genl_multicast_group *grp,
+		       struct sk_buff *skb, struct genl_info *info)
+{
+	genl_notify(family, skb, genl_info_net(info),
+		    info->snd_portid, GROUP_ID(grp), info->nlhdr, GFP_KERNEL);
+}

 /**
  * DOC: Locking:
  *
- * Writes to device state (add/remove datapath, port, set operations on vports,
- * etc.) are protected by RTNL.
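
ovs_must_notify(), added above, lets a command handler skip building a reply
skb when nobody will read it. A minimal caller-side sketch (maybe_alloc_reply
is a hypothetical name; the real callers are the ovs_flow_cmd_* handlers
further down):

    static struct sk_buff *maybe_alloc_reply(struct genl_info *info, size_t size)
    {
            // Build a reply only if the sender asked for an echo (NLM_F_ECHO)
            // or some process subscribed to the multicast group.
            if (!ovs_must_notify(info, &ovs_dp_flow_multicast_group))
                    return NULL;
            return genlmsg_new(size, GFP_KERNEL);
    }
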
- * - * Writes to other state (flow table modifications, set miscellaneous datapath - * parameters such as drop frags, etc.) are protected by genl_mutex. The RTNL - * lock nests inside genl_mutex. + * All writes e.g. Writes to device state (add/remove datapath, port, set + * operations on vports, etc.), Writes to other state (flow table + * modifications, set miscellaneous datapath parameters, etc.) are protected + * by ovs_lock. * * Reads are protected by RCU. * * There are a few special cases (mostly stats) that have their own * synchronization but they nest under all of above and don't interact with * each other. + * + * The RTNL lock nests inside ovs_mutex. */ -/* Global list of datapaths to enable dumping them all out. - * Protected by genl_mutex. - */ -static LIST_HEAD(dps); +static DEFINE_MUTEX(ovs_mutex); -static struct vport *new_vport(const struct vport_parms *); -static int queue_userspace_packets(struct datapath *, struct sk_buff *, - const struct dp_upcall_info *); +void ovs_lock(void) +{ + mutex_lock(&ovs_mutex); +} + +void ovs_unlock(void) +{ + mutex_unlock(&ovs_mutex); +} + +#ifdef CONFIG_LOCKDEP +int lockdep_ovsl_is_held(void) +{ + if (debug_locks) + return lockdep_is_held(&ovs_mutex); + else + return 1; +} +#endif -/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */ -struct datapath *get_dp(int dp_ifindex) +static int queue_gso_packets(struct datapath *dp, struct sk_buff *, + const struct dp_upcall_info *); +static int queue_userspace_packet(struct datapath *dp, struct sk_buff *, + const struct dp_upcall_info *); + +/* Must be called with rcu_read_lock or ovs_mutex. */ +static struct datapath *get_dp(struct net *net, int dp_ifindex) { struct datapath *dp = NULL; struct net_device *dev; rcu_read_lock(); - dev = dev_get_by_index_rcu(&init_net, dp_ifindex); + dev = dev_get_by_index_rcu(net, dp_ifindex); if (dev) { - struct vport *vport = internal_dev_get_vport(dev); + struct vport *vport = ovs_internal_dev_get_vport(dev); if (vport) dp = vport->dp; } @@ -104,515 +157,364 @@ struct datapath *get_dp(int dp_ifindex) return dp; } -EXPORT_SYMBOL_GPL(get_dp); - -/* Must be called with genl_mutex. */ -static struct flow_table *get_table_protected(struct datapath *dp) -{ - return rcu_dereference_protected(dp->table, lockdep_genl_is_held()); -} - -/* Must be called with rcu_read_lock or RTNL lock. */ -static struct vport *get_vport_protected(struct datapath *dp, u16 port_no) -{ - return rcu_dereference_rtnl(dp->ports[port_no]); -} - -/* Must be called with rcu_read_lock or RTNL lock. */ -const char *dp_name(const struct datapath *dp) -{ - return vport_get_name(rcu_dereference_rtnl(dp->ports[OVSP_LOCAL])); -} -static inline size_t br_nlmsg_size(void) +/* Must be called with rcu_read_lock or ovs_mutex. */ +const char *ovs_dp_name(const struct datapath *dp) { - return NLMSG_ALIGN(sizeof(struct ifinfomsg)) - + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ - + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ - + nla_total_size(4) /* IFLA_MASTER */ - + nla_total_size(4) /* IFLA_MTU */ - + nla_total_size(1); /* IFLA_OPERSTATE */ + struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL); + return vport->ops->get_name(vport); } -/* Caller must hold RTNL lock. 
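
The RTNL-based rules this old comment belongs to are superseded by ovs_lock();
lockdep_ovsl_is_held(), defined above, is what lets sleepable writers and RCU
readers share the same pointers safely. An illustrative pairing (ovs_vport_rcu
and ovs_vport_ovsl are assumed to be the accessors from datapath.h):

    // Fast path: RCU read-side critical section, no mutex.
    rcu_read_lock();
    vport = ovs_vport_rcu(dp, port_no);      // rcu_dereference() inside
    rcu_read_unlock();

    // Slow path: all add/remove/set operations serialize on ovs_mutex.
    ovs_lock();
    vport = ovs_vport_ovsl(dp, port_no);     // rcu_dereference_protected(),
    ovs_unlock();                            // checked by lockdep_ovsl_is_held()
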
*/ -static int dp_fill_ifinfo(struct sk_buff *skb, - const struct vport *port, - int event, unsigned int flags) +static int get_dpifindex(struct datapath *dp) { - struct datapath *dp = port->dp; - int ifindex = vport_get_ifindex(port); - struct ifinfomsg *hdr; - struct nlmsghdr *nlh; - - if (ifindex < 0) - return ifindex; - - nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags); - if (nlh == NULL) - return -EMSGSIZE; + struct vport *local; + int ifindex; - hdr = nlmsg_data(nlh); - hdr->ifi_family = AF_BRIDGE; - hdr->__ifi_pad = 0; - hdr->ifi_type = ARPHRD_ETHER; - hdr->ifi_index = ifindex; - hdr->ifi_flags = vport_get_flags(port); - hdr->ifi_change = 0; - - NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port)); - NLA_PUT_U32(skb, IFLA_MASTER, - vport_get_ifindex(get_vport_protected(dp, OVSP_LOCAL))); - NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port)); -#ifdef IFLA_OPERSTATE - NLA_PUT_U8(skb, IFLA_OPERSTATE, - vport_is_running(port) - ? vport_get_operstate(port) - : IF_OPER_DOWN); -#endif + rcu_read_lock(); - NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port)); + local = ovs_vport_rcu(dp, OVSP_LOCAL); + if (local) + ifindex = netdev_vport_priv(local)->dev->ifindex; + else + ifindex = 0; - return nlmsg_end(skb, nlh); + rcu_read_unlock(); -nla_put_failure: - nlmsg_cancel(skb, nlh); - return -EMSGSIZE; + return ifindex; } -/* Caller must hold RTNL lock. */ -static void dp_ifinfo_notify(int event, struct vport *port) +static void destroy_dp_rcu(struct rcu_head *rcu) { - struct sk_buff *skb; - int err = -ENOBUFS; - - skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL); - if (skb == NULL) - goto errout; + struct datapath *dp = container_of(rcu, struct datapath, rcu); - err = dp_fill_ifinfo(skb, port, event, 0); - if (err < 0) { - /* -EMSGSIZE implies BUG in br_nlmsg_size() */ - WARN_ON(err == -EMSGSIZE); - kfree_skb(skb); - goto errout; - } - rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL); - return; -errout: - if (err < 0) - rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err); + ovs_flow_tbl_destroy(&dp->table); + free_percpu(dp->stats_percpu); + release_net(ovs_dp_get_net(dp)); + kfree(dp->ports); + kfree(dp); } -static void release_dp(struct kobject *kobj) +static struct hlist_head *vport_hash_bucket(const struct datapath *dp, + u16 port_no) { - struct datapath *dp = container_of(kobj, struct datapath, ifobj); - kfree(dp); + return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)]; } -static struct kobj_type dp_ktype = { - .release = release_dp -}; - -static void destroy_dp_rcu(struct rcu_head *rcu) +/* Called with ovs_mutex or RCU read lock. */ +struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no) { - struct datapath *dp = container_of(rcu, struct datapath, rcu); + struct vport *vport; + struct hlist_head *head; - flow_tbl_destroy(dp->table); - free_percpu(dp->stats_percpu); - kobject_put(&dp->ifobj); + head = vport_hash_bucket(dp, port_no); + hlist_for_each_entry_rcu(vport, head, dp_hash_node) { + if (vport->port_no == port_no) + return vport; + } + return NULL; } -/* Called with RTNL lock and genl_lock. */ +/* Called with ovs_mutex. 
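
The fixed dp->ports[] array is gone; vports now hash into DP_VPORT_HASH_BUCKETS
hlist chains via vport_hash_bucket() above. Because the bucket count is a power
of two, the reduction is a mask, not a division. A self-contained check of the
arithmetic (the 1024 is an assumption; the real constant lives in datapath.h):

    #include <stdint.h>
    #include <stdio.h>

    #define DP_VPORT_HASH_BUCKETS 1024          // assumed power of two

    static unsigned int bucket_of(uint16_t port_no)
    {
            // x & (N - 1) == x % N whenever N is a power of two
            return port_no & (DP_VPORT_HASH_BUCKETS - 1);
    }

    int main(void)
    {
            printf("port 7    -> bucket %u\n", bucket_of(7));    // 7
            printf("port 1031 -> bucket %u\n", bucket_of(1031)); // 7, same chain
            return 0;
    }
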
*/ static struct vport *new_vport(const struct vport_parms *parms) { struct vport *vport; - vport = vport_add(parms); + vport = ovs_vport_add(parms); if (!IS_ERR(vport)) { struct datapath *dp = parms->dp; + struct hlist_head *head = vport_hash_bucket(dp, vport->port_no); - rcu_assign_pointer(dp->ports[parms->port_no], vport); - list_add(&vport->node, &dp->port_list); - - dp_ifinfo_notify(RTM_NEWLINK, vport); + hlist_add_head_rcu(&vport->dp_hash_node, head); } - return vport; } -/* Called with RTNL lock. */ -void dp_detach_port(struct vport *p) +void ovs_dp_detach_port(struct vport *p) { - ASSERT_RTNL(); - - if (p->port_no != OVSP_LOCAL) - dp_sysfs_del_if(p); - dp_ifinfo_notify(RTM_DELLINK, p); + ASSERT_OVSL(); /* First drop references to device. */ - list_del(&p->node); - rcu_assign_pointer(p->dp->ports[p->port_no], NULL); + hlist_del_rcu(&p->dp_hash_node); /* Then destroy it. */ - vport_del(p); + ovs_vport_del(p); } -/* Must be called with rcu_read_lock. */ -void dp_process_received_packet(struct vport *p, struct sk_buff *skb) +void ovs_dp_process_packet_with_key(struct sk_buff *skb, + struct sw_flow_key *pkt_key) { + const struct vport *p = OVS_CB(skb)->input_vport; struct datapath *dp = p->dp; struct sw_flow *flow; struct dp_stats_percpu *stats; - int stats_counter_off; - int error; - - OVS_CB(skb)->vport = p; - - if (!OVS_CB(skb)->flow) { - struct sw_flow_key key; - int key_len; - bool is_frag; - - /* Extract flow from 'skb' into 'key'. */ - error = flow_extract(skb, p->port_no, &key, &key_len, &is_frag); - if (unlikely(error)) { - kfree_skb(skb); - return; - } + u64 *stats_counter; + u32 n_mask_hit; - if (is_frag && dp->drop_frags) { - consume_skb(skb); - stats_counter_off = offsetof(struct dp_stats_percpu, n_frags); - goto out; - } + stats = this_cpu_ptr(dp->stats_percpu); - /* Look up flow. */ - flow = flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len); - if (unlikely(!flow)) { - struct dp_upcall_info upcall; - - upcall.cmd = OVS_PACKET_CMD_MISS; - upcall.key = &key; - upcall.userdata = 0; - upcall.sample_pool = 0; - upcall.actions = NULL; - upcall.actions_len = 0; - dp_upcall(dp, skb, &upcall); - stats_counter_off = offsetof(struct dp_stats_percpu, n_missed); - goto out; - } + /* Look up flow. */ + flow = ovs_flow_tbl_lookup_stats(&dp->table, pkt_key, &n_mask_hit); + if (unlikely(!flow)) { + struct dp_upcall_info upcall; - OVS_CB(skb)->flow = flow; + upcall.cmd = OVS_PACKET_CMD_MISS; + upcall.key = pkt_key; + upcall.userdata = NULL; + upcall.portid = ovs_vport_find_upcall_portid(p, skb); + ovs_dp_upcall(dp, skb, &upcall); + consume_skb(skb); + stats_counter = &stats->n_missed; + goto out; } - stats_counter_off = offsetof(struct dp_stats_percpu, n_hit); - flow_used(OVS_CB(skb)->flow, skb); - execute_actions(dp, skb); + OVS_CB(skb)->pkt_key = pkt_key; + OVS_CB(skb)->flow = flow; + + ovs_flow_stats_update(OVS_CB(skb)->flow, pkt_key->tp.flags, skb); + ovs_execute_actions(dp, skb); + stats_counter = &stats->n_hit; out: /* Update datapath statistics. 
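
The miss branch above hands the packet to userspace as an OVS_PACKET_CMD_MISS
message carrying the extracted key and the frame itself. A sketch of the
receiving side (libnl-style userspace, illustrative only; handle_frame is a
hypothetical callback):

    struct nlattr *attrs[OVS_PACKET_ATTR_MAX + 1];

    genlmsg_parse(nlh, sizeof(struct ovs_header), attrs,
                  OVS_PACKET_ATTR_MAX, NULL);
    if (attrs[OVS_PACKET_ATTR_PACKET])
            handle_frame(nla_data(attrs[OVS_PACKET_ATTR_PACKET]),
                         nla_len(attrs[OVS_PACKET_ATTR_PACKET]));
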
*/ - local_bh_disable(); - stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id()); - - write_seqcount_begin(&stats->seqlock); - (*(u64 *)((u8 *)stats + stats_counter_off))++; - write_seqcount_end(&stats->seqlock); - - local_bh_enable(); -} - -static void copy_and_csum_skb(struct sk_buff *skb, void *to) -{ - u16 csum_start, csum_offset; - __wsum csum; - - get_skb_csum_pointers(skb, &csum_start, &csum_offset); - csum_start -= skb_headroom(skb); - - skb_copy_bits(skb, 0, to, csum_start); - - csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start, - skb->len - csum_start, 0); - *(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum); -} - -static struct genl_family dp_packet_genl_family = { - .id = GENL_ID_GENERATE, - .hdrsize = sizeof(struct ovs_header), - .name = OVS_PACKET_FAMILY, - .version = 1, - .maxattr = OVS_PACKET_ATTR_MAX -}; - -/* Generic Netlink multicast groups for upcalls. - * - * We really want three unique multicast groups per datapath, but we can't even - * get one, because genl_register_mc_group() takes genl_lock, which is also - * held during Generic Netlink message processing, so trying to acquire - * multicast groups during OVS_DP_NEW processing deadlocks. Instead, we - * preallocate a few groups and use them round-robin for datapaths. Collision - * isn't fatal--multicast listeners should check that the family is the one - * that they want and discard others--but it wastes time and memory to receive - * unwanted messages. - */ -#define PACKET_N_MC_GROUPS 16 -static struct genl_multicast_group packet_mc_groups[PACKET_N_MC_GROUPS]; - -static u32 packet_mc_group(struct datapath *dp, u8 cmd) -{ - u32 idx; - BUILD_BUG_ON_NOT_POWER_OF_2(PACKET_N_MC_GROUPS); - - idx = jhash_2words(dp->dp_ifindex, cmd, 0) & (PACKET_N_MC_GROUPS - 1); - return packet_mc_groups[idx].id; + u64_stats_update_begin(&stats->sync); + (*stats_counter)++; + stats->n_mask_hit += n_mask_hit; + u64_stats_update_end(&stats->sync); } -static int packet_register_mc_groups(void) +/* Must be called with rcu_read_lock. */ +void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb) { - int i; + int error; + struct sw_flow_key key; - for (i = 0; i < PACKET_N_MC_GROUPS; i++) { - struct genl_multicast_group *group = &packet_mc_groups[i]; - int error; + OVS_CB(skb)->input_vport = p; - sprintf(group->name, "packet%d", i); - error = genl_register_mc_group(&dp_packet_genl_family, group); - if (error) - return error; + /* Extract flow from 'skb' into 'key'. */ + error = ovs_flow_extract(skb, p->port_no, &key); + if (unlikely(error)) { + kfree_skb(skb); + return; } - return 0; + + ovs_dp_process_packet_with_key(skb, &key); } -int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info) +int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb, + const struct dp_upcall_info *upcall_info) { struct dp_stats_percpu *stats; int err; - WARN_ON_ONCE(skb_shared(skb)); - - forward_ip_summed(skb, true); - - /* Break apart GSO packets into their component pieces. Otherwise - * userspace may try to stuff a 64kB packet into a 1500-byte MTU. 
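
In the new code this segmentation lives in queue_gso_packets() below: the
segments returned by __skb_gso_segment() stay linked through skb->next and are
queued one by one, with later UDP fragments getting a patched key (ip.frag set
to OVS_FRAG_TYPE_LATER) since flow extraction only saw the first fragment.
Roughly, with error handling elided:

    segs = __skb_gso_segment(skb, NETIF_F_SG, false);
    for (skb = segs; skb; skb = skb->next)      // walk the segment list
            queue_userspace_packet(dp, skb, upcall_info);
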
*/ - if (skb_is_gso(skb)) { - struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM); - - if (IS_ERR(nskb)) { - kfree_skb(skb); - err = PTR_ERR(nskb); - goto err; - } - consume_skb(skb); - skb = nskb; + if (upcall_info->portid == 0) { + err = -ENOTCONN; + goto err; } - err = queue_userspace_packets(dp, skb, upcall_info); + if (!skb_is_gso(skb)) + err = queue_userspace_packet(dp, skb, upcall_info); + else + err = queue_gso_packets(dp, skb, upcall_info); if (err) goto err; return 0; err: - local_bh_disable(); - stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id()); + stats = this_cpu_ptr(dp->stats_percpu); - write_seqcount_begin(&stats->seqlock); + u64_stats_update_begin(&stats->sync); stats->n_lost++; - write_seqcount_end(&stats->seqlock); - - local_bh_enable(); + u64_stats_update_end(&stats->sync); return err; } -/* Send each packet in the 'skb' list to userspace for 'dp' as directed by - * 'upcall_info'. There will be only one packet unless we broke up a GSO - * packet. - */ -static int queue_userspace_packets(struct datapath *dp, struct sk_buff *skb, - const struct dp_upcall_info *upcall_info) +static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, + const struct dp_upcall_info *upcall_info) { - u32 group = packet_mc_group(dp, upcall_info->cmd); - struct sk_buff *nskb; + unsigned short gso_type = skb_shinfo(skb)->gso_type; + struct dp_upcall_info later_info; + struct sw_flow_key later_key; + struct sk_buff *segs, *nskb; int err; + segs = __skb_gso_segment(skb, NETIF_F_SG, false); + if (IS_ERR(segs)) + return PTR_ERR(segs); + + /* Queue all of the segments. */ + skb = segs; do { - struct ovs_header *upcall; - struct sk_buff *user_skb; /* to be queued to userspace */ - struct nlattr *nla; - unsigned int len; + err = queue_userspace_packet(dp, skb, upcall_info); + if (err) + break; - nskb = skb->next; - skb->next = NULL; + if (skb == segs && gso_type & SKB_GSO_UDP) { + /* The initial flow key extracted by ovs_flow_extract() + * in this case is for a first fragment, so we need to + * properly mark later fragments. + */ + later_key = *upcall_info->key; + later_key.ip.frag = OVS_FRAG_TYPE_LATER; + + later_info = *upcall_info; + later_info.key = &later_key; + upcall_info = &later_info; + } + } while ((skb = skb->next)); - err = vlan_deaccel_tag(skb); - if (unlikely(err)) - goto err_kfree_skbs; + /* Free all of the segments. 
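
The teardown below deliberately distinguishes the two skb destructors: a
delivered segment is consumed, a failed one is dropped, which keeps tools
hooked on the skb:kfree_skb tracepoint honest. The same pattern as a
stand-alone helper (free_segs is a hypothetical name):

    static void free_segs(struct sk_buff *segs, int err)
    {
            struct sk_buff *skb = segs, *next;

            while (skb) {
                    next = skb->next;
                    if (err)
                            kfree_skb(skb);    // counted as a packet drop
                    else
                            consume_skb(skb);  // normal end of life, no drop event
                    skb = next;
            }
    }
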
*/ + skb = segs; + do { + nskb = skb->next; + if (err) + kfree_skb(skb); + else + consume_skb(skb); + } while ((skb = nskb)); + return err; +} - if (nla_attr_size(skb->len) > USHRT_MAX) { - err = -EFBIG; - goto err_kfree_skbs; - } +static size_t key_attr_size(void) +{ + return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */ + + nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */ + + nla_total_size(8) /* OVS_TUNNEL_KEY_ATTR_ID */ + + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */ + + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */ + + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TOS */ + + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */ + + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */ + + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */ + + nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */ + + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */ + + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */ + + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ + + nla_total_size(4) /* OVS_KEY_ATTR_8021Q */ + + nla_total_size(0) /* OVS_KEY_ATTR_ENCAP */ + + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ + + nla_total_size(40) /* OVS_KEY_ATTR_IPV6 */ + + nla_total_size(2) /* OVS_KEY_ATTR_ICMPV6 */ + + nla_total_size(28); /* OVS_KEY_ATTR_ND */ +} - len = sizeof(struct ovs_header); - len += nla_total_size(skb->len); - len += nla_total_size(FLOW_BUFSIZE); - if (upcall_info->userdata) - len += nla_total_size(8); - if (upcall_info->sample_pool) - len += nla_total_size(4); - if (upcall_info->actions_len) - len += nla_total_size(upcall_info->actions_len); - - user_skb = genlmsg_new(len, GFP_ATOMIC); - if (!user_skb) { - netlink_set_err(INIT_NET_GENL_SOCK, 0, group, -ENOBUFS); - err = -ENOMEM; - goto err_kfree_skbs; - } +static size_t upcall_msg_size(const struct nlattr *userdata, + unsigned int hdrlen) +{ + size_t size = NLMSG_ALIGN(sizeof(struct ovs_header)) + + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */ + + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */ - upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd); - upcall->dp_ifindex = dp->dp_ifindex; + /* OVS_PACKET_ATTR_USERDATA */ + if (userdata) + size += NLA_ALIGN(userdata->nla_len); - nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY); - flow_to_nlattrs(upcall_info->key, user_skb); - nla_nest_end(user_skb, nla); + return size; +} - if (upcall_info->userdata) - nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA, upcall_info->userdata); - if (upcall_info->sample_pool) - nla_put_u32(user_skb, OVS_PACKET_ATTR_SAMPLE_POOL, upcall_info->sample_pool); - if (upcall_info->actions_len) { - const struct nlattr *actions = upcall_info->actions; - u32 actions_len = upcall_info->actions_len; +static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, + const struct dp_upcall_info *upcall_info) +{ + struct ovs_header *upcall; + struct sk_buff *nskb = NULL; + struct sk_buff *user_skb; /* to be queued to userspace */ + struct nlattr *nla; + struct genl_info info = { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) + .dst_sk = ovs_dp_get_net(dp)->genl_sock, +#endif + .snd_portid = upcall_info->portid, + }; + size_t len; + unsigned int hlen; + int err, dp_ifindex; + + dp_ifindex = get_dpifindex(dp); + if (!dp_ifindex) + return -ENODEV; - nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS); - memcpy(__skb_put(user_skb, actions_len), actions, actions_len); - nla_nest_end(user_skb, nla); - } + if (vlan_tx_tag_present(skb)) { + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + return -ENOMEM; - nla = 
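
/* key_attr_size() and upcall_msg_size() above precompute worst-case message
 * sizes from the netlink alignment rules. The arithmetic can be checked in
 * isolation (userspace sketch; the constants mirror linux/netlink.h):
 *
 *     #include <stdio.h>
 *
 *     #define NLA_ALIGNTO       4
 *     #define NLA_ALIGN(len)    (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
 *     #define NLA_HDRLEN        4                      // aligned header size
 *     #define nla_total_size(n) NLA_ALIGN(NLA_HDRLEN + (n))
 *
 *     int main(void)
 *     {
 *             printf("%d\n", nla_total_size(1));  // 8: header + 1 byte + pad
 *             printf("%d\n", nla_total_size(8));  // 12: header + 8 bytes
 *             return 0;
 *     }
 */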
__nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len); - if (skb->ip_summed == CHECKSUM_PARTIAL) - copy_and_csum_skb(skb, nla_data(nla)); - else - skb_copy_bits(skb, 0, nla_data(nla), skb->len); + nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb)); + if (!nskb) + return -ENOMEM; - err = genlmsg_multicast(user_skb, 0, group, GFP_ATOMIC); - if (err) - goto err_kfree_skbs; + vlan_set_tci(nskb, 0); - consume_skb(skb); skb = nskb; - } while (skb); - return 0; + } -err_kfree_skbs: - kfree_skb(skb); - while ((skb = nskb) != NULL) { - nskb = skb->next; - kfree_skb(skb); + if (nla_attr_size(skb->len) > USHRT_MAX) { + err = -EFBIG; + goto out; } - return err; -} -/* Called with genl_mutex. */ -static int flush_flows(int dp_ifindex) -{ - struct flow_table *old_table; - struct flow_table *new_table; - struct datapath *dp; + /* Complete checksum if needed */ + if (skb->ip_summed == CHECKSUM_PARTIAL && + (err = skb_checksum_help(skb))) + goto out; - dp = get_dp(dp_ifindex); - if (!dp) - return -ENODEV; + /* Older versions of OVS user space enforce alignment of the last + * Netlink attribute to NLA_ALIGNTO which would require extensive + * padding logic. Only perform zerocopy if padding is not required. + */ + if (dp->user_features & OVS_DP_F_UNALIGNED) + hlen = skb_zerocopy_headlen(skb); + else + hlen = skb->len; - old_table = get_table_protected(dp); - new_table = flow_tbl_alloc(TBL_MIN_BUCKETS); - if (!new_table) - return -ENOMEM; + len = upcall_msg_size(upcall_info->userdata, hlen); + user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC); + if (!user_skb) { + err = -ENOMEM; + goto out; + } - rcu_assign_pointer(dp->table, new_table); + upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, + 0, upcall_info->cmd); + upcall->dp_ifindex = dp_ifindex; - flow_tbl_deferred_destroy(old_table); - return 0; -} + nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY); + ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb); + nla_nest_end(user_skb, nla); -static int validate_actions(const struct nlattr *attr) -{ - const struct nlattr *a; - int rem; - - nla_for_each_nested(a, attr, rem) { - static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = { - [OVS_ACTION_ATTR_OUTPUT] = 4, - [OVS_ACTION_ATTR_USERSPACE] = 8, - [OVS_ACTION_ATTR_PUSH_VLAN] = 2, - [OVS_ACTION_ATTR_POP_VLAN] = 0, - [OVS_ACTION_ATTR_SET_DL_SRC] = ETH_ALEN, - [OVS_ACTION_ATTR_SET_DL_DST] = ETH_ALEN, - [OVS_ACTION_ATTR_SET_NW_SRC] = 4, - [OVS_ACTION_ATTR_SET_NW_DST] = 4, - [OVS_ACTION_ATTR_SET_NW_TOS] = 1, - [OVS_ACTION_ATTR_SET_TP_SRC] = 2, - [OVS_ACTION_ATTR_SET_TP_DST] = 2, - [OVS_ACTION_ATTR_SET_TUNNEL] = 8, - [OVS_ACTION_ATTR_SET_PRIORITY] = 4, - [OVS_ACTION_ATTR_POP_PRIORITY] = 0, - }; - int type = nla_type(a); - - if (type > OVS_ACTION_ATTR_MAX || nla_len(a) != action_lens[type]) - return -EINVAL; - - switch (type) { - case OVS_ACTION_ATTR_UNSPEC: - return -EINVAL; - - case OVS_ACTION_ATTR_USERSPACE: - case OVS_ACTION_ATTR_POP_VLAN: - case OVS_ACTION_ATTR_SET_DL_SRC: - case OVS_ACTION_ATTR_SET_DL_DST: - case OVS_ACTION_ATTR_SET_NW_SRC: - case OVS_ACTION_ATTR_SET_NW_DST: - case OVS_ACTION_ATTR_SET_TP_SRC: - case OVS_ACTION_ATTR_SET_TP_DST: - case OVS_ACTION_ATTR_SET_TUNNEL: - case OVS_ACTION_ATTR_SET_PRIORITY: - case OVS_ACTION_ATTR_POP_PRIORITY: - /* No validation needed. 
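
The table above is the whole validator: one array maps each action type to its
only legal payload length, so a single comparison rejects malformed attributes.
In the rewritten code this check moves out of datapath.c into
ovs_nla_copy_actions() (flow_netlink.c), but the shape is worth keeping in mind:

    int type = nla_type(a);

    // reject unknown types and wrong-sized payloads in one go
    if (type > OVS_ACTION_ATTR_MAX || nla_len(a) != action_lens[type])
            return -EINVAL;
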
*/ - break; + if (upcall_info->userdata) + __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA, + nla_len(upcall_info->userdata), + nla_data(upcall_info->userdata)); - case OVS_ACTION_ATTR_OUTPUT: - if (nla_get_u32(a) >= DP_MAX_PORTS) - return -EINVAL; - break; + /* Only reserve room for attribute header, packet data is added + * in skb_zerocopy() */ + if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) { + err = -ENOBUFS; + goto out; + } + nla->nla_len = nla_attr_size(skb->len); - case OVS_ACTION_ATTR_PUSH_VLAN: - if (nla_get_be16(a) & htons(VLAN_CFI_MASK)) - return -EINVAL; - break; + err = skb_zerocopy(user_skb, skb, skb->len, hlen); + if (err) + goto out; - case OVS_ACTION_ATTR_SET_NW_TOS: - if (nla_get_u8(a) & INET_ECN_MASK) - return -EINVAL; - break; + /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */ + if (!(dp->user_features & OVS_DP_F_UNALIGNED)) { + size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len; - default: - return -EOPNOTSUPP; - } + if (plen > 0) + memset(skb_put(user_skb, plen), 0, plen); } - if (rem > 0) - return -EINVAL; + ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len; - return 0; -} -static void clear_stats(struct sw_flow *flow) -{ - flow->used = 0; - flow->tcp_flags = 0; - flow->packet_count = 0; - flow->byte_count = 0; + err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid); +out: + if (err) + skb_tx_error(skb); + kfree_skb(nskb); + return err; } static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) @@ -624,19 +526,13 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) struct sw_flow *flow; struct datapath *dp; struct ethhdr *eth; - bool is_frag; + struct vport *input_vport; int len; int err; - int key_len; err = -EINVAL; if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || - !a[OVS_PACKET_ATTR_ACTIONS] || - nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN) - goto err; - - err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS]); - if (err) + !a[OVS_PACKET_ATTR_ACTIONS]) goto err; len = nla_len(a[OVS_PACKET_ATTR_PACKET]); @@ -646,7 +542,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) goto err; skb_reserve(packet, NET_IP_ALIGN); - memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len); + nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len); skb_reset_mac_header(packet); eth = eth_hdr(packet); @@ -654,57 +550,67 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) /* Normally, setting the skb 'protocol' field would be handled by a * call to eth_type_trans(), but it assumes there's a sending * device, which we may not have. */ - if (ntohs(eth->h_proto) >= 1536) + if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) packet->protocol = eth->h_proto; else packet->protocol = htons(ETH_P_802_2); /* Build an sw_flow for sending this packet. 
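
The protocol fixup above hinges on the classic 0x0600 boundary: values below it
are 802.3 length fields, not EtherTypes. ETH_P_802_3_MIN gives the magic number
a name; the old code compared against a bare 1536:

    // ETH_P_802_3_MIN == 0x0600 == 1536
    packet->protocol = ntohs(eth->h_proto) >= ETH_P_802_3_MIN
                       ? eth->h_proto          // a real EtherType, e.g. 0x0800
                       : htons(ETH_P_802_2);   // 802.3 frame: fall back to LLC
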
*/ - flow = flow_alloc(); + flow = ovs_flow_alloc(); err = PTR_ERR(flow); if (IS_ERR(flow)) goto err_kfree_skb; - err = flow_extract(packet, -1, &flow->key, &key_len, &is_frag); + err = ovs_flow_extract(packet, -1, &flow->key); if (err) - goto err_flow_put; + goto err_flow_free; - err = flow_metadata_from_nlattrs(&flow->key.eth.in_port, - &flow->key.eth.tun_id, - a[OVS_PACKET_ATTR_KEY]); + err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]); if (err) - goto err_flow_put; - - flow->hash = flow_hash(&flow->key, key_len); - - acts = flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]); + goto err_flow_free; + acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS])); err = PTR_ERR(acts); if (IS_ERR(acts)) - goto err_flow_put; + goto err_flow_free; + + err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], + &flow->key, 0, &acts); rcu_assign_pointer(flow->sf_acts, acts); + if (err) + goto err_flow_free; OVS_CB(packet)->flow = flow; + OVS_CB(packet)->pkt_key = &flow->key; + packet->priority = flow->key.phy.priority; + packet->mark = flow->key.phy.skb_mark; rcu_read_lock(); - dp = get_dp(ovs_header->dp_ifindex); + dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); err = -ENODEV; if (!dp) goto err_unlock; - if (flow->key.eth.in_port < DP_MAX_PORTS) - OVS_CB(packet)->vport = get_vport_protected(dp, - flow->key.eth.in_port); + input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port); + if (!input_vport) + input_vport = ovs_vport_rcu(dp, OVSP_LOCAL); + + if (!input_vport) + goto err_unlock; + + OVS_CB(packet)->input_vport = input_vport; - err = execute_actions(dp, packet); + local_bh_disable(); + err = ovs_execute_actions(dp, packet); + local_bh_enable(); rcu_read_unlock(); - flow_put(flow); + ovs_flow_free(flow, false); return err; err_unlock: rcu_read_unlock(); -err_flow_put: - flow_put(flow); +err_flow_free: + ovs_flow_free(flow, false); err_kfree_skb: kfree_skb(packet); err: @@ -712,7 +618,7 @@ err: } static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = { - [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC }, + [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN }, [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED }, [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED }, }; @@ -725,96 +631,113 @@ static struct genl_ops dp_packet_genl_ops[] = { } }; -static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats) +static struct genl_family dp_packet_genl_family = { + .id = GENL_ID_GENERATE, + .hdrsize = sizeof(struct ovs_header), + .name = OVS_PACKET_FAMILY, + .version = OVS_PACKET_VERSION, + .maxattr = OVS_PACKET_ATTR_MAX, + .netnsok = true, + .parallel_ops = true, + .ops = dp_packet_genl_ops, + .n_ops = ARRAY_SIZE(dp_packet_genl_ops), +}; + +static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats, + struct ovs_dp_megaflow_stats *mega_stats) { int i; - struct flow_table *table = get_table_protected(dp); - stats->n_flows = flow_tbl_count(table); + memset(mega_stats, 0, sizeof(*mega_stats)); + + stats->n_flows = ovs_flow_tbl_count(&dp->table); + mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table); + + stats->n_hit = stats->n_missed = stats->n_lost = 0; - stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0; for_each_possible_cpu(i) { const struct dp_stats_percpu *percpu_stats; struct dp_stats_percpu local_stats; - unsigned seqcount; + unsigned int start; percpu_stats = per_cpu_ptr(dp->stats_percpu, i); do { - seqcount = read_seqcount_begin(&percpu_stats->seqlock); + start = u64_stats_fetch_begin_bh(&percpu_stats->sync); 
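/* This loop is the read side of the u64_stats pattern; the write side is the
 * u64_stats_update_begin()/end() pair on the packet path above. On 64-bit
 * kernels all four calls compile to nothing; on 32-bit SMP the sync field is
 * a seqcount that detects and retries torn 64-bit reads:
 *
 *     do {
 *             start = u64_stats_fetch_begin_bh(&p->sync);
 *             snapshot = *p;                  // may tear on 32-bit ...
 *     } while (u64_stats_fetch_retry_bh(&p->sync, start));  // ... so retry
 */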
local_stats = *percpu_stats; - } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount)); + } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start)); - stats->n_frags += local_stats.n_frags; stats->n_hit += local_stats.n_hit; stats->n_missed += local_stats.n_missed; stats->n_lost += local_stats.n_lost; + mega_stats->n_mask_hit += local_stats.n_mask_hit; } } -static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = { - [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED }, - [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED }, - [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG }, -}; - -static struct genl_family dp_flow_genl_family = { - .id = GENL_ID_GENERATE, - .hdrsize = sizeof(struct ovs_header), - .name = OVS_FLOW_FAMILY, - .version = 1, - .maxattr = OVS_FLOW_ATTR_MAX -}; - -static struct genl_multicast_group dp_flow_multicast_group = { - .name = OVS_FLOW_MCGROUP -}; +static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts) +{ + return NLMSG_ALIGN(sizeof(struct ovs_header)) + + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */ + + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */ + + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */ + + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */ + + nla_total_size(8) /* OVS_FLOW_ATTR_USED */ + + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */ +} -/* Called with genl_lock. */ -static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, - struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd) +/* Called with ovs_mutex or RCU read lock. */ +static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex, + struct sk_buff *skb, u32 portid, + u32 seq, u32 flags, u8 cmd) { const int skb_orig_len = skb->len; - const struct sw_flow_actions *sf_acts; + struct nlattr *start; struct ovs_flow_stats stats; + __be16 tcp_flags; + unsigned long used; struct ovs_header *ovs_header; struct nlattr *nla; - unsigned long used; - u8 tcp_flags; int err; - sf_acts = rcu_dereference_protected(flow->sf_acts, - lockdep_genl_is_held()); - - ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd); + ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd); if (!ovs_header) return -EMSGSIZE; - ovs_header->dp_ifindex = dp->dp_ifindex; + ovs_header->dp_ifindex = dp_ifindex; + /* Fill flow key. 
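
Every variable-length piece of the flow message below is wrapped in a nested
attribute. The nest helpers write a placeholder header first and patch its
length afterwards, which is also what makes the skip-the-actions fallback
further down possible:

    struct nlattr *nest = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);

    if (!nest)
            goto nla_put_failure;
    // ... emit the individual OVS_KEY_ATTR_* attributes here ...
    nla_nest_end(skb, nest);   // fixes up nest->nla_len to cover the children
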
*/ nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY); if (!nla) goto nla_put_failure; - err = flow_to_nlattrs(&flow->key, skb); + + err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb); + if (err) + goto error; + nla_nest_end(skb, nla); + + nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK); + if (!nla) + goto nla_put_failure; + + err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb); if (err) goto error; + nla_nest_end(skb, nla); - spin_lock_bh(&flow->lock); - used = flow->used; - stats.n_packets = flow->packet_count; - stats.n_bytes = flow->byte_count; - tcp_flags = flow->tcp_flags; - spin_unlock_bh(&flow->lock); + ovs_flow_stats_get(flow, &stats, &used, &tcp_flags); - if (used) - NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, flow_used_time(used)); + if (used && + nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used))) + goto nla_put_failure; - if (stats.n_packets) - NLA_PUT(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats); + if (stats.n_packets && + nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats)) + goto nla_put_failure; - if (tcp_flags) - NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags); + if ((u8)ntohs(tcp_flags) && + nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags))) + goto nla_put_failure; /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if * this is the first flow to be dumped into 'skb'. This is unusual for @@ -826,10 +749,24 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, * This can only fail for dump operations because the skb is always * properly sized for single flows. */ - err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len, - sf_acts->actions); - if (err < 0 && skb_orig_len) - goto error; + start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS); + if (start) { + const struct sw_flow_actions *sf_acts; + + sf_acts = rcu_dereference_ovsl(flow->sf_acts); + err = ovs_nla_put_actions(sf_acts->actions, + sf_acts->actions_len, skb); + + if (!err) + nla_nest_end(skb, start); + else { + if (skb_orig_len) + goto error; + + nla_nest_cancel(skb, start); + } + } else if (skb_orig_len) + goto nla_put_failure; return genlmsg_end(skb, ovs_header); @@ -840,118 +777,128 @@ error: return err; } -static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow) +/* May not be called with RCU read lock. */ +static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts, + struct genl_info *info, + bool always) { - const struct sw_flow_actions *sf_acts; - int len; + struct sk_buff *skb; + + if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group)) + return NULL; - sf_acts = rcu_dereference_protected(flow->sf_acts, - lockdep_genl_is_held()); + skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL); + + if (!skb) + return ERR_PTR(-ENOMEM); - len = nla_total_size(FLOW_BUFSIZE); /* OVS_FLOW_ATTR_KEY */ - len += nla_total_size(sf_acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */ - len += nla_total_size(sizeof(struct ovs_flow_stats)); /* OVS_FLOW_ATTR_STATS */ - len += nla_total_size(1); /* OVS_FLOW_ATTR_TCP_FLAGS */ - len += nla_total_size(8); /* OVS_FLOW_ATTR_USED */ - return genlmsg_new(NLMSG_ALIGN(sizeof(struct ovs_header)) + len, GFP_KERNEL); + return skb; } -static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow, struct datapath *dp, - u32 pid, u32 seq, u8 cmd) +/* Called with ovs_mutex. 
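
The skb_orig_len dance above implements best-effort dumps: when the actions
attribute does not fit into a dump buffer that already carries earlier flows,
the function bails out so the flow can be retried in a fresh skb; if it is the
first flow, the entry is emitted without actions instead of failing the dump.
Condensed:

    if (actions_did_not_fit) {              // hypothetical condition name
            if (skb_orig_len)               // mid-dump: earlier flows present
                    goto error;             // retried in the next dump skb
            nla_nest_cancel(skb, start);    // first flow: ship without actions
    }
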
*/ +static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow, + int dp_ifindex, + struct genl_info *info, u8 cmd, + bool always) { struct sk_buff *skb; int retval; - skb = ovs_flow_cmd_alloc_info(flow); - if (!skb) - return ERR_PTR(-ENOMEM); + skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info, + always); + if (!skb || IS_ERR(skb)) + return skb; - retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd); + retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb, + info->snd_portid, info->snd_seq, 0, + cmd); BUG_ON(retval < 0); return skb; } -static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) +static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct ovs_header *ovs_header = info->userhdr; - struct sw_flow_key key; - struct sw_flow *flow; + struct sw_flow *flow, *new_flow; + struct sw_flow_mask mask; struct sk_buff *reply; struct datapath *dp; - struct flow_table *table; + struct sw_flow_actions *acts; + struct sw_flow_match match; int error; - int key_len; - /* Extract key. */ + /* Must have key and actions. */ error = -EINVAL; if (!a[OVS_FLOW_ATTR_KEY]) goto error; - error = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); - if (error) + if (!a[OVS_FLOW_ATTR_ACTIONS]) goto error; - /* Validate actions. */ - if (a[OVS_FLOW_ATTR_ACTIONS]) { - error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS]); - if (error) - goto error; - } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) { - error = -EINVAL; + /* Most of the time we need to allocate a new flow, do it before + * locking. */ + new_flow = ovs_flow_alloc(); + if (IS_ERR(new_flow)) { + error = PTR_ERR(new_flow); goto error; } - dp = get_dp(ovs_header->dp_ifindex); - error = -ENODEV; - if (!dp) - goto error; - - table = get_table_protected(dp); - flow = flow_tbl_lookup(table, &key, key_len); - if (!flow) { - struct sw_flow_actions *acts; + /* Extract key. */ + ovs_match_init(&match, &new_flow->unmasked_key, &mask); + error = ovs_nla_get_match(&match, + a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]); + if (error) + goto err_kfree_flow; - /* Bail out if we're not allowed to create a new flow. */ - error = -ENOENT; - if (info->genlhdr->cmd == OVS_FLOW_CMD_SET) - goto error; + ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask); - /* Expand table, if necessary, to make room. */ - if (flow_tbl_need_to_expand(table)) { - struct flow_table *new_table; + /* Validate actions. */ + acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS])); + error = PTR_ERR(acts); + if (IS_ERR(acts)) + goto err_kfree_flow; - new_table = flow_tbl_expand(table); - if (!IS_ERR(new_table)) { - rcu_assign_pointer(dp->table, new_table); - flow_tbl_deferred_destroy(table); - table = get_table_protected(dp); - } - } + error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key, + 0, &acts); + if (error) { + OVS_NLERR("Flow actions may not be safe on all matching packets.\n"); + goto err_kfree_acts; + } - /* Allocate flow. */ - flow = flow_alloc(); - if (IS_ERR(flow)) { - error = PTR_ERR(flow); - goto error; - } - flow->key = key; - clear_stats(flow); + reply = ovs_flow_cmd_alloc_info(acts, info, false); + if (IS_ERR(reply)) { + error = PTR_ERR(reply); + goto err_kfree_acts; + } - /* Obtain actions. 
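
The megaflow machinery visible above is new in this version: userspace supplies
a key plus an optional OVS_FLOW_ATTR_MASK, and the entry is installed under
key AND mask. The conceptual core of ovs_flow_mask_key() (the real code walks
only the mask's valid range, in long-sized steps):

    const u8 *k = (const u8 *)unmasked_key;
    const u8 *m = (const u8 *)mask_key;
    u8 *dst = (u8 *)masked_key;
    size_t i;

    for (i = 0; i < key_bytes; i++)
            dst[i] = k[i] & m[i];   // one entry now covers many microflows
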
*/ - acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]); - error = PTR_ERR(acts); - if (IS_ERR(acts)) - goto error_free_flow; - rcu_assign_pointer(flow->sf_acts, acts); + ovs_lock(); + dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); + if (unlikely(!dp)) { + error = -ENODEV; + goto err_unlock_ovs; + } + /* Check if this is a duplicate flow */ + flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->unmasked_key); + if (likely(!flow)) { + rcu_assign_pointer(new_flow->sf_acts, acts); /* Put flow in bucket. */ - flow->hash = flow_hash(&key, key_len); - flow_tbl_insert(table, flow); + error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask); + if (unlikely(error)) { + acts = NULL; + goto err_unlock_ovs; + } - reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, - info->snd_seq, OVS_FLOW_CMD_NEW); + if (unlikely(reply)) { + error = ovs_flow_cmd_fill_info(new_flow, + ovs_header->dp_ifindex, + reply, info->snd_portid, + info->snd_seq, 0, + OVS_FLOW_CMD_NEW); + BUG_ON(error < 0); + } + ovs_unlock(); } else { - /* We found a matching flow. */ struct sw_flow_actions *old_acts; /* Bail out if we're not allowed to modify an existing flow. @@ -960,50 +907,154 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) * request. We also accept NLM_F_EXCL in case that bug ever * gets fixed. */ - error = -EEXIST; - if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW && - info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) + if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE + | NLM_F_EXCL))) { + error = -EEXIST; + goto err_unlock_ovs; + } + /* The unmasked key has to be the same for flow updates. */ + if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) { + error = -EEXIST; + goto err_unlock_ovs; + } + /* Update actions. */ + old_acts = ovsl_dereference(flow->sf_acts); + rcu_assign_pointer(flow->sf_acts, acts); + + if (unlikely(reply)) { + error = ovs_flow_cmd_fill_info(flow, + ovs_header->dp_ifindex, + reply, info->snd_portid, + info->snd_seq, 0, + OVS_FLOW_CMD_NEW); + BUG_ON(error < 0); + } + ovs_unlock(); + + ovs_nla_free_flow_actions(old_acts); + ovs_flow_free(new_flow, false); + } + + if (reply) + ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info); + return 0; + +err_unlock_ovs: + ovs_unlock(); + kfree_skb(reply); +err_kfree_acts: + kfree(acts); +err_kfree_flow: + ovs_flow_free(new_flow, false); +error: + return error; +} + +static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr **a = info->attrs; + struct ovs_header *ovs_header = info->userhdr; + struct sw_flow_key key, masked_key; + struct sw_flow *flow; + struct sw_flow_mask mask; + struct sk_buff *reply = NULL; + struct datapath *dp; + struct sw_flow_actions *old_acts = NULL, *acts = NULL; + struct sw_flow_match match; + int error; + + /* Extract key. */ + error = -EINVAL; + if (!a[OVS_FLOW_ATTR_KEY]) + goto error; + + ovs_match_init(&match, &key, &mask); + error = ovs_nla_get_match(&match, + a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]); + if (error) + goto error; + + /* Validate actions. */ + if (a[OVS_FLOW_ATTR_ACTIONS]) { + acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS])); + error = PTR_ERR(acts); + if (IS_ERR(acts)) goto error; - /* Update actions. 
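
Swapping a flow's actions never stalls the packet path: readers inside
rcu_read_lock() keep the copy they already dereferenced, and the old buffer is
only freed after a grace period (sw_flow_actions presumably carries an rcu_head
for this; the free helper defers the actual kfree). The pattern:

    old_acts = ovsl_dereference(flow->sf_acts);   // ovs_mutex held
    rcu_assign_pointer(flow->sf_acts, acts);      // publish the new actions
    ovs_nla_free_flow_actions(old_acts);          // deferred free via RCU
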
*/ - old_acts = rcu_dereference_protected(flow->sf_acts, - lockdep_genl_is_held()); - if (a[OVS_FLOW_ATTR_ACTIONS] && - (old_acts->actions_len != nla_len(a[OVS_FLOW_ATTR_ACTIONS]) || - memcmp(old_acts->actions, nla_data(a[OVS_FLOW_ATTR_ACTIONS]), - old_acts->actions_len))) { - struct sw_flow_actions *new_acts; - - new_acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]); - error = PTR_ERR(new_acts); - if (IS_ERR(new_acts)) - goto error; + ovs_flow_mask_key(&masked_key, &key, &mask); + error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], + &masked_key, 0, &acts); + if (error) { + OVS_NLERR("Flow actions may not be safe on all matching packets.\n"); + goto err_kfree_acts; + } + } - rcu_assign_pointer(flow->sf_acts, new_acts); - flow_deferred_free_acts(old_acts); + /* Can allocate before locking if have acts. */ + if (acts) { + reply = ovs_flow_cmd_alloc_info(acts, info, false); + if (IS_ERR(reply)) { + error = PTR_ERR(reply); + goto err_kfree_acts; } + } - reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, - info->snd_seq, OVS_FLOW_CMD_NEW); + ovs_lock(); + dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); + if (unlikely(!dp)) { + error = -ENODEV; + goto err_unlock_ovs; + } + /* Check that the flow exists. */ + flow = ovs_flow_tbl_lookup(&dp->table, &key); + if (unlikely(!flow)) { + error = -ENOENT; + goto err_unlock_ovs; + } + /* The unmasked key has to be the same for flow updates. */ + if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) { + error = -EEXIST; + goto err_unlock_ovs; + } + /* Update actions, if present. */ + if (likely(acts)) { + old_acts = ovsl_dereference(flow->sf_acts); + rcu_assign_pointer(flow->sf_acts, acts); - /* Clear stats. */ - if (a[OVS_FLOW_ATTR_CLEAR]) { - spin_lock_bh(&flow->lock); - clear_stats(flow); - spin_unlock_bh(&flow->lock); + if (unlikely(reply)) { + error = ovs_flow_cmd_fill_info(flow, + ovs_header->dp_ifindex, + reply, info->snd_portid, + info->snd_seq, 0, + OVS_FLOW_CMD_NEW); + BUG_ON(error < 0); + } + } else { + /* Could not alloc without acts before locking. */ + reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, + info, OVS_FLOW_CMD_NEW, false); + if (unlikely(IS_ERR(reply))) { + error = PTR_ERR(reply); + goto err_unlock_ovs; } } - if (!IS_ERR(reply)) - genl_notify(reply, genl_info_net(info), info->snd_pid, - dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL); - else - netlink_set_err(INIT_NET_GENL_SOCK, 0, - dp_flow_multicast_group.id, PTR_ERR(reply)); + /* Clear stats. 
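
Worth noting in ovs_flow_cmd_set() above: when the reply size is already known
(actions were supplied), the reply skb is allocated before ovs_lock(), so
allocation failures surface early and the mutex hold time stays short:

    reply = ovs_flow_cmd_alloc_info(acts, info, false);  // no lock held yet
    ovs_lock();
    // ... lookup, pointer swap and reply fill happen under the lock ...
    ovs_unlock();
    ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
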
*/ + if (a[OVS_FLOW_ATTR_CLEAR]) + ovs_flow_stats_clear(flow); + ovs_unlock(); + + if (reply) + ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info); + if (old_acts) + ovs_nla_free_flow_actions(old_acts); return 0; -error_free_flow: - flow_put(flow); +err_unlock_ovs: + ovs_unlock(); + kfree_skb(reply); +err_kfree_acts: + kfree(acts); error: return error; } @@ -1016,30 +1067,44 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) struct sk_buff *reply; struct sw_flow *flow; struct datapath *dp; - struct flow_table *table; + struct sw_flow_match match; int err; - int key_len; - if (!a[OVS_FLOW_ATTR_KEY]) + if (!a[OVS_FLOW_ATTR_KEY]) { + OVS_NLERR("Flow get message rejected, Key attribute missing.\n"); return -EINVAL; - err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); + } + + ovs_match_init(&match, &key, NULL); + err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL); if (err) return err; - dp = get_dp(ovs_header->dp_ifindex); - if (!dp) - return -ENODEV; + ovs_lock(); + dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); + if (!dp) { + err = -ENODEV; + goto unlock; + } - table = get_table_protected(dp); - flow = flow_tbl_lookup(table, &key, key_len); - if (!flow) - return -ENOENT; + flow = ovs_flow_tbl_lookup(&dp->table, &key); + if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) { + err = -ENOENT; + goto unlock; + } - reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, OVS_FLOW_CMD_NEW); - if (IS_ERR(reply)) - return PTR_ERR(reply); + reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info, + OVS_FLOW_CMD_NEW, true); + if (IS_ERR(reply)) { + err = PTR_ERR(reply); + goto unlock; + } + ovs_unlock(); return genlmsg_reply(reply, info); +unlock: + ovs_unlock(); + return err; } static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) @@ -1050,62 +1115,89 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) struct sk_buff *reply; struct sw_flow *flow; struct datapath *dp; - struct flow_table *table; + struct sw_flow_match match; int err; - int key_len; - if (!a[OVS_FLOW_ATTR_KEY]) - return flush_flows(ovs_header->dp_ifindex); - err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); - if (err) - return err; - - dp = get_dp(ovs_header->dp_ifindex); - if (!dp) - return -ENODEV; - - table = get_table_protected(dp); - flow = flow_tbl_lookup(table, &key, key_len); - if (!flow) - return -ENOENT; - - reply = ovs_flow_cmd_alloc_info(flow); - if (!reply) - return -ENOMEM; + if (likely(a[OVS_FLOW_ATTR_KEY])) { + ovs_match_init(&match, &key, NULL); + err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL); + if (unlikely(err)) + return err; + } - flow_tbl_remove(table, flow); + ovs_lock(); + dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); + if (unlikely(!dp)) { + err = -ENODEV; + goto unlock; + } + if (unlikely(!a[OVS_FLOW_ATTR_KEY])) { + err = ovs_flow_tbl_flush(&dp->table); + goto unlock; + } + flow = ovs_flow_tbl_lookup(&dp->table, &key); + if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) { + err = -ENOENT; + goto unlock; + } - err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid, - info->snd_seq, 0, OVS_FLOW_CMD_DEL); - BUG_ON(err < 0); + ovs_flow_tbl_remove(&dp->table, flow); + ovs_unlock(); + + reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *)flow->sf_acts, + info, false); + + if (likely(reply)) { + if (likely(!IS_ERR(reply))) { + rcu_read_lock(); /* Keep RCU checker happy. 
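
Two subtleties meet here. First, the flow has been unlinked under ovs_lock, so
no new reader can reach it; this rcu_read_lock() exists only to satisfy the
lockdep annotation inside the rcu_dereference() of sf_acts. Second, as in
ovs_flow_cmd_get() above, a masked lookup may return a megaflow that merely
covers the requested key, hence the exact-match re-check:

    flow = ovs_flow_tbl_lookup(&dp->table, &key);
    if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match))
            goto not_found;   // covering entry found, but not this exact flow
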
*/ + err = ovs_flow_cmd_fill_info(flow, + ovs_header->dp_ifindex, + reply, info->snd_portid, + info->snd_seq, 0, + OVS_FLOW_CMD_DEL); + rcu_read_unlock(); + BUG_ON(err < 0); + ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info); + } else { + genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0, + GROUP_ID(&ovs_dp_flow_multicast_group), PTR_ERR(reply)); - flow_deferred_free(flow); + } + } - genl_notify(reply, genl_info_net(info), info->snd_pid, - dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL); + ovs_flow_free(flow, true); return 0; +unlock: + ovs_unlock(); + return err; } static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh)); + struct table_instance *ti; struct datapath *dp; - dp = get_dp(ovs_header->dp_ifindex); - if (!dp) + rcu_read_lock(); + dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); + if (!dp) { + rcu_read_unlock(); return -ENODEV; + } + ti = rcu_dereference(dp->table.ti); for (;;) { struct sw_flow *flow; u32 bucket, obj; bucket = cb->args[0]; obj = cb->args[1]; - flow = flow_tbl_next(get_table_protected(dp), &bucket, &obj); + flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj); if (!flow) break; - if (ovs_flow_cmd_fill_info(flow, dp, skb, NETLINK_CB(cb->skb).pid, + if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb, + NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, OVS_FLOW_CMD_NEW) < 0) break; @@ -1113,14 +1205,21 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) cb->args[0] = bucket; cb->args[1] = obj; } + rcu_read_unlock(); return skb->len; } +static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = { + [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED }, + [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED }, + [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG }, +}; + static struct genl_ops dp_flow_genl_ops[] = { { .cmd = OVS_FLOW_CMD_NEW, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = flow_policy, - .doit = ovs_flow_cmd_new_or_set + .doit = ovs_flow_cmd_new }, { .cmd = OVS_FLOW_CMD_DEL, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ @@ -1136,68 +1235,68 @@ static struct genl_ops dp_flow_genl_ops[] = { { .cmd = OVS_FLOW_CMD_SET, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. 
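
The dump path above is restartable: netlink dump callbacks run repeatedly until
the table is exhausted, and the (bucket, object) cursor survives between calls
in the callback's scratch area:

    bucket = cb->args[0];                 // resume point from the last call
    obj = cb->args[1];
    flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
    // ... fill the skb, stop when it is full ...
    cb->args[0] = bucket;                 // save for the next ->dumpit() call
    cb->args[1] = obj;
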
*/ .policy = flow_policy, - .doit = ovs_flow_cmd_new_or_set, + .doit = ovs_flow_cmd_set, }, }; -static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = { -#ifdef HAVE_NLA_NUL_STRING - [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, -#endif - [OVS_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 }, - [OVS_DP_ATTR_SAMPLING] = { .type = NLA_U32 }, -}; - -static struct genl_family dp_datapath_genl_family = { +static struct genl_family dp_flow_genl_family = { .id = GENL_ID_GENERATE, .hdrsize = sizeof(struct ovs_header), - .name = OVS_DATAPATH_FAMILY, - .version = 1, - .maxattr = OVS_DP_ATTR_MAX + .name = OVS_FLOW_FAMILY, + .version = OVS_FLOW_VERSION, + .maxattr = OVS_FLOW_ATTR_MAX, + .netnsok = true, + .parallel_ops = true, + .ops = dp_flow_genl_ops, + .n_ops = ARRAY_SIZE(dp_flow_genl_ops), + .mcgrps = &ovs_dp_flow_multicast_group, + .n_mcgrps = 1, }; -static struct genl_multicast_group dp_datapath_multicast_group = { - .name = OVS_DATAPATH_MCGROUP -}; +static size_t ovs_dp_cmd_msg_size(void) +{ + size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header)); + + msgsize += nla_total_size(IFNAMSIZ); + msgsize += nla_total_size(sizeof(struct ovs_dp_stats)); + msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats)); + msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */ + return msgsize; +} + +/* Called with ovs_mutex or RCU read lock. */ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, - u32 pid, u32 seq, u32 flags, u8 cmd) + u32 portid, u32 seq, u32 flags, u8 cmd) { struct ovs_header *ovs_header; - struct nlattr *nla; + struct ovs_dp_stats dp_stats; + struct ovs_dp_megaflow_stats dp_megaflow_stats; int err; - ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family, + ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family, flags, cmd); if (!ovs_header) goto error; - ovs_header->dp_ifindex = dp->dp_ifindex; + ovs_header->dp_ifindex = get_dpifindex(dp); - rcu_read_lock(); - err = nla_put_string(skb, OVS_DP_ATTR_NAME, dp_name(dp)); - rcu_read_unlock(); + err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp)); if (err) goto nla_put_failure; - nla = nla_reserve(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats)); - if (!nla) + get_dp_stats(dp, &dp_stats, &dp_megaflow_stats); + if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), + &dp_stats)) goto nla_put_failure; - get_dp_stats(dp, nla_data(nla)); - - NLA_PUT_U32(skb, OVS_DP_ATTR_IPV4_FRAGS, - dp->drop_frags ? 
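/* In the new fill routine above, statistics travel as two fixed-size binary
 * attributes. Keeping the megaflow counters in their own attribute
 * (OVS_DP_ATTR_MEGAFLOW_STATS) means old userspace, which ignores unknown
 * attribute types, still parses the message. Userspace side, libnl-style
 * sketch:
 *
 *     struct ovs_dp_stats s;
 *
 *     if (attrs[OVS_DP_ATTR_STATS])
 *             nla_memcpy(&s, attrs[OVS_DP_ATTR_STATS], sizeof(s));
 */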
OVS_DP_FRAG_DROP : OVS_DP_FRAG_ZERO); - if (dp->sflow_probability) - NLA_PUT_U32(skb, OVS_DP_ATTR_SAMPLING, dp->sflow_probability); + if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS, + sizeof(struct ovs_dp_megaflow_stats), + &dp_megaflow_stats)) + goto nla_put_failure; - nla = nla_nest_start(skb, OVS_DP_ATTR_MCGROUPS); - if (!nla) + if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features)) goto nla_put_failure; - NLA_PUT_U32(skb, OVS_PACKET_CMD_MISS, packet_mc_group(dp, OVS_PACKET_CMD_MISS)); - NLA_PUT_U32(skb, OVS_PACKET_CMD_ACTION, packet_mc_group(dp, OVS_PACKET_CMD_ACTION)); - NLA_PUT_U32(skb, OVS_PACKET_CMD_SAMPLE, packet_mc_group(dp, OVS_PACKET_CMD_SAMPLE)); - nla_nest_end(skb, nla); return genlmsg_end(skb, ovs_header); @@ -1207,61 +1306,45 @@ error: return -EMSGSIZE; } -static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid, - u32 seq, u8 cmd) -{ - struct sk_buff *skb; - int retval; - - skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!skb) - return ERR_PTR(-ENOMEM); - - retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd); - if (retval < 0) { - kfree_skb(skb); - return ERR_PTR(retval); - } - return skb; -} - -static int ovs_dp_cmd_validate(struct nlattr *a[OVS_DP_ATTR_MAX + 1]) +static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info) { - if (a[OVS_DP_ATTR_IPV4_FRAGS]) { - u32 frags = nla_get_u32(a[OVS_DP_ATTR_IPV4_FRAGS]); - - if (frags != OVS_DP_FRAG_ZERO && frags != OVS_DP_FRAG_DROP) - return -EINVAL; - } - - return CHECK_NUL_STRING(a[OVS_DP_ATTR_NAME], IFNAMSIZ - 1); + return genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL); } -/* Called with genl_mutex and optionally with RTNL lock also. */ -static struct datapath *lookup_datapath(struct ovs_header *ovs_header, struct nlattr *a[OVS_DP_ATTR_MAX + 1]) +/* Called with rcu_read_lock or ovs_mutex. */ +static struct datapath *lookup_datapath(struct net *net, + struct ovs_header *ovs_header, + struct nlattr *a[OVS_DP_ATTR_MAX + 1]) { struct datapath *dp; if (!a[OVS_DP_ATTR_NAME]) - dp = get_dp(ovs_header->dp_ifindex); + dp = get_dp(net, ovs_header->dp_ifindex); else { struct vport *vport; - rcu_read_lock(); - vport = vport_locate(nla_data(a[OVS_DP_ATTR_NAME])); + vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME])); dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL; - rcu_read_unlock(); } return dp ? dp : ERR_PTR(-ENODEV); } -/* Called with genl_mutex. 
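
lookup_datapath() above accepts both ways of naming a datapath: a nonzero
dp_ifindex in the ovs_header, or OVS_DP_ATTR_NAME, which resolves through the
vport table because a datapath's name is simply the name of its local
(OVSP_LOCAL) internal port:

    if (!a[OVS_DP_ATTR_NAME])
            dp = get_dp(net, ovs_header->dp_ifindex);   // numeric lookup
    else {
            vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
            dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
    }
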
*/ -static void change_datapath(struct datapath *dp, struct nlattr *a[OVS_DP_ATTR_MAX + 1]) +static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info) +{ + struct datapath *dp; + + dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); + if (IS_ERR(dp)) + return; + + WARN(dp->user_features, "Dropping previously announced user features\n"); + dp->user_features = 0; +} + +static void ovs_dp_change(struct datapath *dp, struct nlattr **a) { - if (a[OVS_DP_ATTR_IPV4_FRAGS]) - dp->drop_frags = nla_get_u32(a[OVS_DP_ATTR_IPV4_FRAGS]) == OVS_DP_FRAG_DROP; - if (a[OVS_DP_ATTR_SAMPLING]) - dp->sflow_probability = nla_get_u32(a[OVS_DP_ATTR_SAMPLING]); + if (a[OVS_DP_ATTR_USER_FEATURES]) + dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]); } static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) @@ -1271,139 +1354,164 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) struct sk_buff *reply; struct datapath *dp; struct vport *vport; - int err; + struct ovs_net *ovs_net; + int err, i; err = -EINVAL; - if (!a[OVS_DP_ATTR_NAME]) + if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID]) goto err; - err = ovs_dp_cmd_validate(a); - if (err) - goto err; - - rtnl_lock(); - err = -ENODEV; - if (!try_module_get(THIS_MODULE)) - goto err_unlock_rtnl; + reply = ovs_dp_cmd_alloc_info(info); + if (!reply) + return -ENOMEM; err = -ENOMEM; dp = kzalloc(sizeof(*dp), GFP_KERNEL); if (dp == NULL) - goto err_put_module; - INIT_LIST_HEAD(&dp->port_list); + goto err_free_reply; - /* Initialize kobject for bridge. This will be added as - * /sys/class/net//brif later, if sysfs is enabled. */ - dp->ifobj.kset = NULL; - kobject_init(&dp->ifobj, &dp_ktype); + ovs_dp_set_net(dp, hold_net(sock_net(skb->sk))); /* Allocate table. */ - err = -ENOMEM; - rcu_assign_pointer(dp->table, flow_tbl_alloc(TBL_MIN_BUCKETS)); - if (!dp->table) + err = ovs_flow_tbl_init(&dp->table); + if (err) goto err_free_dp; + dp->stats_percpu = alloc_percpu(struct dp_stats_percpu); + if (!dp->stats_percpu) { + err = -ENOMEM; + goto err_destroy_table; + } + + for_each_possible_cpu(i) { + struct dp_stats_percpu *dpath_stats; + dpath_stats = per_cpu_ptr(dp->stats_percpu, i); + u64_stats_init(&dpath_stats->sync); + } + + dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head), + GFP_KERNEL); + if (!dp->ports) { + err = -ENOMEM; + goto err_destroy_percpu; + } + + for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) + INIT_HLIST_HEAD(&dp->ports[i]); + /* Set up our datapath device. */ parms.name = nla_data(a[OVS_DP_ATTR_NAME]); parms.type = OVS_VPORT_TYPE_INTERNAL; parms.options = NULL; parms.dp = dp; parms.port_no = OVSP_LOCAL; + parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID]; + + ovs_dp_change(dp, a); + + /* So far only local changes have been made, now need the lock. */ + ovs_lock(); + vport = new_vport(&parms); if (IS_ERR(vport)) { err = PTR_ERR(vport); if (err == -EBUSY) err = -EEXIST; - goto err_destroy_table; - } - dp->dp_ifindex = vport_get_ifindex(vport); + if (err == -EEXIST) { + /* An outdated user space instance that does not understand + * the concept of user_features has attempted to create a new + * datapath and is likely to reuse it. Drop all user features. 
+ */ + if (info->genlhdr->version < OVS_DP_VER_FEATURES) + ovs_dp_reset_user_features(skb, info); + } - dp->drop_frags = 0; - dp->stats_percpu = alloc_percpu(struct dp_stats_percpu); - if (!dp->stats_percpu) { - err = -ENOMEM; - goto err_destroy_local_port; + goto err_destroy_ports_array; } - change_datapath(dp, a); - - reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW); - err = PTR_ERR(reply); - if (IS_ERR(reply)) - goto err_destroy_local_port; + err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, + info->snd_seq, 0, OVS_DP_CMD_NEW); + BUG_ON(err < 0); - list_add_tail(&dp->list_node, &dps); - dp_sysfs_add_dp(dp); + ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id); + list_add_tail_rcu(&dp->list_node, &ovs_net->dps); - rtnl_unlock(); + ovs_unlock(); - genl_notify(reply, genl_info_net(info), info->snd_pid, - dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL); + ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info); return 0; -err_destroy_local_port: - dp_detach_port(get_vport_protected(dp, OVSP_LOCAL)); +err_destroy_ports_array: + ovs_unlock(); + kfree(dp->ports); +err_destroy_percpu: + free_percpu(dp->stats_percpu); err_destroy_table: - flow_tbl_destroy(get_table_protected(dp)); + ovs_flow_tbl_destroy(&dp->table); err_free_dp: + release_net(ovs_dp_get_net(dp)); kfree(dp); -err_put_module: - module_put(THIS_MODULE); -err_unlock_rtnl: - rtnl_unlock(); +err_free_reply: + kfree_skb(reply); err: return err; } +/* Called with ovs_mutex. */ +static void __dp_destroy(struct datapath *dp) +{ + int i; + + for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) { + struct vport *vport; + struct hlist_node *n; + + hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) + if (vport->port_no != OVSP_LOCAL) + ovs_dp_detach_port(vport); + } + + list_del_rcu(&dp->list_node); + + /* OVSP_LOCAL is datapath internal port. We need to make sure that + * all ports in datapath are destroyed first before freeing datapath. + */ + ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL)); + + /* RCU destroy the flow table */ + call_rcu(&dp->rcu, destroy_dp_rcu); +} + static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info) { - struct vport *vport, *next_vport; struct sk_buff *reply; struct datapath *dp; int err; - err = ovs_dp_cmd_validate(info->attrs); - if (err) - goto exit; + reply = ovs_dp_cmd_alloc_info(info); + if (!reply) + return -ENOMEM; - rtnl_lock(); - dp = lookup_datapath(info->userhdr, info->attrs); + ovs_lock(); + dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); err = PTR_ERR(dp); if (IS_ERR(dp)) - goto exit_unlock; - - reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_DEL); - err = PTR_ERR(reply); - if (IS_ERR(reply)) - goto exit_unlock; - - list_for_each_entry_safe (vport, next_vport, &dp->port_list, node) - if (vport->port_no != OVSP_LOCAL) - dp_detach_port(vport); - - dp_sysfs_del_dp(dp); - list_del(&dp->list_node); - dp_detach_port(get_vport_protected(dp, OVSP_LOCAL)); + goto err_unlock_free; - /* rtnl_unlock() will wait until all the references to devices that - * are pending unregistration have been dropped. We do it here to - * ensure that any internal devices (which contain DP pointers) are - * fully destroyed before freeing the datapath. 
- */ - rtnl_unlock(); - - call_rcu(&dp->rcu, destroy_dp_rcu); - module_put(THIS_MODULE); + err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, + info->snd_seq, 0, OVS_DP_CMD_DEL); + BUG_ON(err < 0); - genl_notify(reply, genl_info_net(info), info->snd_pid, - dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL); + __dp_destroy(dp); + ovs_unlock(); + ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info); return 0; -exit_unlock: - rtnl_unlock(); -exit: +err_unlock_free: + ovs_unlock(); + kfree_skb(reply); return err; } @@ -1413,27 +1521,30 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info) struct datapath *dp; int err; - err = ovs_dp_cmd_validate(info->attrs); - if (err) - return err; + reply = ovs_dp_cmd_alloc_info(info); + if (!reply) + return -ENOMEM; - dp = lookup_datapath(info->userhdr, info->attrs); + ovs_lock(); + dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); + err = PTR_ERR(dp); if (IS_ERR(dp)) - return PTR_ERR(dp); + goto err_unlock_free; - change_datapath(dp, info->attrs); + ovs_dp_change(dp, info->attrs); - reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW); - if (IS_ERR(reply)) { - err = PTR_ERR(reply); - netlink_set_err(INIT_NET_GENL_SOCK, 0, - dp_datapath_multicast_group.id, err); - return 0; - } + err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, + info->snd_seq, 0, OVS_DP_CMD_NEW); + BUG_ON(err < 0); - genl_notify(reply, genl_info_net(info), info->snd_pid, - dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL); + ovs_unlock(); + ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info); return 0; + +err_unlock_free: + ovs_unlock(); + kfree_skb(reply); + return err; } static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info) @@ -1442,42 +1553,58 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info) struct datapath *dp; int err; - err = ovs_dp_cmd_validate(info->attrs); - if (err) - return err; - - dp = lookup_datapath(info->userhdr, info->attrs); - if (IS_ERR(dp)) - return PTR_ERR(dp); + reply = ovs_dp_cmd_alloc_info(info); + if (!reply) + return -ENOMEM; - reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW); - if (IS_ERR(reply)) - return PTR_ERR(reply); + rcu_read_lock(); + dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); + if (IS_ERR(dp)) { + err = PTR_ERR(dp); + goto err_unlock_free; + } + err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, + info->snd_seq, 0, OVS_DP_CMD_NEW); + BUG_ON(err < 0); + rcu_read_unlock(); return genlmsg_reply(reply, info); + +err_unlock_free: + rcu_read_unlock(); + kfree_skb(reply); + return err; } static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) { + struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id); struct datapath *dp; int skip = cb->args[0]; int i = 0; - list_for_each_entry (dp, &dps, list_node) { - if (i < skip) - continue; - if (ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid, + rcu_read_lock(); + list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) { + if (i >= skip && + ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, OVS_DP_CMD_NEW) < 0) break; i++; } + rcu_read_unlock(); cb->args[0] = i; return skb->len; } +static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = { + [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, + [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 }, + 
[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 }, +}; + static struct genl_ops dp_datapath_genl_ops[] = { { .cmd = OVS_DP_CMD_NEW, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ @@ -1502,68 +1629,52 @@ static struct genl_ops dp_datapath_genl_ops[] = { }, }; -static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = { -#ifdef HAVE_NLA_NUL_STRING - [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, - [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 }, - [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 }, - [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) }, - [OVS_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN }, -#else - [OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct ovs_vport_stats) }, - [OVS_VPORT_ATTR_ADDRESS] = { .minlen = ETH_ALEN }, -#endif - [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED }, -}; - -static struct genl_family dp_vport_genl_family = { +static struct genl_family dp_datapath_genl_family = { .id = GENL_ID_GENERATE, .hdrsize = sizeof(struct ovs_header), - .name = OVS_VPORT_FAMILY, - .version = 1, - .maxattr = OVS_VPORT_ATTR_MAX -}; - -struct genl_multicast_group dp_vport_multicast_group = { - .name = OVS_VPORT_MCGROUP + .name = OVS_DATAPATH_FAMILY, + .version = OVS_DATAPATH_VERSION, + .maxattr = OVS_DP_ATTR_MAX, + .netnsok = true, + .parallel_ops = true, + .ops = dp_datapath_genl_ops, + .n_ops = ARRAY_SIZE(dp_datapath_genl_ops), + .mcgrps = &ovs_dp_datapath_multicast_group, + .n_mcgrps = 1, }; -/* Called with RTNL lock or RCU read lock. */ +/* Called with ovs_mutex or RCU read lock. */ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb, - u32 pid, u32 seq, u32 flags, u8 cmd) + u32 portid, u32 seq, u32 flags, u8 cmd) { struct ovs_header *ovs_header; - struct nlattr *nla; - int ifindex; + struct ovs_vport_stats vport_stats; int err; - ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family, + ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family, flags, cmd); if (!ovs_header) return -EMSGSIZE; - ovs_header->dp_ifindex = vport->dp->dp_ifindex; - - NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no); - NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport_get_type(vport)); - NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport_get_name(vport)); + ovs_header->dp_ifindex = get_dpifindex(vport->dp); - nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats)); - if (!nla) + if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) || + nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) || + nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport))) goto nla_put_failure; - vport_get_stats(vport, nla_data(nla)); + ovs_vport_get_stats(vport, &vport_stats); + if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats), + &vport_stats)) + goto nla_put_failure; - NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport)); + if (ovs_vport_get_upcall_portids(vport, skb)) + goto nla_put_failure; - err = vport_get_options(vport, skb); + err = ovs_vport_get_options(vport, skb); if (err == -EMSGSIZE) goto error; - ifindex = vport_get_ifindex(vport); - if (ifindex > 0) - NLA_PUT_U32(skb, OVS_VPORT_ATTR_IFINDEX, ifindex); - return genlmsg_end(skb, ovs_header); nla_put_failure: @@ -1573,8 +1684,13 @@ error: return err; } -/* Called with RTNL lock or RCU read lock. 
*/ -struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid, +static struct sk_buff *ovs_vport_cmd_alloc_info(void) +{ + return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); +} + +/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */ +struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid, u32 seq, u8 cmd) { struct sk_buff *skb; @@ -1584,30 +1700,27 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid, if (!skb) return ERR_PTR(-ENOMEM); - retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd); - if (retval < 0) { - kfree_skb(skb); - return ERR_PTR(retval); - } - return skb; -} + retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd); + BUG_ON(retval < 0); -static int ovs_vport_cmd_validate(struct nlattr *a[OVS_VPORT_ATTR_MAX + 1]) -{ - return CHECK_NUL_STRING(a[OVS_VPORT_ATTR_NAME], IFNAMSIZ - 1); + return skb; } -/* Called with RTNL lock or RCU read lock. */ -static struct vport *lookup_vport(struct ovs_header *ovs_header, +/* Called with ovs_mutex or RCU read lock. */ +static struct vport *lookup_vport(struct net *net, + struct ovs_header *ovs_header, struct nlattr *a[OVS_VPORT_ATTR_MAX + 1]) { struct datapath *dp; struct vport *vport; if (a[OVS_VPORT_ATTR_NAME]) { - vport = vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME])); + vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME])); if (!vport) return ERR_PTR(-ENODEV); + if (ovs_header->dp_ifindex && + ovs_header->dp_ifindex != get_dpifindex(vport->dp)) + return ERR_PTR(-ENODEV); return vport; } else if (a[OVS_VPORT_ATTR_PORT_NO]) { u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]); @@ -1615,32 +1728,18 @@ static struct vport *lookup_vport(struct ovs_header *ovs_header, if (port_no >= DP_MAX_PORTS) return ERR_PTR(-EFBIG); - dp = get_dp(ovs_header->dp_ifindex); + dp = get_dp(net, ovs_header->dp_ifindex); if (!dp) return ERR_PTR(-ENODEV); - vport = get_vport_protected(dp, port_no); + vport = ovs_vport_ovsl_rcu(dp, port_no); if (!vport) - return ERR_PTR(-ENOENT); + return ERR_PTR(-ENODEV); return vport; } else return ERR_PTR(-EINVAL); } -/* Called with RTNL lock. */ -static int change_vport(struct vport *vport, struct nlattr *a[OVS_VPORT_ATTR_MAX + 1]) -{ - int err = 0; - - if (a[OVS_VPORT_ATTR_STATS]) - vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS])); - - if (a[OVS_VPORT_ATTR_ADDRESS]) - err = vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS])); - - return err; -} - static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; @@ -1652,38 +1751,37 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) u32 port_no; int err; - err = -EINVAL; - if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE]) - goto exit; + if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] || + !a[OVS_VPORT_ATTR_UPCALL_PID]) + return -EINVAL; - err = ovs_vport_cmd_validate(a); - if (err) - goto exit; + port_no = a[OVS_VPORT_ATTR_PORT_NO] + ? 
nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0; + if (port_no >= DP_MAX_PORTS) + return -EFBIG; + + reply = ovs_vport_cmd_alloc_info(); + if (!reply) + return -ENOMEM; - rtnl_lock(); - dp = get_dp(ovs_header->dp_ifindex); + ovs_lock(); + dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); err = -ENODEV; if (!dp) - goto exit_unlock; - - if (a[OVS_VPORT_ATTR_PORT_NO]) { - port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]); - - err = -EFBIG; - if (port_no >= DP_MAX_PORTS) - goto exit_unlock; + goto exit_unlock_free; - vport = get_vport_protected(dp, port_no); + if (port_no) { + vport = ovs_vport_ovsl(dp, port_no); err = -EBUSY; if (vport) - goto exit_unlock; + goto exit_unlock_free; } else { for (port_no = 1; ; port_no++) { if (port_no >= DP_MAX_PORTS) { err = -EFBIG; - goto exit_unlock; + goto exit_unlock_free; } - vport = get_vport_protected(dp, port_no); + vport = ovs_vport_ovsl(dp, port_no); if (!vport) break; } @@ -1694,32 +1792,28 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) parms.options = a[OVS_VPORT_ATTR_OPTIONS]; parms.dp = dp; parms.port_no = port_no; + parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID]; vport = new_vport(&parms); err = PTR_ERR(vport); if (IS_ERR(vport)) - goto exit_unlock; + goto exit_unlock_free; - dp_sysfs_add_if(vport); + err = 0; + if (a[OVS_VPORT_ATTR_STATS]) + ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS])); - err = change_vport(vport, a); - if (!err) { - reply = ovs_vport_cmd_build_info(vport, info->snd_pid, - info->snd_seq, OVS_VPORT_CMD_NEW); - if (IS_ERR(reply)) - err = PTR_ERR(reply); - } - if (err) { - dp_detach_port(vport); - goto exit_unlock; - } - genl_notify(reply, genl_info_net(info), info->snd_pid, - dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); + err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, + info->snd_seq, 0, OVS_VPORT_CMD_NEW); + BUG_ON(err < 0); + ovs_unlock(); + ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info); + return 0; -exit_unlock: - rtnl_unlock(); -exit: +exit_unlock_free: + ovs_unlock(); + kfree_skb(reply); return err; } @@ -1730,37 +1824,50 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) struct vport *vport; int err; - err = ovs_vport_cmd_validate(a); - if (err) - goto exit; + reply = ovs_vport_cmd_alloc_info(); + if (!reply) + return -ENOMEM; - rtnl_lock(); - vport = lookup_vport(info->userhdr, a); + ovs_lock(); + vport = lookup_vport(sock_net(skb->sk), info->userhdr, a); err = PTR_ERR(vport); if (IS_ERR(vport)) - goto exit_unlock; + goto exit_unlock_free; - err = 0; - if (a[OVS_VPORT_ATTR_OPTIONS]) - err = vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]); - if (!err) - err = change_vport(vport, a); + if (a[OVS_VPORT_ATTR_TYPE] && + nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) { + err = -EINVAL; + goto exit_unlock_free; + } - reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, - OVS_VPORT_CMD_NEW); - if (IS_ERR(reply)) { - err = PTR_ERR(reply); - netlink_set_err(INIT_NET_GENL_SOCK, 0, - dp_vport_multicast_group.id, err); - return 0; + if (a[OVS_VPORT_ATTR_OPTIONS]) { + err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]); + if (err) + goto exit_unlock_free; + } + + if (a[OVS_VPORT_ATTR_STATS]) + ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS])); + + + if (a[OVS_VPORT_ATTR_UPCALL_PID]) { + err = ovs_vport_set_upcall_portids(vport, + a[OVS_VPORT_ATTR_UPCALL_PID]); + if (err) + goto exit_unlock_free; } - genl_notify(reply, genl_info_net(info), 
info->snd_pid, - dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); + err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, + info->snd_seq, 0, OVS_VPORT_CMD_NEW); + BUG_ON(err < 0); + ovs_unlock(); + + ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info); + return 0; -exit_unlock: - rtnl_unlock(); -exit: +exit_unlock_free: + ovs_unlock(); + kfree_skb(reply); return err; } @@ -1771,35 +1878,33 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info) struct vport *vport; int err; - err = ovs_vport_cmd_validate(a); - if (err) - goto exit; + reply = ovs_vport_cmd_alloc_info(); + if (!reply) + return -ENOMEM; - rtnl_lock(); - vport = lookup_vport(info->userhdr, a); + ovs_lock(); + vport = lookup_vport(sock_net(skb->sk), info->userhdr, a); err = PTR_ERR(vport); if (IS_ERR(vport)) - goto exit_unlock; + goto exit_unlock_free; if (vport->port_no == OVSP_LOCAL) { err = -EINVAL; - goto exit_unlock; + goto exit_unlock_free; } - reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, - OVS_VPORT_CMD_DEL); - err = PTR_ERR(reply); - if (IS_ERR(reply)) - goto exit_unlock; - - dp_detach_port(vport); + err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, + info->snd_seq, 0, OVS_VPORT_CMD_DEL); + BUG_ON(err < 0); + ovs_dp_detach_port(vport); + ovs_unlock(); - genl_notify(reply, genl_info_net(info), info->snd_pid, - dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); + ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info); + return 0; -exit_unlock: - rtnl_unlock(); -exit: +exit_unlock_free: + ovs_unlock(); + kfree_skb(reply); return err; } @@ -1811,29 +1916,25 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info) struct vport *vport; int err; - err = ovs_vport_cmd_validate(a); - if (err) - goto exit; + reply = ovs_vport_cmd_alloc_info(); + if (!reply) + return -ENOMEM; rcu_read_lock(); - vport = lookup_vport(ovs_header, a); + vport = lookup_vport(sock_net(skb->sk), ovs_header, a); err = PTR_ERR(vport); if (IS_ERR(vport)) - goto exit_unlock; - - reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, - OVS_VPORT_CMD_NEW); - err = PTR_ERR(reply); - if (IS_ERR(reply)) - goto exit_unlock; - + goto exit_unlock_free; + err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, + info->snd_seq, 0, OVS_VPORT_CMD_NEW); + BUG_ON(err < 0); rcu_read_unlock(); return genlmsg_reply(reply, info); -exit_unlock: +exit_unlock_free: rcu_read_unlock(); -exit: + kfree_skb(reply); return err; } @@ -1841,34 +1942,50 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh)); struct datapath *dp; - u32 port_no; - int retval; - - dp = get_dp(ovs_header->dp_ifindex); - if (!dp) - return -ENODEV; + int bucket = cb->args[0], skip = cb->args[1]; + int i, j = 0; rcu_read_lock(); - for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) { + dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); + if (!dp) { + rcu_read_unlock(); + return -ENODEV; + } + for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) { struct vport *vport; - vport = get_vport_protected(dp, port_no); - if (!vport) - continue; - - if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, - OVS_VPORT_CMD_NEW) < 0) - break; + j = 0; + hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) { + if (j >= skip && + ovs_vport_cmd_fill_info(vport, skb, + NETLINK_CB(cb->skb).portid, + 
cb->nlh->nlmsg_seq, + NLM_F_MULTI, + OVS_VPORT_CMD_NEW) < 0) + goto out; + + j++; + } + skip = 0; } +out: rcu_read_unlock(); - cb->args[0] = port_no; - retval = skb->len; + cb->args[0] = i; + cb->args[1] = j; - return retval; + return skb->len; } +static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = { + [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, + [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) }, + [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 }, + [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 }, + [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 }, + [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED }, +}; + static struct genl_ops dp_vport_genl_ops[] = { { .cmd = OVS_VPORT_CMD_NEW, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ @@ -1893,26 +2010,25 @@ static struct genl_ops dp_vport_genl_ops[] = { }, }; -struct genl_family_and_ops { - struct genl_family *family; - struct genl_ops *ops; - int n_ops; - struct genl_multicast_group *group; +struct genl_family dp_vport_genl_family = { + .id = GENL_ID_GENERATE, + .hdrsize = sizeof(struct ovs_header), + .name = OVS_VPORT_FAMILY, + .version = OVS_VPORT_VERSION, + .maxattr = OVS_VPORT_ATTR_MAX, + .netnsok = true, + .parallel_ops = true, + .ops = dp_vport_genl_ops, + .n_ops = ARRAY_SIZE(dp_vport_genl_ops), + .mcgrps = &ovs_dp_vport_multicast_group, + .n_mcgrps = 1, }; -static const struct genl_family_and_ops dp_genl_families[] = { - { &dp_datapath_genl_family, - dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops), - &dp_datapath_multicast_group }, - { &dp_vport_genl_family, - dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops), - &dp_vport_multicast_group }, - { &dp_flow_genl_family, - dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops), - &dp_flow_multicast_group }, - { &dp_packet_genl_family, - dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops), - NULL }, +static struct genl_family *dp_genl_families[] = { + &dp_datapath_genl_family, + &dp_vport_genl_family, + &dp_flow_genl_family, + &dp_packet_genl_family, }; static void dp_unregister_genl(int n_families) @@ -1920,67 +2036,84 @@ static void dp_unregister_genl(int n_families) int i; for (i = 0; i < n_families; i++) - genl_unregister_family(dp_genl_families[i].family); + genl_unregister_family(dp_genl_families[i]); } static int dp_register_genl(void) { - int n_registered; int err; int i; - n_registered = 0; for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) { - const struct genl_family_and_ops *f = &dp_genl_families[i]; - err = genl_register_family_with_ops(f->family, f->ops, - f->n_ops); + err = genl_register_family(dp_genl_families[i]); if (err) goto error; - n_registered++; - - if (f->group) { - err = genl_register_mc_group(f->family, f->group); - if (err) - goto error; - } } - err = packet_register_mc_groups(); - if (err) - goto error; return 0; error: - dp_unregister_genl(n_registered); + dp_unregister_genl(i); return err; } +static int __net_init ovs_init_net(struct net *net) +{ + struct ovs_net *ovs_net = net_generic(net, ovs_net_id); + + INIT_LIST_HEAD(&ovs_net->dps); + INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq); + return 0; +} + +static void __net_exit ovs_exit_net(struct net *net) +{ + struct datapath *dp, *dp_next; + struct ovs_net *ovs_net = net_generic(net, ovs_net_id); + + ovs_lock(); + list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node) + __dp_destroy(dp); + ovs_unlock(); + + cancel_work_sync(&ovs_net->dp_notify_work); +} + +static struct pernet_operations ovs_net_ops = { + .init = 
ovs_init_net, + .exit = ovs_exit_net, + .id = &ovs_net_id, + .size = sizeof(struct ovs_net), +}; + +DEFINE_COMPAT_PNET_REG_FUNC(device); + static int __init dp_init(void) { - struct sk_buff *dummy_skb; int err; - BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb)); + BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb)); - printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR); + pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n", + VERSION); - err = tnl_init(); + err = ovs_flow_init(); if (err) goto error; - err = flow_init(); - if (err) - goto error_tnl_exit; - - err = vport_init(); + err = ovs_vport_init(); if (err) goto error_flow_exit; - err = register_netdevice_notifier(&dp_device_notifier); + err = register_pernet_device(&ovs_net_ops); if (err) goto error_vport_exit; + err = register_netdevice_notifier(&ovs_dp_device_notifier); + if (err) + goto error_netns_exit; + err = dp_register_genl(); if (err < 0) goto error_unreg_notifier; @@ -1988,25 +2121,25 @@ static int __init dp_init(void) return 0; error_unreg_notifier: - unregister_netdevice_notifier(&dp_device_notifier); + unregister_netdevice_notifier(&ovs_dp_device_notifier); +error_netns_exit: + unregister_pernet_device(&ovs_net_ops); error_vport_exit: - vport_exit(); + ovs_vport_exit(); error_flow_exit: - flow_exit(); -error_tnl_exit: - tnl_exit(); + ovs_flow_exit(); error: return err; } static void dp_cleanup(void) { - rcu_barrier(); dp_unregister_genl(ARRAY_SIZE(dp_genl_families)); - unregister_netdevice_notifier(&dp_device_notifier); - vport_exit(); - flow_exit(); - tnl_exit(); + unregister_netdevice_notifier(&ovs_dp_device_notifier); + unregister_pernet_device(&ovs_net_ops); + rcu_barrier(); + ovs_vport_exit(); + ovs_flow_exit(); } module_init(dp_init); @@ -2014,3 +2147,4 @@ module_exit(dp_cleanup); MODULE_DESCRIPTION("Open vSwitch switching datapath"); MODULE_LICENSE("GPL"); +MODULE_VERSION(VERSION);
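
The conversions in this diff follow a handful of recurring kernel idioms; the sketches below restate them in self-contained form. Every identifier prefixed foo_ is a hypothetical placeholder, not part of this patch. First, the consolidated generic-netlink registration that replaces the old genl_family_and_ops table: operations and multicast groups become members of the family itself, so one genl_register_family() call (and one genl_unregister_family() on the error path) covers what previously took separate genl_register_family_with_ops() and genl_register_mc_group() calls.

#include <net/genetlink.h>

static int foo_cmd_get(struct sk_buff *skb, struct genl_info *info);

static struct genl_ops foo_genl_ops[] = {
	{ .cmd = 1,
	  .doit = foo_cmd_get,
	},
};

static struct genl_multicast_group foo_multicast_group = {
	.name = "foo_mc",
};

static struct genl_family foo_genl_family = {
	.id = GENL_ID_GENERATE,		/* let the genl core pick an id */
	.hdrsize = 0,
	.name = "foo",
	.version = 1,
	.maxattr = 1,
	.netnsok = true,		/* usable from any network namespace */
	.parallel_ops = true,		/* handlers run without genl_mutex */
	.ops = foo_genl_ops,
	.n_ops = ARRAY_SIZE(foo_genl_ops),
	.mcgrps = &foo_multicast_group,
	.n_mcgrps = 1,
};

/* Registration then reduces to a single call from module init:
 *	err = genl_register_family(&foo_genl_family);
 */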
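Second, the command-handler shape that ovs_dp_cmd_set(), ovs_dp_cmd_del() and the vport handlers converge on: allocate an exactly-sized reply before taking the lock or touching any state, and treat a failure to fill that reply as a bug rather than an error path. A minimal sketch, assuming a foo_msg_size() helper that sums nla_total_size() for every attribute the fill routine emits (the patch's genlmsg_new_unicast() is a compat wrapper; plain genlmsg_new() is used here):

#include <linux/err.h>
#include <net/genetlink.h>

struct foo;

/* Placeholders standing in for the lookup/locking/fill helpers. */
static void foo_lock(void);
static void foo_unlock(void);
static size_t foo_msg_size(void);
static struct foo *foo_lookup(struct genl_info *info);
static void foo_apply_change(struct foo *obj, struct nlattr **attrs);
static int foo_fill_info(struct foo *obj, struct sk_buff *reply,
			 u32 portid, u32 seq);

static int foo_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct foo *obj;
	int err;

	/* Allocate the reply before modifying anything, so -ENOMEM can
	 * be returned while the operation is still side-effect free. */
	reply = genlmsg_new(foo_msg_size(), GFP_KERNEL);
	if (!reply)
		return -ENOMEM;

	foo_lock();
	obj = foo_lookup(info);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_unlock_free;
	}

	foo_apply_change(obj, info->attrs);

	/* The reply was sized exactly by foo_msg_size(); a fill failure
	 * can only be a size miscalculation, hence BUG_ON. */
	err = foo_fill_info(obj, reply, info->snd_portid, info->snd_seq);
	BUG_ON(err < 0);
	foo_unlock();

	return genlmsg_reply(reply, info);

err_unlock_free:
	foo_unlock();
	kfree_skb(reply);
	return err;
}

Pre-sizing makes the fill step infallible by construction: once the state change has been applied there is no clean way to report an allocation failure to userspace, so the allocation is moved ahead of the change instead.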
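Third, the resumable dump over a hash table, as in the new ovs_vport_cmd_dump(): netlink dumps are restartable callbacks, and two cursor slots in cb->args record the bucket and the offset within it, so a dump that fills its skb resumes exactly where it stopped on the next invocation. A sketch over a hypothetical foo_table:

#include <linux/rculist.h>
#include <net/netlink.h>

#define FOO_HASH_BUCKETS 1024

struct foo_entry {
	struct hlist_node node;
	u32 id;
};

static struct hlist_head foo_table[FOO_HASH_BUCKETS];

/* Placeholder: emits one entry into the dump skb, < 0 when skb is full. */
static int foo_fill_info(struct foo_entry *e, struct sk_buff *skb,
			 struct netlink_callback *cb);

static int foo_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int bucket = cb->args[0], skip = cb->args[1];
	int i, j = 0;

	rcu_read_lock();
	for (i = bucket; i < FOO_HASH_BUCKETS; i++) {
		struct foo_entry *e;

		j = 0;
		hlist_for_each_entry_rcu(e, &foo_table[i], node) {
			if (j >= skip && foo_fill_info(e, skb, cb) < 0)
				goto out;	/* skb full; resume here next call */
			j++;
		}
		skip = 0;	/* the skip offset applies only to the first bucket */
	}
out:
	rcu_read_unlock();
	cb->args[0] = i;	/* bucket to resume from */
	cb->args[1] = j;	/* entries already dumped in that bucket */

	return skb->len;	/* non-zero: netlink core will call again */
}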
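Finally, the per-network-namespace state behind ovs_net_id and ovs_net_ops: giving pernet_operations an .id and a .size makes the core allocate the private struct for every namespace and hand back a slot index, so net_generic(net, id) retrieves it with no bookkeeping in the module. A sketch under the same assumptions (foo_ names are placeholders):

#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static int foo_net_id __read_mostly;

struct foo_net {
	struct list_head instances;	/* this namespace's objects */
};

static int __net_init foo_init_net(struct net *net)
{
	struct foo_net *fn = net_generic(net, foo_net_id);

	INIT_LIST_HEAD(&fn->instances);
	return 0;
}

static void __net_exit foo_exit_net(struct net *net)
{
	/* Tear down everything still registered in this namespace;
	 * for the datapath this is the __dp_destroy() loop above. */
}

static struct pernet_operations foo_net_ops = {
	.init = foo_init_net,
	.exit = foo_exit_net,
	.id   = &foo_net_id,
	.size = sizeof(struct foo_net),
};

/* From module init: err = register_pernet_device(&foo_net_ops);
 * with a matching unregister_pernet_device() on cleanup, as in
 * dp_init()/dp_cleanup() above. */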