datapath: Add support for kernel 3.14.
datapath/datapath.c
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "datapath.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "vlan.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

int ovs_net_id __read_mostly;

static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
        .name = OVS_FLOW_MCGROUP
};

static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
        .name = OVS_DATAPATH_MCGROUP
};

struct genl_multicast_group ovs_dp_vport_multicast_group = {
        .name = OVS_VPORT_MCGROUP
};

/* Check if we need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
static bool ovs_must_notify(struct genl_info *info,
                            const struct genl_multicast_group *grp)
{
        return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
                netlink_has_listeners(genl_info_net(info)->genl_sock, GROUP_ID(grp));
}

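/* Send a Netlink notification for 'family' to 'grp', echoing it back to the
 * requesting process when the original request asked for a reply. */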
static void ovs_notify(struct genl_family *family, struct genl_multicast_group *grp,
                       struct sk_buff *skb, struct genl_info *info)
{
        genl_notify(family, skb, genl_info_net(info),
                    info->snd_portid, GROUP_ID(grp), info->nlhdr, GFP_KERNEL);
}

/**
 * DOC: Locking:
 *
 * All writes, i.e. writes to device state (add/remove datapath or port, set
 * operations on vports, etc.) and writes to other state (flow table
 * modifications, setting miscellaneous datapath parameters, etc.), are
 * protected by ovs_mutex.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */

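/* Illustrative sketch only (not code used by the datapath): the typical
 * shape of a writer and a reader under this locking scheme.  The bodies
 * stand in for the operations described above.
 *
 *	ovs_lock();
 *	...add/remove ports, modify the flow table...
 *	ovs_unlock();
 *
 *	rcu_read_lock();
 *	...look up datapaths, vports or flows...
 *	rcu_read_unlock();
 */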
static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
        mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
        mutex_unlock(&ovs_mutex);
}

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
        if (debug_locks)
                return lockdep_is_held(&ovs_mutex);
        else
                return 1;
}
#endif

static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
                             const struct dp_upcall_info *);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
                                  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock or ovs_mutex. */
static struct datapath *get_dp(struct net *net, int dp_ifindex)
{
        struct datapath *dp = NULL;
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, dp_ifindex);
        if (dev) {
                struct vport *vport = ovs_internal_dev_get_vport(dev);
                if (vport)
                        dp = vport->dp;
        }
        rcu_read_unlock();

        return dp;
}

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
        struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
        return vport->ops->get_name(vport);
}

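/* Return the ifindex of the datapath's local port, or 0 if it has none. */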
static int get_dpifindex(struct datapath *dp)
{
        struct vport *local;
        int ifindex;

        rcu_read_lock();

        local = ovs_vport_rcu(dp, OVSP_LOCAL);
        if (local)
                ifindex = netdev_vport_priv(local)->dev->ifindex;
        else
                ifindex = 0;

        rcu_read_unlock();

        return ifindex;
}

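/* RCU callback that frees a datapath and everything hanging off it once all
 * readers are done with it. */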
static void destroy_dp_rcu(struct rcu_head *rcu)
{
        struct datapath *dp = container_of(rcu, struct datapath, rcu);

        ovs_flow_tbl_destroy(&dp->table);
        free_percpu(dp->stats_percpu);
        release_net(ovs_dp_get_net(dp));
        kfree(dp->ports);
        kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
                                            u16 port_no)
{
        return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}

/* Called with ovs_mutex or RCU read lock. */
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
        struct vport *vport;
        struct hlist_head *head;

        head = vport_hash_bucket(dp, port_no);
        hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
                if (vport->port_no == port_no)
                        return vport;
        }
        return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
        struct vport *vport;

        vport = ovs_vport_add(parms);
        if (!IS_ERR(vport)) {
                struct datapath *dp = parms->dp;
                struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

                hlist_add_head_rcu(&vport->dp_hash_node, head);
        }
        return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
        ASSERT_OVSL();

        /* First drop references to device. */
        hlist_del_rcu(&p->dp_hash_node);

        /* Then destroy it. */
        ovs_vport_del(p);
}

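/* Look up the flow for 'skb' using 'pkt_key', execute its actions and update
 * the datapath statistics.  Packets with no matching flow are queued to
 * userspace as OVS_PACKET_CMD_MISS upcalls. */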
void ovs_dp_process_packet_with_key(struct sk_buff *skb,
                                    struct sw_flow_key *pkt_key,
                                    bool recirc)
{
        const struct vport *p = OVS_CB(skb)->input_vport;
        struct datapath *dp = p->dp;
        struct sw_flow *flow;
        struct dp_stats_percpu *stats;
        u64 *stats_counter;
        u32 n_mask_hit;

        stats = this_cpu_ptr(dp->stats_percpu);

        /* Look up flow. */
        flow = ovs_flow_tbl_lookup_stats(&dp->table, pkt_key, skb_get_hash(skb),
                                         &n_mask_hit);
        if (unlikely(!flow)) {
                struct dp_upcall_info upcall;

                upcall.cmd = OVS_PACKET_CMD_MISS;
                upcall.key = pkt_key;
                upcall.userdata = NULL;
                upcall.portid = ovs_vport_find_upcall_portid(p, skb);
                ovs_dp_upcall(dp, skb, &upcall);
                consume_skb(skb);
                stats_counter = &stats->n_missed;
                goto out;
        }

        OVS_CB(skb)->pkt_key = pkt_key;
        OVS_CB(skb)->flow = flow;

        ovs_flow_stats_update(OVS_CB(skb)->flow, pkt_key->tp.flags, skb);
        ovs_execute_actions(dp, skb, recirc);
        stats_counter = &stats->n_hit;

out:
        /* Update datapath statistics. */
        u64_stats_update_begin(&stats->sync);
        (*stats_counter)++;
        stats->n_mask_hit += n_mask_hit;
        u64_stats_update_end(&stats->sync);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
        int error;
        struct sw_flow_key key;

        OVS_CB(skb)->input_vport = p;

        /* Extract flow from 'skb' into 'key'. */
        error = ovs_flow_extract(skb, p->port_no, &key);
        if (unlikely(error)) {
                kfree_skb(skb);
                return;
        }

        ovs_dp_process_packet_with_key(skb, &key, false);
}

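/* Queue 'skb' to the userspace process identified by upcall_info->portid,
 * segmenting it first if it is GSO.  On failure the per-CPU n_lost counter
 * is updated and a negative errno is returned. */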
int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
                  const struct dp_upcall_info *upcall_info)
{
        struct dp_stats_percpu *stats;
        int err;

        if (upcall_info->portid == 0) {
                err = -ENOTCONN;
                goto err;
        }

        if (!skb_is_gso(skb))
                err = queue_userspace_packet(dp, skb, upcall_info);
        else
                err = queue_gso_packets(dp, skb, upcall_info);
        if (err)
                goto err;

        return 0;

err:
        stats = this_cpu_ptr(dp->stats_percpu);

        u64_stats_update_begin(&stats->sync);
        stats->n_lost++;
        u64_stats_update_end(&stats->sync);

        return err;
}

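/* Software-segment a GSO 'skb' and queue each resulting segment to userspace
 * as a separate upcall. */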
static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
                             const struct dp_upcall_info *upcall_info)
{
        unsigned short gso_type = skb_shinfo(skb)->gso_type;
        struct dp_upcall_info later_info;
        struct sw_flow_key later_key;
        struct sk_buff *segs, *nskb;
        int err;

        segs = __skb_gso_segment(skb, NETIF_F_SG, false);
        if (IS_ERR(segs))
                return PTR_ERR(segs);

        /* Queue all of the segments. */
        skb = segs;
        do {
                err = queue_userspace_packet(dp, skb, upcall_info);
                if (err)
                        break;

                if (skb == segs && gso_type & SKB_GSO_UDP) {
                        /* The initial flow key extracted by ovs_flow_extract()
                         * in this case is for the first fragment, so we need
                         * to properly mark later fragments.
                         */
                        later_key = *upcall_info->key;
                        later_key.ip.frag = OVS_FRAG_TYPE_LATER;

                        later_info = *upcall_info;
                        later_info.key = &later_key;
                        upcall_info = &later_info;
                }
        } while ((skb = skb->next));

        /* Free all of the segments. */
        skb = segs;
        do {
                nskb = skb->next;
                if (err)
                        kfree_skb(skb);
                else
                        consume_skb(skb);
        } while ((skb = nskb));
        return err;
}

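/* Worst-case Netlink attribute size of a flow key, used to size upcall and
 * flow messages before they are filled in. */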
static size_t key_attr_size(void)
{
        return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
                + nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
                  + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
                  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
                  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
                  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
                  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
                  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
                  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
                + nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
                + nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
                + nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
                + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
                + nla_total_size(4)   /* OVS_KEY_ATTR_8021Q */
                + nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
                + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
                + nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
                + nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
                + nla_total_size(28); /* OVS_KEY_ATTR_ND */
}

static size_t upcall_msg_size(const struct nlattr *userdata,
                              unsigned int hdrlen)
{
        size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
                + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
                + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */

        /* OVS_PACKET_ATTR_USERDATA */
        if (userdata)
                size += NLA_ALIGN(userdata->nla_len);

        return size;
}

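/* Build an upcall Netlink message around 'skb' (flow key, optional userdata
 * and the packet itself, zero-copied when possible) and unicast it to the
 * socket given by upcall_info->portid. */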
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
                                  const struct dp_upcall_info *upcall_info)
{
        struct ovs_header *upcall;
        struct sk_buff *nskb = NULL;
        struct sk_buff *user_skb; /* to be queued to userspace */
        struct nlattr *nla;
        struct genl_info info = {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
                .dst_sk = ovs_dp_get_net(dp)->genl_sock,
#endif
                .snd_portid = upcall_info->portid,
        };
        size_t len;
        unsigned int hlen;
        int err, dp_ifindex;

        dp_ifindex = get_dpifindex(dp);
        if (!dp_ifindex)
                return -ENODEV;

        if (vlan_tx_tag_present(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return -ENOMEM;

                nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
                if (!nskb)
                        return -ENOMEM;

                vlan_set_tci(nskb, 0);

                skb = nskb;
        }

        if (nla_attr_size(skb->len) > USHRT_MAX) {
                err = -EFBIG;
                goto out;
        }

        /* Complete checksum if needed */
        if (skb->ip_summed == CHECKSUM_PARTIAL &&
            (err = skb_checksum_help(skb)))
                goto out;

        /* Older versions of OVS user space enforce alignment of the last
         * Netlink attribute to NLA_ALIGNTO which would require extensive
         * padding logic. Only perform zerocopy if padding is not required.
         */
        if (dp->user_features & OVS_DP_F_UNALIGNED)
                hlen = skb_zerocopy_headlen(skb);
        else
                hlen = skb->len;

        len = upcall_msg_size(upcall_info->userdata, hlen);
        user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
        if (!user_skb) {
                err = -ENOMEM;
                goto out;
        }

        upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
                             0, upcall_info->cmd);
        upcall->dp_ifindex = dp_ifindex;

        nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
        ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
        nla_nest_end(user_skb, nla);

        if (upcall_info->userdata)
                __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
                          nla_len(upcall_info->userdata),
                          nla_data(upcall_info->userdata));

        /* Only reserve room for attribute header, packet data is added
         * in skb_zerocopy() */
        if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
                err = -ENOBUFS;
                goto out;
        }
        nla->nla_len = nla_attr_size(skb->len);

        err = skb_zerocopy(user_skb, skb, skb->len, hlen);
        if (err)
                goto out;

        /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
        if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
                size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;

                if (plen > 0)
                        memset(skb_put(user_skb, plen), 0, plen);
        }

        ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;

        err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
out:
        if (err)
                skb_tx_error(skb);
        kfree_skb(nskb);
        return err;
}

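/* Handler for OVS_PACKET_CMD_EXECUTE: take a packet supplied by userspace,
 * rebuild its flow key and actions, and inject it into the datapath as if it
 * had been received on the specified port. */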
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
        struct ovs_header *ovs_header = info->userhdr;
        struct nlattr **a = info->attrs;
        struct sw_flow_actions *acts;
        struct sk_buff *packet;
        struct sw_flow *flow;
        struct datapath *dp;
        struct ethhdr *eth;
        struct vport *input_vport;
        int len;
        int err;

        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
            !a[OVS_PACKET_ATTR_ACTIONS])
                goto err;

        len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
        packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
        err = -ENOMEM;
        if (!packet)
                goto err;
        skb_reserve(packet, NET_IP_ALIGN);

        nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

        skb_reset_mac_header(packet);
        eth = eth_hdr(packet);

        /* Normally, setting the skb 'protocol' field would be handled by a
         * call to eth_type_trans(), but it assumes there's a sending
         * device, which we may not have. */
        if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
                packet->protocol = eth->h_proto;
        else
                packet->protocol = htons(ETH_P_802_2);

        /* Build an sw_flow for sending this packet. */
        flow = ovs_flow_alloc();
        err = PTR_ERR(flow);
        if (IS_ERR(flow))
                goto err_kfree_skb;

        err = ovs_flow_extract(packet, -1, &flow->key);
        if (err)
                goto err_flow_free;

        err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
        if (err)
                goto err_flow_free;
        acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
        err = PTR_ERR(acts);
        if (IS_ERR(acts))
                goto err_flow_free;

        err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
                                   &flow->key, 0, &acts);
        rcu_assign_pointer(flow->sf_acts, acts);
        if (err)
                goto err_flow_free;

        OVS_CB(packet)->flow = flow;
        OVS_CB(packet)->pkt_key = &flow->key;
        packet->priority = flow->key.phy.priority;
        packet->mark = flow->key.phy.skb_mark;

        rcu_read_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto err_unlock;

        input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
        if (!input_vport)
                input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);

        if (!input_vport)
                goto err_unlock;

        OVS_CB(packet)->input_vport = input_vport;

        local_bh_disable();
        err = ovs_execute_actions(dp, packet, false);
        local_bh_enable();
        rcu_read_unlock();

        ovs_flow_free(flow, false);
        return err;

err_unlock:
        rcu_read_unlock();
err_flow_free:
        ovs_flow_free(flow, false);
err_kfree_skb:
        kfree_skb(packet);
err:
        return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static struct genl_ops dp_packet_genl_ops[] = {
        { .cmd = OVS_PACKET_CMD_EXECUTE,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = packet_policy,
          .doit = ovs_packet_cmd_execute
        }
};

static struct genl_family dp_packet_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_PACKET_FAMILY,
        .version = OVS_PACKET_VERSION,
        .maxattr = OVS_PACKET_ATTR_MAX,
        .netnsok = true,
        .parallel_ops = true,
        .ops = dp_packet_genl_ops,
        .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
};

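/* Aggregate the per-CPU datapath counters into 'stats' and 'mega_stats',
 * using the u64_stats sequence counter to obtain consistent snapshots. */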
static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
                         struct ovs_dp_megaflow_stats *mega_stats)
{
        int i;

        memset(mega_stats, 0, sizeof(*mega_stats));

        stats->n_flows = ovs_flow_tbl_count(&dp->table);
        mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

        stats->n_hit = stats->n_missed = stats->n_lost = 0;

        for_each_possible_cpu(i) {
                const struct dp_stats_percpu *percpu_stats;
                struct dp_stats_percpu local_stats;
                unsigned int start;

                percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

                do {
                        start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
                        local_stats = *percpu_stats;
                } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

                stats->n_hit += local_stats.n_hit;
                stats->n_missed += local_stats.n_missed;
                stats->n_lost += local_stats.n_lost;
                mega_stats->n_mask_hit += local_stats.n_mask_hit;
        }
}

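/* Upper bound on the size of a flow Netlink message, used to size replies
 * and notifications. */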
static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
{
        return NLMSG_ALIGN(sizeof(struct ovs_header))
                + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
                + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
                + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
                + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
                + nla_total_size(8) /* OVS_FLOW_ATTR_USED */
                + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
                                  struct sk_buff *skb, u32 portid,
                                  u32 seq, u32 flags, u8 cmd)
{
        const int skb_orig_len = skb->len;
        struct nlattr *start;
        struct ovs_flow_stats stats;
        __be16 tcp_flags;
        unsigned long used;
        struct ovs_header *ovs_header;
        struct nlattr *nla;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = dp_ifindex;

        /* Fill flow key. */
        nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
        if (!nla)
                goto nla_put_failure;

        err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
        if (err)
                goto error;
        nla_nest_end(skb, nla);

        nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
        if (!nla)
                goto nla_put_failure;

        err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
        if (err)
                goto error;

        nla_nest_end(skb, nla);

        ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);

        if (used &&
            nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
                goto nla_put_failure;

        if (stats.n_packets &&
            nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
                goto nla_put_failure;

        if ((u8)ntohs(tcp_flags) &&
             nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
                goto nla_put_failure;

        /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
         * this is the first flow to be dumped into 'skb'.  This is unusual for
         * Netlink but individual action lists can be longer than
         * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
         * The userspace caller can always fetch the actions separately if it
         * really wants them.  (Most userspace callers in fact don't care.)
         *
         * This can only fail for dump operations because the skb is always
         * properly sized for single flows.
         */
        start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
        if (start) {
                const struct sw_flow_actions *sf_acts;

                sf_acts = rcu_dereference_ovsl(flow->sf_acts);
                err = ovs_nla_put_actions(sf_acts->actions,
                                          sf_acts->actions_len, skb);

                if (!err)
                        nla_nest_end(skb, start);
                else {
                        if (skb_orig_len)
                                goto error;

                        nla_nest_cancel(skb, start);
                }
        } else if (skb_orig_len)
                goto nla_put_failure;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}

/* May not be called with RCU read lock. */
static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
                                               struct genl_info *info,
                                               bool always)
{
        struct sk_buff *skb;

        if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group))
                return NULL;

        skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);

        if (!skb)
                return ERR_PTR(-ENOMEM);

        return skb;
}

/* Called with ovs_mutex. */
static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
                                               int dp_ifindex,
                                               struct genl_info *info, u8 cmd,
                                               bool always)
{
        struct sk_buff *skb;
        int retval;

        skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
                                      always);
        if (!skb || IS_ERR(skb))
                return skb;

        retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
                                        info->snd_portid, info->snd_seq, 0,
                                        cmd);
        BUG_ON(retval < 0);
        return skb;
}

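/* Handler for OVS_FLOW_CMD_NEW: insert a new flow, or update the actions of
 * an existing flow when the request allows it. */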
static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow *flow, *new_flow;
        struct sw_flow_mask mask;
        struct sk_buff *reply;
        struct datapath *dp;
        struct sw_flow_actions *acts;
        struct sw_flow_match match;
        int error;

        /* Must have key and actions. */
        error = -EINVAL;
        if (!a[OVS_FLOW_ATTR_KEY])
                goto error;
        if (!a[OVS_FLOW_ATTR_ACTIONS])
                goto error;

        /* Most of the time we need to allocate a new flow, so allocate it
         * before taking the lock. */
        new_flow = ovs_flow_alloc();
        if (IS_ERR(new_flow)) {
                error = PTR_ERR(new_flow);
                goto error;
        }

        /* Extract key. */
        ovs_match_init(&match, &new_flow->unmasked_key, &mask);
        error = ovs_nla_get_match(&match,
                                  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
        if (error)
                goto err_kfree_flow;

        ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);

        /* Validate actions. */
        acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
        error = PTR_ERR(acts);
        if (IS_ERR(acts))
                goto err_kfree_flow;

        error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
                                     0, &acts);
        if (error) {
                OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
                goto err_kfree_acts;
        }

        reply = ovs_flow_cmd_alloc_info(acts, info, false);
        if (IS_ERR(reply)) {
                error = PTR_ERR(reply);
                goto err_kfree_acts;
        }

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (unlikely(!dp)) {
                error = -ENODEV;
                goto err_unlock_ovs;
        }
        /* Check if this is a duplicate flow */
        flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->unmasked_key);
        if (likely(!flow)) {
                rcu_assign_pointer(new_flow->sf_acts, acts);

                /* Put flow in bucket. */
                error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
                if (unlikely(error)) {
                        acts = NULL;
                        goto err_unlock_ovs;
                }

                if (unlikely(reply)) {
                        error = ovs_flow_cmd_fill_info(new_flow,
                                                       ovs_header->dp_ifindex,
                                                       reply, info->snd_portid,
                                                       info->snd_seq, 0,
                                                       OVS_FLOW_CMD_NEW);
                        BUG_ON(error < 0);
                }
                ovs_unlock();
        } else {
                struct sw_flow_actions *old_acts;

                /* Bail out if we're not allowed to modify an existing flow.
                 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
                 * because Generic Netlink treats the latter as a dump
                 * request.  We also accept NLM_F_EXCL in case that bug ever
                 * gets fixed.
                 */
                if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
                                                         | NLM_F_EXCL))) {
                        error = -EEXIST;
                        goto err_unlock_ovs;
                }
                /* The unmasked key has to be the same for flow updates. */
                if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
                        error = -EEXIST;
                        goto err_unlock_ovs;
                }
                /* Update actions. */
                old_acts = ovsl_dereference(flow->sf_acts);
                rcu_assign_pointer(flow->sf_acts, acts);

                if (unlikely(reply)) {
                        error = ovs_flow_cmd_fill_info(flow,
                                                       ovs_header->dp_ifindex,
                                                       reply, info->snd_portid,
                                                       info->snd_seq, 0,
                                                       OVS_FLOW_CMD_NEW);
                        BUG_ON(error < 0);
                }
                ovs_unlock();

                ovs_nla_free_flow_actions(old_acts);
                ovs_flow_free(new_flow, false);
        }

        if (reply)
                ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
        return 0;

err_unlock_ovs:
        ovs_unlock();
        kfree_skb(reply);
err_kfree_acts:
        kfree(acts);
err_kfree_flow:
        ovs_flow_free(new_flow, false);
error:
        return error;
}

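/* Handler for OVS_FLOW_CMD_SET: update the actions and/or clear the stats of
 * an existing flow. */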
static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key, masked_key;
        struct sw_flow *flow;
        struct sw_flow_mask mask;
        struct sk_buff *reply = NULL;
        struct datapath *dp;
        struct sw_flow_actions *old_acts = NULL, *acts = NULL;
        struct sw_flow_match match;
        int error;

        /* Extract key. */
        error = -EINVAL;
        if (!a[OVS_FLOW_ATTR_KEY])
                goto error;

        ovs_match_init(&match, &key, &mask);
        error = ovs_nla_get_match(&match,
                                  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
        if (error)
                goto error;

        /* Validate actions. */
        if (a[OVS_FLOW_ATTR_ACTIONS]) {
                acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
                error = PTR_ERR(acts);
                if (IS_ERR(acts))
                        goto error;

                ovs_flow_mask_key(&masked_key, &key, &mask);
                error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
                                             &masked_key, 0, &acts);
                if (error) {
                        OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
                        goto err_kfree_acts;
                }
        }

        /* The reply can be allocated before taking the lock when there are
         * actions. */
        if (acts) {
                reply = ovs_flow_cmd_alloc_info(acts, info, false);
                if (IS_ERR(reply)) {
                        error = PTR_ERR(reply);
                        goto err_kfree_acts;
                }
        }

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (unlikely(!dp)) {
                error = -ENODEV;
                goto err_unlock_ovs;
        }
        /* Check that the flow exists. */
        flow = ovs_flow_tbl_lookup(&dp->table, &key);
        if (unlikely(!flow)) {
                error = -ENOENT;
                goto err_unlock_ovs;
        }
        /* The unmasked key has to be the same for flow updates. */
        if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
                error = -EEXIST;
                goto err_unlock_ovs;
        }
        /* Update actions, if present. */
        if (likely(acts)) {
                old_acts = ovsl_dereference(flow->sf_acts);
                rcu_assign_pointer(flow->sf_acts, acts);

                if (unlikely(reply)) {
                        error = ovs_flow_cmd_fill_info(flow,
                                                       ovs_header->dp_ifindex,
                                                       reply, info->snd_portid,
                                                       info->snd_seq, 0,
                                                       OVS_FLOW_CMD_NEW);
                        BUG_ON(error < 0);
                }
        } else {
                /* Could not allocate without acts before locking. */
                reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
                                                info, OVS_FLOW_CMD_NEW, false);
                if (unlikely(IS_ERR(reply))) {
                        error = PTR_ERR(reply);
                        goto err_unlock_ovs;
                }
        }

        /* Clear stats. */
        if (a[OVS_FLOW_ATTR_CLEAR])
                ovs_flow_stats_clear(flow);
        ovs_unlock();

        if (reply)
                ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
        if (old_acts)
                ovs_nla_free_flow_actions(old_acts);
        return 0;

err_unlock_ovs:
        ovs_unlock();
        kfree_skb(reply);
err_kfree_acts:
        kfree(acts);
error:
        return error;
}

static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct sw_flow_match match;
        int err;

        if (!a[OVS_FLOW_ATTR_KEY]) {
                OVS_NLERR("Flow get message rejected, key attribute missing.\n");
                return -EINVAL;
        }

        ovs_match_init(&match, &key, NULL);
        err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
        if (err)
                return err;

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                err = -ENODEV;
                goto unlock;
        }

        flow = ovs_flow_tbl_lookup(&dp->table, &key);
        if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
                err = -ENOENT;
                goto unlock;
        }

        reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
                                        OVS_FLOW_CMD_NEW, true);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                goto unlock;
        }

        ovs_unlock();
        return genlmsg_reply(reply, info);
unlock:
        ovs_unlock();
        return err;
}

static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct sw_flow_match match;
        int err;

        if (likely(a[OVS_FLOW_ATTR_KEY])) {
                ovs_match_init(&match, &key, NULL);
                err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
                if (unlikely(err))
                        return err;
        }

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (unlikely(!dp)) {
                err = -ENODEV;
                goto unlock;
        }
        if (unlikely(!a[OVS_FLOW_ATTR_KEY])) {
                err = ovs_flow_tbl_flush(&dp->table);
                goto unlock;
        }
        flow = ovs_flow_tbl_lookup(&dp->table, &key);
        if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) {
                err = -ENOENT;
                goto unlock;
        }

        ovs_flow_tbl_remove(&dp->table, flow);
        ovs_unlock();

        reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *)flow->sf_acts,
                                        info, false);

        if (likely(reply)) {
                if (likely(!IS_ERR(reply))) {
                        rcu_read_lock(); /* Keep RCU checker happy. */
                        err = ovs_flow_cmd_fill_info(flow,
                                                     ovs_header->dp_ifindex,
                                                     reply, info->snd_portid,
                                                     info->snd_seq, 0,
                                                     OVS_FLOW_CMD_DEL);
                        rcu_read_unlock();
                        BUG_ON(err < 0);
                        ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
                } else {
                        genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0,
                                     GROUP_ID(&ovs_dp_flow_multicast_group), PTR_ERR(reply));
                }
        }

        ovs_flow_free(flow, true);
        return 0;
unlock:
        ovs_unlock();
        return err;
}

static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct table_instance *ti;
        struct datapath *dp;

        rcu_read_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                rcu_read_unlock();
                return -ENODEV;
        }

        ti = rcu_dereference(dp->table.ti);
        for (;;) {
                struct sw_flow *flow;
                u32 bucket, obj;

                bucket = cb->args[0];
                obj = cb->args[1];
                flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
                if (!flow)
                        break;

                if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
                                           NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           OVS_FLOW_CMD_NEW) < 0)
                        break;

                cb->args[0] = bucket;
                cb->args[1] = obj;
        }
        rcu_read_unlock();
        return skb->len;
}

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
};

static struct genl_ops dp_flow_genl_ops[] = {
        { .cmd = OVS_FLOW_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_new
        },
        { .cmd = OVS_FLOW_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_del
        },
        { .cmd = OVS_FLOW_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_get,
          .dumpit = ovs_flow_cmd_dump
        },
        { .cmd = OVS_FLOW_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_set,
        },
};

static struct genl_family dp_flow_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_FLOW_FAMILY,
        .version = OVS_FLOW_VERSION,
        .maxattr = OVS_FLOW_ATTR_MAX,
        .netnsok = true,
        .parallel_ops = true,
        .ops = dp_flow_genl_ops,
        .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
        .mcgrps = &ovs_dp_flow_multicast_group,
        .n_mcgrps = 1,
};

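/* Upper bound on the size of a datapath Netlink message. */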
static size_t ovs_dp_cmd_msg_size(void)
{
        size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));

        msgsize += nla_total_size(IFNAMSIZ);
        msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
        msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
        msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */

        return msgsize;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
                                u32 portid, u32 seq, u32 flags, u8 cmd)
{
        struct ovs_header *ovs_header;
        struct ovs_dp_stats dp_stats;
        struct ovs_dp_megaflow_stats dp_megaflow_stats;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
                                   flags, cmd);
        if (!ovs_header)
                goto error;

        ovs_header->dp_ifindex = get_dpifindex(dp);

        err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
        if (err)
                goto nla_put_failure;

        get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
        if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
                        &dp_stats))
                goto nla_put_failure;

        if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
                        sizeof(struct ovs_dp_megaflow_stats),
                        &dp_megaflow_stats))
                goto nla_put_failure;

        if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
                goto nla_put_failure;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        genlmsg_cancel(skb, ovs_header);
error:
        return -EMSGSIZE;
}

static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info)
{
        return genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
}

/* Called with rcu_read_lock or ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
                                        struct ovs_header *ovs_header,
                                        struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
        struct datapath *dp;

        if (!a[OVS_DP_ATTR_NAME])
                dp = get_dp(net, ovs_header->dp_ifindex);
        else {
                struct vport *vport;

                vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
                dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
        }
        return dp ? dp : ERR_PTR(-ENODEV);
}

static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
{
        struct datapath *dp;

        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp))
                return;

        WARN(dp->user_features, "Dropping previously announced user features\n");
        dp->user_features = 0;
}

static void ovs_dp_change(struct datapath *dp, struct nlattr **a)
{
        if (a[OVS_DP_ATTR_USER_FEATURES])
                dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
}

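/* Handler for OVS_DP_CMD_NEW: allocate a datapath, its flow table and
 * per-CPU stats, and create its local (internal) port. */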
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct vport_parms parms;
        struct sk_buff *reply;
        struct datapath *dp;
        struct vport *vport;
        struct ovs_net *ovs_net;
        int err, i;

        err = -EINVAL;
        if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
                goto err;

        reply = ovs_dp_cmd_alloc_info(info);
        if (!reply)
                return -ENOMEM;

        err = -ENOMEM;
        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
        if (dp == NULL)
                goto err_free_reply;

        ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));

        /* Allocate table. */
        err = ovs_flow_tbl_init(&dp->table);
        if (err)
                goto err_free_dp;

        dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
        if (!dp->stats_percpu) {
                err = -ENOMEM;
                goto err_destroy_table;
        }

        for_each_possible_cpu(i) {
                struct dp_stats_percpu *dpath_stats;
                dpath_stats = per_cpu_ptr(dp->stats_percpu, i);
                u64_stats_init(&dpath_stats->sync);
        }

        dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
                            GFP_KERNEL);
        if (!dp->ports) {
                err = -ENOMEM;
                goto err_destroy_percpu;
        }

        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
                INIT_HLIST_HEAD(&dp->ports[i]);

        /* Set up our datapath device. */
        parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
        parms.type = OVS_VPORT_TYPE_INTERNAL;
        parms.options = NULL;
        parms.dp = dp;
        parms.port_no = OVSP_LOCAL;
        parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];

        ovs_dp_change(dp, a);

        /* So far only local changes have been made, now need the lock. */
        ovs_lock();

        vport = new_vport(&parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                if (err == -EBUSY)
                        err = -EEXIST;

                if (err == -EEXIST) {
                        /* An outdated user space instance that does not
                         * understand the concept of user_features has
                         * attempted to create a new datapath and is likely to
                         * reuse it. Drop all user features.
                         */
                        if (info->genlhdr->version < OVS_DP_VER_FEATURES)
                                ovs_dp_reset_user_features(skb, info);
                }

                goto err_destroy_ports_array;
        }

        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
                                   info->snd_seq, 0, OVS_DP_CMD_NEW);
        BUG_ON(err < 0);

        ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
        list_add_tail_rcu(&dp->list_node, &ovs_net->dps);

        ovs_unlock();

        ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
        return 0;

err_destroy_ports_array:
        ovs_unlock();
        kfree(dp->ports);
err_destroy_percpu:
        free_percpu(dp->stats_percpu);
err_destroy_table:
        ovs_flow_tbl_destroy(&dp->table);
err_free_dp:
        release_net(ovs_dp_get_net(dp));
        kfree(dp);
err_free_reply:
        kfree_skb(reply);
err:
        return err;
}

/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{
        int i;

        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
                struct vport *vport;
                struct hlist_node *n;

                hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
                        if (vport->port_no != OVSP_LOCAL)
                                ovs_dp_detach_port(vport);
        }

        list_del_rcu(&dp->list_node);

        /* OVSP_LOCAL is the datapath's internal port.  We need to make sure
         * that all ports in the datapath are destroyed before the datapath
         * itself is freed.
         */
        ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));

        /* RCU-destroy the flow table and the datapath itself. */
        call_rcu(&dp->rcu, destroy_dp_rcu);
}

static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;
        int err;

        reply = ovs_dp_cmd_alloc_info(info);
        if (!reply)
                return -ENOMEM;

        ovs_lock();
        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        err = PTR_ERR(dp);
        if (IS_ERR(dp))
                goto err_unlock_free;

        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
                                   info->snd_seq, 0, OVS_DP_CMD_DEL);
        BUG_ON(err < 0);

        __dp_destroy(dp);

        ovs_unlock();
        ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
        return 0;

err_unlock_free:
        ovs_unlock();
        kfree_skb(reply);
        return err;
}

static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;
        int err;

        reply = ovs_dp_cmd_alloc_info(info);
        if (!reply)
                return -ENOMEM;

        ovs_lock();
        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        err = PTR_ERR(dp);
        if (IS_ERR(dp))
                goto err_unlock_free;

        ovs_dp_change(dp, info->attrs);

        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
                                   info->snd_seq, 0, OVS_DP_CMD_NEW);
        BUG_ON(err < 0);

        ovs_unlock();
        ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
        return 0;

err_unlock_free:
        ovs_unlock();
        kfree_skb(reply);
        return err;
}

1552 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1553 {
1554         struct sk_buff *reply;
1555         struct datapath *dp;
1556         int err;
1557
1558         reply = ovs_dp_cmd_alloc_info(info);
1559         if (!reply)
1560                 return -ENOMEM;
1561
1562         rcu_read_lock();
1563         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1564         if (IS_ERR(dp)) {
1565                 err = PTR_ERR(dp);
1566                 goto err_unlock_free;
1567         }
1568         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1569                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1570         BUG_ON(err < 0);
1571         rcu_read_unlock();
1572
1573         return genlmsg_reply(reply, info);
1574
1575 err_unlock_free:
1576         rcu_read_unlock();
1577         kfree_skb(reply);
1578         return err;
1579 }
1580
1581 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1582 {
1583         struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1584         struct datapath *dp;
1585         int skip = cb->args[0];
1586         int i = 0;
1587
1588         rcu_read_lock();
1589         list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
1590                 if (i >= skip &&
1591                     ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1592                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1593                                          OVS_DP_CMD_NEW) < 0)
1594                         break;
1595                 i++;
1596         }
1597         rcu_read_unlock();
1598
1599         cb->args[0] = i;
1600
1601         return skb->len;
1602 }
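
/* Dump resumption: a netlink dump is delivered as a series of callbacks,
 * each filling one skb.  cb->args[0] records how many datapaths have been
 * emitted so far, so the next callback skips that many list entries.  A
 * hypothetical run with three datapaths and a small skb:
 *
 *	call 1: skip = 0, emits dp0, dp1  ->  cb->args[0] = 2
 *	call 2: skip = 2, emits dp2       ->  cb->args[0] = 3
 *	call 3: skip = 3, emits nothing   ->  dump ends (NLMSG_DONE)
 */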
1603
1604 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1605         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1606         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1607         [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1608 };
1609
1610 static struct genl_ops dp_datapath_genl_ops[] = {
1611         { .cmd = OVS_DP_CMD_NEW,
1612           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1613           .policy = datapath_policy,
1614           .doit = ovs_dp_cmd_new
1615         },
1616         { .cmd = OVS_DP_CMD_DEL,
1617           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1618           .policy = datapath_policy,
1619           .doit = ovs_dp_cmd_del
1620         },
1621         { .cmd = OVS_DP_CMD_GET,
1622           .flags = 0,               /* OK for unprivileged users. */
1623           .policy = datapath_policy,
1624           .doit = ovs_dp_cmd_get,
1625           .dumpit = ovs_dp_cmd_dump
1626         },
1627         { .cmd = OVS_DP_CMD_SET,
1628           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1629           .policy = datapath_policy,
1630           .doit = ovs_dp_cmd_set,
1631         },
1632 };
1633
1634 static struct genl_family dp_datapath_genl_family = {
1635         .id = GENL_ID_GENERATE,
1636         .hdrsize = sizeof(struct ovs_header),
1637         .name = OVS_DATAPATH_FAMILY,
1638         .version = OVS_DATAPATH_VERSION,
1639         .maxattr = OVS_DP_ATTR_MAX,
1640         .netnsok = true,
1641         .parallel_ops = true,
1642         .ops = dp_datapath_genl_ops,
1643         .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1644         .mcgrps = &ovs_dp_datapath_multicast_group,
1645         .n_mcgrps = 1,
1646 };
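
/* Since .id is GENL_ID_GENERATE, the family id is assigned dynamically and
 * userspace must resolve it by name.  A minimal userspace sketch of issuing
 * OVS_DP_CMD_GET (illustrative only, not part of this module; assumes
 * libnl-3/libnl-genl-3 and omits error handling and reply parsing; the
 * datapath name "ovs-system" is just an example):
 *
 *	#include <netlink/netlink.h>
 *	#include <netlink/genl/genl.h>
 *	#include <netlink/genl/ctrl.h>
 *	#include <linux/openvswitch.h>
 *
 *	int main(void)
 *	{
 *		struct nl_sock *sk = nl_socket_alloc();
 *		struct nl_msg *msg = nlmsg_alloc();
 *		struct ovs_header *oh;
 *		int family;
 *
 *		genl_connect(sk);
 *		family = genl_ctrl_resolve(sk, OVS_DATAPATH_FAMILY);
 *
 *		oh = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
 *				 sizeof(*oh), 0, OVS_DP_CMD_GET,
 *				 OVS_DATAPATH_VERSION);
 *		oh->dp_ifindex = 0;		// select by name instead
 *		nla_put_string(msg, OVS_DP_ATTR_NAME, "ovs-system");
 *
 *		nl_send_auto(sk, msg);
 *		nl_recvmsgs_default(sk);	// reply handled by default cbs
 *		return 0;
 *	}
 */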
1647
1648 /* Called with ovs_mutex or RCU read lock. */
1649 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1650                                    u32 portid, u32 seq, u32 flags, u8 cmd)
1651 {
1652         struct ovs_header *ovs_header;
1653         struct ovs_vport_stats vport_stats;
1654         int err;
1655
1656         ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1657                                  flags, cmd);
1658         if (!ovs_header)
1659                 return -EMSGSIZE;
1660
1661         ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1662
1663         if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1664             nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1665             nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)))
1666                 goto nla_put_failure;
1667
1668         ovs_vport_get_stats(vport, &vport_stats);
1669         if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1670                     &vport_stats))
1671                 goto nla_put_failure;
1672
1673         if (ovs_vport_get_upcall_portids(vport, skb))
1674                 goto nla_put_failure;
1675
1676         err = ovs_vport_get_options(vport, skb);
1677         if (err == -EMSGSIZE)
1678                 goto error;
1679
1680         return genlmsg_end(skb, ovs_header);
1681
1682 nla_put_failure:
1683         err = -EMSGSIZE;
1684 error:
1685         genlmsg_cancel(skb, ovs_header);
1686         return err;
1687 }
1688
1689 static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1690 {
1691         return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1692 }
1693
1694 /* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
1695 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
1696                                          u32 seq, u8 cmd)
1697 {
1698         struct sk_buff *skb;
1699         int retval;
1700
1701         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1702         if (!skb)
1703                 return ERR_PTR(-ENOMEM);
1704
1705         retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
1706         BUG_ON(retval < 0);
1707
1708         return skb;
1709 }
1710
1711 /* Called with ovs_mutex or RCU read lock. */
1712 static struct vport *lookup_vport(struct net *net,
1713                                   struct ovs_header *ovs_header,
1714                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1715 {
1716         struct datapath *dp;
1717         struct vport *vport;
1718
1719         if (a[OVS_VPORT_ATTR_NAME]) {
1720                 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1721                 if (!vport)
1722                         return ERR_PTR(-ENODEV);
1723                 if (ovs_header->dp_ifindex &&
1724                     ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1725                         return ERR_PTR(-ENODEV);
1726                 return vport;
1727         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1728                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1729
1730                 if (port_no >= DP_MAX_PORTS)
1731                         return ERR_PTR(-EFBIG);
1732
1733                 dp = get_dp(net, ovs_header->dp_ifindex);
1734                 if (!dp)
1735                         return ERR_PTR(-ENODEV);
1736
1737                 vport = ovs_vport_ovsl_rcu(dp, port_no);
1738                 if (!vport)
1739                         return ERR_PTR(-ENODEV);
1740                 return vport;
1741         } else
1742                 return ERR_PTR(-EINVAL);
1743 }
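
/* lookup_vport() accepts both addressing forms that userspace may use:
 * OVS_VPORT_ATTR_NAME takes precedence, and a non-zero dp_ifindex is then
 * only checked for consistency with the named port's datapath; with
 * OVS_VPORT_ATTR_PORT_NO, dp_ifindex is required to locate the datapath
 * first. */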
1744
1745 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1746 {
1747         struct nlattr **a = info->attrs;
1748         struct ovs_header *ovs_header = info->userhdr;
1749         struct vport_parms parms;
1750         struct sk_buff *reply;
1751         struct vport *vport;
1752         struct datapath *dp;
1753         u32 port_no;
1754         int err;
1755
1756         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1757             !a[OVS_VPORT_ATTR_UPCALL_PID])
1758                 return -EINVAL;
1759
1760         port_no = a[OVS_VPORT_ATTR_PORT_NO]
1761                 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
1762         if (port_no >= DP_MAX_PORTS)
1763                 return -EFBIG;
1764
1765         reply = ovs_vport_cmd_alloc_info();
1766         if (!reply)
1767                 return -ENOMEM;
1768
1769         ovs_lock();
1770         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1771         err = -ENODEV;
1772         if (!dp)
1773                 goto exit_unlock_free;
1774
1775         if (port_no) {
1776                 vport = ovs_vport_ovsl(dp, port_no);
1777                 err = -EBUSY;
1778                 if (vport)
1779                         goto exit_unlock_free;
1780         } else {
1781                 for (port_no = 1; ; port_no++) {
1782                         if (port_no >= DP_MAX_PORTS) {
1783                                 err = -EFBIG;
1784                                 goto exit_unlock_free;
1785                         }
1786                         vport = ovs_vport_ovsl(dp, port_no);
1787                         if (!vport)
1788                                 break;
1789                 }
1790         }
1791
1792         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1793         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1794         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1795         parms.dp = dp;
1796         parms.port_no = port_no;
1797         parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
1798
1799         vport = new_vport(&parms);
1800         err = PTR_ERR(vport);
1801         if (IS_ERR(vport))
1802                 goto exit_unlock_free;
1803
1805         if (a[OVS_VPORT_ATTR_STATS])
1806                 ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
1807
1808         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1809                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1810         BUG_ON(err < 0);
1811         ovs_unlock();
1812
1813         ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
1814         return 0;
1815
1816 exit_unlock_free:
1817         ovs_unlock();
1818         kfree_skb(reply);
1819         return err;
1820 }
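
/* For reference, the smallest valid OVS_VPORT_CMD_NEW request looks like
 * this (attribute values are hypothetical):
 *
 *	struct ovs_header          .dp_ifindex = <datapath ifindex>
 *	OVS_VPORT_ATTR_NAME        "eth1"
 *	OVS_VPORT_ATTR_TYPE        OVS_VPORT_TYPE_NETDEV
 *	OVS_VPORT_ATTR_UPCALL_PID  <netlink portid(s) for upcalls>
 *
 * OVS_VPORT_ATTR_PORT_NO is optional; when absent (or zero) the loop above
 * assigns the lowest free port number starting from 1, since port 0
 * (OVSP_LOCAL) is always the datapath's internal port. */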
1821
1822 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1823 {
1824         struct nlattr **a = info->attrs;
1825         struct sk_buff *reply;
1826         struct vport *vport;
1827         int err;
1828
1829         reply = ovs_vport_cmd_alloc_info();
1830         if (!reply)
1831                 return -ENOMEM;
1832
1833         ovs_lock();
1834         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1835         err = PTR_ERR(vport);
1836         if (IS_ERR(vport))
1837                 goto exit_unlock_free;
1838
1839         if (a[OVS_VPORT_ATTR_TYPE] &&
1840             nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
1841                 err = -EINVAL;
1842                 goto exit_unlock_free;
1843         }
1844
1845         if (a[OVS_VPORT_ATTR_OPTIONS]) {
1846                 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
1847                 if (err)
1848                         goto exit_unlock_free;
1849         }
1850
1851         if (a[OVS_VPORT_ATTR_STATS])
1852                 ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
1853
1855         if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
1856                 err = ovs_vport_set_upcall_portids(vport,
1857                                                    a[OVS_VPORT_ATTR_UPCALL_PID]);
1858                 if (err)
1859                         goto exit_unlock_free;
1860         }
1861
1862         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1863                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1864         BUG_ON(err < 0);
1865         ovs_unlock();
1866
1867         ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
1868         return 0;
1869
1870 exit_unlock_free:
1871         ovs_unlock();
1872         kfree_skb(reply);
1873         return err;
1874 }
1875
1876 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1877 {
1878         struct nlattr **a = info->attrs;
1879         struct sk_buff *reply;
1880         struct vport *vport;
1881         int err;
1882
1883         reply = ovs_vport_cmd_alloc_info();
1884         if (!reply)
1885                 return -ENOMEM;
1886
1887         ovs_lock();
1888         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1889         err = PTR_ERR(vport);
1890         if (IS_ERR(vport))
1891                 goto exit_unlock_free;
1892
1893         if (vport->port_no == OVSP_LOCAL) {
1894                 err = -EINVAL;
1895                 goto exit_unlock_free;
1896         }
1897
1898         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1899                                       info->snd_seq, 0, OVS_VPORT_CMD_DEL);
1900         BUG_ON(err < 0);
1901         ovs_dp_detach_port(vport);
1902         ovs_unlock();
1903
1904         ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
1905         return 0;
1906
1907 exit_unlock_free:
1908         ovs_unlock();
1909         kfree_skb(reply);
1910         return err;
1911 }
1912
1913 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
1914 {
1915         struct nlattr **a = info->attrs;
1916         struct ovs_header *ovs_header = info->userhdr;
1917         struct sk_buff *reply;
1918         struct vport *vport;
1919         int err;
1920
1921         reply = ovs_vport_cmd_alloc_info();
1922         if (!reply)
1923                 return -ENOMEM;
1924
1925         rcu_read_lock();
1926         vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
1927         err = PTR_ERR(vport);
1928         if (IS_ERR(vport))
1929                 goto exit_unlock_free;
1930         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1931                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1932         BUG_ON(err < 0);
1933         rcu_read_unlock();
1934
1935         return genlmsg_reply(reply, info);
1936
1937 exit_unlock_free:
1938         rcu_read_unlock();
1939         kfree_skb(reply);
1940         return err;
1941 }
1942
1943 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1944 {
1945         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1946         struct datapath *dp;
1947         int bucket = cb->args[0], skip = cb->args[1];
1948         int i, j = 0;
1949
1950         rcu_read_lock();
1951         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1952         if (!dp) {
1953                 rcu_read_unlock();
1954                 return -ENODEV;
1955         }
1956         for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
1957                 struct vport *vport;
1958
1959                 j = 0;
1960                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
1961                         if (j >= skip &&
1962                             ovs_vport_cmd_fill_info(vport, skb,
1963                                                     NETLINK_CB(cb->skb).portid,
1964                                                     cb->nlh->nlmsg_seq,
1965                                                     NLM_F_MULTI,
1966                                                     OVS_VPORT_CMD_NEW) < 0)
1967                                 goto out;
1968
1969                         j++;
1970                 }
1971                 skip = 0;
1972         }
1973 out:
1974         rcu_read_unlock();
1975
1976         cb->args[0] = i;
1977         cb->args[1] = j;
1978
1979         return skb->len;
1980 }
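
/* Vport dumps resume at two levels: cb->args[0] is the hash bucket to
 * continue from and cb->args[1] the number of entries already emitted
 * within that bucket.  As with most RCU-protected netlink dumps, ports
 * added or removed while the dump is in flight may be missed or reported
 * twice; userspace is expected to tolerate that. */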
1981
1982 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
1983         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1984         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
1985         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1986         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1987         [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1988         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1989 };
1990
1991 static struct genl_ops dp_vport_genl_ops[] = {
1992         { .cmd = OVS_VPORT_CMD_NEW,
1993           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1994           .policy = vport_policy,
1995           .doit = ovs_vport_cmd_new
1996         },
1997         { .cmd = OVS_VPORT_CMD_DEL,
1998           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1999           .policy = vport_policy,
2000           .doit = ovs_vport_cmd_del
2001         },
2002         { .cmd = OVS_VPORT_CMD_GET,
2003           .flags = 0,               /* OK for unprivileged users. */
2004           .policy = vport_policy,
2005           .doit = ovs_vport_cmd_get,
2006           .dumpit = ovs_vport_cmd_dump
2007         },
2008         { .cmd = OVS_VPORT_CMD_SET,
2009           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2010           .policy = vport_policy,
2011           .doit = ovs_vport_cmd_set,
2012         },
2013 };
2014
2015 struct genl_family dp_vport_genl_family = {
2016         .id = GENL_ID_GENERATE,
2017         .hdrsize = sizeof(struct ovs_header),
2018         .name = OVS_VPORT_FAMILY,
2019         .version = OVS_VPORT_VERSION,
2020         .maxattr = OVS_VPORT_ATTR_MAX,
2021         .netnsok = true,
2022         .parallel_ops = true,
2023         .ops = dp_vport_genl_ops,
2024         .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
2025         .mcgrps = &ovs_dp_vport_multicast_group,
2026         .n_mcgrps = 1,
2027 };
2028
2029 static struct genl_family *dp_genl_families[] = {
2030         &dp_datapath_genl_family,
2031         &dp_vport_genl_family,
2032         &dp_flow_genl_family,
2033         &dp_packet_genl_family,
2034 };
2035
2036 static void dp_unregister_genl(int n_families)
2037 {
2038         int i;
2039
2040         for (i = 0; i < n_families; i++)
2041                 genl_unregister_family(dp_genl_families[i]);
2042 }
2043
2044 static int dp_register_genl(void)
2045 {
2046         int err;
2047         int i;
2048
2049         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2051                 err = genl_register_family(dp_genl_families[i]);
2052                 if (err)
2053                         goto error;
2054         }
2055
2056         return 0;
2057
2058 error:
2059         dp_unregister_genl(i);
2060         return err;
2061 }
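
/* On a mid-loop failure, dp_unregister_genl(i) unwinds exactly the i
 * families registered so far; e.g. if dp_genl_families[2] fails to
 * register, only entries 0 and 1 are unregistered. */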
2062
2063 static int __net_init ovs_init_net(struct net *net)
2064 {
2065         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2066
2067         INIT_LIST_HEAD(&ovs_net->dps);
2068         INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2069         return 0;
2070 }
2071
2072 static void __net_exit ovs_exit_net(struct net *net)
2073 {
2074         struct datapath *dp, *dp_next;
2075         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2076
2077         ovs_lock();
2078         list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2079                 __dp_destroy(dp);
2080         ovs_unlock();
2081
2082         cancel_work_sync(&ovs_net->dp_notify_work);
2083 }
2084
2085 static struct pernet_operations ovs_net_ops = {
2086         .init = ovs_init_net,
2087         .exit = ovs_exit_net,
2088         .id   = &ovs_net_id,
2089         .size = sizeof(struct ovs_net),
2090 };
2091
2092 DEFINE_COMPAT_PNET_REG_FUNC(device);
2093
2094 static int __init dp_init(void)
2095 {
2096         int err;
2097
2098         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
2099
2100         pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
2101                 VERSION);
2102
2103         err = ovs_flow_init();
2104         if (err)
2105                 goto error;
2106
2107         err = ovs_vport_init();
2108         if (err)
2109                 goto error_flow_exit;
2110
2111         err = register_pernet_device(&ovs_net_ops);
2112         if (err)
2113                 goto error_vport_exit;
2114
2115         err = register_netdevice_notifier(&ovs_dp_device_notifier);
2116         if (err)
2117                 goto error_netns_exit;
2118
2119         err = dp_register_genl();
2120         if (err < 0)
2121                 goto error_unreg_notifier;
2122
2123         return 0;
2124
2125 error_unreg_notifier:
2126         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2127 error_netns_exit:
2128         unregister_pernet_device(&ovs_net_ops);
2129 error_vport_exit:
2130         ovs_vport_exit();
2131 error_flow_exit:
2132         ovs_flow_exit();
2133 error:
2134         return err;
2135 }
2136
2137 static void dp_cleanup(void)
2138 {
2139         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2140         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2141         unregister_pernet_device(&ovs_net_ops);
2142         rcu_barrier();
2143         ovs_vport_exit();
2144         ovs_flow_exit();
2145 }
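
/* Teardown order matters here: unregistering the genl families first stops
 * new userspace requests, the notifier and pernet unregistration then
 * destroy any remaining datapaths, and rcu_barrier() waits for all pending
 * destroy_dp_rcu() callbacks to finish before ovs_vport_exit() and
 * ovs_flow_exit() free the caches those callbacks use. */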
2146
2147 module_init(dp_init);
2148 module_exit(dp_cleanup);
2149
2150 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2151 MODULE_LICENSE("GPL");
2152 MODULE_VERSION(VERSION);