datapath: Move table destroy to dp-rcu callback.
[sliver-openvswitch.git] / datapath / datapath.c
1 /*
2  * Copyright (c) 2007-2014 Nicira, Inc.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_vlan.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/jhash.h>
28 #include <linux/delay.h>
29 #include <linux/time.h>
30 #include <linux/etherdevice.h>
31 #include <linux/genetlink.h>
32 #include <linux/kernel.h>
33 #include <linux/kthread.h>
34 #include <linux/mutex.h>
35 #include <linux/percpu.h>
36 #include <linux/rcupdate.h>
37 #include <linux/tcp.h>
38 #include <linux/udp.h>
39 #include <linux/version.h>
40 #include <linux/ethtool.h>
41 #include <linux/wait.h>
42 #include <asm/div64.h>
43 #include <linux/highmem.h>
44 #include <linux/netfilter_bridge.h>
45 #include <linux/netfilter_ipv4.h>
46 #include <linux/inetdevice.h>
47 #include <linux/list.h>
48 #include <linux/openvswitch.h>
49 #include <linux/rculist.h>
50 #include <linux/dmi.h>
51 #include <linux/genetlink.h>
52 #include <net/genetlink.h>
53 #include <net/genetlink.h>
54 #include <net/net_namespace.h>
55 #include <net/netns/generic.h>
56
57 #include "datapath.h"
58 #include "flow.h"
59 #include "flow_table.h"
60 #include "flow_netlink.h"
61 #include "vlan.h"
62 #include "vport-internal_dev.h"
63 #include "vport-netdev.h"
64
65 int ovs_net_id __read_mostly;
66
67 static struct genl_family dp_packet_genl_family;
68 static struct genl_family dp_flow_genl_family;
69 static struct genl_family dp_datapath_genl_family;
70
71 static struct genl_multicast_group ovs_dp_flow_multicast_group = {
72         .name = OVS_FLOW_MCGROUP
73 };
74
75 static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
76         .name = OVS_DATAPATH_MCGROUP
77 };
78
79 struct genl_multicast_group ovs_dp_vport_multicast_group = {
80         .name = OVS_VPORT_MCGROUP
81 };
82
83 /* Check whether we need to build a reply message.
84  * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
85 static bool ovs_must_notify(struct genl_info *info,
86                             const struct genl_multicast_group *grp)
87 {
88         return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
89                 netlink_has_listeners(genl_info_net(info)->genl_sock, GROUP_ID(grp));
90 }
91
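/* Deliver a notification: multicast 'skb' to group 'grp' and, if the request
 * described by 'info' set NLM_F_ECHO, also unicast it back to the sender. */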
92 static void ovs_notify(struct genl_family *family, struct genl_multicast_group *grp,
93                        struct sk_buff *skb, struct genl_info *info)
94 {
95         genl_notify(family, skb, genl_info_net(info),
96                     info->snd_portid, GROUP_ID(grp), info->nlhdr, GFP_KERNEL);
97 }
98
99 /**
100  * DOC: Locking:
101  *
102  * Writes to device state (add/remove datapath, port, set operations on
103  * vports, etc.) and writes to other state (flow table modifications,
104  * setting miscellaneous datapath parameters, etc.) are protected by
105  * ovs_mutex, which is taken via ovs_lock().
106  *
107  * Reads are protected by RCU.
108  *
109  * There are a few special cases (mostly stats) that have their own
110  * synchronization but they nest under all of above and don't interact with
111  * each other.
112  *
113  * The RTNL lock nests inside ovs_mutex.
114  */
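
/* A minimal sketch of the update pattern implied by the rules above (it
 * mirrors ovs_flow_cmd_set() later in this file): modify state under
 * ovs_lock(), publish the new version with rcu_assign_pointer(), and release
 * the old version only after dropping the lock, in a way that is safe with
 * respect to concurrent RCU readers:
 *
 *	ovs_lock();
 *	old_acts = ovsl_dereference(flow->sf_acts);
 *	rcu_assign_pointer(flow->sf_acts, acts);
 *	ovs_unlock();
 *	ovs_nla_free_flow_actions(old_acts);
 */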
115
116 static DEFINE_MUTEX(ovs_mutex);
117
118 void ovs_lock(void)
119 {
120         mutex_lock(&ovs_mutex);
121 }
122
123 void ovs_unlock(void)
124 {
125         mutex_unlock(&ovs_mutex);
126 }
127
128 #ifdef CONFIG_LOCKDEP
129 int lockdep_ovsl_is_held(void)
130 {
131         if (debug_locks)
132                 return lockdep_is_held(&ovs_mutex);
133         else
134                 return 1;
135 }
136 #endif
137
138 static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
139                              const struct dp_upcall_info *);
140 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
141                                   const struct dp_upcall_info *);
142
143 /* Must be called with rcu_read_lock or ovs_mutex. */
144 static struct datapath *get_dp(struct net *net, int dp_ifindex)
145 {
146         struct datapath *dp = NULL;
147         struct net_device *dev;
148
149         rcu_read_lock();
150         dev = dev_get_by_index_rcu(net, dp_ifindex);
151         if (dev) {
152                 struct vport *vport = ovs_internal_dev_get_vport(dev);
153                 if (vport)
154                         dp = vport->dp;
155         }
156         rcu_read_unlock();
157
158         return dp;
159 }
160
161 /* Must be called with rcu_read_lock or ovs_mutex. */
162 const char *ovs_dp_name(const struct datapath *dp)
163 {
164         struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
165         return vport->ops->get_name(vport);
166 }
167
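/* Return the ifindex of the datapath's local (internal) port, or 0 if the
 * datapath currently has no local port. */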
168 static int get_dpifindex(struct datapath *dp)
169 {
170         struct vport *local;
171         int ifindex;
172
173         rcu_read_lock();
174
175         local = ovs_vport_rcu(dp, OVSP_LOCAL);
176         if (local)
177                 ifindex = netdev_vport_priv(local)->dev->ifindex;
178         else
179                 ifindex = 0;
180
181         rcu_read_unlock();
182
183         return ifindex;
184 }
185
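/* RCU callback: no readers can still reference 'dp', so its flow table,
 * per-cpu stats, port array and the datapath itself can now be freed. */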
186 static void destroy_dp_rcu(struct rcu_head *rcu)
187 {
188         struct datapath *dp = container_of(rcu, struct datapath, rcu);
189
190         ovs_flow_tbl_destroy(&dp->table);
191         free_percpu(dp->stats_percpu);
192         release_net(ovs_dp_get_net(dp));
193         kfree(dp->ports);
194         kfree(dp);
195 }
196
197 static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
198                                             u16 port_no)
199 {
200         return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
201 }
202
203 /* Called with ovs_mutex or RCU read lock. */
204 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
205 {
206         struct vport *vport;
207         struct hlist_head *head;
208
209         head = vport_hash_bucket(dp, port_no);
210         hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
211                 if (vport->port_no == port_no)
212                         return vport;
213         }
214         return NULL;
215 }
216
217 /* Called with ovs_mutex. */
218 static struct vport *new_vport(const struct vport_parms *parms)
219 {
220         struct vport *vport;
221
222         vport = ovs_vport_add(parms);
223         if (!IS_ERR(vport)) {
224                 struct datapath *dp = parms->dp;
225                 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
226
227                 hlist_add_head_rcu(&vport->dp_hash_node, head);
228         }
229         return vport;
230 }
231
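/* Unlink 'p' from its datapath and destroy it.  Caller must hold ovs_mutex. */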
232 void ovs_dp_detach_port(struct vport *p)
233 {
234         ASSERT_OVSL();
235
236         /* First drop references to device. */
237         hlist_del_rcu(&p->dp_hash_node);
238
239         /* Then destroy it. */
240         ovs_vport_del(p);
241 }
242
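/* Main receive path: look 'pkt_key' up in the datapath's flow table and
 * execute the matching flow's actions, or upcall the packet to userspace as
 * an OVS_PACKET_CMD_MISS if there is no match.  Per-cpu datapath stats are
 * updated either way. */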
243 void ovs_dp_process_packet_with_key(struct sk_buff *skb,
244                 struct sw_flow_key *pkt_key)
245 {
246         const struct vport *p = OVS_CB(skb)->input_vport;
247         struct datapath *dp = p->dp;
248         struct sw_flow *flow;
249         struct dp_stats_percpu *stats;
250         u64 *stats_counter;
251         u32 n_mask_hit;
252
253         stats = this_cpu_ptr(dp->stats_percpu);
254
255         /* Look up flow. */
256         flow = ovs_flow_tbl_lookup_stats(&dp->table, pkt_key, &n_mask_hit);
257         if (unlikely(!flow)) {
258                 struct dp_upcall_info upcall;
259
260                 upcall.cmd = OVS_PACKET_CMD_MISS;
261                 upcall.key = pkt_key;
262                 upcall.userdata = NULL;
263                 upcall.portid = ovs_vport_find_upcall_portid(p, skb);
264                 ovs_dp_upcall(dp, skb, &upcall);
265                 consume_skb(skb);
266                 stats_counter = &stats->n_missed;
267                 goto out;
268         }
269
270         OVS_CB(skb)->pkt_key = pkt_key;
271         OVS_CB(skb)->flow = flow;
272
273         ovs_flow_stats_update(OVS_CB(skb)->flow, pkt_key->tp.flags, skb);
274         ovs_execute_actions(dp, skb);
275         stats_counter = &stats->n_hit;
276
277 out:
278         /* Update datapath statistics. */
279         u64_stats_update_begin(&stats->sync);
280         (*stats_counter)++;
281         stats->n_mask_hit += n_mask_hit;
282         u64_stats_update_end(&stats->sync);
283 }
284
285 /* Must be called with rcu_read_lock. */
286 void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
287 {
288         int error;
289         struct sw_flow_key key;
290
291         OVS_CB(skb)->input_vport = p;
292
293         /* Extract flow from 'skb' into 'key'. */
294         error = ovs_flow_extract(skb, p->port_no, &key);
295         if (unlikely(error)) {
296                 kfree_skb(skb);
297                 return;
298         }
299
300         ovs_dp_process_packet_with_key(skb, &key);
301 }
302
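/* Queue 'skb' to the userspace netlink port given by 'upcall_info',
 * segmenting GSO packets first.  A dropped upcall (no portid configured or a
 * queueing failure) is accounted in the per-cpu n_lost counter. */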
303 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
304                   const struct dp_upcall_info *upcall_info)
305 {
306         struct dp_stats_percpu *stats;
307         int err;
308
309         if (upcall_info->portid == 0) {
310                 err = -ENOTCONN;
311                 goto err;
312         }
313
314         if (!skb_is_gso(skb))
315                 err = queue_userspace_packet(dp, skb, upcall_info);
316         else
317                 err = queue_gso_packets(dp, skb, upcall_info);
318         if (err)
319                 goto err;
320
321         return 0;
322
323 err:
324         stats = this_cpu_ptr(dp->stats_percpu);
325
326         u64_stats_update_begin(&stats->sync);
327         stats->n_lost++;
328         u64_stats_update_end(&stats->sync);
329
330         return err;
331 }
332
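/* Software-segment a GSO skb and queue each resulting segment to userspace.
 * For UDP fragmentation, the flow key extracted from the first fragment is
 * copied and marked OVS_FRAG_TYPE_LATER for the remaining segments. */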
333 static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
334                              const struct dp_upcall_info *upcall_info)
335 {
336         unsigned short gso_type = skb_shinfo(skb)->gso_type;
337         struct dp_upcall_info later_info;
338         struct sw_flow_key later_key;
339         struct sk_buff *segs, *nskb;
340         int err;
341
342         segs = __skb_gso_segment(skb, NETIF_F_SG, false);
343         if (IS_ERR(segs))
344                 return PTR_ERR(segs);
345
346         /* Queue all of the segments. */
347         skb = segs;
348         do {
349                 err = queue_userspace_packet(dp, skb, upcall_info);
350                 if (err)
351                         break;
352
353                 if (skb == segs && gso_type & SKB_GSO_UDP) {
354                         /* The initial flow key extracted by ovs_flow_extract()
355                          * in this case is for a first fragment, so we need to
356                          * properly mark later fragments.
357                          */
358                         later_key = *upcall_info->key;
359                         later_key.ip.frag = OVS_FRAG_TYPE_LATER;
360
361                         later_info = *upcall_info;
362                         later_info.key = &later_key;
363                         upcall_info = &later_info;
364                 }
365         } while ((skb = skb->next));
366
367         /* Free all of the segments. */
368         skb = segs;
369         do {
370                 nskb = skb->next;
371                 if (err)
372                         kfree_skb(skb);
373                 else
374                         consume_skb(skb);
375         } while ((skb = nskb));
376         return err;
377 }
378
379 static size_t key_attr_size(void)
380 {
381         return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
382                 + nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
383                   + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
384                   + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
385                   + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
386                   + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
387                   + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
388                   + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
389                   + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
390                 + nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
391                 + nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
392                 + nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
393                 + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
394                 + nla_total_size(4)   /* OVS_KEY_ATTR_8021Q */
395                 + nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
396                 + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
397                 + nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
398                 + nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
399                 + nla_total_size(28); /* OVS_KEY_ATTR_ND */
400 }
401
402 static size_t upcall_msg_size(const struct nlattr *userdata,
403                               unsigned int hdrlen)
404 {
405         size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
406                 + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
407                 + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */
408
409         /* OVS_PACKET_ATTR_USERDATA */
410         if (userdata)
411                 size += NLA_ALIGN(userdata->nla_len);
412
413         return size;
414 }
415
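/* Build an upcall message for 'skb' (flow key, optional userdata and the
 * packet itself, zero-copied when userspace accepts unaligned attributes)
 * and unicast it to the upcall portid. */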
416 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
417                                   const struct dp_upcall_info *upcall_info)
418 {
419         struct ovs_header *upcall;
420         struct sk_buff *nskb = NULL;
421         struct sk_buff *user_skb; /* to be queued to userspace */
422         struct nlattr *nla;
423         struct genl_info info = {
424 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
425                 .dst_sk = ovs_dp_get_net(dp)->genl_sock,
426 #endif
427                 .snd_portid = upcall_info->portid,
428         };
429         size_t len;
430         unsigned int hlen;
431         int err, dp_ifindex;
432
433         dp_ifindex = get_dpifindex(dp);
434         if (!dp_ifindex)
435                 return -ENODEV;
436
437         if (vlan_tx_tag_present(skb)) {
438                 nskb = skb_clone(skb, GFP_ATOMIC);
439                 if (!nskb)
440                         return -ENOMEM;
441
442                 nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
443                 if (!nskb)
444                         return -ENOMEM;
445
446                 vlan_set_tci(nskb, 0);
447
448                 skb = nskb;
449         }
450
451         if (nla_attr_size(skb->len) > USHRT_MAX) {
452                 err = -EFBIG;
453                 goto out;
454         }
455
456         /* Complete checksum if needed */
457         if (skb->ip_summed == CHECKSUM_PARTIAL &&
458             (err = skb_checksum_help(skb)))
459                 goto out;
460
461         /* Older versions of OVS user space enforce alignment of the last
462          * Netlink attribute to NLA_ALIGNTO which would require extensive
463          * padding logic. Only perform zerocopy if padding is not required.
464          */
465         if (dp->user_features & OVS_DP_F_UNALIGNED)
466                 hlen = skb_zerocopy_headlen(skb);
467         else
468                 hlen = skb->len;
469
470         len = upcall_msg_size(upcall_info->userdata, hlen);
471         user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
472         if (!user_skb) {
473                 err = -ENOMEM;
474                 goto out;
475         }
476
477         upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
478                              0, upcall_info->cmd);
479         upcall->dp_ifindex = dp_ifindex;
480
481         nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
482         ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
483         nla_nest_end(user_skb, nla);
484
485         if (upcall_info->userdata)
486                 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
487                           nla_len(upcall_info->userdata),
488                           nla_data(upcall_info->userdata));
489
490         /* Only reserve room for attribute header, packet data is added
491          * in skb_zerocopy() */
492         if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
493                 err = -ENOBUFS;
494                 goto out;
495         }
496         nla->nla_len = nla_attr_size(skb->len);
497
498         err = skb_zerocopy(user_skb, skb, skb->len, hlen);
499         if (err)
500                 goto out;
501
502         /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
503         if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
504                 size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;
505
506                 if (plen > 0)
507                         memset(skb_put(user_skb, plen), 0, plen);
508         }
509
510         ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
511
512         err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
513 out:
514         if (err)
515                 skb_tx_error(skb);
516         kfree_skb(nskb);
517         return err;
518 }
519
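/* OVS_PACKET_CMD_EXECUTE handler: reconstruct a packet from the netlink
 * attributes, attach the supplied key and actions to a temporary flow, and
 * run the packet through the datapath as if it had been received on the
 * given input port. */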
520 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
521 {
522         struct ovs_header *ovs_header = info->userhdr;
523         struct nlattr **a = info->attrs;
524         struct sw_flow_actions *acts;
525         struct sk_buff *packet;
526         struct sw_flow *flow;
527         struct datapath *dp;
528         struct ethhdr *eth;
529         struct vport *input_vport;
530         int len;
531         int err;
532
533         err = -EINVAL;
534         if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
535             !a[OVS_PACKET_ATTR_ACTIONS])
536                 goto err;
537
538         len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
539         packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
540         err = -ENOMEM;
541         if (!packet)
542                 goto err;
543         skb_reserve(packet, NET_IP_ALIGN);
544
545         nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
546
547         skb_reset_mac_header(packet);
548         eth = eth_hdr(packet);
549
550         /* Normally, setting the skb 'protocol' field would be handled by a
551          * call to eth_type_trans(), but it assumes there's a sending
552          * device, which we may not have. */
553         if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
554                 packet->protocol = eth->h_proto;
555         else
556                 packet->protocol = htons(ETH_P_802_2);
557
558         /* Build an sw_flow for sending this packet. */
559         flow = ovs_flow_alloc();
560         err = PTR_ERR(flow);
561         if (IS_ERR(flow))
562                 goto err_kfree_skb;
563
564         err = ovs_flow_extract(packet, -1, &flow->key);
565         if (err)
566                 goto err_flow_free;
567
568         err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
569         if (err)
570                 goto err_flow_free;
571         acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
572         err = PTR_ERR(acts);
573         if (IS_ERR(acts))
574                 goto err_flow_free;
575
576         err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
577                                    &flow->key, 0, &acts);
578         rcu_assign_pointer(flow->sf_acts, acts);
579         if (err)
580                 goto err_flow_free;
581
582         OVS_CB(packet)->flow = flow;
583         OVS_CB(packet)->pkt_key = &flow->key;
584         packet->priority = flow->key.phy.priority;
585         packet->mark = flow->key.phy.skb_mark;
586
587         rcu_read_lock();
588         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
589         err = -ENODEV;
590         if (!dp)
591                 goto err_unlock;
592
593         input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
594         if (!input_vport)
595                 input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);
596
597         if (!input_vport)
598                 goto err_unlock;
599
600         OVS_CB(packet)->input_vport = input_vport;
601
602         local_bh_disable();
603         err = ovs_execute_actions(dp, packet);
604         local_bh_enable();
605         rcu_read_unlock();
606
607         ovs_flow_free(flow, false);
608         return err;
609
610 err_unlock:
611         rcu_read_unlock();
612 err_flow_free:
613         ovs_flow_free(flow, false);
614 err_kfree_skb:
615         kfree_skb(packet);
616 err:
617         return err;
618 }
619
620 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
621         [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
622         [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
623         [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
624 };
625
626 static struct genl_ops dp_packet_genl_ops[] = {
627         { .cmd = OVS_PACKET_CMD_EXECUTE,
628           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
629           .policy = packet_policy,
630           .doit = ovs_packet_cmd_execute
631         }
632 };
633
634 static struct genl_family dp_packet_genl_family = {
635         .id = GENL_ID_GENERATE,
636         .hdrsize = sizeof(struct ovs_header),
637         .name = OVS_PACKET_FAMILY,
638         .version = OVS_PACKET_VERSION,
639         .maxattr = OVS_PACKET_ATTR_MAX,
640         .netnsok = true,
641         .parallel_ops = true,
642         .ops = dp_packet_genl_ops,
643         .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
644 };
645
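/* Sum the per-cpu datapath counters into 'stats' and 'mega_stats'. */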
646 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
647                          struct ovs_dp_megaflow_stats *mega_stats)
648 {
649         int i;
650
651         memset(mega_stats, 0, sizeof(*mega_stats));
652
653         stats->n_flows = ovs_flow_tbl_count(&dp->table);
654         mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
655
656         stats->n_hit = stats->n_missed = stats->n_lost = 0;
657
658         for_each_possible_cpu(i) {
659                 const struct dp_stats_percpu *percpu_stats;
660                 struct dp_stats_percpu local_stats;
661                 unsigned int start;
662
663                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
664
665                 do {
666                         start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
667                         local_stats = *percpu_stats;
668                 } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
669
670                 stats->n_hit += local_stats.n_hit;
671                 stats->n_missed += local_stats.n_missed;
672                 stats->n_lost += local_stats.n_lost;
673                 mega_stats->n_mask_hit += local_stats.n_mask_hit;
674         }
675 }
676
677 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
678 {
679         return NLMSG_ALIGN(sizeof(struct ovs_header))
680                 + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
681                 + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
682                 + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
683                 + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
684                 + nla_total_size(8) /* OVS_FLOW_ATTR_USED */
685                 + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
686 }
687
688 /* Called with ovs_mutex or RCU read lock. */
689 static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
690                                   struct sk_buff *skb, u32 portid,
691                                   u32 seq, u32 flags, u8 cmd)
692 {
693         const int skb_orig_len = skb->len;
694         struct nlattr *start;
695         struct ovs_flow_stats stats;
696         __be16 tcp_flags;
697         unsigned long used;
698         struct ovs_header *ovs_header;
699         struct nlattr *nla;
700         int err;
701
702         ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
703         if (!ovs_header)
704                 return -EMSGSIZE;
705
706         ovs_header->dp_ifindex = dp_ifindex;
707
708         /* Fill flow key. */
709         nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
710         if (!nla)
711                 goto nla_put_failure;
712
713         err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
714         if (err)
715                 goto error;
716         nla_nest_end(skb, nla);
717
718         nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
719         if (!nla)
720                 goto nla_put_failure;
721
722         err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
723         if (err)
724                 goto error;
725
726         nla_nest_end(skb, nla);
727
728         ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
729
730         if (used &&
731             nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
732                 goto nla_put_failure;
733
734         if (stats.n_packets &&
735             nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
736                 goto nla_put_failure;
737
738         if ((u8)ntohs(tcp_flags) &&
739              nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
740                 goto nla_put_failure;
741
742         /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
743          * this is the first flow to be dumped into 'skb'.  This is unusual for
744          * Netlink but individual action lists can be longer than
745          * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
746          * The userspace caller can always fetch the actions separately if it
747          * really wants them.  (Most userspace callers in fact don't care.)
748          *
749          * This can only fail for dump operations because the skb is always
750          * properly sized for single flows.
751          */
752         start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
753         if (start) {
754                 const struct sw_flow_actions *sf_acts;
755
756                 sf_acts = rcu_dereference_ovsl(flow->sf_acts);
757                 err = ovs_nla_put_actions(sf_acts->actions,
758                                           sf_acts->actions_len, skb);
759
760                 if (!err)
761                         nla_nest_end(skb, start);
762                 else {
763                         if (skb_orig_len)
764                                 goto error;
765
766                         nla_nest_cancel(skb, start);
767                 }
768         } else if (skb_orig_len)
769                 goto nla_put_failure;
770
771         return genlmsg_end(skb, ovs_header);
772
773 nla_put_failure:
774         err = -EMSGSIZE;
775 error:
776         genlmsg_cancel(skb, ovs_header);
777         return err;
778 }
779
780 /* May not be called with RCU read lock. */
781 static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
782                                                struct genl_info *info,
783                                                bool always)
784 {
785         struct sk_buff *skb;
786
787         if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group))
788                 return NULL;
789
790         skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
791
792         if (!skb)
793                 return ERR_PTR(-ENOMEM);
794
795         return skb;
796 }
797
798 /* Called with ovs_mutex. */
799 static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
800                                                int dp_ifindex,
801                                                struct genl_info *info, u8 cmd,
802                                                bool always)
803 {
804         struct sk_buff *skb;
805         int retval;
806
807         skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
808                                       always);
809         if (!skb || IS_ERR(skb))
810                 return skb;
811
812         retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
813                                         info->snd_portid, info->snd_seq, 0,
814                                         cmd);
815         BUG_ON(retval < 0);
816         return skb;
817 }
818
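/* OVS_FLOW_CMD_NEW handler: insert a new flow into the datapath's flow
 * table, or update the actions of an existing flow if the request permits
 * it, and notify listeners of the result. */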
819 static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
820 {
821         struct nlattr **a = info->attrs;
822         struct ovs_header *ovs_header = info->userhdr;
823         struct sw_flow *flow, *new_flow;
824         struct sw_flow_mask mask;
825         struct sk_buff *reply;
826         struct datapath *dp;
827         struct sw_flow_actions *acts;
828         struct sw_flow_match match;
829         int error;
830
831         /* Must have key and actions. */
832         error = -EINVAL;
833         if (!a[OVS_FLOW_ATTR_KEY])
834                 goto error;
835         if (!a[OVS_FLOW_ATTR_ACTIONS])
836                 goto error;
837
838         /* Most of the time we need to allocate a new flow, so do it before
839          * taking the lock. */
840         new_flow = ovs_flow_alloc();
841         if (IS_ERR(new_flow)) {
842                 error = PTR_ERR(new_flow);
843                 goto error;
844         }
845
846         /* Extract key. */
847         ovs_match_init(&match, &new_flow->unmasked_key, &mask);
848         error = ovs_nla_get_match(&match,
849                                   a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
850         if (error)
851                 goto err_kfree_flow;
852
853         ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);
854
855         /* Validate actions. */
856         acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
857         error = PTR_ERR(acts);
858         if (IS_ERR(acts))
859                 goto err_kfree_flow;
860
861         error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
862                                      0, &acts);
863         if (error) {
864                 OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
865                 goto err_kfree_acts;
866         }
867
868         reply = ovs_flow_cmd_alloc_info(acts, info, false);
869         if (IS_ERR(reply)) {
870                 error = PTR_ERR(reply);
871                 goto err_kfree_acts;
872         }
873
874         ovs_lock();
875         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
876         if (unlikely(!dp)) {
877                 error = -ENODEV;
878                 goto err_unlock_ovs;
879         }
880         /* Check if this is a duplicate flow */
881         flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->unmasked_key);
882         if (likely(!flow)) {
883                 rcu_assign_pointer(new_flow->sf_acts, acts);
884
885                 /* Put flow in bucket. */
886                 error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
887                 if (unlikely(error)) {
888                         acts = NULL;
889                         goto err_unlock_ovs;
890                 }
891
892                 if (unlikely(reply)) {
893                         error = ovs_flow_cmd_fill_info(new_flow,
894                                                        ovs_header->dp_ifindex,
895                                                        reply, info->snd_portid,
896                                                        info->snd_seq, 0,
897                                                        OVS_FLOW_CMD_NEW);
898                         BUG_ON(error < 0);
899                 }
900                 ovs_unlock();
901         } else {
902                 struct sw_flow_actions *old_acts;
903
904                 /* Bail out if we're not allowed to modify an existing flow.
905                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
906                  * because Generic Netlink treats the latter as a dump
907                  * request.  We also accept NLM_F_EXCL in case that bug ever
908                  * gets fixed.
909                  */
910                 if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
911                                                          | NLM_F_EXCL))) {
912                         error = -EEXIST;
913                         goto err_unlock_ovs;
914                 }
915                 /* The unmasked key has to be the same for flow updates. */
916                 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
917                         error = -EEXIST;
918                         goto err_unlock_ovs;
919                 }
920                 /* Update actions. */
921                 old_acts = ovsl_dereference(flow->sf_acts);
922                 rcu_assign_pointer(flow->sf_acts, acts);
923
924                 if (unlikely(reply)) {
925                         error = ovs_flow_cmd_fill_info(flow,
926                                                        ovs_header->dp_ifindex,
927                                                        reply, info->snd_portid,
928                                                        info->snd_seq, 0,
929                                                        OVS_FLOW_CMD_NEW);
930                         BUG_ON(error < 0);
931                 }
932                 ovs_unlock();
933
934                 ovs_nla_free_flow_actions(old_acts);
935                 ovs_flow_free(new_flow, false);
936         }
937
938         if (reply)
939                 ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
940         return 0;
941
942 err_unlock_ovs:
943         ovs_unlock();
944         kfree_skb(reply);
945 err_kfree_acts:
946         kfree(acts);
947 err_kfree_flow:
948         ovs_flow_free(new_flow, false);
949 error:
950         return error;
951 }
952
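/* OVS_FLOW_CMD_SET handler: replace the actions and/or clear the stats of an
 * existing flow identified by its unmasked key. */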
953 static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
954 {
955         struct nlattr **a = info->attrs;
956         struct ovs_header *ovs_header = info->userhdr;
957         struct sw_flow_key key, masked_key;
958         struct sw_flow *flow;
959         struct sw_flow_mask mask;
960         struct sk_buff *reply = NULL;
961         struct datapath *dp;
962         struct sw_flow_actions *old_acts = NULL, *acts = NULL;
963         struct sw_flow_match match;
964         int error;
965
966         /* Extract key. */
967         error = -EINVAL;
968         if (!a[OVS_FLOW_ATTR_KEY])
969                 goto error;
970
971         ovs_match_init(&match, &key, &mask);
972         error = ovs_nla_get_match(&match,
973                                   a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
974         if (error)
975                 goto error;
976
977         /* Validate actions. */
978         if (a[OVS_FLOW_ATTR_ACTIONS]) {
979                 acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
980                 error = PTR_ERR(acts);
981                 if (IS_ERR(acts))
982                         goto error;
983
984                 ovs_flow_mask_key(&masked_key, &key, &mask);
985                 error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
986                                              &masked_key, 0, &acts);
987                 if (error) {
988                         OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
989                         goto err_kfree_acts;
990                 }
991         }
992
993         /* If we have actions, the reply can be allocated before taking the lock. */
994         if (acts) {
995                 reply = ovs_flow_cmd_alloc_info(acts, info, false);
996                 if (IS_ERR(reply)) {
997                         error = PTR_ERR(reply);
998                         goto err_kfree_acts;
999                 }
1000         }
1001
1002         ovs_lock();
1003         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1004         if (unlikely(!dp)) {
1005                 error = -ENODEV;
1006                 goto err_unlock_ovs;
1007         }
1008         /* Check that the flow exists. */
1009         flow = ovs_flow_tbl_lookup(&dp->table, &key);
1010         if (unlikely(!flow)) {
1011                 error = -ENOENT;
1012                 goto err_unlock_ovs;
1013         }
1014         /* The unmasked key has to be the same for flow updates. */
1015         if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
1016                 error = -EEXIST;
1017                 goto err_unlock_ovs;
1018         }
1019         /* Update actions, if present. */
1020         if (likely(acts)) {
1021                 old_acts = ovsl_dereference(flow->sf_acts);
1022                 rcu_assign_pointer(flow->sf_acts, acts);
1023
1024                 if (unlikely(reply)) {
1025                         error = ovs_flow_cmd_fill_info(flow,
1026                                                        ovs_header->dp_ifindex,
1027                                                        reply, info->snd_portid,
1028                                                        info->snd_seq, 0,
1029                                                        OVS_FLOW_CMD_NEW);
1030                         BUG_ON(error < 0);
1031                 }
1032         } else {
1033                 /* Without actions, the reply could not be allocated before locking. */
1034                 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
1035                                                 info, OVS_FLOW_CMD_NEW, false);
1036                 if (unlikely(IS_ERR(reply))) {
1037                         error = PTR_ERR(reply);
1038                         goto err_unlock_ovs;
1039                 }
1040         }
1041
1042         /* Clear stats. */
1043         if (a[OVS_FLOW_ATTR_CLEAR])
1044                 ovs_flow_stats_clear(flow);
1045         ovs_unlock();
1046
1047         if (reply)
1048                 ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
1049         if (old_acts)
1050                 ovs_nla_free_flow_actions(old_acts);
1051         return 0;
1052
1053 err_unlock_ovs:
1054         ovs_unlock();
1055         kfree_skb(reply);
1056 err_kfree_acts:
1057         kfree(acts);
1058 error:
1059         return error;
1060 }
1061
1062 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1063 {
1064         struct nlattr **a = info->attrs;
1065         struct ovs_header *ovs_header = info->userhdr;
1066         struct sw_flow_key key;
1067         struct sk_buff *reply;
1068         struct sw_flow *flow;
1069         struct datapath *dp;
1070         struct sw_flow_match match;
1071         int err;
1072
1073         if (!a[OVS_FLOW_ATTR_KEY]) {
1074                 OVS_NLERR("Flow get message rejected, Key attribute missing.\n");
1075                 return -EINVAL;
1076         }
1077
1078         ovs_match_init(&match, &key, NULL);
1079         err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
1080         if (err)
1081                 return err;
1082
1083         ovs_lock();
1084         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1085         if (!dp) {
1086                 err = -ENODEV;
1087                 goto unlock;
1088         }
1089
1090         flow = ovs_flow_tbl_lookup(&dp->table, &key);
1091         if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
1092                 err = -ENOENT;
1093                 goto unlock;
1094         }
1095
1096         reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
1097                                         OVS_FLOW_CMD_NEW, true);
1098         if (IS_ERR(reply)) {
1099                 err = PTR_ERR(reply);
1100                 goto unlock;
1101         }
1102
1103         ovs_unlock();
1104         return genlmsg_reply(reply, info);
1105 unlock:
1106         ovs_unlock();
1107         return err;
1108 }
1109
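/* OVS_FLOW_CMD_DEL handler: remove the flow matching the given key, or flush
 * the entire flow table if no key is supplied. */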
1110 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1111 {
1112         struct nlattr **a = info->attrs;
1113         struct ovs_header *ovs_header = info->userhdr;
1114         struct sw_flow_key key;
1115         struct sk_buff *reply;
1116         struct sw_flow *flow;
1117         struct datapath *dp;
1118         struct sw_flow_match match;
1119         int err;
1120
1121         if (likely(a[OVS_FLOW_ATTR_KEY])) {
1122                 ovs_match_init(&match, &key, NULL);
1123                 err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
1124                 if (unlikely(err))
1125                         return err;
1126         }
1127
1128         ovs_lock();
1129         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1130         if (unlikely(!dp)) {
1131                 err = -ENODEV;
1132                 goto unlock;
1133         }
1134         if (unlikely(!a[OVS_FLOW_ATTR_KEY])) {
1135                 err = ovs_flow_tbl_flush(&dp->table);
1136                 goto unlock;
1137         }
1138         flow = ovs_flow_tbl_lookup(&dp->table, &key);
1139         if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) {
1140                 err = -ENOENT;
1141                 goto unlock;
1142         }
1143
1144         ovs_flow_tbl_remove(&dp->table, flow);
1145         ovs_unlock();
1146
1147         reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *)flow->sf_acts,
1148                                         info, false);
1149
1150         if (likely(reply)) {
1151                 if (likely(!IS_ERR(reply))) {
1152                         rcu_read_lock(); /* Keep RCU checker happy. */
1153                         err = ovs_flow_cmd_fill_info(flow,
1154                                                      ovs_header->dp_ifindex,
1155                                                      reply, info->snd_portid,
1156                                                      info->snd_seq, 0,
1157                                                      OVS_FLOW_CMD_DEL);
1158                         rcu_read_unlock();
1159                         BUG_ON(err < 0);
1160                         ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
1161                 } else {
1162                         genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0,
1163                                      GROUP_ID(&ovs_dp_flow_multicast_group), PTR_ERR(reply));
1164
1165                 }
1166         }
1167
1168         ovs_flow_free(flow, true);
1169         return 0;
1170 unlock:
1171         ovs_unlock();
1172         return err;
1173 }
1174
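/* Netlink dump callback: emit one OVS_FLOW_CMD_NEW message per flow,
 * resuming from the table position saved in cb->args[]. */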
1175 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1176 {
1177         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1178         struct table_instance *ti;
1179         struct datapath *dp;
1180
1181         rcu_read_lock();
1182         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1183         if (!dp) {
1184                 rcu_read_unlock();
1185                 return -ENODEV;
1186         }
1187
1188         ti = rcu_dereference(dp->table.ti);
1189         for (;;) {
1190                 struct sw_flow *flow;
1191                 u32 bucket, obj;
1192
1193                 bucket = cb->args[0];
1194                 obj = cb->args[1];
1195                 flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
1196                 if (!flow)
1197                         break;
1198
1199                 if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
1200                                            NETLINK_CB(cb->skb).portid,
1201                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1202                                            OVS_FLOW_CMD_NEW) < 0)
1203                         break;
1204
1205                 cb->args[0] = bucket;
1206                 cb->args[1] = obj;
1207         }
1208         rcu_read_unlock();
1209         return skb->len;
1210 }
1211
1212 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1213         [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
1214         [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1215         [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
1216 };
1217
1218 static struct genl_ops dp_flow_genl_ops[] = {
1219         { .cmd = OVS_FLOW_CMD_NEW,
1220           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1221           .policy = flow_policy,
1222           .doit = ovs_flow_cmd_new
1223         },
1224         { .cmd = OVS_FLOW_CMD_DEL,
1225           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1226           .policy = flow_policy,
1227           .doit = ovs_flow_cmd_del
1228         },
1229         { .cmd = OVS_FLOW_CMD_GET,
1230           .flags = 0,               /* OK for unprivileged users. */
1231           .policy = flow_policy,
1232           .doit = ovs_flow_cmd_get,
1233           .dumpit = ovs_flow_cmd_dump
1234         },
1235         { .cmd = OVS_FLOW_CMD_SET,
1236           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1237           .policy = flow_policy,
1238           .doit = ovs_flow_cmd_set,
1239         },
1240 };
1241
1242 static struct genl_family dp_flow_genl_family = {
1243         .id = GENL_ID_GENERATE,
1244         .hdrsize = sizeof(struct ovs_header),
1245         .name = OVS_FLOW_FAMILY,
1246         .version = OVS_FLOW_VERSION,
1247         .maxattr = OVS_FLOW_ATTR_MAX,
1248         .netnsok = true,
1249         .parallel_ops = true,
1250         .ops = dp_flow_genl_ops,
1251         .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
1252         .mcgrps = &ovs_dp_flow_multicast_group,
1253         .n_mcgrps = 1,
1254 };
1255
1256 static size_t ovs_dp_cmd_msg_size(void)
1257 {
1258         size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1259
1260         msgsize += nla_total_size(IFNAMSIZ);
1261         msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
1262         msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
1263         msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
1264
1265         return msgsize;
1266 }
1267
1268 /* Called with ovs_mutex or RCU read lock. */
1269 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1270                                 u32 portid, u32 seq, u32 flags, u8 cmd)
1271 {
1272         struct ovs_header *ovs_header;
1273         struct ovs_dp_stats dp_stats;
1274         struct ovs_dp_megaflow_stats dp_megaflow_stats;
1275         int err;
1276
1277         ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1278                                    flags, cmd);
1279         if (!ovs_header)
1280                 goto error;
1281
1282         ovs_header->dp_ifindex = get_dpifindex(dp);
1283
1284         err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1285         if (err)
1286                 goto nla_put_failure;
1287
1288         get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
1289         if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1290                         &dp_stats))
1291                 goto nla_put_failure;
1292
1293         if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1294                         sizeof(struct ovs_dp_megaflow_stats),
1295                         &dp_megaflow_stats))
1296                 goto nla_put_failure;
1297
1298         if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1299                 goto nla_put_failure;
1300
1301         return genlmsg_end(skb, ovs_header);
1302
1303 nla_put_failure:
1304         genlmsg_cancel(skb, ovs_header);
1305 error:
1306         return -EMSGSIZE;
1307 }
1308
1309 static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info)
1310 {
1311         return genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
1312 }
1313
1314 /* Called with rcu_read_lock or ovs_mutex. */
1315 static struct datapath *lookup_datapath(struct net *net,
1316                                         struct ovs_header *ovs_header,
1317                                         struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1318 {
1319         struct datapath *dp;
1320
1321         if (!a[OVS_DP_ATTR_NAME])
1322                 dp = get_dp(net, ovs_header->dp_ifindex);
1323         else {
1324                 struct vport *vport;
1325
1326                 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1327                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1328         }
1329         return dp ? dp : ERR_PTR(-ENODEV);
1330 }
1331
1332 static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
1333 {
1334         struct datapath *dp;
1335
1336         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1337         if (IS_ERR(dp))
1338                 return;
1339
1340         WARN(dp->user_features, "Dropping previously announced user features\n");
1341         dp->user_features = 0;
1342 }
1343
1344 static void ovs_dp_change(struct datapath *dp, struct nlattr **a)
1345 {
1346         if (a[OVS_DP_ATTR_USER_FEATURES])
1347                 dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1348 }
1349
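/* OVS_DP_CMD_NEW handler: allocate a datapath, its flow table, per-cpu stats
 * and port hash table, create the local internal vport and announce the new
 * datapath to listeners. */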
1350 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1351 {
1352         struct nlattr **a = info->attrs;
1353         struct vport_parms parms;
1354         struct sk_buff *reply;
1355         struct datapath *dp;
1356         struct vport *vport;
1357         struct ovs_net *ovs_net;
1358         int err, i;
1359
1360         err = -EINVAL;
1361         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1362                 goto err;
1363
1364         reply = ovs_dp_cmd_alloc_info(info);
1365         if (!reply)
1366                 return -ENOMEM;
1367
1368         err = -ENOMEM;
1369         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1370         if (dp == NULL)
1371                 goto err_free_reply;
1372
1373         ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
1374
1375         /* Allocate table. */
1376         err = ovs_flow_tbl_init(&dp->table);
1377         if (err)
1378                 goto err_free_dp;
1379
1380         dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1381         if (!dp->stats_percpu) {
1382                 err = -ENOMEM;
1383                 goto err_destroy_table;
1384         }
1385
1386         for_each_possible_cpu(i) {
1387                 struct dp_stats_percpu *dpath_stats;
1388                 dpath_stats = per_cpu_ptr(dp->stats_percpu, i);
1389                 u64_stats_init(&dpath_stats->sync);
1390         }
1391
1392         dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1393                             GFP_KERNEL);
1394         if (!dp->ports) {
1395                 err = -ENOMEM;
1396                 goto err_destroy_percpu;
1397         }
1398
1399         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1400                 INIT_HLIST_HEAD(&dp->ports[i]);
1401
1402         /* Set up our datapath device. */
1403         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1404         parms.type = OVS_VPORT_TYPE_INTERNAL;
1405         parms.options = NULL;
1406         parms.dp = dp;
1407         parms.port_no = OVSP_LOCAL;
1408         parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
1409
1410         ovs_dp_change(dp, a);
1411
1412         /* So far only local changes have been made; now we need the lock. */
1413         ovs_lock();
1414
1415         vport = new_vport(&parms);
1416         if (IS_ERR(vport)) {
1417                 err = PTR_ERR(vport);
1418                 if (err == -EBUSY)
1419                         err = -EEXIST;
1420
1421                 if (err == -EEXIST) {
1422                         /* An outdated user space instance that does not understand
1423                          * the concept of user_features has attempted to create a new
1424                          * datapath and is likely to reuse it. Drop all user features.
1425                          */
1426                         if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1427                                 ovs_dp_reset_user_features(skb, info);
1428                 }
1429
1430                 goto err_destroy_ports_array;
1431         }
1432
1433         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1434                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1435         BUG_ON(err < 0);
1436
1437         ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1438         list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1439
1440         ovs_unlock();
1441
1442         ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
1443         return 0;
1444
1445 err_destroy_ports_array:
1446         ovs_unlock();
1447         kfree(dp->ports);
1448 err_destroy_percpu:
1449         free_percpu(dp->stats_percpu);
1450 err_destroy_table:
1451         ovs_flow_tbl_destroy(&dp->table);
1452 err_free_dp:
1453         release_net(ovs_dp_get_net(dp));
1454         kfree(dp);
1455 err_free_reply:
1456         kfree_skb(reply);
1457 err:
1458         return err;
1459 }
1460
1461 /* Called with ovs_mutex. */
1462 static void __dp_destroy(struct datapath *dp)
1463 {
1464         int i;
1465
1466         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1467                 struct vport *vport;
1468                 struct hlist_node *n;
1469
1470                 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1471                         if (vport->port_no != OVSP_LOCAL)
1472                                 ovs_dp_detach_port(vport);
1473         }
1474
1475         list_del_rcu(&dp->list_node);
1476
1477         /* OVSP_LOCAL is the datapath's internal port.  All ports in the
1478          * datapath must be destroyed before the datapath itself is freed.
1479          */
1480         ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1481
1482         /* Free the datapath, flow table included, after an RCU grace period. */
1483         call_rcu(&dp->rcu, destroy_dp_rcu);
1484 }
1485
1486 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1487 {
1488         struct sk_buff *reply;
1489         struct datapath *dp;
1490         int err;
1491
1492         reply = ovs_dp_cmd_alloc_info(info);
1493         if (!reply)
1494                 return -ENOMEM;
1495
1496         ovs_lock();
1497         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1498         err = PTR_ERR(dp);
1499         if (IS_ERR(dp))
1500                 goto err_unlock_free;
1501
1502         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1503                                    info->snd_seq, 0, OVS_DP_CMD_DEL);
1504         BUG_ON(err < 0);
1505
1506         __dp_destroy(dp);
1507
1508         ovs_unlock();
1509         ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
1510         return 0;
1511
1512 err_unlock_free:
1513         ovs_unlock();
1514         kfree_skb(reply);
1515         return err;
1516 }
1517
1518 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1519 {
1520         struct sk_buff *reply;
1521         struct datapath *dp;
1522         int err;
1523
1524         reply = ovs_dp_cmd_alloc_info(info);
1525         if (!reply)
1526                 return -ENOMEM;
1527
1528         ovs_lock();
1529         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1530         err = PTR_ERR(dp);
1531         if (IS_ERR(dp))
1532                 goto err_unlock_free;
1533
1534         ovs_dp_change(dp, info->attrs);
1535
1536         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1537                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1538         BUG_ON(err < 0);
1539
1540         ovs_unlock();
1541         ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
1542         return 0;
1543
1544 err_unlock_free:
1545         ovs_unlock();
1546         kfree_skb(reply);
1547         return err;
1548 }
1549
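/* Handler for OVS_DP_CMD_GET: read-only, so it only takes the RCU read lock,
 * and the filled reply is unicast back to the requester.
 */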
1550 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1551 {
1552         struct sk_buff *reply;
1553         struct datapath *dp;
1554         int err;
1555
1556         reply = ovs_dp_cmd_alloc_info(info);
1557         if (!reply)
1558                 return -ENOMEM;
1559
1560         rcu_read_lock();
1561         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1562         if (IS_ERR(dp)) {
1563                 err = PTR_ERR(dp);
1564                 goto err_unlock_free;
1565         }
1566         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1567                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1568         BUG_ON(err < 0);
1569         rcu_read_unlock();
1570
1571         return genlmsg_reply(reply, info);
1572
1573 err_unlock_free:
1574         rcu_read_unlock();
1575         kfree_skb(reply);
1576         return err;
1577 }
1578
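/* Dump callback for OVS_DP_CMD_GET: walks the per-namespace datapath list
 * under RCU, using cb->args[0] to resume where the previous dump call stopped.
 */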
1579 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1580 {
1581         struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1582         struct datapath *dp;
1583         int skip = cb->args[0];
1584         int i = 0;
1585
1586         rcu_read_lock();
1587         list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
1588                 if (i >= skip &&
1589                     ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1590                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1591                                          OVS_DP_CMD_NEW) < 0)
1592                         break;
1593                 i++;
1594         }
1595         rcu_read_unlock();
1596
1597         cb->args[0] = i;
1598
1599         return skb->len;
1600 }
1601
1602 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1603         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1604         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1605         [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1606 };
1607
1608 static struct genl_ops dp_datapath_genl_ops[] = {
1609         { .cmd = OVS_DP_CMD_NEW,
1610           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1611           .policy = datapath_policy,
1612           .doit = ovs_dp_cmd_new
1613         },
1614         { .cmd = OVS_DP_CMD_DEL,
1615           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1616           .policy = datapath_policy,
1617           .doit = ovs_dp_cmd_del
1618         },
1619         { .cmd = OVS_DP_CMD_GET,
1620           .flags = 0,               /* OK for unprivileged users. */
1621           .policy = datapath_policy,
1622           .doit = ovs_dp_cmd_get,
1623           .dumpit = ovs_dp_cmd_dump
1624         },
1625         { .cmd = OVS_DP_CMD_SET,
1626           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1627           .policy = datapath_policy,
1628           .doit = ovs_dp_cmd_set,
1629         },
1630 };
1631
1632 static struct genl_family dp_datapath_genl_family = {
1633         .id = GENL_ID_GENERATE,
1634         .hdrsize = sizeof(struct ovs_header),
1635         .name = OVS_DATAPATH_FAMILY,
1636         .version = OVS_DATAPATH_VERSION,
1637         .maxattr = OVS_DP_ATTR_MAX,
1638         .netnsok = true,
1639         .parallel_ops = true,
1640         .ops = dp_datapath_genl_ops,
1641         .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1642         .mcgrps = &ovs_dp_datapath_multicast_group,
1643         .n_mcgrps = 1,
1644 };
1645
1646 /* Called with ovs_mutex or RCU read lock. */
1647 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1648                                    u32 portid, u32 seq, u32 flags, u8 cmd)
1649 {
1650         struct ovs_header *ovs_header;
1651         struct ovs_vport_stats vport_stats;
1652         int err;
1653
1654         ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1655                                  flags, cmd);
1656         if (!ovs_header)
1657                 return -EMSGSIZE;
1658
1659         ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1660
1661         if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1662             nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1663             nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)))
1664                 goto nla_put_failure;
1665
1666         ovs_vport_get_stats(vport, &vport_stats);
1667         if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1668                     &vport_stats))
1669                 goto nla_put_failure;
1670
1671         if (ovs_vport_get_upcall_portids(vport, skb))
1672                 goto nla_put_failure;
1673
1674         err = ovs_vport_get_options(vport, skb);
1675         if (err == -EMSGSIZE)
1676                 goto error;
1677
1678         return genlmsg_end(skb, ovs_header);
1679
1680 nla_put_failure:
1681         err = -EMSGSIZE;
1682 error:
1683         genlmsg_cancel(skb, ovs_header);
1684         return err;
1685 }
1686
1687 static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1688 {
1689         return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1690 }
1691
1692 /* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
1693 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
1694                                          u32 seq, u8 cmd)
1695 {
1696         struct sk_buff *skb;
1697         int retval;
1698
1699         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1700         if (!skb)
1701                 return ERR_PTR(-ENOMEM);
1702
1703         retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
1704         BUG_ON(retval < 0);
1705
1706         return skb;
1707 }
1708
1709 /* Called with ovs_mutex or RCU read lock. */
1710 static struct vport *lookup_vport(struct net *net,
1711                                   struct ovs_header *ovs_header,
1712                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1713 {
1714         struct datapath *dp;
1715         struct vport *vport;
1716
1717         if (a[OVS_VPORT_ATTR_NAME]) {
1718                 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1719                 if (!vport)
1720                         return ERR_PTR(-ENODEV);
1721                 if (ovs_header->dp_ifindex &&
1722                     ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1723                         return ERR_PTR(-ENODEV);
1724                 return vport;
1725         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1726                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1727
1728                 if (port_no >= DP_MAX_PORTS)
1729                         return ERR_PTR(-EFBIG);
1730
1731                 dp = get_dp(net, ovs_header->dp_ifindex);
1732                 if (!dp)
1733                         return ERR_PTR(-ENODEV);
1734
1735                 vport = ovs_vport_ovsl_rcu(dp, port_no);
1736                 if (!vport)
1737                         return ERR_PTR(-ENODEV);
1738                 return vport;
1739         } else
1740                 return ERR_PTR(-EINVAL);
1741 }
1742
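/* Handler for OVS_VPORT_CMD_NEW: requires name, type, and upcall PID
 * attributes, allocates the lowest free port number when none is requested,
 * and creates the vport while holding ovs_mutex.
 */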
1743 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1744 {
1745         struct nlattr **a = info->attrs;
1746         struct ovs_header *ovs_header = info->userhdr;
1747         struct vport_parms parms;
1748         struct sk_buff *reply;
1749         struct vport *vport;
1750         struct datapath *dp;
1751         u32 port_no;
1752         int err;
1753
1754         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1755             !a[OVS_VPORT_ATTR_UPCALL_PID])
1756                 return -EINVAL;
1757
1758         port_no = a[OVS_VPORT_ATTR_PORT_NO]
1759                 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
1760         if (port_no >= DP_MAX_PORTS)
1761                 return -EFBIG;
1762
1763         reply = ovs_vport_cmd_alloc_info();
1764         if (!reply)
1765                 return -ENOMEM;
1766
1767         ovs_lock();
1768         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1769         err = -ENODEV;
1770         if (!dp)
1771                 goto exit_unlock_free;
1772
1773         if (port_no) {
1774                 vport = ovs_vport_ovsl(dp, port_no);
1775                 err = -EBUSY;
1776                 if (vport)
1777                         goto exit_unlock_free;
1778         } else {
1779                 for (port_no = 1; ; port_no++) {
1780                         if (port_no >= DP_MAX_PORTS) {
1781                                 err = -EFBIG;
1782                                 goto exit_unlock_free;
1783                         }
1784                         vport = ovs_vport_ovsl(dp, port_no);
1785                         if (!vport)
1786                                 break;
1787                 }
1788         }
1789
1790         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1791         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1792         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1793         parms.dp = dp;
1794         parms.port_no = port_no;
1795         parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
1796
1797         vport = new_vport(&parms);
1798         err = PTR_ERR(vport);
1799         if (IS_ERR(vport))
1800                 goto exit_unlock_free;
1801
1802         err = 0;
1803         if (a[OVS_VPORT_ATTR_STATS])
1804                 ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
1805
1806         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1807                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1808         BUG_ON(err < 0);
1809         ovs_unlock();
1810
1811         ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
1812         return 0;
1813
1814 exit_unlock_free:
1815         ovs_unlock();
1816         kfree_skb(reply);
1817         return err;
1818 }
1819
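/* Handler for OVS_VPORT_CMD_SET: updates an existing vport's options, stats,
 * and upcall port IDs under ovs_mutex; changing the vport type is rejected
 * with -EINVAL.
 */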
1820 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1821 {
1822         struct nlattr **a = info->attrs;
1823         struct sk_buff *reply;
1824         struct vport *vport;
1825         int err;
1826
1827         reply = ovs_vport_cmd_alloc_info();
1828         if (!reply)
1829                 return -ENOMEM;
1830
1831         ovs_lock();
1832         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1833         err = PTR_ERR(vport);
1834         if (IS_ERR(vport))
1835                 goto exit_unlock_free;
1836
1837         if (a[OVS_VPORT_ATTR_TYPE] &&
1838             nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
1839                 err = -EINVAL;
1840                 goto exit_unlock_free;
1841         }
1842
1843         if (a[OVS_VPORT_ATTR_OPTIONS]) {
1844                 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
1845                 if (err)
1846                         goto exit_unlock_free;
1847         }
1848
1849         if (a[OVS_VPORT_ATTR_STATS])
1850                 ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
1851
1852
1853         if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
1854                 err = ovs_vport_set_upcall_portids(vport,
1855                                                    a[OVS_VPORT_ATTR_UPCALL_PID]);
1856                 if (err)
1857                         goto exit_unlock_free;
1858         }
1859
1860         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1861                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1862         BUG_ON(err < 0);
1863         ovs_unlock();
1864
1865         ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
1866         return 0;
1867
1868 exit_unlock_free:
1869         ovs_unlock();
1870         kfree_skb(reply);
1871         return err;
1872 }
1873
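/* Handler for OVS_VPORT_CMD_DEL: detaches a vport from its datapath.  The
 * internal port (OVSP_LOCAL) cannot be deleted on its own; it goes away only
 * when the datapath itself is destroyed.
 */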
1874 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1875 {
1876         struct nlattr **a = info->attrs;
1877         struct sk_buff *reply;
1878         struct vport *vport;
1879         int err;
1880
1881         reply = ovs_vport_cmd_alloc_info();
1882         if (!reply)
1883                 return -ENOMEM;
1884
1885         ovs_lock();
1886         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1887         err = PTR_ERR(vport);
1888         if (IS_ERR(vport))
1889                 goto exit_unlock_free;
1890
1891         if (vport->port_no == OVSP_LOCAL) {
1892                 err = -EINVAL;
1893                 goto exit_unlock_free;
1894         }
1895
1896         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1897                                       info->snd_seq, 0, OVS_VPORT_CMD_DEL);
1898         BUG_ON(err < 0);
1899         ovs_dp_detach_port(vport);
1900         ovs_unlock();
1901
1902         ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
1903         return 0;
1904
1905 exit_unlock_free:
1906         ovs_unlock();
1907         kfree_skb(reply);
1908         return err;
1909 }
1910
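/* Handler for OVS_VPORT_CMD_GET: read-only vport lookup under the RCU read
 * lock, replying with the vport's configuration and statistics.
 */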
1911 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
1912 {
1913         struct nlattr **a = info->attrs;
1914         struct ovs_header *ovs_header = info->userhdr;
1915         struct sk_buff *reply;
1916         struct vport *vport;
1917         int err;
1918
1919         reply = ovs_vport_cmd_alloc_info();
1920         if (!reply)
1921                 return -ENOMEM;
1922
1923         rcu_read_lock();
1924         vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
1925         err = PTR_ERR(vport);
1926         if (IS_ERR(vport))
1927                 goto exit_unlock_free;
1928         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1929                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1930         BUG_ON(err < 0);
1931         rcu_read_unlock();
1932
1933         return genlmsg_reply(reply, info);
1934
1935 exit_unlock_free:
1936         rcu_read_unlock();
1937         kfree_skb(reply);
1938         return err;
1939 }
1940
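/* Dump callback for OVS_VPORT_CMD_GET: iterates the datapath's port hash
 * table under RCU, saving the current bucket and in-bucket offset in
 * cb->args[] so a later dump call can resume from the same position.
 */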
1941 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1942 {
1943         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1944         struct datapath *dp;
1945         int bucket = cb->args[0], skip = cb->args[1];
1946         int i, j = 0;
1947
1948         rcu_read_lock();
1949         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1950         if (!dp) {
1951                 rcu_read_unlock();
1952                 return -ENODEV;
1953         }
1954         for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
1955                 struct vport *vport;
1956
1957                 j = 0;
1958                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
1959                         if (j >= skip &&
1960                             ovs_vport_cmd_fill_info(vport, skb,
1961                                                     NETLINK_CB(cb->skb).portid,
1962                                                     cb->nlh->nlmsg_seq,
1963                                                     NLM_F_MULTI,
1964                                                     OVS_VPORT_CMD_NEW) < 0)
1965                                 goto out;
1966
1967                         j++;
1968                 }
1969                 skip = 0;
1970         }
1971 out:
1972         rcu_read_unlock();
1973
1974         cb->args[0] = i;
1975         cb->args[1] = j;
1976
1977         return skb->len;
1978 }
1979
1980 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
1981         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1982         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
1983         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1984         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1985         [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1986         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1987 };
1988
1989 static struct genl_ops dp_vport_genl_ops[] = {
1990         { .cmd = OVS_VPORT_CMD_NEW,
1991           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1992           .policy = vport_policy,
1993           .doit = ovs_vport_cmd_new
1994         },
1995         { .cmd = OVS_VPORT_CMD_DEL,
1996           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1997           .policy = vport_policy,
1998           .doit = ovs_vport_cmd_del
1999         },
2000         { .cmd = OVS_VPORT_CMD_GET,
2001           .flags = 0,               /* OK for unprivileged users. */
2002           .policy = vport_policy,
2003           .doit = ovs_vport_cmd_get,
2004           .dumpit = ovs_vport_cmd_dump
2005         },
2006         { .cmd = OVS_VPORT_CMD_SET,
2007           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2008           .policy = vport_policy,
2009           .doit = ovs_vport_cmd_set,
2010         },
2011 };
2012
2013 struct genl_family dp_vport_genl_family = {
2014         .id = GENL_ID_GENERATE,
2015         .hdrsize = sizeof(struct ovs_header),
2016         .name = OVS_VPORT_FAMILY,
2017         .version = OVS_VPORT_VERSION,
2018         .maxattr = OVS_VPORT_ATTR_MAX,
2019         .netnsok = true,
2020         .parallel_ops = true,
2021         .ops = dp_vport_genl_ops,
2022         .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
2023         .mcgrps = &ovs_dp_vport_multicast_group,
2024         .n_mcgrps = 1,
2025 };
2026
2027 static struct genl_family *dp_genl_families[] = {
2028         &dp_datapath_genl_family,
2029         &dp_vport_genl_family,
2030         &dp_flow_genl_family,
2031         &dp_packet_genl_family,
2032 };
2033
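/* Unregister the first n_families entries of dp_genl_families[]; used on
 * module unload and to unwind a partially failed dp_register_genl().
 */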
2034 static void dp_unregister_genl(int n_families)
2035 {
2036         int i;
2037
2038         for (i = 0; i < n_families; i++)
2039                 genl_unregister_family(dp_genl_families[i]);
2040 }
2041
2042 static int dp_register_genl(void)
2043 {
2044         int err;
2045         int i;
2046
2047         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2048
2049                 err = genl_register_family(dp_genl_families[i]);
2050                 if (err)
2051                         goto error;
2052         }
2053
2054         return 0;
2055
2056 error:
2057         dp_unregister_genl(i);
2058         return err;
2059 }
2060
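/* Per-network-namespace init: prepare the namespace's datapath list and the
 * work item that ovs_dp_notify_wq() runs on behalf of the netdevice notifier.
 */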
2061 static int __net_init ovs_init_net(struct net *net)
2062 {
2063         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2064
2065         INIT_LIST_HEAD(&ovs_net->dps);
2066         INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2067         return 0;
2068 }
2069
2070 static void __net_exit ovs_exit_net(struct net *net)
2071 {
2072         struct datapath *dp, *dp_next;
2073         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2074
2075         ovs_lock();
2076         list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2077                 __dp_destroy(dp);
2078         ovs_unlock();
2079
2080         cancel_work_sync(&ovs_net->dp_notify_work);
2081 }
2082
2083 static struct pernet_operations ovs_net_ops = {
2084         .init = ovs_init_net,
2085         .exit = ovs_exit_net,
2086         .id   = &ovs_net_id,
2087         .size = sizeof(struct ovs_net),
2088 };
2089
2090 DEFINE_COMPAT_PNET_REG_FUNC(device);
2091
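/* Module init.  The flow and vport subsystems must be ready before the pernet
 * ops and netdevice notifier are registered, and the generic netlink families
 * are registered last so no request can arrive before everything else is set
 * up; the error labels unwind in reverse order.
 */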
2092 static int __init dp_init(void)
2093 {
2094         int err;
2095
2096         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
2097
2098         pr_info("Open vSwitch switching datapath %s\n",
2099                 VERSION);
2100
2101         err = ovs_flow_init();
2102         if (err)
2103                 goto error;
2104
2105         err = ovs_vport_init();
2106         if (err)
2107                 goto error_flow_exit;
2108
2109         err = register_pernet_device(&ovs_net_ops);
2110         if (err)
2111                 goto error_vport_exit;
2112
2113         err = register_netdevice_notifier(&ovs_dp_device_notifier);
2114         if (err)
2115                 goto error_netns_exit;
2116
2117         err = dp_register_genl();
2118         if (err < 0)
2119                 goto error_unreg_notifier;
2120
2121         return 0;
2122
2123 error_unreg_notifier:
2124         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2125 error_netns_exit:
2126         unregister_pernet_device(&ovs_net_ops);
2127 error_vport_exit:
2128         ovs_vport_exit();
2129 error_flow_exit:
2130         ovs_flow_exit();
2131 error:
2132         return err;
2133 }
2134
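/* Module exit: undo dp_init() in reverse order.  The rcu_barrier() waits for
 * pending RCU callbacks (e.g. destroy_dp_rcu()) to finish before the vport
 * and flow subsystems are torn down.
 */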
2135 static void dp_cleanup(void)
2136 {
2137         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2138         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2139         unregister_pernet_device(&ovs_net_ops);
2140         rcu_barrier();
2141         ovs_vport_exit();
2142         ovs_flow_exit();
2143 }
2144
2145 module_init(dp_init);
2146 module_exit(dp_cleanup);
2147
2148 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2149 MODULE_LICENSE("GPL");
2150 MODULE_VERSION(VERSION);