datapath: Minimize ovs_flow_cmd_new|set critical sections.
1 /*
2  * Copyright (c) 2007-2013 Nicira, Inc.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_vlan.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/jhash.h>
28 #include <linux/delay.h>
29 #include <linux/time.h>
30 #include <linux/etherdevice.h>
31 #include <linux/genetlink.h>
32 #include <linux/kernel.h>
33 #include <linux/kthread.h>
34 #include <linux/mutex.h>
35 #include <linux/percpu.h>
36 #include <linux/rcupdate.h>
37 #include <linux/tcp.h>
38 #include <linux/udp.h>
39 #include <linux/version.h>
40 #include <linux/ethtool.h>
41 #include <linux/wait.h>
42 #include <asm/div64.h>
43 #include <linux/highmem.h>
44 #include <linux/netfilter_bridge.h>
45 #include <linux/netfilter_ipv4.h>
46 #include <linux/inetdevice.h>
47 #include <linux/list.h>
48 #include <linux/openvswitch.h>
49 #include <linux/rculist.h>
50 #include <linux/dmi.h>
52 #include <net/genetlink.h>
54 #include <net/net_namespace.h>
55 #include <net/netns/generic.h>
56
57 #include "datapath.h"
58 #include "flow.h"
59 #include "flow_table.h"
60 #include "flow_netlink.h"
61 #include "vlan.h"
62 #include "vport-internal_dev.h"
63 #include "vport-netdev.h"
64
65 int ovs_net_id __read_mostly;
66
67 /* Check whether we need to build a reply message.
68  * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
69 static bool ovs_must_notify(struct genl_info *info,
70                             const struct genl_multicast_group *grp)
71 {
72         return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
73                 netlink_has_listeners(genl_info_net(info)->genl_sock, grp->id);
74 }
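
A reply is built either because the requester explicitly asked for an echo or because someone is subscribed to the multicast group. As a hedged illustration of the userspace side, the sketch below (plain uapi netlink; the helper name is hypothetical) shows how a request would set NLM_F_ECHO to force the kernel to send the result back:

        /* Hedged sketch: how a hypothetical userspace caller would request
         * an echoed reply.  Only the standard uapi netlink header is used. */
        #include <linux/netlink.h>
        #include <string.h>

        static void init_echo_request(struct nlmsghdr *nlh, int type)
        {
                memset(nlh, 0, sizeof(*nlh));
                nlh->nlmsg_len = NLMSG_LENGTH(0);
                nlh->nlmsg_type = type;
                /* NLM_F_ECHO asks the kernel to send the result back to us. */
                nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_ECHO;
        }
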
75
76 static void ovs_notify(struct sk_buff *skb, struct genl_info *info,
77                        struct genl_multicast_group *grp)
78 {
79         genl_notify(skb, genl_info_net(info), info->snd_portid,
80                     grp->id, info->nlhdr, GFP_KERNEL);
81 }
82
83 /**
84  * DOC: Locking:
85  *
86  * Writes to device state (add/remove datapath, port, set operations on
87  * vports, etc.) and writes to other state (flow table modifications,
88  * setting miscellaneous datapath parameters, etc.) are protected by
89  * ovs_mutex.
90  *
91  * Reads are protected by RCU.
92  *
93  * There are a few special cases (mostly stats) that have their own
94  * synchronization, but they nest under all of the above and don't interact with
95  * each other.
96  *
97  * The RTNL lock nests inside ovs_mutex.
98  */
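
A minimal sketch of that discipline, assuming kernel context; struct foo, example_ptr, and the example functions are placeholders for illustration, not code from this file:

        struct foo { int val; };
        static struct foo __rcu *example_ptr;

        static void example_writer(struct foo *nf)
        {
                ovs_lock();                        /* serializes all writers */
                rcu_assign_pointer(example_ptr, nf);
                ovs_unlock();
        }

        static void example_reader(void)
        {
                struct foo *f;

                rcu_read_lock();                   /* readers never block writers */
                f = rcu_dereference(example_ptr);
                if (f)
                        pr_info("val=%d\n", f->val); /* use only inside the read section */
                rcu_read_unlock();
        }
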
99
100 static DEFINE_MUTEX(ovs_mutex);
101
102 void ovs_lock(void)
103 {
104         mutex_lock(&ovs_mutex);
105 }
106
107 void ovs_unlock(void)
108 {
109         mutex_unlock(&ovs_mutex);
110 }
111
112 #ifdef CONFIG_LOCKDEP
113 int lockdep_ovsl_is_held(void)
114 {
115         if (debug_locks)
116                 return lockdep_is_held(&ovs_mutex);
117         else
118                 return 1;
119 }
120 #endif
121
122 static struct vport *new_vport(const struct vport_parms *);
123 static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
124                              const struct dp_upcall_info *);
125 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
126                                   const struct dp_upcall_info *);
127
128 /* Must be called with rcu_read_lock or ovs_mutex. */
129 static struct datapath *get_dp(struct net *net, int dp_ifindex)
130 {
131         struct datapath *dp = NULL;
132         struct net_device *dev;
133
134         rcu_read_lock();
135         dev = dev_get_by_index_rcu(net, dp_ifindex);
136         if (dev) {
137                 struct vport *vport = ovs_internal_dev_get_vport(dev);
138                 if (vport)
139                         dp = vport->dp;
140         }
141         rcu_read_unlock();
142
143         return dp;
144 }
145
146 /* Must be called with rcu_read_lock or ovs_mutex. */
147 const char *ovs_dp_name(const struct datapath *dp)
148 {
149         struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
150         return vport->ops->get_name(vport);
151 }
152
153 static int get_dpifindex(struct datapath *dp)
154 {
155         struct vport *local;
156         int ifindex;
157
158         rcu_read_lock();
159
160         local = ovs_vport_rcu(dp, OVSP_LOCAL);
161         if (local)
162                 ifindex = netdev_vport_priv(local)->dev->ifindex;
163         else
164                 ifindex = 0;
165
166         rcu_read_unlock();
167
168         return ifindex;
169 }
170
171 static void destroy_dp_rcu(struct rcu_head *rcu)
172 {
173         struct datapath *dp = container_of(rcu, struct datapath, rcu);
174
175         free_percpu(dp->stats_percpu);
176         release_net(ovs_dp_get_net(dp));
177         kfree(dp->ports);
178         kfree(dp);
179 }
180
181 static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
182                                             u16 port_no)
183 {
184         return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
185 }
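
The bucket is selected by masking rather than by a modulo, which is only equivalent when the bucket count is a power of two. A standalone userspace demo of that equivalence (the bucket count of 1024 here is an assumption for illustration):

        /* Standalone demo: for power-of-two N, (x & (N - 1)) equals (x % N),
         * which is why the hash bucket count must be a power of two. */
        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                const uint16_t nbuckets = 1024;   /* assumed bucket count */
                uint16_t port_no;

                for (port_no = 0; port_no < 4096; port_no++)
                        assert((port_no & (nbuckets - 1)) == (port_no % nbuckets));
                printf("mask and modulo agree for power-of-two sizes\n");
                return 0;
        }
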
186
187 /* Called with ovs_mutex or RCU read lock. */
188 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
189 {
190         struct vport *vport;
191         struct hlist_head *head;
192
193         head = vport_hash_bucket(dp, port_no);
194         hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
195                 if (vport->port_no == port_no)
196                         return vport;
197         }
198         return NULL;
199 }
200
201 /* Called with ovs_mutex. */
202 static struct vport *new_vport(const struct vport_parms *parms)
203 {
204         struct vport *vport;
205
206         vport = ovs_vport_add(parms);
207         if (!IS_ERR(vport)) {
208                 struct datapath *dp = parms->dp;
209                 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
210
211                 hlist_add_head_rcu(&vport->dp_hash_node, head);
212         }
213         return vport;
214 }
215
216 void ovs_dp_detach_port(struct vport *p)
217 {
218         ASSERT_OVSL();
219
220         /* First drop references to device. */
221         hlist_del_rcu(&p->dp_hash_node);
222
223         /* Then destroy it. */
224         ovs_vport_del(p);
225 }
226
227 /* Must be called with rcu_read_lock. */
228 void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
229 {
230         struct datapath *dp = p->dp;
231         struct sw_flow *flow;
232         struct dp_stats_percpu *stats;
233         struct sw_flow_key key;
234         u64 *stats_counter;
235         u32 n_mask_hit;
236         int error;
237
238         stats = this_cpu_ptr(dp->stats_percpu);
239
240         /* Extract flow from 'skb' into 'key'. */
241         error = ovs_flow_extract(skb, p->port_no, &key);
242         if (unlikely(error)) {
243                 kfree_skb(skb);
244                 return;
245         }
246
247         /* Look up flow. */
248         flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, &n_mask_hit);
249         if (unlikely(!flow)) {
250                 struct dp_upcall_info upcall;
251
252                 upcall.cmd = OVS_PACKET_CMD_MISS;
253                 upcall.key = &key;
254                 upcall.userdata = NULL;
255                 upcall.portid = p->upcall_portid;
256                 ovs_dp_upcall(dp, skb, &upcall);
257                 consume_skb(skb);
258                 stats_counter = &stats->n_missed;
259                 goto out;
260         }
261
262         OVS_CB(skb)->flow = flow;
263         OVS_CB(skb)->pkt_key = &key;
264
265         ovs_flow_stats_update(OVS_CB(skb)->flow, skb);
266         ovs_execute_actions(dp, skb);
267         stats_counter = &stats->n_hit;
268
269 out:
270         /* Update datapath statistics. */
271         u64_stats_update_begin(&stats->sync);
272         (*stats_counter)++;
273         stats->n_mask_hit += n_mask_hit;
274         u64_stats_update_end(&stats->sync);
275 }
276
277 static struct genl_family dp_packet_genl_family = {
278         .id = GENL_ID_GENERATE,
279         .hdrsize = sizeof(struct ovs_header),
280         .name = OVS_PACKET_FAMILY,
281         .version = OVS_PACKET_VERSION,
282         .maxattr = OVS_PACKET_ATTR_MAX,
283         .netnsok = true,
284          SET_PARALLEL_OPS
285 };
286
287 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
288                   const struct dp_upcall_info *upcall_info)
289 {
290         struct dp_stats_percpu *stats;
291         int err;
292
293         if (upcall_info->portid == 0) {
294                 err = -ENOTCONN;
295                 goto err;
296         }
297
298         if (!skb_is_gso(skb))
299                 err = queue_userspace_packet(dp, skb, upcall_info);
300         else
301                 err = queue_gso_packets(dp, skb, upcall_info);
302         if (err)
303                 goto err;
304
305         return 0;
306
307 err:
308         stats = this_cpu_ptr(dp->stats_percpu);
309
310         u64_stats_update_begin(&stats->sync);
311         stats->n_lost++;
312         u64_stats_update_end(&stats->sync);
313
314         return err;
315 }
316
317 static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
318                              const struct dp_upcall_info *upcall_info)
319 {
320         unsigned short gso_type = skb_shinfo(skb)->gso_type;
321         struct dp_upcall_info later_info;
322         struct sw_flow_key later_key;
323         struct sk_buff *segs, *nskb;
324         int err;
325
326         segs = __skb_gso_segment(skb, NETIF_F_SG, false);
327         if (IS_ERR(segs))
328                 return PTR_ERR(segs);
329
330         /* Queue all of the segments. */
331         skb = segs;
332         do {
333                 err = queue_userspace_packet(dp, skb, upcall_info);
334                 if (err)
335                         break;
336
337                 if (skb == segs && gso_type & SKB_GSO_UDP) {
338                         /* The initial flow key extracted by ovs_flow_extract()
339                          * in this case is for a first fragment, so we need to
340                          * properly mark later fragments.
341                          */
342                         later_key = *upcall_info->key;
343                         later_key.ip.frag = OVS_FRAG_TYPE_LATER;
344
345                         later_info = *upcall_info;
346                         later_info.key = &later_key;
347                         upcall_info = &later_info;
348                 }
349         } while ((skb = skb->next));
350
351         /* Free all of the segments. */
352         skb = segs;
353         do {
354                 nskb = skb->next;
355                 if (err)
356                         kfree_skb(skb);
357                 else
358                         consume_skb(skb);
359         } while ((skb = nskb));
360         return err;
361 }
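
Both loops walk the segment chain through skb->next; the free loop saves the next pointer before releasing the current segment, so the walk survives the free. A standalone userspace model of that pattern:

        /* Standalone model: the next pointer is read before each node is
         * freed, exactly as 'nskb' is read before kfree_skb()/consume_skb(). */
        #include <stdlib.h>

        struct seg { struct seg *next; };

        static void free_chain(struct seg *s)
        {
                struct seg *nxt;

                while (s) {
                        nxt = s->next;   /* read before free, as with nskb */
                        free(s);
                        s = nxt;
                }
        }

        int main(void)
        {
                struct seg *head = NULL, *s;
                int i;

                for (i = 0; i < 3; i++) {
                        s = malloc(sizeof(*s));
                        if (!s) { free_chain(head); return 1; }
                        s->next = head;
                        head = s;
                }
                free_chain(head);
                return 0;
        }
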
362
363 static size_t key_attr_size(void)
364 {
365         return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
366                 + nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
367                   + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
368                   + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
369                   + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
370                   + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
371                   + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
372                   + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
373                   + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
374                 + nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
375                 + nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
376                 + nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
377                 + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
378                 + nla_total_size(4)   /* OVS_KEY_ATTR_8021Q */
379                 + nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
380                 + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
381                 + nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
382                 + nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
383                 + nla_total_size(28); /* OVS_KEY_ATTR_ND */
384 }
385
386 static size_t upcall_msg_size(const struct nlattr *userdata,
387                               unsigned int hdrlen)
388 {
389         size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
390                 + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
391                 + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */
392
393         /* OVS_PACKET_ATTR_USERDATA */
394         if (userdata)
395                 size += NLA_ALIGN(userdata->nla_len);
396
397         return size;
398 }
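
upcall_msg_size() adds up the aligned per-attribute costs. The arithmetic behind the kernel's nla_total_size() is just header-plus-payload rounded up to NLA_ALIGNTO, which the uapi macros let us reproduce in a standalone userspace demo:

        /* Standalone demo of the sizing arithmetic: an attribute occupies
         * NLA_HDRLEN plus its payload, rounded up to NLA_ALIGNTO.  This
         * mirrors what the kernel's nla_total_size() computes. */
        #include <linux/netlink.h>
        #include <stdio.h>

        static int total_size(int payload)
        {
                return NLA_ALIGN(NLA_HDRLEN + payload);
        }

        int main(void)
        {
                printf("u8 attr:  %d bytes\n", total_size(1));   /* -> 8 */
                printf("u32 attr: %d bytes\n", total_size(4));   /* -> 8 */
                printf("key blob: %d bytes\n", total_size(40));  /* -> 44 */
                return 0;
        }
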
399
400 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
401                                   const struct dp_upcall_info *upcall_info)
402 {
403         struct ovs_header *upcall;
404         struct sk_buff *nskb = NULL;
405         struct sk_buff *user_skb; /* to be queued to userspace */
406         struct nlattr *nla;
407         struct genl_info info = {
408 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
409                 .dst_sk = ovs_dp_get_net(dp)->genl_sock,
410 #endif
411                 .snd_portid = upcall_info->portid,
412         };
413         size_t len;
414         unsigned int hlen;
415         int err, dp_ifindex;
416
417         dp_ifindex = get_dpifindex(dp);
418         if (!dp_ifindex)
419                 return -ENODEV;
420
421         if (vlan_tx_tag_present(skb)) {
422                 nskb = skb_clone(skb, GFP_ATOMIC);
423                 if (!nskb)
424                         return -ENOMEM;
425
426                 nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
427                 if (!nskb)
428                         return -ENOMEM;
429
430                 vlan_set_tci(nskb, 0);
431
432                 skb = nskb;
433         }
434
435         if (nla_attr_size(skb->len) > USHRT_MAX) {
436                 err = -EFBIG;
437                 goto out;
438         }
439
440         /* Complete checksum if needed */
441         if (skb->ip_summed == CHECKSUM_PARTIAL &&
442             (err = skb_checksum_help(skb)))
443                 goto out;
444
445         /* Older versions of OVS user space enforce alignment of the last
446          * Netlink attribute to NLA_ALIGNTO which would require extensive
447          * padding logic. Only perform zerocopy if padding is not required.
448          */
449         if (dp->user_features & OVS_DP_F_UNALIGNED)
450                 hlen = skb_zerocopy_headlen(skb);
451         else
452                 hlen = skb->len;
453
454         len = upcall_msg_size(upcall_info->userdata, hlen);
455         user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
456         if (!user_skb) {
457                 err = -ENOMEM;
458                 goto out;
459         }
460
461         upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
462                              0, upcall_info->cmd);
463         upcall->dp_ifindex = dp_ifindex;
464
465         nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
466         ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
467         nla_nest_end(user_skb, nla);
468
469         if (upcall_info->userdata)
470                 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
471                           nla_len(upcall_info->userdata),
472                           nla_data(upcall_info->userdata));
473
474         /* Only reserve room for the attribute header; the packet data is
475          * added in skb_zerocopy(). */
476         if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
477                 err = -ENOBUFS;
478                 goto out;
479         }
480         nla->nla_len = nla_attr_size(skb->len);
481
482         skb_zerocopy(user_skb, skb, skb->len, hlen);
483
484         /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
485         if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
486                 size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;
487
488                 if (plen > 0)
489                         memset(skb_put(user_skb, plen), 0, plen);
490         }
491
492         ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
493
494         err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
495 out:
496         kfree_skb(nskb);
497         return err;
498 }
499
500 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
501 {
502         struct ovs_header *ovs_header = info->userhdr;
503         struct nlattr **a = info->attrs;
504         struct sw_flow_actions *acts;
505         struct sk_buff *packet;
506         struct sw_flow *flow;
507         struct datapath *dp;
508         struct ethhdr *eth;
509         int len;
510         int err;
511
512         err = -EINVAL;
513         if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
514             !a[OVS_PACKET_ATTR_ACTIONS])
515                 goto err;
516
517         len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
518         packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
519         err = -ENOMEM;
520         if (!packet)
521                 goto err;
522         skb_reserve(packet, NET_IP_ALIGN);
523
524         nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
525
526         skb_reset_mac_header(packet);
527         eth = eth_hdr(packet);
528
529         /* Normally, setting the skb 'protocol' field would be handled by a
530          * call to eth_type_trans(), but it assumes there's a sending
531          * device, which we may not have. */
532         if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
533                 packet->protocol = eth->h_proto;
534         else
535                 packet->protocol = htons(ETH_P_802_2);
536
537         /* Build an sw_flow for sending this packet. */
538         flow = ovs_flow_alloc();
539         err = PTR_ERR(flow);
540         if (IS_ERR(flow))
541                 goto err_kfree_skb;
542
543         err = ovs_flow_extract(packet, -1, &flow->key);
544         if (err)
545                 goto err_flow_free;
546
547         err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
548         if (err)
549                 goto err_flow_free;
550         acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
551         err = PTR_ERR(acts);
552         if (IS_ERR(acts))
553                 goto err_flow_free;
554
555         err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
556                                    &flow->key, 0, &acts);
557         rcu_assign_pointer(flow->sf_acts, acts);
558         if (err)
559                 goto err_flow_free;
560
561         OVS_CB(packet)->flow = flow;
562         OVS_CB(packet)->pkt_key = &flow->key;
563         packet->priority = flow->key.phy.priority;
564         packet->mark = flow->key.phy.skb_mark;
565
566         rcu_read_lock();
567         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
568         err = -ENODEV;
569         if (!dp)
570                 goto err_unlock;
571
572         local_bh_disable();
573         err = ovs_execute_actions(dp, packet);
574         local_bh_enable();
575         rcu_read_unlock();
576
577         ovs_flow_free(flow, false);
578         return err;
579
580 err_unlock:
581         rcu_read_unlock();
582 err_flow_free:
583         ovs_flow_free(flow, false);
584 err_kfree_skb:
585         kfree_skb(packet);
586 err:
587         return err;
588 }
589
590 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
591         [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
592         [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
593         [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
594 };
595
596 static struct genl_ops dp_packet_genl_ops[] = {
597         { .cmd = OVS_PACKET_CMD_EXECUTE,
598           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
599           .policy = packet_policy,
600           .doit = ovs_packet_cmd_execute
601         }
602 };
603
604 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
605                          struct ovs_dp_megaflow_stats *mega_stats)
606 {
607         int i;
608
609         memset(mega_stats, 0, sizeof(*mega_stats));
610
611         stats->n_flows = ovs_flow_tbl_count(&dp->table);
612         mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
613
614         stats->n_hit = stats->n_missed = stats->n_lost = 0;
615
616         for_each_possible_cpu(i) {
617                 const struct dp_stats_percpu *percpu_stats;
618                 struct dp_stats_percpu local_stats;
619                 unsigned int start;
620
621                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
622
623                 do {
624                         start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
625                         local_stats = *percpu_stats;
626                 } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
627
628                 stats->n_hit += local_stats.n_hit;
629                 stats->n_missed += local_stats.n_missed;
630                 stats->n_lost += local_stats.n_lost;
631                 mega_stats->n_mask_hit += local_stats.n_mask_hit;
632         }
633 }
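
The do/while loop is the read side of the u64_stats seqcount: if a writer was mid-update, or updated in between, the snapshot is retried. A minimal single-threaded userspace model of the idea, with the memory barriers deliberately omitted:

        /* Minimal model of the fetch/retry loop above.  A writer bumps 'seq'
         * to odd before updating and back to even after; readers retry until
         * they see a stable, even sequence.  The kernel's u64_stats_* helpers
         * implement this with proper barriers; this sketch is illustrative. */
        #include <stdio.h>

        struct stats { unsigned int seq; unsigned long long n_hit; };

        static void writer_update(struct stats *s)
        {
                s->seq++;            /* odd: update in progress */
                s->n_hit++;
                s->seq++;            /* even again: update complete */
        }

        static unsigned long long reader_snapshot(const struct stats *s)
        {
                unsigned int start;
                unsigned long long val;

                do {
                        start = s->seq;
                        val = s->n_hit;
                } while ((start & 1) || start != s->seq);
                return val;
        }

        int main(void)
        {
                struct stats s = { 0, 0 };

                writer_update(&s);
                printf("n_hit = %llu\n", reader_snapshot(&s));
                return 0;
        }
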
634
635 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
636         [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
637         [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
638         [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
639 };
640
641 static struct genl_family dp_flow_genl_family = {
642         .id = GENL_ID_GENERATE,
643         .hdrsize = sizeof(struct ovs_header),
644         .name = OVS_FLOW_FAMILY,
645         .version = OVS_FLOW_VERSION,
646         .maxattr = OVS_FLOW_ATTR_MAX,
647         .netnsok = true,
648          SET_PARALLEL_OPS
649 };
650
651 static struct genl_multicast_group ovs_dp_flow_multicast_group = {
652         .name = OVS_FLOW_MCGROUP
653 };
654
655 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
656 {
657         return NLMSG_ALIGN(sizeof(struct ovs_header))
658                 + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
659                 + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
660                 + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
661                 + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
662                 + nla_total_size(8) /* OVS_FLOW_ATTR_USED */
663                 + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
664 }
665
666 /* Called with ovs_mutex or RCU read lock. */
667 static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
668                                   struct sk_buff *skb, u32 portid,
669                                   u32 seq, u32 flags, u8 cmd)
670 {
671         const int skb_orig_len = skb->len;
672         struct nlattr *start;
673         struct ovs_flow_stats stats;
674         __be16 tcp_flags;
675         unsigned long used;
676         struct ovs_header *ovs_header;
677         struct nlattr *nla;
678         int err;
679
680         ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
681         if (!ovs_header)
682                 return -EMSGSIZE;
683
684         ovs_header->dp_ifindex = dp_ifindex;
685
686         /* Fill flow key. */
687         nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
688         if (!nla)
689                 goto nla_put_failure;
690
691         err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
692         if (err)
693                 goto error;
694         nla_nest_end(skb, nla);
695
696         nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
697         if (!nla)
698                 goto nla_put_failure;
699
700         err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
701         if (err)
702                 goto error;
703
704         nla_nest_end(skb, nla);
705
706         ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
707
708         if (used &&
709             nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
710                 goto nla_put_failure;
711
712         if (stats.n_packets &&
713             nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
714                 goto nla_put_failure;
715
716         if ((u8)ntohs(tcp_flags) &&
717              nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
718                 goto nla_put_failure;
719
720         /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
721          * this is the first flow to be dumped into 'skb'.  This is unusual for
722          * Netlink but individual action lists can be longer than
723          * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
724          * The userspace caller can always fetch the actions separately if it
725          * really wants them.  (Most userspace callers in fact don't care.)
726          *
727          * This can only fail for dump operations because the skb is always
728          * properly sized for single flows.
729          */
730         start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
731         if (start) {
732                 const struct sw_flow_actions *sf_acts;
733
734                 sf_acts = rcu_dereference_ovsl(flow->sf_acts);
735                 err = ovs_nla_put_actions(sf_acts->actions,
736                                           sf_acts->actions_len, skb);
737
738                 if (!err)
739                         nla_nest_end(skb, start);
740                 else {
741                         if (skb_orig_len)
742                                 goto error;
743
744                         nla_nest_cancel(skb, start);
745                 }
746         } else if (skb_orig_len)
747                 goto nla_put_failure;
748
749         return genlmsg_end(skb, ovs_header);
750
751 nla_put_failure:
752         err = -EMSGSIZE;
753 error:
754         genlmsg_cancel(skb, ovs_header);
755         return err;
756 }
757
758 /* May not be called with RCU read lock. */
759 static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
760                                                struct genl_info *info,
761                                                bool always)
762 {
763         struct sk_buff *skb;
764
765         if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group))
766                 return NULL;
767
768         skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
769
770         if (!skb)
771                 return ERR_PTR(-ENOMEM);
772
773         return skb;
774 }
775
776 /* Called with ovs_mutex. */
777 static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
778                                                int dp_ifindex,
779                                                struct genl_info *info, u8 cmd,
780                                                bool always)
781 {
782         struct sk_buff *skb;
783         int retval;
784
785         skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
786                                       always);
787         if (!skb || IS_ERR(skb))
788                 return skb;
789
790         retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
791                                         info->snd_portid, info->snd_seq, 0,
792                                         cmd);
793         BUG_ON(retval < 0);
794         return skb;
795 }
796
797 static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
798 {
799         struct nlattr **a = info->attrs;
800         struct ovs_header *ovs_header = info->userhdr;
801         struct sw_flow *flow, *new_flow;
802         struct sw_flow_mask mask;
803         struct sk_buff *reply;
804         struct datapath *dp;
805         struct sw_flow_actions *acts;
806         struct sw_flow_match match;
807         int error;
808
809         /* Must have key and actions. */
810         error = -EINVAL;
811         if (!a[OVS_FLOW_ATTR_KEY])
812                 goto error;
813         if (!a[OVS_FLOW_ATTR_ACTIONS])
814                 goto error;
815
816         /* Most of the time we need to allocate a new flow, so do it before
817          * locking. */
818         new_flow = ovs_flow_alloc();
819         if (IS_ERR(new_flow)) {
820                 error = PTR_ERR(new_flow);
821                 goto error;
822         }
823
824         /* Extract key. */
825         ovs_match_init(&match, &new_flow->unmasked_key, &mask);
826         error = ovs_nla_get_match(&match,
827                                   a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
828         if (error)
829                 goto err_kfree_flow;
830
831         ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);
832
833         /* Validate actions. */
834         acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
835         error = PTR_ERR(acts);
836         if (IS_ERR(acts))
837                 goto err_kfree_flow;
838
839         error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
840                                      0, &acts);
841         if (error) {
842                 OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
843                 goto err_kfree_acts;
844         }
845
846         reply = ovs_flow_cmd_alloc_info(acts, info, false);
847         if (IS_ERR(reply)) {
848                 error = PTR_ERR(reply);
849                 goto err_kfree_acts;
850         }
851
852         ovs_lock();
853         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
854         if (unlikely(!dp)) {
855                 error = -ENODEV;
856                 goto err_unlock_ovs;
857         }
858         /* Check if this is a duplicate flow */
859         flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->unmasked_key);
860         if (likely(!flow)) {
861                 rcu_assign_pointer(new_flow->sf_acts, acts);
862
863                 /* Put flow in bucket. */
864                 error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
865                 if (unlikely(error)) {
866                         acts = NULL;
867                         goto err_unlock_ovs;
868                 }
869
870                 if (unlikely(reply)) {
871                         error = ovs_flow_cmd_fill_info(new_flow,
872                                                        ovs_header->dp_ifindex,
873                                                        reply, info->snd_portid,
874                                                        info->snd_seq, 0,
875                                                        OVS_FLOW_CMD_NEW);
876                         BUG_ON(error < 0);
877                 }
878                 ovs_unlock();
879         } else {
880                 struct sw_flow_actions *old_acts;
881
882                 /* Bail out if we're not allowed to modify an existing flow.
883                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
884                  * because Generic Netlink treats the latter as a dump
885                  * request.  We also accept NLM_F_EXCL in case that bug ever
886                  * gets fixed.
887                  */
888                 if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
889                                                          | NLM_F_EXCL))) {
890                         error = -EEXIST;
891                         goto err_unlock_ovs;
892                 }
893                 /* The unmasked key has to be the same for flow updates. */
894                 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
895                         error = -EEXIST;
896                         goto err_unlock_ovs;
897                 }
898                 /* Update actions. */
899                 old_acts = ovsl_dereference(flow->sf_acts);
900                 rcu_assign_pointer(flow->sf_acts, acts);
901
902                 if (unlikely(reply)) {
903                         error = ovs_flow_cmd_fill_info(flow,
904                                                        ovs_header->dp_ifindex,
905                                                        reply, info->snd_portid,
906                                                        info->snd_seq, 0,
907                                                        OVS_FLOW_CMD_NEW);
908                         BUG_ON(error < 0);
909                 }
910                 ovs_unlock();
911
912                 ovs_nla_free_flow_actions(old_acts);
913                 ovs_flow_free(new_flow, false);
914         }
915
916         if (reply)
917                 ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
918         return 0;
919
920 err_unlock_ovs:
921         ovs_unlock();
922         kfree_skb(reply);
923 err_kfree_acts:
924         kfree(acts);
925 err_kfree_flow:
926         ovs_flow_free(new_flow, false);
927 error:
928         return error;
929 }
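
This function is the heart of the commit message: the flow, its actions, and the reply skb are all allocated before ovs_lock() is taken, so the critical section shrinks to the table lookup/insert plus filling a preallocated buffer. A kernel-style sketch of the shape, where every identifier below is a placeholder rather than a real function:

        static int cmd_new_sketch(struct genl_info *info)
        {
                struct sk_buff *reply;
                struct thing *t;
                int error;

                t = prepare_thing(info);        /* may sleep; no lock held */
                reply = alloc_reply(info);      /* may sleep; no lock held */

                ovs_lock();                     /* the critical section starts late... */
                error = publish_thing(t);       /* ...covers only the table update... */
                if (!error)
                        fill_reply(reply, t);   /* fill, never allocate, under the lock */
                ovs_unlock();                   /* ...and ends early */

                if (!error)
                        notify(reply);          /* notification needs no lock */
                return error;
        }
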
930
931 static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
932 {
933         struct nlattr **a = info->attrs;
934         struct ovs_header *ovs_header = info->userhdr;
935         struct sw_flow_key key, masked_key;
936         struct sw_flow *flow;
937         struct sw_flow_mask mask;
938         struct sk_buff *reply = NULL;
939         struct datapath *dp;
940         struct sw_flow_actions *old_acts = NULL, *acts = NULL;
941         struct sw_flow_match match;
942         int error;
943
944         /* Extract key. */
945         error = -EINVAL;
946         if (!a[OVS_FLOW_ATTR_KEY])
947                 goto error;
948
949         ovs_match_init(&match, &key, &mask);
950         error = ovs_nla_get_match(&match,
951                                   a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
952         if (error)
953                 goto error;
954
955         /* Validate actions. */
956         if (a[OVS_FLOW_ATTR_ACTIONS]) {
957                 acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
958                 error = PTR_ERR(acts);
959                 if (IS_ERR(acts))
960                         goto error;
961
962                 ovs_flow_mask_key(&masked_key, &key, &mask);
963                 error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
964                                              &masked_key, 0, &acts);
965                 if (error) {
966                         OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
967                         goto err_kfree_acts;
968                 }
969         }
970
971         /* We can allocate the reply before locking if we have acts. */
972         if (acts) {
973                 reply = ovs_flow_cmd_alloc_info(acts, info, false);
974                 if (IS_ERR(reply)) {
975                         error = PTR_ERR(reply);
976                         goto err_kfree_acts;
977                 }
978         }
979
980         ovs_lock();
981         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
982         if (unlikely(!dp)) {
983                 error = -ENODEV;
984                 goto err_unlock_ovs;
985         }
986         /* Check that the flow exists. */
987         flow = ovs_flow_tbl_lookup(&dp->table, &key);
988         if (unlikely(!flow)) {
989                 error = -ENOENT;
990                 goto err_unlock_ovs;
991         }
992         /* The unmasked key has to be the same for flow updates. */
993         if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
994                 error = -EEXIST;
995                 goto err_unlock_ovs;
996         }
997         /* Update actions, if present. */
998         if (likely(acts)) {
999                 old_acts = ovsl_dereference(flow->sf_acts);
1000                 rcu_assign_pointer(flow->sf_acts, acts);
1001
1002                 if (unlikely(reply)) {
1003                         error = ovs_flow_cmd_fill_info(flow,
1004                                                        ovs_header->dp_ifindex,
1005                                                        reply, info->snd_portid,
1006                                                        info->snd_seq, 0,
1007                                                        OVS_FLOW_CMD_NEW);
1008                         BUG_ON(error < 0);
1009                 }
1010         } else {
1011                 /* Without acts, the reply could not be allocated before locking. */
1012                 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
1013                                                 info, OVS_FLOW_CMD_NEW, false);
1014                 if (unlikely(IS_ERR(reply))) {
1015                         error = PTR_ERR(reply);
1016                         goto err_unlock_ovs;
1017                 }
1018         }
1019
1020         /* Clear stats. */
1021         if (a[OVS_FLOW_ATTR_CLEAR])
1022                 ovs_flow_stats_clear(flow);
1023         ovs_unlock();
1024
1025         if (reply)
1026                 ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
1027         if (old_acts)
1028                 ovs_nla_free_flow_actions(old_acts);
1029         return 0;
1030
1031 err_unlock_ovs:
1032         ovs_unlock();
1033         kfree_skb(reply);
1034 err_kfree_acts:
1035         kfree(acts);
1036 error:
1037         return error;
1038 }
1039
1040 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1041 {
1042         struct nlattr **a = info->attrs;
1043         struct ovs_header *ovs_header = info->userhdr;
1044         struct sw_flow_key key;
1045         struct sk_buff *reply;
1046         struct sw_flow *flow;
1047         struct datapath *dp;
1048         struct sw_flow_match match;
1049         int err;
1050
1051         if (!a[OVS_FLOW_ATTR_KEY]) {
1052                 OVS_NLERR("Flow get message rejected, Key attribute missing.\n");
1053                 return -EINVAL;
1054         }
1055
1056         ovs_match_init(&match, &key, NULL);
1057         err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
1058         if (err)
1059                 return err;
1060
1061         ovs_lock();
1062         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1063         if (!dp) {
1064                 err = -ENODEV;
1065                 goto unlock;
1066         }
1067
1068         flow = ovs_flow_tbl_lookup(&dp->table, &key);
1069         if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
1070                 err = -ENOENT;
1071                 goto unlock;
1072         }
1073
1074         reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
1075                                         OVS_FLOW_CMD_NEW, true);
1076         if (IS_ERR(reply)) {
1077                 err = PTR_ERR(reply);
1078                 goto unlock;
1079         }
1080
1081         ovs_unlock();
1082         return genlmsg_reply(reply, info);
1083 unlock:
1084         ovs_unlock();
1085         return err;
1086 }
1087
1088 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1089 {
1090         struct nlattr **a = info->attrs;
1091         struct ovs_header *ovs_header = info->userhdr;
1092         struct sw_flow_key key;
1093         struct sk_buff *reply;
1094         struct sw_flow *flow;
1095         struct datapath *dp;
1096         struct sw_flow_match match;
1097         int err;
1098
1099         if (likely(a[OVS_FLOW_ATTR_KEY])) {
1100                 ovs_match_init(&match, &key, NULL);
1101                 err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
1102                 if (unlikely(err))
1103                         return err;
1104         }
1105
1106         ovs_lock();
1107         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1108         if (unlikely(!dp)) {
1109                 err = -ENODEV;
1110                 goto unlock;
1111         }
1112         if (unlikely(!a[OVS_FLOW_ATTR_KEY])) {
1113                 err = ovs_flow_tbl_flush(&dp->table);
1114                 goto unlock;
1115         }
1116         flow = ovs_flow_tbl_lookup(&dp->table, &key);
1117         if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) {
1118                 err = -ENOENT;
1119                 goto unlock;
1120         }
1121
1122         ovs_flow_tbl_remove(&dp->table, flow);
1123         ovs_unlock();
1124
1125         reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *)flow->sf_acts,
1126                                         info, false);
1127
1128         if (likely(reply)) {
1129                 if (likely(!IS_ERR(reply))) {
1130                         rcu_read_lock(); /* Keep RCU checker happy. */
1131                         err = ovs_flow_cmd_fill_info(flow,
1132                                                      ovs_header->dp_ifindex,
1133                                                      reply, info->snd_portid,
1134                                                      info->snd_seq, 0,
1135                                                      OVS_FLOW_CMD_DEL);
1136                         rcu_read_unlock();
1137                         BUG_ON(err < 0);
1138                         ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
1139                 } else {
1140                         netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
1141                                         ovs_dp_flow_multicast_group.id,
1142                                         PTR_ERR(reply));
1143                 }
1144         }
1145
1146         ovs_flow_free(flow, true);
1147         return 0;
1148 unlock:
1149         ovs_unlock();
1150         return err;
1151 }
1152
1153 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1154 {
1155         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1156         struct table_instance *ti;
1157         struct datapath *dp;
1158
1159         rcu_read_lock();
1160         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1161         if (!dp) {
1162                 rcu_read_unlock();
1163                 return -ENODEV;
1164         }
1165
1166         ti = rcu_dereference(dp->table.ti);
1167         for (;;) {
1168                 struct sw_flow *flow;
1169                 u32 bucket, obj;
1170
1171                 bucket = cb->args[0];
1172                 obj = cb->args[1];
1173                 flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
1174                 if (!flow)
1175                         break;
1176
1177                 if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
1178                                            NETLINK_CB(cb->skb).portid,
1179                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1180                                            OVS_FLOW_CMD_NEW) < 0)
1181                         break;
1182
1183                 cb->args[0] = bucket;
1184                 cb->args[1] = obj;
1185         }
1186         rcu_read_unlock();
1187         return skb->len;
1188 }
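
Netlink dumps are restartable: when the skb fills up, the callback returns and is invoked again later, with cb->args[] preserving the (bucket, obj) cursor in between. A standalone userspace model of a resumable cursor:

        /* Standalone model of the resumable cursor used above: iteration
         * state lives outside the loop, so a "dump" can stop when the buffer
         * fills and continue exactly where it left off. */
        #include <stdio.h>

        #define NITEMS 10

        static int dump_some(const int *items, unsigned int *cursor, int room)
        {
                int emitted = 0;

                while (*cursor < NITEMS && emitted < room) {
                        printf("item %d\n", items[*cursor]);
                        (*cursor)++;        /* persisted, like cb->args[] */
                        emitted++;
                }
                return emitted;
        }

        int main(void)
        {
                int items[NITEMS] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
                unsigned int cursor = 0;

                while (dump_some(items, &cursor, 3) > 0)
                        printf("-- buffer full, next call resumes --\n");
                return 0;
        }
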
1189
1190 static struct genl_ops dp_flow_genl_ops[] = {
1191         { .cmd = OVS_FLOW_CMD_NEW,
1192           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1193           .policy = flow_policy,
1194           .doit = ovs_flow_cmd_new
1195         },
1196         { .cmd = OVS_FLOW_CMD_DEL,
1197           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1198           .policy = flow_policy,
1199           .doit = ovs_flow_cmd_del
1200         },
1201         { .cmd = OVS_FLOW_CMD_GET,
1202           .flags = 0,               /* OK for unprivileged users. */
1203           .policy = flow_policy,
1204           .doit = ovs_flow_cmd_get,
1205           .dumpit = ovs_flow_cmd_dump
1206         },
1207         { .cmd = OVS_FLOW_CMD_SET,
1208           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1209           .policy = flow_policy,
1210           .doit = ovs_flow_cmd_set,
1211         },
1212 };
1213
1214 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1215         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1216         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1217         [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1218 };
1219
1220 static struct genl_family dp_datapath_genl_family = {
1221         .id = GENL_ID_GENERATE,
1222         .hdrsize = sizeof(struct ovs_header),
1223         .name = OVS_DATAPATH_FAMILY,
1224         .version = OVS_DATAPATH_VERSION,
1225         .maxattr = OVS_DP_ATTR_MAX,
1226         .netnsok = true,
1227          SET_PARALLEL_OPS
1228 };
1229
1230 static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
1231         .name = OVS_DATAPATH_MCGROUP
1232 };
1233
1234 static size_t ovs_dp_cmd_msg_size(void)
1235 {
1236         size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1237
1238         msgsize += nla_total_size(IFNAMSIZ);
1239         msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
1240         msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
1241         msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
1242
1243         return msgsize;
1244 }
1245
1246 /* Called with ovs_mutex or RCU read lock. */
1247 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1248                                 u32 portid, u32 seq, u32 flags, u8 cmd)
1249 {
1250         struct ovs_header *ovs_header;
1251         struct ovs_dp_stats dp_stats;
1252         struct ovs_dp_megaflow_stats dp_megaflow_stats;
1253         int err;
1254
1255         ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1256                                    flags, cmd);
1257         if (!ovs_header)
1258                 goto error;
1259
1260         ovs_header->dp_ifindex = get_dpifindex(dp);
1261
1262         err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1263         if (err)
1264                 goto nla_put_failure;
1265
1266         get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
1267         if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1268                         &dp_stats))
1269                 goto nla_put_failure;
1270
1271         if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1272                         sizeof(struct ovs_dp_megaflow_stats),
1273                         &dp_megaflow_stats))
1274                 goto nla_put_failure;
1275
1276         if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1277                 goto nla_put_failure;
1278
1279         return genlmsg_end(skb, ovs_header);
1280
1281 nla_put_failure:
1282         genlmsg_cancel(skb, ovs_header);
1283 error:
1284         return -EMSGSIZE;
1285 }
1286
1287 static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info)
1288 {
1289         return genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
1290 }
1291
1292 /* Called with rcu_read_lock or ovs_mutex. */
1293 static struct datapath *lookup_datapath(struct net *net,
1294                                         struct ovs_header *ovs_header,
1295                                         struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1296 {
1297         struct datapath *dp;
1298
1299         if (!a[OVS_DP_ATTR_NAME])
1300                 dp = get_dp(net, ovs_header->dp_ifindex);
1301         else {
1302                 struct vport *vport;
1303
1304                 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1305                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1306         }
1307         return dp ? dp : ERR_PTR(-ENODEV);
1308 }
1309
1310 static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
1311 {
1312         struct datapath *dp;
1313
1314         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1315         if (IS_ERR(dp))
1316                 return;
1317
1318         WARN(dp->user_features, "Dropping previously announced user features\n");
1319         dp->user_features = 0;
1320 }
1321
1322 static void ovs_dp_change(struct datapath *dp, struct nlattr **a)
1323 {
1324         if (a[OVS_DP_ATTR_USER_FEATURES])
1325                 dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1326 }
1327
1328 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1329 {
1330         struct nlattr **a = info->attrs;
1331         struct vport_parms parms;
1332         struct sk_buff *reply;
1333         struct datapath *dp;
1334         struct vport *vport;
1335         struct ovs_net *ovs_net;
1336         int err, i;
1337
1338         err = -EINVAL;
1339         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1340                 goto err;
1341
1342         reply = ovs_dp_cmd_alloc_info(info);
1343         if (!reply)
1344                 return -ENOMEM;
1345
1346         err = -ENOMEM;
1347         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1348         if (dp == NULL)
1349                 goto err_free_reply;
1350
1351         ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
1352
1353         /* Allocate table. */
1354         err = ovs_flow_tbl_init(&dp->table);
1355         if (err)
1356                 goto err_free_dp;
1357
1358         dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1359         if (!dp->stats_percpu) {
1360                 err = -ENOMEM;
1361                 goto err_destroy_table;
1362         }
1363
1364         dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1365                             GFP_KERNEL);
1366         if (!dp->ports) {
1367                 err = -ENOMEM;
1368                 goto err_destroy_percpu;
1369         }
1370
1371         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1372                 INIT_HLIST_HEAD(&dp->ports[i]);
1373
1374         /* Set up our datapath device. */
1375         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1376         parms.type = OVS_VPORT_TYPE_INTERNAL;
1377         parms.options = NULL;
1378         parms.dp = dp;
1379         parms.port_no = OVSP_LOCAL;
1380         parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
1381
1382         ovs_dp_change(dp, a);
1383
1384         /* So far only local changes have been made; now we need the lock. */
1385         ovs_lock();
1386
1387         vport = new_vport(&parms);
1388         if (IS_ERR(vport)) {
1389                 err = PTR_ERR(vport);
1390                 if (err == -EBUSY)
1391                         err = -EEXIST;
1392
1393                 if (err == -EEXIST) {
1394                         /* An outdated user space instance that does not understand
1395                          * the concept of user_features has attempted to create a new
1396                          * datapath and is likely to reuse it. Drop all user features.
1397                          */
1398                         if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1399                                 ovs_dp_reset_user_features(skb, info);
1400                 }
1401
1402                 goto err_destroy_ports_array;
1403         }
1404
1405         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1406                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1407         BUG_ON(err < 0);
1408
1409         ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1410         list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1411
1412         ovs_unlock();
1413
1414         ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
1415         return 0;
1416
1417 err_destroy_ports_array:
1418         ovs_unlock();
1419         kfree(dp->ports);
1420 err_destroy_percpu:
1421         free_percpu(dp->stats_percpu);
1422 err_destroy_table:
1423         ovs_flow_tbl_destroy(&dp->table, false);
1424 err_free_dp:
1425         release_net(ovs_dp_get_net(dp));
1426         kfree(dp);
1427 err_free_reply:
1428         kfree_skb(reply);
1429 err:
1430         return err;
1431 }
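
The error path is the usual kernel unwind ladder: each failure jumps to a label that releases exactly what was acquired so far, in reverse order. A standalone userspace demo of the idiom:

        /* Standalone demo of the goto unwind ladder used above. */
        #include <stdlib.h>

        static int setup(void)
        {
                char *a, *b, *c;
                int err = -1;

                a = malloc(16);
                if (!a)
                        goto err;
                b = malloc(16);
                if (!b)
                        goto err_free_a;
                c = malloc(16);
                if (!c)
                        goto err_free_b;

                /* Success: release in reverse order too (a real caller
                 * would keep and use the resources instead). */
                free(c);
                free(b);
                free(a);
                return 0;

        err_free_b:
                free(b);
        err_free_a:
                free(a);
        err:
                return err;
        }

        int main(void)
        {
                return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
        }
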
1432
1433 /* Called with ovs_mutex. */
1434 static void __dp_destroy(struct datapath *dp)
1435 {
1436         int i;
1437
1438         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1439                 struct vport *vport;
1440                 struct hlist_node *n;
1441
1442                 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1443                         if (vport->port_no != OVSP_LOCAL)
1444                                 ovs_dp_detach_port(vport);
1445         }
1446
1447         list_del_rcu(&dp->list_node);
1448
1449         /* OVSP_LOCAL is the datapath's internal port. All ports in the
1450          * datapath must be destroyed before the datapath itself is freed.
1451          */
1452         ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1453
1454         /* RCU destroy the flow table */
1455         ovs_flow_tbl_destroy(&dp->table, true);
1456
1457         call_rcu(&dp->rcu, destroy_dp_rcu);
1458 }
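
Ports are unlinked under ovs_mutex, but the memory is reclaimed only via call_rcu() once all in-flight readers have left their RCU read sections. A hedged kernel-style sketch of that deferred-free idiom (struct obj and its fields are placeholders):

        struct obj {
                struct hlist_node node;
                struct rcu_head rcu;
        };

        static void obj_free_rcu(struct rcu_head *rcu)
        {
                struct obj *o = container_of(rcu, struct obj, rcu);

                kfree(o);
        }

        static void obj_destroy(struct obj *o)
        {
                /* Unpublish first: new readers can no longer find the object. */
                hlist_del_rcu(&o->node);
                /* Free only after every pre-existing reader has finished. */
                call_rcu(&o->rcu, obj_free_rcu);
        }
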
1459
1460 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1461 {
1462         struct sk_buff *reply;
1463         struct datapath *dp;
1464         int err;
1465
1466         reply = ovs_dp_cmd_alloc_info(info);
1467         if (!reply)
1468                 return -ENOMEM;
1469
1470         ovs_lock();
1471         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1472         err = PTR_ERR(dp);
1473         if (IS_ERR(dp))
1474                 goto err_unlock_free;
1475
1476         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1477                                    info->snd_seq, 0, OVS_DP_CMD_DEL);
1478         BUG_ON(err < 0);
1479
1480         __dp_destroy(dp);
1481
1482         ovs_unlock();
1483         ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
1484         return 0;
1485
1486 err_unlock_free:
1487         ovs_unlock();
1488         kfree_skb(reply);
1489         return err;
1490 }
1491
1492 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1493 {
1494         struct sk_buff *reply;
1495         struct datapath *dp;
1496         int err;
1497
1498         reply = ovs_dp_cmd_alloc_info(info);
1499         if (!reply)
1500                 return -ENOMEM;
1501
1502         ovs_lock();
1503         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1504         err = PTR_ERR(dp);
1505         if (IS_ERR(dp))
1506                 goto err_unlock_free;
1507
1508         ovs_dp_change(dp, info->attrs);
1509
1510         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1511                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1512         BUG_ON(err < 0);
1513
1514         ovs_unlock();
1515         ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
1516         return 0;
1517
1518 err_unlock_free:
1519         ovs_unlock();
1520         kfree_skb(reply);
1521         return err;
1522 }
1523
static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;
        int err;

        reply = ovs_dp_cmd_alloc_info(info);
        if (!reply)
                return -ENOMEM;

        rcu_read_lock();
        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp)) {
                err = PTR_ERR(dp);
                goto err_unlock_free;
        }
        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
                                   info->snd_seq, 0, OVS_DP_CMD_NEW);
        BUG_ON(err < 0);
        rcu_read_unlock();

        return genlmsg_reply(reply, info);

err_unlock_free:
        rcu_read_unlock();
        kfree_skb(reply);
        return err;
}

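/* Dumps all datapaths in the net namespace.  cb->args[0] records how many
 * entries previous dump calls have already emitted. */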
static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
        struct datapath *dp;
        int skip = cb->args[0];
        int i = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
                if (i >= skip &&
                    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                         OVS_DP_CMD_NEW) < 0)
                        break;
                i++;
        }
        rcu_read_unlock();

        cb->args[0] = i;

        return skb->len;
}

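/* Generic Netlink operations for the datapath family.  Userspace tools such
 * as ovs-vswitchd and ovs-dpctl drive these commands. */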
static struct genl_ops dp_datapath_genl_ops[] = {
        { .cmd = OVS_DP_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_new
        },
        { .cmd = OVS_DP_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_del
        },
        { .cmd = OVS_DP_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_get,
          .dumpit = ovs_dp_cmd_dump
        },
        { .cmd = OVS_DP_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_set,
        },
};

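/* Netlink attribute policy for OVS_VPORT_* attributes. */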
static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
        [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
        [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
        [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

static struct genl_family dp_vport_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_VPORT_FAMILY,
        .version = OVS_VPORT_VERSION,
        .maxattr = OVS_VPORT_ATTR_MAX,
        .netnsok = true,
        SET_PARALLEL_OPS
};

struct genl_multicast_group ovs_dp_vport_multicast_group = {
        .name = OVS_VPORT_MCGROUP
};

/* Called with ovs_mutex or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                                   u32 portid, u32 seq, u32 flags, u8 cmd)
{
        struct ovs_header *ovs_header;
        struct ovs_vport_stats vport_stats;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = get_dpifindex(vport->dp);

        if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
            nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
            nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
            nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
                goto nla_put_failure;

        ovs_vport_get_stats(vport, &vport_stats);
        if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
                    &vport_stats))
                goto nla_put_failure;

        err = ovs_vport_get_options(vport, skb);
        if (err == -EMSGSIZE)
                goto error;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}

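/* Allocates a default-sized reply skb for a vport command. */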
static struct sk_buff *ovs_vport_cmd_alloc_info(void)
{
        return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
}

/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
                                         u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
        BUG_ON(retval < 0);

        return skb;
}

/* Called with ovs_mutex or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
                                  struct ovs_header *ovs_header,
                                  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
        struct datapath *dp;
        struct vport *vport;

        if (a[OVS_VPORT_ATTR_NAME]) {
                vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
                if (!vport)
                        return ERR_PTR(-ENODEV);
                if (ovs_header->dp_ifindex &&
                    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
                        return ERR_PTR(-ENODEV);
                return vport;
        } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
                u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

                if (port_no >= DP_MAX_PORTS)
                        return ERR_PTR(-EFBIG);

                dp = get_dp(net, ovs_header->dp_ifindex);
                if (!dp)
                        return ERR_PTR(-ENODEV);

                vport = ovs_vport_ovsl_rcu(dp, port_no);
                if (!vport)
                        return ERR_PTR(-ENODEV);
                return vport;
        } else
                return ERR_PTR(-EINVAL);
}

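/* Handler for OVS_VPORT_CMD_NEW.  If userspace does not supply
 * OVS_VPORT_ATTR_PORT_NO, the lowest free port number (starting at 1) is
 * assigned. */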
static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct vport_parms parms;
        struct sk_buff *reply;
        struct vport *vport;
        struct datapath *dp;
        u32 port_no;
        int err;

        if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
            !a[OVS_VPORT_ATTR_UPCALL_PID])
                return -EINVAL;

        port_no = a[OVS_VPORT_ATTR_PORT_NO]
                ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
        if (port_no >= DP_MAX_PORTS)
                return -EFBIG;

        reply = ovs_vport_cmd_alloc_info();
        if (!reply)
                return -ENOMEM;

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto exit_unlock_free;

        if (port_no) {
                vport = ovs_vport_ovsl(dp, port_no);
                err = -EBUSY;
                if (vport)
                        goto exit_unlock_free;
        } else {
                for (port_no = 1; ; port_no++) {
                        if (port_no >= DP_MAX_PORTS) {
                                err = -EFBIG;
                                goto exit_unlock_free;
                        }
                        vport = ovs_vport_ovsl(dp, port_no);
                        if (!vport)
                                break;
                }
        }

        parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
        parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
        parms.options = a[OVS_VPORT_ATTR_OPTIONS];
        parms.dp = dp;
        parms.port_no = port_no;
        parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

        vport = new_vport(&parms);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock_free;

        if (a[OVS_VPORT_ATTR_STATS])
                ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));

        err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
                                      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
        BUG_ON(err < 0);
        ovs_unlock();

        ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
        return 0;

exit_unlock_free:
        ovs_unlock();
        kfree_skb(reply);
        return err;
}

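/* Handler for OVS_VPORT_CMD_SET.  May update a vport's options, stats, or
 * upcall socket; changing the vport's type is rejected with -EINVAL. */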
static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        reply = ovs_vport_cmd_alloc_info();
        if (!reply)
                return -ENOMEM;

        ovs_lock();
        vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock_free;

        if (a[OVS_VPORT_ATTR_TYPE] &&
            nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
                err = -EINVAL;
                goto exit_unlock_free;
        }

        if (a[OVS_VPORT_ATTR_OPTIONS]) {
                err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
                if (err)
                        goto exit_unlock_free;
        }

        if (a[OVS_VPORT_ATTR_STATS])
                ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));

        if (a[OVS_VPORT_ATTR_UPCALL_PID])
                vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

        err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
                                      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
        BUG_ON(err < 0);
        ovs_unlock();

        ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
        return 0;

exit_unlock_free:
        ovs_unlock();
        kfree_skb(reply);
        return err;
}

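/* Handler for OVS_VPORT_CMD_DEL.  The local port (OVSP_LOCAL) cannot be
 * deleted on its own; it is removed only when the datapath itself is
 * destroyed. */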
static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        reply = ovs_vport_cmd_alloc_info();
        if (!reply)
                return -ENOMEM;

        ovs_lock();
        vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock_free;

        if (vport->port_no == OVSP_LOCAL) {
                err = -EINVAL;
                goto exit_unlock_free;
        }

        err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
                                      info->snd_seq, 0, OVS_VPORT_CMD_DEL);
        BUG_ON(err < 0);
        ovs_dp_detach_port(vport);
        ovs_unlock();

        ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
        return 0;

exit_unlock_free:
        ovs_unlock();
        kfree_skb(reply);
        return err;
}

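/* Handler for OVS_VPORT_CMD_GET.  Looks the vport up by name or by port
 * number under the RCU read lock. */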
static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        reply = ovs_vport_cmd_alloc_info();
        if (!reply)
                return -ENOMEM;

        rcu_read_lock();
        vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock_free;
        err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
                                      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
        BUG_ON(err < 0);
        rcu_read_unlock();

        return genlmsg_reply(reply, info);

exit_unlock_free:
        rcu_read_unlock();
        kfree_skb(reply);
        return err;
}

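/* Dumps all vports of a datapath.  cb->args[0] and cb->args[1] hold the
 * hash bucket and in-bucket offset where the previous dump call left off. */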
static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct datapath *dp;
        int bucket = cb->args[0], skip = cb->args[1];
        int i, j = 0;

        rcu_read_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                rcu_read_unlock();
                return -ENODEV;
        }
        for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
                struct vport *vport;

                j = 0;
                hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
                        if (j >= skip &&
                            ovs_vport_cmd_fill_info(vport, skb,
                                                    NETLINK_CB(cb->skb).portid,
                                                    cb->nlh->nlmsg_seq,
                                                    NLM_F_MULTI,
                                                    OVS_VPORT_CMD_NEW) < 0)
                                goto out;

                        j++;
                }
                skip = 0;
        }
out:
        rcu_read_unlock();

        cb->args[0] = i;
        cb->args[1] = j;

        return skb->len;
}

static struct genl_ops dp_vport_genl_ops[] = {
        { .cmd = OVS_VPORT_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_new
        },
        { .cmd = OVS_VPORT_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_del
        },
        { .cmd = OVS_VPORT_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_get,
          .dumpit = ovs_vport_cmd_dump
        },
        { .cmd = OVS_VPORT_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_set,
        },
};

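/* Groups a Generic Netlink family with its operations and optional
 * multicast group so they can be registered and unregistered together. */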
struct genl_family_and_ops {
        struct genl_family *family;
        struct genl_ops *ops;
        int n_ops;
        struct genl_multicast_group *group;
};

static const struct genl_family_and_ops dp_genl_families[] = {
        { &dp_datapath_genl_family,
          dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
          &ovs_dp_datapath_multicast_group },
        { &dp_vport_genl_family,
          dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
          &ovs_dp_vport_multicast_group },
        { &dp_flow_genl_family,
          dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
          &ovs_dp_flow_multicast_group },
        { &dp_packet_genl_family,
          dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
          NULL },
};

static void dp_unregister_genl(int n_families)
{
        int i;

        for (i = 0; i < n_families; i++)
                genl_unregister_family(dp_genl_families[i].family);
}

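/* Registers every family in dp_genl_families.  On failure, unregisters all
 * families registered so far before returning the error. */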
static int dp_register_genl(void)
{
        int n_registered;
        int err;
        int i;

        n_registered = 0;
        for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
                const struct genl_family_and_ops *f = &dp_genl_families[i];

                err = genl_register_family_with_ops(f->family, f->ops,
                                                    f->n_ops);
                if (err)
                        goto error;
                n_registered++;

                if (f->group) {
                        err = genl_register_mc_group(f->family, f->group);
                        if (err)
                                goto error;
                }
        }

        return 0;

error:
        dp_unregister_genl(n_registered);
        return err;
}

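/* Per-network-namespace init: sets up the namespace's datapath list and its
 * vport-notification work item. */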
static int __net_init ovs_init_net(struct net *net)
{
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

        INIT_LIST_HEAD(&ovs_net->dps);
        INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
        return 0;
}

static void __net_exit ovs_exit_net(struct net *net)
{
        struct datapath *dp, *dp_next;
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

        ovs_lock();
        list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
                __dp_destroy(dp);
        ovs_unlock();

        cancel_work_sync(&ovs_net->dp_notify_work);
}

static struct pernet_operations ovs_net_ops = {
        .init = ovs_init_net,
        .exit = ovs_exit_net,
        .id   = &ovs_net_id,
        .size = sizeof(struct ovs_net),
};

DEFINE_COMPAT_PNET_REG_FUNC(device);

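/* Module init: registers the flow and vport subsystems, per-netns state,
 * the netdevice notifier, and the Generic Netlink families, unwinding in
 * reverse order on failure. */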
static int __init dp_init(void)
{
        int err;

        BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));

        pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
                VERSION);

        err = ovs_flow_init();
        if (err)
                goto error;

        err = ovs_vport_init();
        if (err)
                goto error_flow_exit;

        err = register_pernet_device(&ovs_net_ops);
        if (err)
                goto error_vport_exit;

        err = register_netdevice_notifier(&ovs_dp_device_notifier);
        if (err)
                goto error_netns_exit;

        err = dp_register_genl();
        if (err < 0)
                goto error_unreg_notifier;

        return 0;

error_unreg_notifier:
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
        unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
        ovs_vport_exit();
error_flow_exit:
        ovs_flow_exit();
error:
        return err;
}

static void dp_cleanup(void)
{
        dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
        unregister_pernet_device(&ovs_net_ops);
        /* Wait for outstanding RCU callbacks (e.g. destroy_dp_rcu) to finish
         * before the flow and vport caches are torn down. */
        rcu_barrier();
        ovs_vport_exit();
        ovs_flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");
MODULE_VERSION(VERSION);