[sliver-openvswitch.git] / datapath / datapath.c
1 /*
2  * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
3  * Distributed under the terms of the GNU GPL version 2.
4  *
5  * Significant portions of this file may be copied from parts of the Linux
6  * kernel, by Linus Torvalds and others.
7  */
8
9 /* Functions for managing the dp interface/device. */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/if_arp.h>
16 #include <linux/if_vlan.h>
17 #include <linux/in.h>
18 #include <linux/ip.h>
19 #include <linux/jhash.h>
20 #include <linux/delay.h>
21 #include <linux/time.h>
22 #include <linux/etherdevice.h>
23 #include <linux/genetlink.h>
24 #include <linux/kernel.h>
25 #include <linux/kthread.h>
26 #include <linux/mutex.h>
27 #include <linux/percpu.h>
28 #include <linux/rcupdate.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/version.h>
32 #include <linux/ethtool.h>
33 #include <linux/wait.h>
34 #include <asm/system.h>
35 #include <asm/div64.h>
36 #include <asm/bug.h>
37 #include <linux/highmem.h>
38 #include <linux/netfilter_bridge.h>
39 #include <linux/netfilter_ipv4.h>
40 #include <linux/inetdevice.h>
41 #include <linux/list.h>
42 #include <linux/rculist.h>
43 #include <linux/dmi.h>
44 #include <net/inet_ecn.h>
45 #include <net/genetlink.h>
46
47 #include "openvswitch/datapath-protocol.h"
48 #include "checksum.h"
49 #include "datapath.h"
50 #include "actions.h"
51 #include "flow.h"
52 #include "vlan.h"
53 #include "tunnel.h"
54 #include "vport-internal_dev.h"
55
56 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
57     LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
58 #error Kernels before 2.6.18 or after 3.0 are not supported by this version of Open vSwitch.
59 #endif
60
61 int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
62 EXPORT_SYMBOL(dp_ioctl_hook);
63
64 /**
65  * DOC: Locking:
66  *
67  * Writes to device state (add/remove datapath, port, set operations on vports,
68  * etc.) are protected by RTNL.
69  *
70  * Writes to other state (flow table modifications, set miscellaneous datapath
71  * parameters such as drop frags, etc.) are protected by genl_mutex.  The RTNL
72  * lock nests inside genl_mutex.
73  *
74  * Reads are protected by RCU.
75  *
76  * There are a few special cases (mostly stats) that have their own
77  * synchronization, but they nest under all of the above and don't interact with
78  * each other.
79  */
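/* A minimal sketch of the read side described above: readers take only
 * rcu_read_lock() and must keep it held for as long as they use the returned
 * datapath:
 *
 *         rcu_read_lock();
 *         dp = get_dp(dp_ifindex);
 *         if (dp)
 *                 ... read-only use of 'dp' ...
 *         rcu_read_unlock();
 *
 * Write paths run from Generic Netlink handlers (so genl_mutex is already
 * held) and nest rtnl_lock()/rtnl_unlock() inside it, as ovs_dp_cmd_new()
 * does below.
 */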
80
81 /* Global list of datapaths to enable dumping them all out.
82  * Protected by genl_mutex.
83  */
84 static LIST_HEAD(dps);
85
86 static struct vport *new_vport(const struct vport_parms *);
87 static int queue_userspace_packets(struct datapath *, struct sk_buff *,
88                                  const struct dp_upcall_info *);
89
90 /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
91 struct datapath *get_dp(int dp_ifindex)
92 {
93         struct datapath *dp = NULL;
94         struct net_device *dev;
95
96         rcu_read_lock();
97         dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
98         if (dev) {
99                 struct vport *vport = internal_dev_get_vport(dev);
100                 if (vport)
101                         dp = vport->dp;
102         }
103         rcu_read_unlock();
104
105         return dp;
106 }
107 EXPORT_SYMBOL_GPL(get_dp);
108
109 /* Must be called with genl_mutex. */
110 static struct flow_table *get_table_protected(struct datapath *dp)
111 {
112         return rcu_dereference_protected(dp->table, lockdep_genl_is_held());
113 }
114
115 /* Must be called with rcu_read_lock or RTNL lock. */
116 static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
117 {
118         return rcu_dereference_rtnl(dp->ports[port_no]);
119 }
120
121 /* Must be called with rcu_read_lock or RTNL lock. */
122 const char *dp_name(const struct datapath *dp)
123 {
124         return vport_get_name(rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]));
125 }
126
127 static int get_dpifindex(struct datapath *dp)
128 {
129         struct vport *local;
130         int ifindex;
131
132         rcu_read_lock();
133
134         local = get_vport_protected(dp, OVSP_LOCAL);
135         if (local)
136                 ifindex = vport_get_ifindex(local);
137         else
138                 ifindex = 0;
139
140         rcu_read_unlock();
141
142         return ifindex;
143 }
144
145 static inline size_t br_nlmsg_size(void)
146 {
147         return NLMSG_ALIGN(sizeof(struct ifinfomsg))
148                + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
149                + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
150                + nla_total_size(4) /* IFLA_MASTER */
151                + nla_total_size(4) /* IFLA_MTU */
152                + nla_total_size(1); /* IFLA_OPERSTATE */
153 }
154
155 /* Caller must hold RTNL lock. */
156 static int dp_fill_ifinfo(struct sk_buff *skb,
157                           const struct vport *port,
158                           int event, unsigned int flags)
159 {
160         struct datapath *dp = port->dp;
161         int ifindex = vport_get_ifindex(port);
162         struct ifinfomsg *hdr;
163         struct nlmsghdr *nlh;
164
165         if (ifindex < 0)
166                 return ifindex;
167
168         nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
169         if (nlh == NULL)
170                 return -EMSGSIZE;
171
172         hdr = nlmsg_data(nlh);
173         hdr->ifi_family = AF_BRIDGE;
174         hdr->__ifi_pad = 0;
175         hdr->ifi_type = ARPHRD_ETHER;
176         hdr->ifi_index = ifindex;
177         hdr->ifi_flags = vport_get_flags(port);
178         hdr->ifi_change = 0;
179
180         NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
181         NLA_PUT_U32(skb, IFLA_MASTER, get_dpifindex(dp));
182         NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
183 #ifdef IFLA_OPERSTATE
184         NLA_PUT_U8(skb, IFLA_OPERSTATE,
185                    vport_is_running(port)
186                         ? vport_get_operstate(port)
187                         : IF_OPER_DOWN);
188 #endif
189
190         NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));
191
192         return nlmsg_end(skb, nlh);
193
194 nla_put_failure:
195         nlmsg_cancel(skb, nlh);
196         return -EMSGSIZE;
197 }
198
199 /* Caller must hold RTNL lock. */
200 static void dp_ifinfo_notify(int event, struct vport *port)
201 {
202         struct sk_buff *skb;
203         int err = -ENOBUFS;
204
205         skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
206         if (skb == NULL)
207                 goto errout;
208
209         err = dp_fill_ifinfo(skb, port, event, 0);
210         if (err < 0) {
211                 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
212                 WARN_ON(err == -EMSGSIZE);
213                 kfree_skb(skb);
214                 goto errout;
215         }
216         rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
217         return;
218 errout:
219         if (err < 0)
220                 rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
221 }
222
223 static void release_dp(struct kobject *kobj)
224 {
225         struct datapath *dp = container_of(kobj, struct datapath, ifobj);
226         kfree(dp);
227 }
228
229 static struct kobj_type dp_ktype = {
230         .release = release_dp
231 };
232
233 static void destroy_dp_rcu(struct rcu_head *rcu)
234 {
235         struct datapath *dp = container_of(rcu, struct datapath, rcu);
236
237         flow_tbl_destroy(dp->table);
238         free_percpu(dp->stats_percpu);
239         kobject_put(&dp->ifobj);
240 }
241
242 /* Called with RTNL lock and genl_lock. */
243 static struct vport *new_vport(const struct vport_parms *parms)
244 {
245         struct vport *vport;
246
247         vport = vport_add(parms);
248         if (!IS_ERR(vport)) {
249                 struct datapath *dp = parms->dp;
250
251                 rcu_assign_pointer(dp->ports[parms->port_no], vport);
252                 list_add(&vport->node, &dp->port_list);
253
254                 dp_ifinfo_notify(RTM_NEWLINK, vport);
255         }
256
257         return vport;
258 }
259
260 /* Called with RTNL lock. */
261 void dp_detach_port(struct vport *p)
262 {
263         ASSERT_RTNL();
264
265         if (p->port_no != OVSP_LOCAL)
266                 dp_sysfs_del_if(p);
267         dp_ifinfo_notify(RTM_DELLINK, p);
268
269         /* First drop references to device. */
270         list_del(&p->node);
271         rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
272
273         /* Then destroy it. */
274         vport_del(p);
275 }
276
277 /* Must be called with rcu_read_lock. */
278 void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
279 {
280         struct datapath *dp = p->dp;
281         struct sw_flow *flow;
282         struct dp_stats_percpu *stats;
283         int stats_counter_off;
284         int error;
285
286         OVS_CB(skb)->vport = p;
287
288         if (!OVS_CB(skb)->flow) {
289                 struct sw_flow_key key;
290                 int key_len;
291                 bool is_frag;
292
293                 /* Extract flow from 'skb' into 'key'. */
294                 error = flow_extract(skb, p->port_no, &key, &key_len, &is_frag);
295                 if (unlikely(error)) {
296                         kfree_skb(skb);
297                         return;
298                 }
299
300                 if (is_frag && dp->drop_frags) {
301                         consume_skb(skb);
302                         stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
303                         goto out;
304                 }
305
306                 /* Look up flow. */
307                 flow = flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
308                 if (unlikely(!flow)) {
309                         struct dp_upcall_info upcall;
310
311                         upcall.cmd = OVS_PACKET_CMD_MISS;
312                         upcall.key = &key;
313                         upcall.userdata = 0;
314                         upcall.sample_pool = 0;
315                         upcall.actions = NULL;
316                         upcall.actions_len = 0;
317                         dp_upcall(dp, skb, &upcall);
318                         stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
319                         goto out;
320                 }
321
322                 OVS_CB(skb)->flow = flow;
323         }
324
325         stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
326         flow_used(OVS_CB(skb)->flow, skb);
327         execute_actions(dp, skb);
328
329 out:
330         /* Update datapath statistics. */
331         local_bh_disable();
332         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
333
334         write_seqcount_begin(&stats->seqlock);
335         (*(u64 *)((u8 *)stats + stats_counter_off))++;
336         write_seqcount_end(&stats->seqlock);
337
338         local_bh_enable();
339 }
340
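/* Copies the data in 'skb' into 'to', computing and filling in the checksum
 * that a CHECKSUM_PARTIAL packet has deferred; the caller only uses this for
 * such packets. */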
341 static void copy_and_csum_skb(struct sk_buff *skb, void *to)
342 {
343         u16 csum_start, csum_offset;
344         __wsum csum;
345
346         get_skb_csum_pointers(skb, &csum_start, &csum_offset);
347         csum_start -= skb_headroom(skb);
348
349         skb_copy_bits(skb, 0, to, csum_start);
350
351         csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
352                                       skb->len - csum_start, 0);
353         *(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
354 }
355
356 static struct genl_family dp_packet_genl_family = {
357         .id = GENL_ID_GENERATE,
358         .hdrsize = sizeof(struct ovs_header),
359         .name = OVS_PACKET_FAMILY,
360         .version = 1,
361         .maxattr = OVS_PACKET_ATTR_MAX
362 };
363
364 /* Generic Netlink multicast groups for upcalls.
365  *
366  * We really want three unique multicast groups per datapath, but we can't even
367  * get one, because genl_register_mc_group() takes genl_lock, which is also
368  * held during Generic Netlink message processing, so trying to acquire
369  * multicast groups during OVS_DP_CMD_NEW processing deadlocks.  Instead, we
370  * preallocate a few groups and use them round-robin for datapaths.  Collision
371  * isn't fatal--multicast listeners should check that the family is the one
372  * that they want and discard others--but it wastes time and memory to receive
373  * unwanted messages.
374  */
375 #define PACKET_N_MC_GROUPS 16
376 static struct genl_multicast_group packet_mc_groups[PACKET_N_MC_GROUPS];
377
378 static u32 packet_mc_group(int dp_ifindex, u8 cmd)
379 {
380         u32 idx;
381         BUILD_BUG_ON_NOT_POWER_OF_2(PACKET_N_MC_GROUPS);
382
383         idx = jhash_2words(dp_ifindex, cmd, 0) & (PACKET_N_MC_GROUPS - 1);
384         return packet_mc_groups[idx].id;
385 }
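/* A rough illustration of the mapping above: with PACKET_N_MC_GROUPS == 16,
 * jhash_2words() reduces (dp_ifindex, cmd) to one of the sixteen preallocated
 * "packetN" groups, so the MISS, ACTION and SAMPLE upcalls for one datapath
 * usually (but not always) land in different groups, while two datapaths may
 * well share a group -- the non-fatal collision case described above. */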
386
387 static int packet_register_mc_groups(void)
388 {
389         int i;
390
391         for (i = 0; i < PACKET_N_MC_GROUPS; i++) {
392                 struct genl_multicast_group *group = &packet_mc_groups[i];
393                 int error;
394
395                 sprintf(group->name, "packet%d", i);
396                 error = genl_register_mc_group(&dp_packet_genl_family, group);
397                 if (error)
398                         return error;
399         }
400         return 0;
401 }
402
403 int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
404 {
405         struct dp_stats_percpu *stats;
406         int err;
407
408         forward_ip_summed(skb, true);
409
410         /* Break apart GSO packets into their component pieces.  Otherwise
411          * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
412         if (skb_is_gso(skb)) {
413                 struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
414
415                 if (IS_ERR(nskb)) {
416                         kfree_skb(skb);
417                         err = PTR_ERR(nskb);
418                         goto err;
419                 }
420                 consume_skb(skb);
421                 skb = nskb;
422         }
423
424         err = queue_userspace_packets(dp, skb, upcall_info);
425         if (err)
426                 goto err;
427
428         return 0;
429
430 err:
431         local_bh_disable();
432         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
433
434         write_seqcount_begin(&stats->seqlock);
435         stats->n_lost++;
436         write_seqcount_end(&stats->seqlock);
437
438         local_bh_enable();
439
440         return err;
441 }
442
443 /* Send each packet in the 'skb' list to userspace for 'dp' as directed by
444  * 'upcall_info'.  There will be only one packet unless we broke up a GSO
445  * packet.
446  */
447 static int queue_userspace_packets(struct datapath *dp, struct sk_buff *skb,
448                                  const struct dp_upcall_info *upcall_info)
449 {
450         int dp_ifindex;
451         u32 group;
452         struct sk_buff *nskb;
453         int err;
454
455         dp_ifindex = get_dpifindex(dp);
456         if (!dp_ifindex) {
457                 err = -ENODEV;
458                 nskb = skb->next;
459                 goto err_kfree_skbs;
460         }
461
462         group = packet_mc_group(dp_ifindex, upcall_info->cmd);
463
464         do {
465                 struct ovs_header *upcall;
466                 struct sk_buff *user_skb; /* to be queued to userspace */
467                 struct nlattr *nla;
468                 unsigned int len;
469
470                 nskb = skb->next;
471                 skb->next = NULL;
472
473                 err = vlan_deaccel_tag(skb);
474                 if (unlikely(err))
475                         goto err_kfree_skbs;
476
477                 if (nla_attr_size(skb->len) > USHRT_MAX) {
478                         err = -EFBIG;
479                         goto err_kfree_skbs;
480                 }
481
482                 len = sizeof(struct ovs_header);
483                 len += nla_total_size(skb->len);
484                 len += nla_total_size(FLOW_BUFSIZE);
485                 if (upcall_info->userdata)
486                         len += nla_total_size(8);
487                 if (upcall_info->sample_pool)
488                         len += nla_total_size(4);
489                 if (upcall_info->actions_len)
490                         len += nla_total_size(upcall_info->actions_len);
491
492                 user_skb = genlmsg_new(len, GFP_ATOMIC);
493                 if (!user_skb) {
494                         netlink_set_err(INIT_NET_GENL_SOCK, 0, group, -ENOBUFS);
495                         err = -ENOMEM;
496                         goto err_kfree_skbs;
497                 }
498
499                 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd);
500                 upcall->dp_ifindex = dp_ifindex;
501
502                 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
503                 flow_to_nlattrs(upcall_info->key, user_skb);
504                 nla_nest_end(user_skb, nla);
505
506                 if (upcall_info->userdata)
507                         nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA, upcall_info->userdata);
508                 if (upcall_info->sample_pool)
509                         nla_put_u32(user_skb, OVS_PACKET_ATTR_SAMPLE_POOL, upcall_info->sample_pool);
510                 if (upcall_info->actions_len) {
511                         const struct nlattr *actions = upcall_info->actions;
512                         u32 actions_len = upcall_info->actions_len;
513
514                         nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
515                         memcpy(__skb_put(user_skb, actions_len), actions, actions_len);
516                         nla_nest_end(user_skb, nla);
517                 }
518
519                 nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
520                 if (skb->ip_summed == CHECKSUM_PARTIAL)
521                         copy_and_csum_skb(skb, nla_data(nla));
522                 else
523                         skb_copy_bits(skb, 0, nla_data(nla), skb->len);
524
525                 err = genlmsg_multicast(user_skb, 0, group, GFP_ATOMIC);
526                 if (err)
527                         goto err_kfree_skbs;
528
529                 consume_skb(skb);
530                 skb = nskb;
531         } while (skb);
532         return 0;
533
534 err_kfree_skbs:
535         kfree_skb(skb);
536         while ((skb = nskb) != NULL) {
537                 nskb = skb->next;
538                 kfree_skb(skb);
539         }
540         return err;
541 }
542
543 /* Called with genl_mutex. */
544 static int flush_flows(int dp_ifindex)
545 {
546         struct flow_table *old_table;
547         struct flow_table *new_table;
548         struct datapath *dp;
549
550         dp = get_dp(dp_ifindex);
551         if (!dp)
552                 return -ENODEV;
553
554         old_table = get_table_protected(dp);
555         new_table = flow_tbl_alloc(TBL_MIN_BUCKETS);
556         if (!new_table)
557                 return -ENOMEM;
558
559         rcu_assign_pointer(dp->table, new_table);
560
561         flow_tbl_deferred_destroy(old_table);
562         return 0;
563 }
564
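/* Verifies that 'attr' is a well-formed nested list of actions: every
 * attribute must be a known OVS_ACTION_ATTR_* type with the expected payload
 * length, and a few types get extra value checks (output port in range, VLAN
 * CFI bit clear, ECN bits clear in the TOS). */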
565 static int validate_actions(const struct nlattr *attr)
566 {
567         const struct nlattr *a;
568         int rem;
569
570         nla_for_each_nested(a, attr, rem) {
571                 static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
572                         [OVS_ACTION_ATTR_OUTPUT] = 4,
573                         [OVS_ACTION_ATTR_USERSPACE] = 8,
574                         [OVS_ACTION_ATTR_PUSH_VLAN] = 2,
575                         [OVS_ACTION_ATTR_POP_VLAN] = 0,
576                         [OVS_ACTION_ATTR_SET_DL_SRC] = ETH_ALEN,
577                         [OVS_ACTION_ATTR_SET_DL_DST] = ETH_ALEN,
578                         [OVS_ACTION_ATTR_SET_NW_SRC] = 4,
579                         [OVS_ACTION_ATTR_SET_NW_DST] = 4,
580                         [OVS_ACTION_ATTR_SET_NW_TOS] = 1,
581                         [OVS_ACTION_ATTR_SET_TP_SRC] = 2,
582                         [OVS_ACTION_ATTR_SET_TP_DST] = 2,
583                         [OVS_ACTION_ATTR_SET_TUNNEL] = 8,
584                         [OVS_ACTION_ATTR_SET_PRIORITY] = 4,
585                         [OVS_ACTION_ATTR_POP_PRIORITY] = 0,
586                 };
587                 int type = nla_type(a);
588
589                 if (type > OVS_ACTION_ATTR_MAX || nla_len(a) != action_lens[type])
590                         return -EINVAL;
591
592                 switch (type) {
593                 case OVS_ACTION_ATTR_UNSPEC:
594                         return -EINVAL;
595
596                 case OVS_ACTION_ATTR_USERSPACE:
597                 case OVS_ACTION_ATTR_POP_VLAN:
598                 case OVS_ACTION_ATTR_SET_DL_SRC:
599                 case OVS_ACTION_ATTR_SET_DL_DST:
600                 case OVS_ACTION_ATTR_SET_NW_SRC:
601                 case OVS_ACTION_ATTR_SET_NW_DST:
602                 case OVS_ACTION_ATTR_SET_TP_SRC:
603                 case OVS_ACTION_ATTR_SET_TP_DST:
604                 case OVS_ACTION_ATTR_SET_TUNNEL:
605                 case OVS_ACTION_ATTR_SET_PRIORITY:
606                 case OVS_ACTION_ATTR_POP_PRIORITY:
607                         /* No validation needed. */
608                         break;
609
610                 case OVS_ACTION_ATTR_OUTPUT:
611                         if (nla_get_u32(a) >= DP_MAX_PORTS)
612                                 return -EINVAL;
613                         break;
614
615                 case OVS_ACTION_ATTR_PUSH_VLAN:
616                         if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
617                                 return -EINVAL;
618                         break;
619
620                 case OVS_ACTION_ATTR_SET_NW_TOS:
621                         if (nla_get_u8(a) & INET_ECN_MASK)
622                                 return -EINVAL;
623                         break;
624
625                 default:
626                         return -EOPNOTSUPP;
627                 }
628         }
629
630         if (rem > 0)
631                 return -EINVAL;
632
633         return 0;
634 }

635 static void clear_stats(struct sw_flow *flow)
636 {
637         flow->used = 0;
638         flow->tcp_flags = 0;
639         flow->packet_count = 0;
640         flow->byte_count = 0;
641 }
642
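/* Handler for OVS_PACKET_CMD_EXECUTE: rebuilds the packet supplied in
 * OVS_PACKET_ATTR_PACKET, constructs a temporary flow from the packet and
 * OVS_PACKET_ATTR_KEY, and runs OVS_PACKET_ATTR_ACTIONS on it as if it had
 * been received on the named datapath. */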
643 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
644 {
645         struct ovs_header *ovs_header = info->userhdr;
646         struct nlattr **a = info->attrs;
647         struct sw_flow_actions *acts;
648         struct sk_buff *packet;
649         struct sw_flow *flow;
650         struct datapath *dp;
651         struct ethhdr *eth;
652         bool is_frag;
653         int len;
654         int err;
655         int key_len;
656
657         err = -EINVAL;
658         if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
659             !a[OVS_PACKET_ATTR_ACTIONS] ||
660             nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
661                 goto err;
662
663         err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS]);
664         if (err)
665                 goto err;
666
667         len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
668         packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
669         err = -ENOMEM;
670         if (!packet)
671                 goto err;
672         skb_reserve(packet, NET_IP_ALIGN);
673
674         memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);
675
676         skb_reset_mac_header(packet);
677         eth = eth_hdr(packet);
678
679         /* Normally, setting the skb 'protocol' field would be handled by a
680          * call to eth_type_trans(), but it assumes there's a sending
681          * device, which we may not have. */
682         if (ntohs(eth->h_proto) >= 1536)
683                 packet->protocol = eth->h_proto;
684         else
685                 packet->protocol = htons(ETH_P_802_2);
686
687         /* Build an sw_flow for sending this packet. */
688         flow = flow_alloc();
689         err = PTR_ERR(flow);
690         if (IS_ERR(flow))
691                 goto err_kfree_skb;
692
693         err = flow_extract(packet, -1, &flow->key, &key_len, &is_frag);
694         if (err)
695                 goto err_flow_put;
696
697         err = flow_metadata_from_nlattrs(&flow->key.eth.in_port,
698                                          &flow->key.eth.tun_id,
699                                          a[OVS_PACKET_ATTR_KEY]);
700         if (err)
701                 goto err_flow_put;
702
703         flow->hash = flow_hash(&flow->key, key_len);
704
705         acts = flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
706         err = PTR_ERR(acts);
707         if (IS_ERR(acts))
708                 goto err_flow_put;
709         rcu_assign_pointer(flow->sf_acts, acts);
710
711         OVS_CB(packet)->flow = flow;
712
713         rcu_read_lock();
714         dp = get_dp(ovs_header->dp_ifindex);
715         err = -ENODEV;
716         if (!dp)
717                 goto err_unlock;
718
719         if (flow->key.eth.in_port < DP_MAX_PORTS)
720                 OVS_CB(packet)->vport = get_vport_protected(dp,
721                                                         flow->key.eth.in_port);
722
723         err = execute_actions(dp, packet);
724         rcu_read_unlock();
725
726         flow_put(flow);
727         return err;
728
729 err_unlock:
730         rcu_read_unlock();
731 err_flow_put:
732         flow_put(flow);
733 err_kfree_skb:
734         kfree_skb(packet);
735 err:
736         return err;
737 }
738
739 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
740         [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
741         [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
742         [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
743 };
744
745 static struct genl_ops dp_packet_genl_ops[] = {
746         { .cmd = OVS_PACKET_CMD_EXECUTE,
747           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
748           .policy = packet_policy,
749           .doit = ovs_packet_cmd_execute
750         }
751 };
752
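/* Fills 'stats' with the current flow count and the sum of the per-CPU
 * counters, using each CPU's seqlock to take a consistent snapshot. */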
753 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
754 {
755         int i;
756         struct flow_table *table = get_table_protected(dp);
757
758         stats->n_flows = flow_tbl_count(table);
759
760         stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0;
761         for_each_possible_cpu(i) {
762                 const struct dp_stats_percpu *percpu_stats;
763                 struct dp_stats_percpu local_stats;
764                 unsigned seqcount;
765
766                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
767
768                 do {
769                         seqcount = read_seqcount_begin(&percpu_stats->seqlock);
770                         local_stats = *percpu_stats;
771                 } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
772
773                 stats->n_frags += local_stats.n_frags;
774                 stats->n_hit += local_stats.n_hit;
775                 stats->n_missed += local_stats.n_missed;
776                 stats->n_lost += local_stats.n_lost;
777         }
778 }
779
780 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
781         [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
782         [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
783         [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
784 };
785
786 static struct genl_family dp_flow_genl_family = {
787         .id = GENL_ID_GENERATE,
788         .hdrsize = sizeof(struct ovs_header),
789         .name = OVS_FLOW_FAMILY,
790         .version = 1,
791         .maxattr = OVS_FLOW_ATTR_MAX
792 };
793
794 static struct genl_multicast_group dp_flow_multicast_group = {
795         .name = OVS_FLOW_MCGROUP
796 };
797
798 /* Called with genl_lock. */
799 static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
800                                   struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd)
801 {
802         const int skb_orig_len = skb->len;
803         const struct sw_flow_actions *sf_acts;
804         struct ovs_flow_stats stats;
805         struct ovs_header *ovs_header;
806         struct nlattr *nla;
807         unsigned long used;
808         u8 tcp_flags;
809         int err;
810
811         sf_acts = rcu_dereference_protected(flow->sf_acts,
812                                             lockdep_genl_is_held());
813
814         ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
815         if (!ovs_header)
816                 return -EMSGSIZE;
817
818         ovs_header->dp_ifindex = get_dpifindex(dp);
819
820         nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
821         if (!nla)
822                 goto nla_put_failure;
823         err = flow_to_nlattrs(&flow->key, skb);
824         if (err)
825                 goto error;
826         nla_nest_end(skb, nla);
827
828         spin_lock_bh(&flow->lock);
829         used = flow->used;
830         stats.n_packets = flow->packet_count;
831         stats.n_bytes = flow->byte_count;
832         tcp_flags = flow->tcp_flags;
833         spin_unlock_bh(&flow->lock);
834
835         if (used)
836                 NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, flow_used_time(used));
837
838         if (stats.n_packets)
839                 NLA_PUT(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats);
840
841         if (tcp_flags)
842                 NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);
843
844         /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
845          * this is the first flow to be dumped into 'skb'.  This is unusual for
846          * Netlink but individual action lists can be longer than
847          * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
848          * The userspace caller can always fetch the actions separately if it
849          * really wants them.  (Most userspace callers in fact don't care.)
850          *
851          * This can only fail for dump operations because the skb is always
852          * properly sized for single flows.
853          */
854         err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
855                       sf_acts->actions);
856         if (err < 0 && skb_orig_len)
857                 goto error;
858
859         return genlmsg_end(skb, ovs_header);
860
861 nla_put_failure:
862         err = -EMSGSIZE;
863 error:
864         genlmsg_cancel(skb, ovs_header);
865         return err;
866 }
867
868 static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
869 {
870         const struct sw_flow_actions *sf_acts;
871         int len;
872
873         sf_acts = rcu_dereference_protected(flow->sf_acts,
874                                             lockdep_genl_is_held());
875
876         len = nla_total_size(FLOW_BUFSIZE); /* OVS_FLOW_ATTR_KEY */
877         len += nla_total_size(sf_acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
878         len += nla_total_size(sizeof(struct ovs_flow_stats)); /* OVS_FLOW_ATTR_STATS */
879         len += nla_total_size(1); /* OVS_FLOW_ATTR_TCP_FLAGS */
880         len += nla_total_size(8); /* OVS_FLOW_ATTR_USED */
881         return genlmsg_new(NLMSG_ALIGN(sizeof(struct ovs_header)) + len, GFP_KERNEL);
882 }
883
884 static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow, struct datapath *dp,
885                                                u32 pid, u32 seq, u8 cmd)
886 {
887         struct sk_buff *skb;
888         int retval;
889
890         skb = ovs_flow_cmd_alloc_info(flow);
891         if (!skb)
892                 return ERR_PTR(-ENOMEM);
893
894         retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
895         BUG_ON(retval < 0);
896         return skb;
897 }
898
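/* Handler for OVS_FLOW_CMD_NEW and OVS_FLOW_CMD_SET: looks up the flow for
 * the supplied key, creating it (NEW) or replacing its actions (SET), resets
 * its statistics if OVS_FLOW_ATTR_CLEAR is given, and notifies the flow
 * multicast group with the resulting flow. */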
899 static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
900 {
901         struct nlattr **a = info->attrs;
902         struct ovs_header *ovs_header = info->userhdr;
903         struct sw_flow_key key;
904         struct sw_flow *flow;
905         struct sk_buff *reply;
906         struct datapath *dp;
907         struct flow_table *table;
908         int error;
909         int key_len;
910
911         /* Extract key. */
912         error = -EINVAL;
913         if (!a[OVS_FLOW_ATTR_KEY])
914                 goto error;
915         error = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
916         if (error)
917                 goto error;
918
919         /* Validate actions. */
920         if (a[OVS_FLOW_ATTR_ACTIONS]) {
921                 error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS]);
922                 if (error)
923                         goto error;
924         } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
925                 error = -EINVAL;
926                 goto error;
927         }
928
929         dp = get_dp(ovs_header->dp_ifindex);
930         error = -ENODEV;
931         if (!dp)
932                 goto error;
933
934         table = get_table_protected(dp);
935         flow = flow_tbl_lookup(table, &key, key_len);
936         if (!flow) {
937                 struct sw_flow_actions *acts;
938
939                 /* Bail out if we're not allowed to create a new flow. */
940                 error = -ENOENT;
941                 if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
942                         goto error;
943
944                 /* Expand table, if necessary, to make room. */
945                 if (flow_tbl_need_to_expand(table)) {
946                         struct flow_table *new_table;
947
948                         new_table = flow_tbl_expand(table);
949                         if (!IS_ERR(new_table)) {
950                                 rcu_assign_pointer(dp->table, new_table);
951                                 flow_tbl_deferred_destroy(table);
952                                 table = get_table_protected(dp);
953                         }
954                 }
955
956                 /* Allocate flow. */
957                 flow = flow_alloc();
958                 if (IS_ERR(flow)) {
959                         error = PTR_ERR(flow);
960                         goto error;
961                 }
962                 flow->key = key;
963                 clear_stats(flow);
964
965                 /* Obtain actions. */
966                 acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
967                 error = PTR_ERR(acts);
968                 if (IS_ERR(acts))
969                         goto error_free_flow;
970                 rcu_assign_pointer(flow->sf_acts, acts);
971
972                 /* Put flow in bucket. */
973                 flow->hash = flow_hash(&key, key_len);
974                 flow_tbl_insert(table, flow);
975
976                 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
977                                                 info->snd_seq, OVS_FLOW_CMD_NEW);
978         } else {
979                 /* We found a matching flow. */
980                 struct sw_flow_actions *old_acts;
981
982                 /* Bail out if we're not allowed to modify an existing flow.
983                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
984                  * because Generic Netlink treats the latter as a dump
985                  * request.  We also accept NLM_F_EXCL in case that bug ever
986                  * gets fixed.
987                  */
988                 error = -EEXIST;
989                 if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
990                     info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
991                         goto error;
992
993                 /* Update actions. */
994                 old_acts = rcu_dereference_protected(flow->sf_acts,
995                                                      lockdep_genl_is_held());
996                 if (a[OVS_FLOW_ATTR_ACTIONS] &&
997                     (old_acts->actions_len != nla_len(a[OVS_FLOW_ATTR_ACTIONS]) ||
998                      memcmp(old_acts->actions, nla_data(a[OVS_FLOW_ATTR_ACTIONS]),
999                             old_acts->actions_len))) {
1000                         struct sw_flow_actions *new_acts;
1001
1002                         new_acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
1003                         error = PTR_ERR(new_acts);
1004                         if (IS_ERR(new_acts))
1005                                 goto error;
1006
1007                         rcu_assign_pointer(flow->sf_acts, new_acts);
1008                         flow_deferred_free_acts(old_acts);
1009                 }
1010
1011                 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
1012                                                 info->snd_seq, OVS_FLOW_CMD_NEW);
1013
1014                 /* Clear stats. */
1015                 if (a[OVS_FLOW_ATTR_CLEAR]) {
1016                         spin_lock_bh(&flow->lock);
1017                         clear_stats(flow);
1018                         spin_unlock_bh(&flow->lock);
1019                 }
1020         }
1021
1022         if (!IS_ERR(reply))
1023                 genl_notify(reply, genl_info_net(info), info->snd_pid,
1024                             dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1025         else
1026                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1027                                 dp_flow_multicast_group.id, PTR_ERR(reply));
1028         return 0;
1029
1030 error_free_flow:
1031         flow_put(flow);
1032 error:
1033         return error;
1034 }
1035
1036 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1037 {
1038         struct nlattr **a = info->attrs;
1039         struct ovs_header *ovs_header = info->userhdr;
1040         struct sw_flow_key key;
1041         struct sk_buff *reply;
1042         struct sw_flow *flow;
1043         struct datapath *dp;
1044         struct flow_table *table;
1045         int err;
1046         int key_len;
1047
1048         if (!a[OVS_FLOW_ATTR_KEY])
1049                 return -EINVAL;
1050         err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1051         if (err)
1052                 return err;
1053
1054         dp = get_dp(ovs_header->dp_ifindex);
1055         if (!dp)
1056                 return -ENODEV;
1057
1058         table = get_table_protected(dp);
1059         flow = flow_tbl_lookup(table, &key, key_len);
1060         if (!flow)
1061                 return -ENOENT;
1062
1063         reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, OVS_FLOW_CMD_NEW);
1064         if (IS_ERR(reply))
1065                 return PTR_ERR(reply);
1066
1067         return genlmsg_reply(reply, info);
1068 }
1069
1070 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1071 {
1072         struct nlattr **a = info->attrs;
1073         struct ovs_header *ovs_header = info->userhdr;
1074         struct sw_flow_key key;
1075         struct sk_buff *reply;
1076         struct sw_flow *flow;
1077         struct datapath *dp;
1078         struct flow_table *table;
1079         int err;
1080         int key_len;
1081
1082         if (!a[OVS_FLOW_ATTR_KEY])
1083                 return flush_flows(ovs_header->dp_ifindex);
1084         err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1085         if (err)
1086                 return err;
1087
1088         dp = get_dp(ovs_header->dp_ifindex);
1089         if (!dp)
1090                 return -ENODEV;
1091
1092         table = get_table_protected(dp);
1093         flow = flow_tbl_lookup(table, &key, key_len);
1094         if (!flow)
1095                 return -ENOENT;
1096
1097         reply = ovs_flow_cmd_alloc_info(flow);
1098         if (!reply)
1099                 return -ENOMEM;
1100
1101         flow_tbl_remove(table, flow);
1102
1103         err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
1104                                      info->snd_seq, 0, OVS_FLOW_CMD_DEL);
1105         BUG_ON(err < 0);
1106
1107         flow_deferred_free(flow);
1108
1109         genl_notify(reply, genl_info_net(info), info->snd_pid,
1110                     dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1111         return 0;
1112 }
1113
1114 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1115 {
1116         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1117         struct datapath *dp;
1118
1119         dp = get_dp(ovs_header->dp_ifindex);
1120         if (!dp)
1121                 return -ENODEV;
1122
1123         for (;;) {
1124                 struct sw_flow *flow;
1125                 u32 bucket, obj;
1126
1127                 bucket = cb->args[0];
1128                 obj = cb->args[1];
1129                 flow = flow_tbl_next(get_table_protected(dp), &bucket, &obj);
1130                 if (!flow)
1131                         break;
1132
1133                 if (ovs_flow_cmd_fill_info(flow, dp, skb, NETLINK_CB(cb->skb).pid,
1134                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1135                                            OVS_FLOW_CMD_NEW) < 0)
1136                         break;
1137
1138                 cb->args[0] = bucket;
1139                 cb->args[1] = obj;
1140         }
1141         return skb->len;
1142 }
1143
1144 static struct genl_ops dp_flow_genl_ops[] = {
1145         { .cmd = OVS_FLOW_CMD_NEW,
1146           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1147           .policy = flow_policy,
1148           .doit = ovs_flow_cmd_new_or_set
1149         },
1150         { .cmd = OVS_FLOW_CMD_DEL,
1151           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1152           .policy = flow_policy,
1153           .doit = ovs_flow_cmd_del
1154         },
1155         { .cmd = OVS_FLOW_CMD_GET,
1156           .flags = 0,               /* OK for unprivileged users. */
1157           .policy = flow_policy,
1158           .doit = ovs_flow_cmd_get,
1159           .dumpit = ovs_flow_cmd_dump
1160         },
1161         { .cmd = OVS_FLOW_CMD_SET,
1162           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1163           .policy = flow_policy,
1164           .doit = ovs_flow_cmd_new_or_set,
1165         },
1166 };
1167
1168 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1169 #ifdef HAVE_NLA_NUL_STRING
1170         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1171 #endif
1172         [OVS_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 },
1173         [OVS_DP_ATTR_SAMPLING] = { .type = NLA_U32 },
1174 };
1175
1176 static struct genl_family dp_datapath_genl_family = {
1177         .id = GENL_ID_GENERATE,
1178         .hdrsize = sizeof(struct ovs_header),
1179         .name = OVS_DATAPATH_FAMILY,
1180         .version = 1,
1181         .maxattr = OVS_DP_ATTR_MAX
1182 };
1183
1184 static struct genl_multicast_group dp_datapath_multicast_group = {
1185         .name = OVS_DATAPATH_MCGROUP
1186 };
1187
1188 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1189                                 u32 pid, u32 seq, u32 flags, u8 cmd)
1190 {
1191         struct ovs_header *ovs_header;
1192         struct nlattr *nla;
1193         int err;
1194         int dp_ifindex = get_dpifindex(dp);
1195
1196         ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
1197                                    flags, cmd);
1198         if (!ovs_header)
1199                 goto error;
1200
1201         ovs_header->dp_ifindex = dp_ifindex;
1202
1203         rcu_read_lock();
1204         err = nla_put_string(skb, OVS_DP_ATTR_NAME, dp_name(dp));
1205         rcu_read_unlock();
1206         if (err)
1207                 goto nla_put_failure;
1208
1209         nla = nla_reserve(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats));
1210         if (!nla)
1211                 goto nla_put_failure;
1212         get_dp_stats(dp, nla_data(nla));
1213
1214         NLA_PUT_U32(skb, OVS_DP_ATTR_IPV4_FRAGS,
1215                     dp->drop_frags ? OVS_DP_FRAG_DROP : OVS_DP_FRAG_ZERO);
1216
1217         if (dp->sflow_probability)
1218                 NLA_PUT_U32(skb, OVS_DP_ATTR_SAMPLING, dp->sflow_probability);
1219
1220         nla = nla_nest_start(skb, OVS_DP_ATTR_MCGROUPS);
1221         if (!nla)
1222                 goto nla_put_failure;
1223         NLA_PUT_U32(skb, OVS_PACKET_CMD_MISS,
1224                         packet_mc_group(dp_ifindex, OVS_PACKET_CMD_MISS));
1225         NLA_PUT_U32(skb, OVS_PACKET_CMD_ACTION,
1226                         packet_mc_group(dp_ifindex, OVS_PACKET_CMD_ACTION));
1227         NLA_PUT_U32(skb, OVS_PACKET_CMD_SAMPLE,
1228                         packet_mc_group(dp_ifindex, OVS_PACKET_CMD_SAMPLE));
1229         nla_nest_end(skb, nla);
1230
1231         return genlmsg_end(skb, ovs_header);
1232
1233 nla_put_failure:
1234         genlmsg_cancel(skb, ovs_header);
1235 error:
1236         return -EMSGSIZE;
1237 }
1238
1239 static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
1240                                              u32 seq, u8 cmd)
1241 {
1242         struct sk_buff *skb;
1243         int retval;
1244
1245         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1246         if (!skb)
1247                 return ERR_PTR(-ENOMEM);
1248
1249         retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
1250         if (retval < 0) {
1251                 kfree_skb(skb);
1252                 return ERR_PTR(retval);
1253         }
1254         return skb;
1255 }
1256
1257 static int ovs_dp_cmd_validate(struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1258 {
1259         if (a[OVS_DP_ATTR_IPV4_FRAGS]) {
1260                 u32 frags = nla_get_u32(a[OVS_DP_ATTR_IPV4_FRAGS]);
1261
1262                 if (frags != OVS_DP_FRAG_ZERO && frags != OVS_DP_FRAG_DROP)
1263                         return -EINVAL;
1264         }
1265
1266         return CHECK_NUL_STRING(a[OVS_DP_ATTR_NAME], IFNAMSIZ - 1);
1267 }
1268
1269 /* Called with genl_mutex and optionally with RTNL lock also. */
1270 static struct datapath *lookup_datapath(struct ovs_header *ovs_header, struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1271 {
1272         struct datapath *dp;
1273
1274         if (!a[OVS_DP_ATTR_NAME])
1275                 dp = get_dp(ovs_header->dp_ifindex);
1276         else {
1277                 struct vport *vport;
1278
1279                 rcu_read_lock();
1280                 vport = vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
1281                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1282                 rcu_read_unlock();
1283         }
1284         return dp ? dp : ERR_PTR(-ENODEV);
1285 }
1286
1287 /* Called with genl_mutex. */
1288 static void change_datapath(struct datapath *dp, struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1289 {
1290         if (a[OVS_DP_ATTR_IPV4_FRAGS])
1291                 dp->drop_frags = nla_get_u32(a[OVS_DP_ATTR_IPV4_FRAGS]) == OVS_DP_FRAG_DROP;
1292         if (a[OVS_DP_ATTR_SAMPLING])
1293                 dp->sflow_probability = nla_get_u32(a[OVS_DP_ATTR_SAMPLING]);
1294 }
1295
1296 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1297 {
1298         struct nlattr **a = info->attrs;
1299         struct vport_parms parms;
1300         struct sk_buff *reply;
1301         struct datapath *dp;
1302         struct vport *vport;
1303         int err;
1304
1305         err = -EINVAL;
1306         if (!a[OVS_DP_ATTR_NAME])
1307                 goto err;
1308
1309         err = ovs_dp_cmd_validate(a);
1310         if (err)
1311                 goto err;
1312
1313         rtnl_lock();
1314         err = -ENODEV;
1315         if (!try_module_get(THIS_MODULE))
1316                 goto err_unlock_rtnl;
1317
1318         err = -ENOMEM;
1319         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1320         if (dp == NULL)
1321                 goto err_put_module;
1322         INIT_LIST_HEAD(&dp->port_list);
1323
1324         /* Initialize kobject for bridge.  This will be added as
1325          * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
1326         dp->ifobj.kset = NULL;
1327         kobject_init(&dp->ifobj, &dp_ktype);
1328
1329         /* Allocate table. */
1330         err = -ENOMEM;
1331         rcu_assign_pointer(dp->table, flow_tbl_alloc(TBL_MIN_BUCKETS));
1332         if (!dp->table)
1333                 goto err_free_dp;
1334
1335         dp->drop_frags = 0;
1336         dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1337         if (!dp->stats_percpu) {
1338                 err = -ENOMEM;
1339                 goto err_destroy_table;
1340         }
1341
1342         change_datapath(dp, a);
1343
1344         /* Set up our datapath device. */
1345         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1346         parms.type = OVS_VPORT_TYPE_INTERNAL;
1347         parms.options = NULL;
1348         parms.dp = dp;
1349         parms.port_no = OVSP_LOCAL;
1350         vport = new_vport(&parms);
1351         if (IS_ERR(vport)) {
1352                 err = PTR_ERR(vport);
1353                 if (err == -EBUSY)
1354                         err = -EEXIST;
1355
1356                 goto err_destroy_percpu;
1357         }
1358
1359         reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
1360         err = PTR_ERR(reply);
1361         if (IS_ERR(reply))
1362                 goto err_destroy_local_port;
1363
1364         list_add_tail(&dp->list_node, &dps);
1365         dp_sysfs_add_dp(dp);
1366
1367         rtnl_unlock();
1368
1369         genl_notify(reply, genl_info_net(info), info->snd_pid,
1370                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1371         return 0;
1372
1373 err_destroy_local_port:
1374         dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
1375 err_destroy_percpu:
1376         free_percpu(dp->stats_percpu);
1377 err_destroy_table:
1378         flow_tbl_destroy(get_table_protected(dp));
1379 err_free_dp:
1380         kfree(dp);
1381 err_put_module:
1382         module_put(THIS_MODULE);
1383 err_unlock_rtnl:
1384         rtnl_unlock();
1385 err:
1386         return err;
1387 }
1388
1389 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1390 {
1391         struct vport *vport, *next_vport;
1392         struct sk_buff *reply;
1393         struct datapath *dp;
1394         int err;
1395
1396         err = ovs_dp_cmd_validate(info->attrs);
1397         if (err)
1398                 goto exit;
1399
1400         rtnl_lock();
1401         dp = lookup_datapath(info->userhdr, info->attrs);
1402         err = PTR_ERR(dp);
1403         if (IS_ERR(dp))
1404                 goto exit_unlock;
1405
1406         reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_DEL);
1407         err = PTR_ERR(reply);
1408         if (IS_ERR(reply))
1409                 goto exit_unlock;
1410
1411         list_for_each_entry_safe (vport, next_vport, &dp->port_list, node)
1412                 if (vport->port_no != OVSP_LOCAL)
1413                         dp_detach_port(vport);
1414
1415         dp_sysfs_del_dp(dp);
1416         list_del(&dp->list_node);
1417         dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
1418
1419         /* rtnl_unlock() will wait until all the references to devices that
1420          * are pending unregistration have been dropped.  We do it here to
1421          * ensure that any internal devices (which contain DP pointers) are
1422          * fully destroyed before freeing the datapath.
1423          */
1424         rtnl_unlock();
1425
1426         call_rcu(&dp->rcu, destroy_dp_rcu);
1427         module_put(THIS_MODULE);
1428
1429         genl_notify(reply, genl_info_net(info), info->snd_pid,
1430                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1431
1432         return 0;
1433
1434 exit_unlock:
1435         rtnl_unlock();
1436 exit:
1437         return err;
1438 }
1439
1440 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1441 {
1442         struct sk_buff *reply;
1443         struct datapath *dp;
1444         int err;
1445
1446         err = ovs_dp_cmd_validate(info->attrs);
1447         if (err)
1448                 return err;
1449
1450         dp = lookup_datapath(info->userhdr, info->attrs);
1451         if (IS_ERR(dp))
1452                 return PTR_ERR(dp);
1453
1454         change_datapath(dp, info->attrs);
1455
1456         reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
1457         if (IS_ERR(reply)) {
1458                 err = PTR_ERR(reply);
1459                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1460                                 dp_datapath_multicast_group.id, err);
1461                 return 0;
1462         }
1463
1464         genl_notify(reply, genl_info_net(info), info->snd_pid,
1465                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1466         return 0;
1467 }
1468
1469 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1470 {
1471         struct sk_buff *reply;
1472         struct datapath *dp;
1473         int err;
1474
1475         err = ovs_dp_cmd_validate(info->attrs);
1476         if (err)
1477                 return err;
1478
1479         dp = lookup_datapath(info->userhdr, info->attrs);
1480         if (IS_ERR(dp))
1481                 return PTR_ERR(dp);
1482
1483         reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
1484         if (IS_ERR(reply))
1485                 return PTR_ERR(reply);
1486
1487         return genlmsg_reply(reply, info);
1488 }
1489
1490 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1491 {
1492         struct datapath *dp;
1493         int skip = cb->args[0];
1494         int i = 0;
1495
1496         list_for_each_entry (dp, &dps, list_node) {
1497                 if (i >= skip &&
1498                     ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
1499                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1500                                          OVS_DP_CMD_NEW) < 0)
1501                         break;
1502
1503                 i++;
1504         }
1505
1506         cb->args[0] = i;
1507
1508         return skb->len;
1509 }
1510
1511 static struct genl_ops dp_datapath_genl_ops[] = {
1512         { .cmd = OVS_DP_CMD_NEW,
1513           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1514           .policy = datapath_policy,
1515           .doit = ovs_dp_cmd_new
1516         },
1517         { .cmd = OVS_DP_CMD_DEL,
1518           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1519           .policy = datapath_policy,
1520           .doit = ovs_dp_cmd_del
1521         },
1522         { .cmd = OVS_DP_CMD_GET,
1523           .flags = 0,               /* OK for unprivileged users. */
1524           .policy = datapath_policy,
1525           .doit = ovs_dp_cmd_get,
1526           .dumpit = ovs_dp_cmd_dump
1527         },
1528         { .cmd = OVS_DP_CMD_SET,
1529           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1530           .policy = datapath_policy,
1531           .doit = ovs_dp_cmd_set,
1532         },
1533 };
1534
1535 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
1536 #ifdef HAVE_NLA_NUL_STRING
1537         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1538         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
1539         [OVS_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
1540 #else
1541         [OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct ovs_vport_stats) },
1542         [OVS_VPORT_ATTR_ADDRESS] = { .minlen = ETH_ALEN },
1543 #endif
1544         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1545         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1546         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1547 };
1548
1549 static struct genl_family dp_vport_genl_family = {
1550         .id = GENL_ID_GENERATE,
1551         .hdrsize = sizeof(struct ovs_header),
1552         .name = OVS_VPORT_FAMILY,
1553         .version = 1,
1554         .maxattr = OVS_VPORT_ATTR_MAX
1555 };
1556
1557 struct genl_multicast_group dp_vport_multicast_group = {
1558         .name = OVS_VPORT_MCGROUP
1559 };
1560
1561 /* Called with RTNL lock or RCU read lock. */
1562 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1563                                    u32 pid, u32 seq, u32 flags, u8 cmd)
1564 {
1565         struct ovs_header *ovs_header;
1566         struct nlattr *nla;
1567         int ifindex;
1568         int err;
1569
1570         ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
1571                                  flags, cmd);
1572         if (!ovs_header)
1573                 return -EMSGSIZE;
1574
1575         ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1576
1577         NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
1578         NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport_get_type(vport));
1579         NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport_get_name(vport));
1580
1581         nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats));
1582         if (!nla)
1583                 goto nla_put_failure;
1584
1585         vport_get_stats(vport, nla_data(nla));
1586
1587         NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));
1588
1589         err = vport_get_options(vport, skb);
1590         if (err == -EMSGSIZE)
1591                 goto error;
1592
1593         ifindex = vport_get_ifindex(vport);
1594         if (ifindex > 0)
1595                 NLA_PUT_U32(skb, OVS_VPORT_ATTR_IFINDEX, ifindex);
1596
1597         return genlmsg_end(skb, ovs_header);
1598
1599 nla_put_failure:
1600         err = -EMSGSIZE;
1601 error:
1602         genlmsg_cancel(skb, ovs_header);
1603         return err;
1604 }
1605
1606 /* Called with RTNL lock or RCU read lock. */
1607 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
1608                                          u32 seq, u8 cmd)
1609 {
1610         struct sk_buff *skb;
1611         int retval;
1612
1613         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1614         if (!skb)
1615                 return ERR_PTR(-ENOMEM);
1616
1617         retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
1618         if (retval < 0) {
1619                 kfree_skb(skb);
1620                 return ERR_PTR(retval);
1621         }
1622         return skb;
1623 }
1624
1625 static int ovs_vport_cmd_validate(struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1626 {
1627         return CHECK_NUL_STRING(a[OVS_VPORT_ATTR_NAME], IFNAMSIZ - 1);
1628 }
1629
1630 /* Called with RTNL lock or RCU read lock. */
1631 static struct vport *lookup_vport(struct ovs_header *ovs_header,
1632                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1633 {
1634         struct datapath *dp;
1635         struct vport *vport;
1636
1637         if (a[OVS_VPORT_ATTR_NAME]) {
1638                 vport = vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
1639                 if (!vport)
1640                         return ERR_PTR(-ENODEV);
1641                 return vport;
1642         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1643                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1644
1645                 if (port_no >= DP_MAX_PORTS)
1646                         return ERR_PTR(-EFBIG);
1647
1648                 dp = get_dp(ovs_header->dp_ifindex);
1649                 if (!dp)
1650                         return ERR_PTR(-ENODEV);
1651
1652                 vport = get_vport_protected(dp, port_no);
1653                 if (!vport)
1654                         return ERR_PTR(-ENOENT);
1655                 return vport;
1656         } else
1657                 return ERR_PTR(-EINVAL);
1658 }
1659
1660 /* Called with RTNL lock. */
1661 static int change_vport(struct vport *vport, struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1662 {
1663         int err = 0;
1664
1665         if (a[OVS_VPORT_ATTR_STATS])
1666                 vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
1667
1668         if (a[OVS_VPORT_ATTR_ADDRESS])
1669                 err = vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS]));
1670
1671         return err;
1672 }
1673
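/* Handler for OVS_VPORT_CMD_NEW: adds a vport to the datapath identified by
 * the request header.  If no port number is supplied, the lowest free port
 * number (starting from 1) is chosen.  On success a notification is sent to
 * the vport multicast group. */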
1674 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1675 {
1676         struct nlattr **a = info->attrs;
1677         struct ovs_header *ovs_header = info->userhdr;
1678         struct vport_parms parms;
1679         struct sk_buff *reply;
1680         struct vport *vport;
1681         struct datapath *dp;
1682         u32 port_no;
1683         int err;
1684
1685         err = -EINVAL;
1686         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE])
1687                 goto exit;
1688
1689         err = ovs_vport_cmd_validate(a);
1690         if (err)
1691                 goto exit;
1692
1693         rtnl_lock();
1694         dp = get_dp(ovs_header->dp_ifindex);
1695         err = -ENODEV;
1696         if (!dp)
1697                 goto exit_unlock;
1698
1699         if (a[OVS_VPORT_ATTR_PORT_NO]) {
1700                 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1701
1702                 err = -EFBIG;
1703                 if (port_no >= DP_MAX_PORTS)
1704                         goto exit_unlock;
1705
1706                 vport = get_vport_protected(dp, port_no);
1707                 err = -EBUSY;
1708                 if (vport)
1709                         goto exit_unlock;
1710         } else {
1711                 for (port_no = 1; ; port_no++) {
1712                         if (port_no >= DP_MAX_PORTS) {
1713                                 err = -EFBIG;
1714                                 goto exit_unlock;
1715                         }
1716                         vport = get_vport_protected(dp, port_no);
1717                         if (!vport)
1718                                 break;
1719                 }
1720         }
1721
1722         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1723         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1724         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1725         parms.dp = dp;
1726         parms.port_no = port_no;
1727
1728         vport = new_vport(&parms);
1729         err = PTR_ERR(vport);
1730         if (IS_ERR(vport))
1731                 goto exit_unlock;
1732
1733         dp_sysfs_add_if(vport);
1734
1735         err = change_vport(vport, a);
1736         if (!err) {
1737                 reply = ovs_vport_cmd_build_info(vport, info->snd_pid,
1738                                                  info->snd_seq, OVS_VPORT_CMD_NEW);
1739                 if (IS_ERR(reply))
1740                         err = PTR_ERR(reply);
1741         }
1742         if (err) {
1743                 dp_detach_port(vport);
1744                 goto exit_unlock;
1745         }
1746         genl_notify(reply, genl_info_net(info), info->snd_pid,
1747                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1748
1750 exit_unlock:
1751         rtnl_unlock();
1752 exit:
1753         return err;
1754 }
1755
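/* Handler for OVS_VPORT_CMD_SET: updates an existing vport's options, stats
 * and/or Ethernet address under RTNL, then notifies the vport multicast
 * group with the new state. */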
1756 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1757 {
1758         struct nlattr **a = info->attrs;
1759         struct sk_buff *reply;
1760         struct vport *vport;
1761         int err;
1762
1763         err = ovs_vport_cmd_validate(a);
1764         if (err)
1765                 goto exit;
1766
1767         rtnl_lock();
1768         vport = lookup_vport(info->userhdr, a);
1769         err = PTR_ERR(vport);
1770         if (IS_ERR(vport))
1771                 goto exit_unlock;
1772
1773         err = 0;
1774         if (a[OVS_VPORT_ATTR_OPTIONS])
1775                 err = vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
1776         if (!err)
1777                 err = change_vport(vport, a);
1778
1779         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1780                                          OVS_VPORT_CMD_NEW);
1781         if (IS_ERR(reply)) {
1782                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1783                                 dp_vport_multicast_group.id, PTR_ERR(reply));
1784                 err = 0;
1785                 goto exit_unlock;
1786         }
1787
1788         genl_notify(reply, genl_info_net(info), info->snd_pid,
1789                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1790
1791 exit_unlock:
1792         rtnl_unlock();
1793 exit:
1794         return err;
1795 }
1796
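/* Handler for OVS_VPORT_CMD_DEL: detaches a vport from its datapath and
 * notifies the vport multicast group.  Deleting the local port (OVSP_LOCAL)
 * is rejected with -EINVAL. */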
1797 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1798 {
1799         struct nlattr **a = info->attrs;
1800         struct sk_buff *reply;
1801         struct vport *vport;
1802         int err;
1803
1804         err = ovs_vport_cmd_validate(a);
1805         if (err)
1806                 goto exit;
1807
1808         rtnl_lock();
1809         vport = lookup_vport(info->userhdr, a);
1810         err = PTR_ERR(vport);
1811         if (IS_ERR(vport))
1812                 goto exit_unlock;
1813
1814         if (vport->port_no == OVSP_LOCAL) {
1815                 err = -EINVAL;
1816                 goto exit_unlock;
1817         }
1818
1819         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1820                                          OVS_VPORT_CMD_DEL);
1821         err = PTR_ERR(reply);
1822         if (IS_ERR(reply))
1823                 goto exit_unlock;
1824
1825         dp_detach_port(vport);
1826
1827         genl_notify(reply, genl_info_net(info), info->snd_pid,
1828                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1829
1830 exit_unlock:
1831         rtnl_unlock();
1832 exit:
1833         return err;
1834 }
1835
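/* Handler for OVS_VPORT_CMD_GET: read-only lookup under the RCU read lock;
 * replies to the requester with a single OVS_VPORT_CMD_NEW message. */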
1836 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
1837 {
1838         struct nlattr **a = info->attrs;
1839         struct ovs_header *ovs_header = info->userhdr;
1840         struct sk_buff *reply;
1841         struct vport *vport;
1842         int err;
1843
1844         err = ovs_vport_cmd_validate(a);
1845         if (err)
1846                 goto exit;
1847
1848         rcu_read_lock();
1849         vport = lookup_vport(ovs_header, a);
1850         err = PTR_ERR(vport);
1851         if (IS_ERR(vport))
1852                 goto exit_unlock;
1853
1854         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1855                                          OVS_VPORT_CMD_NEW);
1856         err = PTR_ERR(reply);
1857         if (IS_ERR(reply))
1858                 goto exit_unlock;
1859
1860         rcu_read_unlock();
1861
1862         return genlmsg_reply(reply, info);
1863
1864 exit_unlock:
1865         rcu_read_unlock();
1866 exit:
1867         return err;
1868 }
1869
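/* Dump handler for OVS_VPORT_CMD_GET: walks every port number of the
 * requested datapath.  cb->args[0] is the port number at which to resume on
 * the next call. */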
1870 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1871 {
1872         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1873         struct datapath *dp;
1874         u32 port_no;
1875         int retval;
1876
1877         dp = get_dp(ovs_header->dp_ifindex);
1878         if (!dp)
1879                 return -ENODEV;
1880
1881         rcu_read_lock();
1882         for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
1883                 struct vport *vport;
1884
1885                 vport = get_vport_protected(dp, port_no);
1886                 if (!vport)
1887                         continue;
1888
1889                 if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
1890                                             cb->nlh->nlmsg_seq, NLM_F_MULTI,
1891                                             OVS_VPORT_CMD_NEW) < 0)
1892                         break;
1893         }
1894         rcu_read_unlock();
1895
1896         cb->args[0] = port_no;
1897         retval = skb->len;
1898
1899         return retval;
1900 }
1901
1902 static struct genl_ops dp_vport_genl_ops[] = {
1903         { .cmd = OVS_VPORT_CMD_NEW,
1904           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1905           .policy = vport_policy,
1906           .doit = ovs_vport_cmd_new
1907         },
1908         { .cmd = OVS_VPORT_CMD_DEL,
1909           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1910           .policy = vport_policy,
1911           .doit = ovs_vport_cmd_del
1912         },
1913         { .cmd = OVS_VPORT_CMD_GET,
1914           .flags = 0,               /* OK for unprivileged users. */
1915           .policy = vport_policy,
1916           .doit = ovs_vport_cmd_get,
1917           .dumpit = ovs_vport_cmd_dump
1918         },
1919         { .cmd = OVS_VPORT_CMD_SET,
1920           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1921           .policy = vport_policy,
1922           .doit = ovs_vport_cmd_set,
1923         },
1924 };
1925
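/* Ties together a Generic Netlink family, its operations and (optionally)
 * its multicast group so that registration and unregistration can be driven
 * from the dp_genl_families[] table below. */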
1926 struct genl_family_and_ops {
1927         struct genl_family *family;
1928         struct genl_ops *ops;
1929         int n_ops;
1930         struct genl_multicast_group *group;
1931 };
1932
1933 static const struct genl_family_and_ops dp_genl_families[] = {
1934         { &dp_datapath_genl_family,
1935           dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
1936           &dp_datapath_multicast_group },
1937         { &dp_vport_genl_family,
1938           dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
1939           &dp_vport_multicast_group },
1940         { &dp_flow_genl_family,
1941           dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
1942           &dp_flow_multicast_group },
1943         { &dp_packet_genl_family,
1944           dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
1945           NULL },
1946 };
1947
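/* Unregisters the first n_families entries of dp_genl_families[].  Passing
 * ARRAY_SIZE(dp_genl_families) unregisters everything; a smaller count is
 * used to unwind a partially completed dp_register_genl(). */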
1948 static void dp_unregister_genl(int n_families)
1949 {
1950         int i;
1951
1952         for (i = 0; i < n_families; i++)
1953                 genl_unregister_family(dp_genl_families[i].family);
1954 }
1955
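/* Registers every Generic Netlink family in dp_genl_families[], along with
 * its multicast group where one is defined, plus the multicast groups used
 * by the packet family (packet_register_mc_groups()).  On failure, families
 * registered so far are torn down again. */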
1956 static int dp_register_genl(void)
1957 {
1958         int n_registered;
1959         int err;
1960         int i;
1961
1962         n_registered = 0;
1963         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
1964                 const struct genl_family_and_ops *f = &dp_genl_families[i];
1965
1966                 err = genl_register_family_with_ops(f->family, f->ops,
1967                                                     f->n_ops);
1968                 if (err)
1969                         goto error;
1970                 n_registered++;
1971
1972                 if (f->group) {
1973                         err = genl_register_mc_group(f->family, f->group);
1974                         if (err)
1975                                 goto error;
1976                 }
1977         }
1978
1979         err = packet_register_mc_groups();
1980         if (err)
1981                 goto error;
1982         return 0;
1983
1984 error:
1985         dp_unregister_genl(n_registered);
1986         return err;
1987 }
1988
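/* Module init: brings up the tunnel, flow and vport subsystems, registers
 * the netdevice notifier and finally the Generic Netlink families.  Each
 * step is unwound in reverse order if a later one fails. */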
1989 static int __init dp_init(void)
1990 {
1991         struct sk_buff *dummy_skb;
1992         int err;
1993
1994         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
1995
1996         pr_info("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
1997
1998         err = tnl_init();
1999         if (err)
2000                 goto error;
2001
2002         err = flow_init();
2003         if (err)
2004                 goto error_tnl_exit;
2005
2006         err = vport_init();
2007         if (err)
2008                 goto error_flow_exit;
2009
2010         err = register_netdevice_notifier(&dp_device_notifier);
2011         if (err)
2012                 goto error_vport_exit;
2013
2014         err = dp_register_genl();
2015         if (err < 0)
2016                 goto error_unreg_notifier;
2017
2018         return 0;
2019
2020 error_unreg_notifier:
2021         unregister_netdevice_notifier(&dp_device_notifier);
2022 error_vport_exit:
2023         vport_exit();
2024 error_flow_exit:
2025         flow_exit();
2026 error_tnl_exit:
2027         tnl_exit();
2028 error:
2029         return err;
2030 }
2031
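/* Module exit: waits for outstanding RCU callbacks with rcu_barrier(), then
 * tears everything down in the reverse order of dp_init(). */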
2032 static void dp_cleanup(void)
2033 {
2034         rcu_barrier();
2035         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2036         unregister_netdevice_notifier(&dp_device_notifier);
2037         vport_exit();
2038         flow_exit();
2039         tnl_exit();
2040 }
2041
2042 module_init(dp_init);
2043 module_exit(dp_cleanup);
2044
2045 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2046 MODULE_LICENSE("GPL");