datapath: Require explicit upcall_pid for new datapaths and vports.
datapath/datapath.c (sliver-openvswitch.git)
1 /*
2  * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
3  * Distributed under the terms of the GNU GPL version 2.
4  *
5  * Significant portions of this file may be copied from parts of the Linux
6  * kernel, by Linus Torvalds and others.
7  */
8
9 /* Functions for managing the dp interface/device. */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/if_arp.h>
16 #include <linux/if_vlan.h>
17 #include <linux/in.h>
18 #include <linux/ip.h>
19 #include <linux/jhash.h>
20 #include <linux/delay.h>
21 #include <linux/time.h>
22 #include <linux/etherdevice.h>
23 #include <linux/genetlink.h>
24 #include <linux/kernel.h>
25 #include <linux/kthread.h>
26 #include <linux/mutex.h>
27 #include <linux/percpu.h>
28 #include <linux/rcupdate.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/version.h>
32 #include <linux/ethtool.h>
33 #include <linux/wait.h>
34 #include <asm/system.h>
35 #include <asm/div64.h>
36 #include <asm/bug.h>
37 #include <linux/highmem.h>
38 #include <linux/netfilter_bridge.h>
39 #include <linux/netfilter_ipv4.h>
40 #include <linux/inetdevice.h>
41 #include <linux/list.h>
42 #include <linux/rculist.h>
43 #include <linux/dmi.h>
44 #include <net/inet_ecn.h>
45 #include <net/genetlink.h>
46
47 #include "openvswitch/datapath-protocol.h"
48 #include "checksum.h"
49 #include "datapath.h"
50 #include "actions.h"
51 #include "flow.h"
52 #include "vlan.h"
53 #include "tunnel.h"
54 #include "vport-internal_dev.h"
55
56 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
57     LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
58 #error Kernels before 2.6.18 or after 3.0 are not supported by this version of Open vSwitch.
59 #endif
60
61 int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
62 EXPORT_SYMBOL(dp_ioctl_hook);
63
64 /**
65  * DOC: Locking:
66  *
67  * Writes to device state (add/remove datapath, port, set operations on vports,
68  * etc.) are protected by RTNL.
69  *
70  * Writes to other state (flow table modifications, set miscellaneous datapath
71  * parameters such as drop frags, etc.) are protected by genl_mutex.  The RTNL
72  * lock nests inside genl_mutex.
73  *
74  * Reads are protected by RCU.
75  *
76  * There are a few special cases (mostly stats) that have their own
77  * synchronization but they nest under all of the above and don't interact with
78  * each other.
79  */
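
/*
 * A minimal sketch (not part of the original file) of how the rules above
 * typically look in a reader and a writer.  get_dp() and the port array
 * usage below mirror code further down in this file; names are illustrative.
 *
 *   Read side (e.g. packet processing):
 *       rcu_read_lock();
 *       dp = get_dp(dp_ifindex);
 *       if (dp)
 *               vport = rcu_dereference(dp->ports[port_no]);
 *       rcu_read_unlock();
 *
 *   Write side (Generic Netlink handlers already hold genl_mutex, and RTNL
 *   nests inside it):
 *       rtnl_lock();
 *       rcu_assign_pointer(dp->ports[port_no], new_vport);
 *       rtnl_unlock();
 */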
80
81 /* Global list of datapaths to enable dumping them all out.
82  * Protected by genl_mutex.
83  */
84 static LIST_HEAD(dps);
85
86 static struct vport *new_vport(const struct vport_parms *);
87 static int queue_userspace_packets(struct datapath *, struct sk_buff *,
88                                  const struct dp_upcall_info *);
89
90 /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
91 struct datapath *get_dp(int dp_ifindex)
92 {
93         struct datapath *dp = NULL;
94         struct net_device *dev;
95
96         rcu_read_lock();
97         dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
98         if (dev) {
99                 struct vport *vport = internal_dev_get_vport(dev);
100                 if (vport)
101                         dp = vport->dp;
102         }
103         rcu_read_unlock();
104
105         return dp;
106 }
107 EXPORT_SYMBOL_GPL(get_dp);
108
109 /* Must be called with genl_mutex. */
110 static struct flow_table *get_table_protected(struct datapath *dp)
111 {
112         return rcu_dereference_protected(dp->table, lockdep_genl_is_held());
113 }
114
115 /* Must be called with rcu_read_lock or RTNL lock. */
116 static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
117 {
118         return rcu_dereference_rtnl(dp->ports[port_no]);
119 }
120
121 /* Must be called with rcu_read_lock or RTNL lock. */
122 const char *dp_name(const struct datapath *dp)
123 {
124         return vport_get_name(rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]));
125 }
126
127 static int get_dpifindex(struct datapath *dp)
128 {
129         struct vport *local;
130         int ifindex;
131
132         rcu_read_lock();
133
134         local = get_vport_protected(dp, OVSP_LOCAL);
135         if (local)
136                 ifindex = vport_get_ifindex(local);
137         else
138                 ifindex = 0;
139
140         rcu_read_unlock();
141
142         return ifindex;
143 }
144
145 static inline size_t br_nlmsg_size(void)
146 {
147         return NLMSG_ALIGN(sizeof(struct ifinfomsg))
148                + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
149                + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
150                + nla_total_size(4) /* IFLA_MASTER */
151                + nla_total_size(4) /* IFLA_MTU */
152                + nla_total_size(1); /* IFLA_OPERSTATE */
153 }
154
155 /* Caller must hold RTNL lock. */
156 static int dp_fill_ifinfo(struct sk_buff *skb,
157                           const struct vport *port,
158                           int event, unsigned int flags)
159 {
160         struct datapath *dp = port->dp;
161         int ifindex = vport_get_ifindex(port);
162         struct ifinfomsg *hdr;
163         struct nlmsghdr *nlh;
164
165         if (ifindex < 0)
166                 return ifindex;
167
168         nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
169         if (nlh == NULL)
170                 return -EMSGSIZE;
171
172         hdr = nlmsg_data(nlh);
173         hdr->ifi_family = AF_BRIDGE;
174         hdr->__ifi_pad = 0;
175         hdr->ifi_type = ARPHRD_ETHER;
176         hdr->ifi_index = ifindex;
177         hdr->ifi_flags = vport_get_flags(port);
178         hdr->ifi_change = 0;
179
180         NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
181         NLA_PUT_U32(skb, IFLA_MASTER, get_dpifindex(dp));
182         NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
183 #ifdef IFLA_OPERSTATE
184         NLA_PUT_U8(skb, IFLA_OPERSTATE,
185                    vport_is_running(port)
186                         ? vport_get_operstate(port)
187                         : IF_OPER_DOWN);
188 #endif
189
190         NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));
191
192         return nlmsg_end(skb, nlh);
193
194 nla_put_failure:
195         nlmsg_cancel(skb, nlh);
196         return -EMSGSIZE;
197 }
198
199 /* Caller must hold RTNL lock. */
200 static void dp_ifinfo_notify(int event, struct vport *port)
201 {
202         struct sk_buff *skb;
203         int err = -ENOBUFS;
204
205         skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
206         if (skb == NULL)
207                 goto errout;
208
209         err = dp_fill_ifinfo(skb, port, event, 0);
210         if (err < 0) {
211                 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
212                 WARN_ON(err == -EMSGSIZE);
213                 kfree_skb(skb);
214                 goto errout;
215         }
216         rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
217         return;
218 errout:
219         if (err < 0)
220                 rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
221 }
222
223 static void release_dp(struct kobject *kobj)
224 {
225         struct datapath *dp = container_of(kobj, struct datapath, ifobj);
226         kfree(dp);
227 }
228
229 static struct kobj_type dp_ktype = {
230         .release = release_dp
231 };
232
233 static void destroy_dp_rcu(struct rcu_head *rcu)
234 {
235         struct datapath *dp = container_of(rcu, struct datapath, rcu);
236
237         flow_tbl_destroy(dp->table);
238         free_percpu(dp->stats_percpu);
239         kobject_put(&dp->ifobj);
240 }
241
242 /* Called with RTNL lock and genl_lock. */
243 static struct vport *new_vport(const struct vport_parms *parms)
244 {
245         struct vport *vport;
246
247         vport = vport_add(parms);
248         if (!IS_ERR(vport)) {
249                 struct datapath *dp = parms->dp;
250
251                 rcu_assign_pointer(dp->ports[parms->port_no], vport);
252                 list_add(&vport->node, &dp->port_list);
253
254                 dp_ifinfo_notify(RTM_NEWLINK, vport);
255         }
256
257         return vport;
258 }
259
260 /* Called with RTNL lock. */
261 void dp_detach_port(struct vport *p)
262 {
263         ASSERT_RTNL();
264
265         if (p->port_no != OVSP_LOCAL)
266                 dp_sysfs_del_if(p);
267         dp_ifinfo_notify(RTM_DELLINK, p);
268
269         /* First drop references to device. */
270         list_del(&p->node);
271         rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
272
273         /* Then destroy it. */
274         vport_del(p);
275 }
276
277 /* Must be called with rcu_read_lock. */
278 void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
279 {
280         struct datapath *dp = p->dp;
281         struct sw_flow *flow;
282         struct dp_stats_percpu *stats;
283         u64 *stats_counter;
284         int error;
285
286         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
287         OVS_CB(skb)->vport = p;
288
289         if (!OVS_CB(skb)->flow) {
290                 struct sw_flow_key key;
291                 int key_len;
292                 bool is_frag;
293
294                 /* Extract flow from 'skb' into 'key'. */
295                 error = flow_extract(skb, p->port_no, &key, &key_len, &is_frag);
296                 if (unlikely(error)) {
297                         kfree_skb(skb);
298                         return;
299                 }
300
301                 if (is_frag && dp->drop_frags) {
302                         consume_skb(skb);
303                         stats_counter = &stats->n_frags;
304                         goto out;
305                 }
306
307                 /* Look up flow. */
308                 flow = flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
309                 if (unlikely(!flow)) {
310                         struct dp_upcall_info upcall;
311
312                         upcall.cmd = OVS_PACKET_CMD_MISS;
313                         upcall.key = &key;
314                         upcall.userdata = NULL;
315                         upcall.pid = p->upcall_pid;
316                         dp_upcall(dp, skb, &upcall);
317                         kfree_skb(skb);
318                         stats_counter = &stats->n_missed;
319                         goto out;
320                 }
321
322                 OVS_CB(skb)->flow = flow;
323         }
324
325         stats_counter = &stats->n_hit;
326         flow_used(OVS_CB(skb)->flow, skb);
327         execute_actions(dp, skb);
328
329 out:
330         /* Update datapath statistics. */
331
332         write_seqcount_begin(&stats->seqlock);
333         (*stats_counter)++;
334         write_seqcount_end(&stats->seqlock);
335 }
336
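/* Copy 'skb' into the flat buffer 'to'.  The packet arrives here with
 * CHECKSUM_PARTIAL, i.e. its checksum has not been computed yet, so compute
 * it while copying and fold the result into the packet's checksum field;
 * the copy handed to userspace is then fully checksummed. */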
337 static void copy_and_csum_skb(struct sk_buff *skb, void *to)
338 {
339         u16 csum_start, csum_offset;
340         __wsum csum;
341
342         get_skb_csum_pointers(skb, &csum_start, &csum_offset);
343         csum_start -= skb_headroom(skb);
344
345         skb_copy_bits(skb, 0, to, csum_start);
346
347         csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
348                                       skb->len - csum_start, 0);
349         *(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
350 }
351
352 static struct genl_family dp_packet_genl_family = {
353         .id = GENL_ID_GENERATE,
354         .hdrsize = sizeof(struct ovs_header),
355         .name = OVS_PACKET_FAMILY,
356         .version = 1,
357         .maxattr = OVS_PACKET_ATTR_MAX
358 };
359
360 int dp_upcall(struct datapath *dp, struct sk_buff *skb,
361               const struct dp_upcall_info *upcall_info)
362 {
363         struct sk_buff *segs = NULL;
364         struct dp_stats_percpu *stats;
365         int err;
366
367         if (upcall_info->pid == 0) {
368                 err = -ENOTCONN;
369                 goto err;
370         }
371
372         forward_ip_summed(skb, true);
373
374         /* Break apart GSO packets into their component pieces.  Otherwise
375          * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
376         if (skb_is_gso(skb)) {
377                 segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
378
379                 if (IS_ERR(segs)) {
380                         err = PTR_ERR(segs);
381                         goto err;
382                 }
383                 skb = segs;
384         }
385
386         err = queue_userspace_packets(dp, skb, upcall_info);
387         if (segs) {
388                 struct sk_buff *next;
389                 /* Free GSO-segments */
390                 do {
391                         next = segs->next;
392                         kfree_skb(segs);
393                 } while ((segs = next) != NULL);
394         }
395
396         if (err)
397                 goto err;
398
399         return 0;
400
401 err:
402         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
403
404         write_seqcount_begin(&stats->seqlock);
405         stats->n_lost++;
406         write_seqcount_end(&stats->seqlock);
407
408         return err;
409 }
410
411 /* Send each packet in the 'skb' list to userspace for 'dp' as directed by
412  * 'upcall_info'.  There will be only one packet unless we broke up a GSO
413  * packet.
414  */
415 static int queue_userspace_packets(struct datapath *dp, struct sk_buff *skb,
416                                    const struct dp_upcall_info *upcall_info)
417 {
418         int dp_ifindex;
419
420         dp_ifindex = get_dpifindex(dp);
421         if (!dp_ifindex)
422                 return -ENODEV;
423
424         do {
425                 struct ovs_header *upcall;
426                 struct sk_buff *user_skb; /* to be queued to userspace */
427                 struct nlattr *nla;
428                 unsigned int len;
429                 int err;
430
431                 err = vlan_deaccel_tag(skb);
432                 if (unlikely(err))
433                         return err;
434
435                 if (nla_attr_size(skb->len) > USHRT_MAX)
436                         return -EFBIG;
437
438                 len = sizeof(struct ovs_header);
439                 len += nla_total_size(skb->len);
440                 len += nla_total_size(FLOW_BUFSIZE);
441                 if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
442                         len += nla_total_size(8);
443
444                 user_skb = genlmsg_new(len, GFP_ATOMIC);
445                 if (!user_skb)
446                         return -ENOMEM;
447
448                 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
449                                          0, upcall_info->cmd);
450                 upcall->dp_ifindex = dp_ifindex;
451
452                 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
453                 flow_to_nlattrs(upcall_info->key, user_skb);
454                 nla_nest_end(user_skb, nla);
455
456                 if (upcall_info->userdata)
457                         nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
458                                     nla_get_u64(upcall_info->userdata));
459
460                 nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
461                 if (skb->ip_summed == CHECKSUM_PARTIAL)
462                         copy_and_csum_skb(skb, nla_data(nla));
463                 else
464                         skb_copy_bits(skb, 0, nla_data(nla), skb->len);
465
466                 err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid);
467                 if (err)
468                         return err;
469
470         } while ((skb = skb->next));
471
472         return 0;
473 }
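
/*
 * Sketch of the OVS_PACKET_FAMILY message assembled above, as a userspace
 * consumer would see it (illustrative only; parse attributes by type, not
 * by position):
 *
 *   struct ovs_header         dp_ifindex of the datapath
 *   OVS_PACKET_ATTR_KEY       nested flow key attributes
 *   OVS_PACKET_ATTR_USERDATA  optional u64, present only when the upcall
 *                             carried userdata (OVS_PACKET_CMD_ACTION)
 *   OVS_PACKET_ATTR_PACKET    the frame itself (GSO packets arrive as one
 *                             message per segment)
 *
 * Each message is unicast to upcall_info->pid, which is why dp_upcall()
 * rejects a pid of zero with -ENOTCONN.
 */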
474
475 /* Called with genl_mutex. */
476 static int flush_flows(int dp_ifindex)
477 {
478         struct flow_table *old_table;
479         struct flow_table *new_table;
480         struct datapath *dp;
481
482         dp = get_dp(dp_ifindex);
483         if (!dp)
484                 return -ENODEV;
485
486         old_table = get_table_protected(dp);
487         new_table = flow_tbl_alloc(TBL_MIN_BUCKETS);
488         if (!new_table)
489                 return -ENOMEM;
490
491         rcu_assign_pointer(dp->table, new_table);
492
493         flow_tbl_deferred_destroy(old_table);
494         return 0;
495 }
496
497 static int validate_actions(const struct nlattr *attr, int depth);
498
499 static int validate_sample(const struct nlattr *attr, int depth)
500 {
501         static const struct nla_policy sample_policy[OVS_SAMPLE_ATTR_MAX + 1] =
502         {
503                 [OVS_SAMPLE_ATTR_PROBABILITY] = {.type = NLA_U32 },
504                 [OVS_SAMPLE_ATTR_ACTIONS] = {.type = NLA_UNSPEC },
505         };
506         struct nlattr *a[OVS_SAMPLE_ATTR_MAX + 1];
507         int error;
508
509         error = nla_parse_nested(a, OVS_SAMPLE_ATTR_MAX, attr, sample_policy);
510         if (error)
511                 return error;
512
513         if (!a[OVS_SAMPLE_ATTR_PROBABILITY])
514                 return -EINVAL;
515         if (!a[OVS_SAMPLE_ATTR_ACTIONS])
516                 return -EINVAL;
517
518         return validate_actions(a[OVS_SAMPLE_ATTR_ACTIONS], (depth + 1));
519 }
520
521 static int validate_userspace(const struct nlattr *attr)
522 {
523         static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] =
524         {
525                 [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
526                 [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
527         };
528         struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
529         int error;
530
531         error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, attr, userspace_policy);
532         if (error)
533                 return error;
534
535         if (!a[OVS_USERSPACE_ATTR_PID] || !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
536                 return -EINVAL;
537
538         return 0;
539 }
540
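/* Validate a (possibly nested) list of flow actions.  'depth' counts how
 * many levels of OVS_ACTION_ATTR_SAMPLE we have recursed through so far;
 * it is capped at SAMPLE_ACTION_DEPTH to bound recursion. */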
541 static int validate_actions(const struct nlattr *attr, int depth)
542 {
543         const struct nlattr *a;
544         int rem, err;
545
546         if (depth >= SAMPLE_ACTION_DEPTH)
547                 return -EOVERFLOW;
548
549         nla_for_each_nested(a, attr, rem) {
550                 /* Expected argument lengths, (u32)-1 for variable length. */
551                 static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
552                         [OVS_ACTION_ATTR_OUTPUT] = 4,
553                         [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
554                         [OVS_ACTION_ATTR_PUSH_VLAN] = 2,
555                         [OVS_ACTION_ATTR_POP_VLAN] = 0,
556                         [OVS_ACTION_ATTR_SET_DL_SRC] = ETH_ALEN,
557                         [OVS_ACTION_ATTR_SET_DL_DST] = ETH_ALEN,
558                         [OVS_ACTION_ATTR_SET_NW_SRC] = 4,
559                         [OVS_ACTION_ATTR_SET_NW_DST] = 4,
560                         [OVS_ACTION_ATTR_SET_NW_TOS] = 1,
561                         [OVS_ACTION_ATTR_SET_TP_SRC] = 2,
562                         [OVS_ACTION_ATTR_SET_TP_DST] = 2,
563                         [OVS_ACTION_ATTR_SET_TUNNEL] = 8,
564                         [OVS_ACTION_ATTR_SET_PRIORITY] = 4,
565                         [OVS_ACTION_ATTR_POP_PRIORITY] = 0,
566                         [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
567                 };
568                 int type = nla_type(a);
569
570                 if (type > OVS_ACTION_ATTR_MAX ||
571                     (action_lens[type] != nla_len(a) &&
572                      action_lens[type] != (u32)-1))
573                         return -EINVAL;
574
575                 switch (type) {
576                 case OVS_ACTION_ATTR_UNSPEC:
577                         return -EINVAL;
578
579                 case OVS_ACTION_ATTR_POP_VLAN:
580                 case OVS_ACTION_ATTR_SET_DL_SRC:
581                 case OVS_ACTION_ATTR_SET_DL_DST:
582                 case OVS_ACTION_ATTR_SET_NW_SRC:
583                 case OVS_ACTION_ATTR_SET_NW_DST:
584                 case OVS_ACTION_ATTR_SET_TP_SRC:
585                 case OVS_ACTION_ATTR_SET_TP_DST:
586                 case OVS_ACTION_ATTR_SET_TUNNEL:
587                 case OVS_ACTION_ATTR_SET_PRIORITY:
588                 case OVS_ACTION_ATTR_POP_PRIORITY:
589                         /* No validation needed. */
590                         break;
591
592                 case OVS_ACTION_ATTR_USERSPACE:
593                         err = validate_userspace(a);
594                         if (err)
595                                 return err;
596                         break;
597
598                 case OVS_ACTION_ATTR_OUTPUT:
599                         if (nla_get_u32(a) >= DP_MAX_PORTS)
600                                 return -EINVAL;
601                         break;
602
603                 case OVS_ACTION_ATTR_PUSH_VLAN:
604                         if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
605                                 return -EINVAL;
606                         break;
607
608                 case OVS_ACTION_ATTR_SET_NW_TOS:
609                         if (nla_get_u8(a) & INET_ECN_MASK)
610                                 return -EINVAL;
611                         break;
612
613                 case OVS_ACTION_ATTR_SAMPLE:
614                         err = validate_sample(a, depth);
615                         if (err)
616                                 return err;
617                         break;
618
619                 default:
620                         return -EOPNOTSUPP;
621                 }
622         }
623
624         if (rem > 0)
625                 return -EINVAL;
626
627         return 0;
628 }
629 static void clear_stats(struct sw_flow *flow)
630 {
631         flow->used = 0;
632         flow->tcp_flags = 0;
633         flow->packet_count = 0;
634         flow->byte_count = 0;
635 }
636
637 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
638 {
639         struct ovs_header *ovs_header = info->userhdr;
640         struct nlattr **a = info->attrs;
641         struct sw_flow_actions *acts;
642         struct sk_buff *packet;
643         struct sw_flow *flow;
644         struct datapath *dp;
645         struct ethhdr *eth;
646         bool is_frag;
647         int len;
648         int err;
649         int key_len;
650
651         err = -EINVAL;
652         if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
653             !a[OVS_PACKET_ATTR_ACTIONS] ||
654             nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
655                 goto err;
656
657         err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], 0);
658         if (err)
659                 goto err;
660
661         len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
662         packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
663         err = -ENOMEM;
664         if (!packet)
665                 goto err;
666         skb_reserve(packet, NET_IP_ALIGN);
667
668         memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);
669
670         skb_reset_mac_header(packet);
671         eth = eth_hdr(packet);
672
673         /* Normally, setting the skb 'protocol' field would be handled by a
674          * call to eth_type_trans(), but it assumes there's a sending
675          * device, which we may not have. */
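        /* 1536 (0x600) is the boundary between 802.3 length fields and
         * Ethernet II EtherTypes. */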
676         if (ntohs(eth->h_proto) >= 1536)
677                 packet->protocol = eth->h_proto;
678         else
679                 packet->protocol = htons(ETH_P_802_2);
680
681         /* Build an sw_flow for sending this packet. */
682         flow = flow_alloc();
683         err = PTR_ERR(flow);
684         if (IS_ERR(flow))
685                 goto err_kfree_skb;
686
687         err = flow_extract(packet, -1, &flow->key, &key_len, &is_frag);
688         if (err)
689                 goto err_flow_put;
690
691         err = flow_metadata_from_nlattrs(&flow->key.eth.in_port,
692                                          &flow->key.eth.tun_id,
693                                          a[OVS_PACKET_ATTR_KEY]);
694         if (err)
695                 goto err_flow_put;
696
697         flow->hash = flow_hash(&flow->key, key_len);
698
699         acts = flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
700         err = PTR_ERR(acts);
701         if (IS_ERR(acts))
702                 goto err_flow_put;
703         rcu_assign_pointer(flow->sf_acts, acts);
704
705         OVS_CB(packet)->flow = flow;
706
707         rcu_read_lock();
708         dp = get_dp(ovs_header->dp_ifindex);
709         err = -ENODEV;
710         if (!dp)
711                 goto err_unlock;
712
713         if (flow->key.eth.in_port < DP_MAX_PORTS)
714                 OVS_CB(packet)->vport = get_vport_protected(dp,
715                                                         flow->key.eth.in_port);
716
717         local_bh_disable();
718         err = execute_actions(dp, packet);
719         local_bh_enable();
720         rcu_read_unlock();
721
722         flow_put(flow);
723         return err;
724
725 err_unlock:
726         rcu_read_unlock();
727 err_flow_put:
728         flow_put(flow);
729 err_kfree_skb:
730         kfree_skb(packet);
731 err:
732         return err;
733 }
734
735 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
736         [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
737         [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
738         [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
739 };
740
741 static struct genl_ops dp_packet_genl_ops[] = {
742         { .cmd = OVS_PACKET_CMD_EXECUTE,
743           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
744           .policy = packet_policy,
745           .doit = ovs_packet_cmd_execute
746         }
747 };
748
749 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
750 {
751         int i;
752         struct flow_table *table = get_table_protected(dp);
753
754         stats->n_flows = flow_tbl_count(table);
755
756         stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0;
757         for_each_possible_cpu(i) {
758                 const struct dp_stats_percpu *percpu_stats;
759                 struct dp_stats_percpu local_stats;
760                 unsigned seqcount;
761
762                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
763
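                /* Re-read if a writer updated the counters while we copied,
                 * so 'local_stats' holds an internally consistent snapshot. */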
764                 do {
765                         seqcount = read_seqcount_begin(&percpu_stats->seqlock);
766                         local_stats = *percpu_stats;
767                 } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
768
769                 stats->n_frags += local_stats.n_frags;
770                 stats->n_hit += local_stats.n_hit;
771                 stats->n_missed += local_stats.n_missed;
772                 stats->n_lost += local_stats.n_lost;
773         }
774 }
775
776 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
777         [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
778         [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
779         [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
780 };
781
782 static struct genl_family dp_flow_genl_family = {
783         .id = GENL_ID_GENERATE,
784         .hdrsize = sizeof(struct ovs_header),
785         .name = OVS_FLOW_FAMILY,
786         .version = 1,
787         .maxattr = OVS_FLOW_ATTR_MAX
788 };
789
790 static struct genl_multicast_group dp_flow_multicast_group = {
791         .name = OVS_FLOW_MCGROUP
792 };
793
794 /* Called with genl_lock. */
795 static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
796                                   struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd)
797 {
798         const int skb_orig_len = skb->len;
799         const struct sw_flow_actions *sf_acts;
800         struct ovs_flow_stats stats;
801         struct ovs_header *ovs_header;
802         struct nlattr *nla;
803         unsigned long used;
804         u8 tcp_flags;
805         int err;
806
807         sf_acts = rcu_dereference_protected(flow->sf_acts,
808                                             lockdep_genl_is_held());
809
810         ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
811         if (!ovs_header)
812                 return -EMSGSIZE;
813
814         ovs_header->dp_ifindex = get_dpifindex(dp);
815
816         nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
817         if (!nla)
818                 goto nla_put_failure;
819         err = flow_to_nlattrs(&flow->key, skb);
820         if (err)
821                 goto error;
822         nla_nest_end(skb, nla);
823
824         spin_lock_bh(&flow->lock);
825         used = flow->used;
826         stats.n_packets = flow->packet_count;
827         stats.n_bytes = flow->byte_count;
828         tcp_flags = flow->tcp_flags;
829         spin_unlock_bh(&flow->lock);
830
831         if (used)
832                 NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, flow_used_time(used));
833
834         if (stats.n_packets)
835                 NLA_PUT(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats);
836
837         if (tcp_flags)
838                 NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);
839
840         /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
841          * this is the first flow to be dumped into 'skb'.  This is unusual for
842          * Netlink but individual action lists can be longer than
843          * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
844          * The userspace caller can always fetch the actions separately if it
845          * really wants them.  (Most userspace callers in fact don't care.)
846          *
847          * This can only fail for dump operations because the skb is always
848          * properly sized for single flows.
849          */
850         err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
851                       sf_acts->actions);
852         if (err < 0 && skb_orig_len)
853                 goto error;
854
855         return genlmsg_end(skb, ovs_header);
856
857 nla_put_failure:
858         err = -EMSGSIZE;
859 error:
860         genlmsg_cancel(skb, ovs_header);
861         return err;
862 }
863
864 static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
865 {
866         const struct sw_flow_actions *sf_acts;
867         int len;
868
869         sf_acts = rcu_dereference_protected(flow->sf_acts,
870                                             lockdep_genl_is_held());
871
872         len = nla_total_size(FLOW_BUFSIZE); /* OVS_FLOW_ATTR_KEY */
873         len += nla_total_size(sf_acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
874         len += nla_total_size(sizeof(struct ovs_flow_stats)); /* OVS_FLOW_ATTR_STATS */
875         len += nla_total_size(1); /* OVS_FLOW_ATTR_TCP_FLAGS */
876         len += nla_total_size(8); /* OVS_FLOW_ATTR_USED */
877         return genlmsg_new(NLMSG_ALIGN(sizeof(struct ovs_header)) + len, GFP_KERNEL);
878 }
879
880 static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow, struct datapath *dp,
881                                                u32 pid, u32 seq, u8 cmd)
882 {
883         struct sk_buff *skb;
884         int retval;
885
886         skb = ovs_flow_cmd_alloc_info(flow);
887         if (!skb)
888                 return ERR_PTR(-ENOMEM);
889
890         retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
891         BUG_ON(retval < 0);
892         return skb;
893 }
894
895 static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
896 {
897         struct nlattr **a = info->attrs;
898         struct ovs_header *ovs_header = info->userhdr;
899         struct sw_flow_key key;
900         struct sw_flow *flow;
901         struct sk_buff *reply;
902         struct datapath *dp;
903         struct flow_table *table;
904         int error;
905         int key_len;
906
907         /* Extract key. */
908         error = -EINVAL;
909         if (!a[OVS_FLOW_ATTR_KEY])
910                 goto error;
911         error = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
912         if (error)
913                 goto error;
914
915         /* Validate actions. */
916         if (a[OVS_FLOW_ATTR_ACTIONS]) {
917                 error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], 0);
918                 if (error)
919                         goto error;
920         } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
921                 error = -EINVAL;
922                 goto error;
923         }
924
925         dp = get_dp(ovs_header->dp_ifindex);
926         error = -ENODEV;
927         if (!dp)
928                 goto error;
929
930         table = get_table_protected(dp);
931         flow = flow_tbl_lookup(table, &key, key_len);
932         if (!flow) {
933                 struct sw_flow_actions *acts;
934
935                 /* Bail out if we're not allowed to create a new flow. */
936                 error = -ENOENT;
937                 if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
938                         goto error;
939
940                 /* Expand table, if necessary, to make room. */
941                 if (flow_tbl_need_to_expand(table)) {
942                         struct flow_table *new_table;
943
944                         new_table = flow_tbl_expand(table);
945                         if (!IS_ERR(new_table)) {
946                                 rcu_assign_pointer(dp->table, new_table);
947                                 flow_tbl_deferred_destroy(table);
948                                 table = get_table_protected(dp);
949                         }
950                 }
951
952                 /* Allocate flow. */
953                 flow = flow_alloc();
954                 if (IS_ERR(flow)) {
955                         error = PTR_ERR(flow);
956                         goto error;
957                 }
958                 flow->key = key;
959                 clear_stats(flow);
960
961                 /* Obtain actions. */
962                 acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
963                 error = PTR_ERR(acts);
964                 if (IS_ERR(acts))
965                         goto error_free_flow;
966                 rcu_assign_pointer(flow->sf_acts, acts);
967
968                 /* Put flow in bucket. */
969                 flow->hash = flow_hash(&key, key_len);
970                 flow_tbl_insert(table, flow);
971
972                 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
973                                                 info->snd_seq, OVS_FLOW_CMD_NEW);
974         } else {
975                 /* We found a matching flow. */
976                 struct sw_flow_actions *old_acts;
977
978                 /* Bail out if we're not allowed to modify an existing flow.
979                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
980                  * because Generic Netlink treats the latter as a dump
981                  * request.  We also accept NLM_F_EXCL in case that bug ever
982                  * gets fixed.
983                  */
984                 error = -EEXIST;
985                 if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
986                     info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
987                         goto error;
988
989                 /* Update actions. */
990                 old_acts = rcu_dereference_protected(flow->sf_acts,
991                                                      lockdep_genl_is_held());
992                 if (a[OVS_FLOW_ATTR_ACTIONS] &&
993                     (old_acts->actions_len != nla_len(a[OVS_FLOW_ATTR_ACTIONS]) ||
994                      memcmp(old_acts->actions, nla_data(a[OVS_FLOW_ATTR_ACTIONS]),
995                             old_acts->actions_len))) {
996                         struct sw_flow_actions *new_acts;
997
998                         new_acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
999                         error = PTR_ERR(new_acts);
1000                         if (IS_ERR(new_acts))
1001                                 goto error;
1002
1003                         rcu_assign_pointer(flow->sf_acts, new_acts);
1004                         flow_deferred_free_acts(old_acts);
1005                 }
1006
1007                 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
1008                                                 info->snd_seq, OVS_FLOW_CMD_NEW);
1009
1010                 /* Clear stats. */
1011                 if (a[OVS_FLOW_ATTR_CLEAR]) {
1012                         spin_lock_bh(&flow->lock);
1013                         clear_stats(flow);
1014                         spin_unlock_bh(&flow->lock);
1015                 }
1016         }
1017
1018         if (!IS_ERR(reply))
1019                 genl_notify(reply, genl_info_net(info), info->snd_pid,
1020                             dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1021         else
1022                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1023                                 dp_flow_multicast_group.id, PTR_ERR(reply));
1024         return 0;
1025
1026 error_free_flow:
1027         flow_put(flow);
1028 error:
1029         return error;
1030 }
1031
1032 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1033 {
1034         struct nlattr **a = info->attrs;
1035         struct ovs_header *ovs_header = info->userhdr;
1036         struct sw_flow_key key;
1037         struct sk_buff *reply;
1038         struct sw_flow *flow;
1039         struct datapath *dp;
1040         struct flow_table *table;
1041         int err;
1042         int key_len;
1043
1044         if (!a[OVS_FLOW_ATTR_KEY])
1045                 return -EINVAL;
1046         err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1047         if (err)
1048                 return err;
1049
1050         dp = get_dp(ovs_header->dp_ifindex);
1051         if (!dp)
1052                 return -ENODEV;
1053
1054         table = get_table_protected(dp);
1055         flow = flow_tbl_lookup(table, &key, key_len);
1056         if (!flow)
1057                 return -ENOENT;
1058
1059         reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, OVS_FLOW_CMD_NEW);
1060         if (IS_ERR(reply))
1061                 return PTR_ERR(reply);
1062
1063         return genlmsg_reply(reply, info);
1064 }
1065
1066 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1067 {
1068         struct nlattr **a = info->attrs;
1069         struct ovs_header *ovs_header = info->userhdr;
1070         struct sw_flow_key key;
1071         struct sk_buff *reply;
1072         struct sw_flow *flow;
1073         struct datapath *dp;
1074         struct flow_table *table;
1075         int err;
1076         int key_len;
1077
1078         if (!a[OVS_FLOW_ATTR_KEY])
1079                 return flush_flows(ovs_header->dp_ifindex);
1080         err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1081         if (err)
1082                 return err;
1083
1084         dp = get_dp(ovs_header->dp_ifindex);
1085         if (!dp)
1086                 return -ENODEV;
1087
1088         table = get_table_protected(dp);
1089         flow = flow_tbl_lookup(table, &key, key_len);
1090         if (!flow)
1091                 return -ENOENT;
1092
1093         reply = ovs_flow_cmd_alloc_info(flow);
1094         if (!reply)
1095                 return -ENOMEM;
1096
1097         flow_tbl_remove(table, flow);
1098
1099         err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
1100                                      info->snd_seq, 0, OVS_FLOW_CMD_DEL);
1101         BUG_ON(err < 0);
1102
1103         flow_deferred_free(flow);
1104
1105         genl_notify(reply, genl_info_net(info), info->snd_pid,
1106                     dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1107         return 0;
1108 }
1109
1110 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1111 {
1112         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1113         struct datapath *dp;
1114
1115         dp = get_dp(ovs_header->dp_ifindex);
1116         if (!dp)
1117                 return -ENODEV;
1118
1119         for (;;) {
1120                 struct sw_flow *flow;
1121                 u32 bucket, obj;
1122
1123                 bucket = cb->args[0];
1124                 obj = cb->args[1];
1125                 flow = flow_tbl_next(get_table_protected(dp), &bucket, &obj);
1126                 if (!flow)
1127                         break;
1128
1129                 if (ovs_flow_cmd_fill_info(flow, dp, skb, NETLINK_CB(cb->skb).pid,
1130                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1131                                            OVS_FLOW_CMD_NEW) < 0)
1132                         break;
1133
1134                 cb->args[0] = bucket;
1135                 cb->args[1] = obj;
1136         }
1137         return skb->len;
1138 }
1139
1140 static struct genl_ops dp_flow_genl_ops[] = {
1141         { .cmd = OVS_FLOW_CMD_NEW,
1142           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1143           .policy = flow_policy,
1144           .doit = ovs_flow_cmd_new_or_set
1145         },
1146         { .cmd = OVS_FLOW_CMD_DEL,
1147           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1148           .policy = flow_policy,
1149           .doit = ovs_flow_cmd_del
1150         },
1151         { .cmd = OVS_FLOW_CMD_GET,
1152           .flags = 0,               /* OK for unprivileged users. */
1153           .policy = flow_policy,
1154           .doit = ovs_flow_cmd_get,
1155           .dumpit = ovs_flow_cmd_dump
1156         },
1157         { .cmd = OVS_FLOW_CMD_SET,
1158           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1159           .policy = flow_policy,
1160           .doit = ovs_flow_cmd_new_or_set,
1161         },
1162 };
1163
1164 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1165 #ifdef HAVE_NLA_NUL_STRING
1166         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1167 #endif
1168         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1169         [OVS_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 },
1170 };
1171
1172 static struct genl_family dp_datapath_genl_family = {
1173         .id = GENL_ID_GENERATE,
1174         .hdrsize = sizeof(struct ovs_header),
1175         .name = OVS_DATAPATH_FAMILY,
1176         .version = 1,
1177         .maxattr = OVS_DP_ATTR_MAX
1178 };
1179
1180 static struct genl_multicast_group dp_datapath_multicast_group = {
1181         .name = OVS_DATAPATH_MCGROUP
1182 };
1183
1184 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1185                                 u32 pid, u32 seq, u32 flags, u8 cmd)
1186 {
1187         struct ovs_header *ovs_header;
1188         struct nlattr *nla;
1189         int err;
1190
1191         ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
1192                                    flags, cmd);
1193         if (!ovs_header)
1194                 goto error;
1195
1196         ovs_header->dp_ifindex = get_dpifindex(dp);
1197
1198         rcu_read_lock();
1199         err = nla_put_string(skb, OVS_DP_ATTR_NAME, dp_name(dp));
1200         rcu_read_unlock();
1201         if (err)
1202                 goto nla_put_failure;
1203
1204         nla = nla_reserve(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats));
1205         if (!nla)
1206                 goto nla_put_failure;
1207         get_dp_stats(dp, nla_data(nla));
1208
1209         NLA_PUT_U32(skb, OVS_DP_ATTR_IPV4_FRAGS,
1210                     dp->drop_frags ? OVS_DP_FRAG_DROP : OVS_DP_FRAG_ZERO);
1211
1212         return genlmsg_end(skb, ovs_header);
1213
1214 nla_put_failure:
1215         genlmsg_cancel(skb, ovs_header);
1216 error:
1217         return -EMSGSIZE;
1218 }
1219
1220 static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
1221                                              u32 seq, u8 cmd)
1222 {
1223         struct sk_buff *skb;
1224         int retval;
1225
1226         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1227         if (!skb)
1228                 return ERR_PTR(-ENOMEM);
1229
1230         retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
1231         if (retval < 0) {
1232                 kfree_skb(skb);
1233                 return ERR_PTR(retval);
1234         }
1235         return skb;
1236 }
1237
1238 static int ovs_dp_cmd_validate(struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1239 {
1240         if (a[OVS_DP_ATTR_IPV4_FRAGS]) {
1241                 u32 frags = nla_get_u32(a[OVS_DP_ATTR_IPV4_FRAGS]);
1242
1243                 if (frags != OVS_DP_FRAG_ZERO && frags != OVS_DP_FRAG_DROP)
1244                         return -EINVAL;
1245         }
1246
1247         return CHECK_NUL_STRING(a[OVS_DP_ATTR_NAME], IFNAMSIZ - 1);
1248 }
1249
1250 /* Called with genl_mutex and optionally with RTNL lock also. */
1251 static struct datapath *lookup_datapath(struct ovs_header *ovs_header, struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1252 {
1253         struct datapath *dp;
1254
1255         if (!a[OVS_DP_ATTR_NAME])
1256                 dp = get_dp(ovs_header->dp_ifindex);
1257         else {
1258                 struct vport *vport;
1259
1260                 rcu_read_lock();
1261                 vport = vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
1262                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1263                 rcu_read_unlock();
1264         }
1265         return dp ? dp : ERR_PTR(-ENODEV);
1266 }
1267
1268 /* Called with genl_mutex. */
1269 static void change_datapath(struct datapath *dp, struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1270 {
1271         if (a[OVS_DP_ATTR_IPV4_FRAGS])
1272                 dp->drop_frags = nla_get_u32(a[OVS_DP_ATTR_IPV4_FRAGS]) == OVS_DP_FRAG_DROP;
1273 }
1274
1275 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1276 {
1277         struct nlattr **a = info->attrs;
1278         struct vport_parms parms;
1279         struct sk_buff *reply;
1280         struct datapath *dp;
1281         struct vport *vport;
1282         int err;
1283
1284         err = -EINVAL;
1285         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1286                 goto err;
1287
1288         err = ovs_dp_cmd_validate(a);
1289         if (err)
1290                 goto err;
1291
1292         rtnl_lock();
1293         err = -ENODEV;
1294         if (!try_module_get(THIS_MODULE))
1295                 goto err_unlock_rtnl;
1296
1297         err = -ENOMEM;
1298         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1299         if (dp == NULL)
1300                 goto err_put_module;
1301         INIT_LIST_HEAD(&dp->port_list);
1302
1303         /* Initialize kobject for bridge.  This will be added as
1304          * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
1305         dp->ifobj.kset = NULL;
1306         kobject_init(&dp->ifobj, &dp_ktype);
1307
1308         /* Allocate table. */
1309         err = -ENOMEM;
1310         rcu_assign_pointer(dp->table, flow_tbl_alloc(TBL_MIN_BUCKETS));
1311         if (!dp->table)
1312                 goto err_free_dp;
1313
1314         dp->drop_frags = 0;
1315         dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1316         if (!dp->stats_percpu) {
1317                 err = -ENOMEM;
1318                 goto err_destroy_table;
1319         }
1320
1321         change_datapath(dp, a);
1322
1323         /* Set up our datapath device. */
1324         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1325         parms.type = OVS_VPORT_TYPE_INTERNAL;
1326         parms.options = NULL;
1327         parms.dp = dp;
1328         parms.port_no = OVSP_LOCAL;
1329         parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
1330
1331         vport = new_vport(&parms);
1332         if (IS_ERR(vport)) {
1333                 err = PTR_ERR(vport);
1334                 if (err == -EBUSY)
1335                         err = -EEXIST;
1336
1337                 goto err_destroy_percpu;
1338         }
1339
1340         reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
1341         err = PTR_ERR(reply);
1342         if (IS_ERR(reply))
1343                 goto err_destroy_local_port;
1344
1345         list_add_tail(&dp->list_node, &dps);
1346         dp_sysfs_add_dp(dp);
1347
1348         rtnl_unlock();
1349
1350         genl_notify(reply, genl_info_net(info), info->snd_pid,
1351                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1352         return 0;
1353
1354 err_destroy_local_port:
1355         dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
1356 err_destroy_percpu:
1357         free_percpu(dp->stats_percpu);
1358 err_destroy_table:
1359         flow_tbl_destroy(get_table_protected(dp));
1360 err_free_dp:
1361         kfree(dp);
1362 err_put_module:
1363         module_put(THIS_MODULE);
1364 err_unlock_rtnl:
1365         rtnl_unlock();
1366 err:
1367         return err;
1368 }
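
/*
 * Sketch of the request that creating a datapath now requires (the change
 * described in the subject line): OVS_DP_ATTR_NAME and OVS_DP_ATTR_UPCALL_PID
 * are both mandatory.  The attribute values below are purely illustrative.
 *
 *   OVS_DP_CMD_NEW
 *     OVS_DP_ATTR_NAME        "dp0"
 *     OVS_DP_ATTR_UPCALL_PID  Netlink pid of the socket that should receive
 *                             upcalls (0 is accepted but disables upcalls,
 *                             since dp_upcall() returns -ENOTCONN for it)
 *
 * ovs_vport_cmd_new() below applies the same rule to new vports via
 * OVS_VPORT_ATTR_UPCALL_PID.
 */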
1369
1370 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1371 {
1372         struct vport *vport, *next_vport;
1373         struct sk_buff *reply;
1374         struct datapath *dp;
1375         int err;
1376
1377         err = ovs_dp_cmd_validate(info->attrs);
1378         if (err)
1379                 goto exit;
1380
1381         rtnl_lock();
1382         dp = lookup_datapath(info->userhdr, info->attrs);
1383         err = PTR_ERR(dp);
1384         if (IS_ERR(dp))
1385                 goto exit_unlock;
1386
1387         reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_DEL);
1388         err = PTR_ERR(reply);
1389         if (IS_ERR(reply))
1390                 goto exit_unlock;
1391
1392         list_for_each_entry_safe (vport, next_vport, &dp->port_list, node)
1393                 if (vport->port_no != OVSP_LOCAL)
1394                         dp_detach_port(vport);
1395
1396         dp_sysfs_del_dp(dp);
1397         list_del(&dp->list_node);
1398         dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
1399
1400         /* rtnl_unlock() will wait until all the references to devices that
1401          * are pending unregistration have been dropped.  We do it here to
1402          * ensure that any internal devices (which contain DP pointers) are
1403          * fully destroyed before freeing the datapath.
1404          */
1405         rtnl_unlock();
1406
1407         call_rcu(&dp->rcu, destroy_dp_rcu);
1408         module_put(THIS_MODULE);
1409
1410         genl_notify(reply, genl_info_net(info), info->snd_pid,
1411                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1412
1413         return 0;
1414
1415 exit_unlock:
1416         rtnl_unlock();
1417 exit:
1418         return err;
1419 }
1420
1421 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1422 {
1423         struct sk_buff *reply;
1424         struct datapath *dp;
1425         int err;
1426
1427         err = ovs_dp_cmd_validate(info->attrs);
1428         if (err)
1429                 return err;
1430
1431         dp = lookup_datapath(info->userhdr, info->attrs);
1432         if (IS_ERR(dp))
1433                 return PTR_ERR(dp);
1434
1435         change_datapath(dp, info->attrs);
1436
1437         reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
1438         if (IS_ERR(reply)) {
1439                 err = PTR_ERR(reply);
1440                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1441                                 dp_datapath_multicast_group.id, err);
1442                 return 0;
1443         }
1444
1445         genl_notify(reply, genl_info_net(info), info->snd_pid,
1446                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1447         return 0;
1448 }
1449
1450 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1451 {
1452         struct sk_buff *reply;
1453         struct datapath *dp;
1454         int err;
1455
1456         err = ovs_dp_cmd_validate(info->attrs);
1457         if (err)
1458                 return err;
1459
1460         dp = lookup_datapath(info->userhdr, info->attrs);
1461         if (IS_ERR(dp))
1462                 return PTR_ERR(dp);
1463
1464         reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
1465         if (IS_ERR(reply))
1466                 return PTR_ERR(reply);
1467
1468         return genlmsg_reply(reply, info);
1469 }
1470
1471 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1472 {
1473         struct datapath *dp;
1474         int skip = cb->args[0];
1475         int i = 0;
1476
1477         list_for_each_entry (dp, &dps, list_node) {
1478                 if (i < skip)
1479                         continue;
1480                 if (ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
1481                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1482                                          OVS_DP_CMD_NEW) < 0)
1483                         break;
1484                 i++;
1485         }
1486
1487         cb->args[0] = i;
1488
1489         return skb->len;
1490 }
1491
1492 static struct genl_ops dp_datapath_genl_ops[] = {
1493         { .cmd = OVS_DP_CMD_NEW,
1494           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1495           .policy = datapath_policy,
1496           .doit = ovs_dp_cmd_new
1497         },
1498         { .cmd = OVS_DP_CMD_DEL,
1499           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1500           .policy = datapath_policy,
1501           .doit = ovs_dp_cmd_del
1502         },
1503         { .cmd = OVS_DP_CMD_GET,
1504           .flags = 0,               /* OK for unprivileged users. */
1505           .policy = datapath_policy,
1506           .doit = ovs_dp_cmd_get,
1507           .dumpit = ovs_dp_cmd_dump
1508         },
1509         { .cmd = OVS_DP_CMD_SET,
1510           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1511           .policy = datapath_policy,
1512           .doit = ovs_dp_cmd_set,
1513         },
1514 };
1515
1516 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
1517 #ifdef HAVE_NLA_NUL_STRING
1518         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1519         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
1520         [OVS_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
1521 #else
1522         [OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct ovs_vport_stats) },
1523         [OVS_VPORT_ATTR_ADDRESS] = { .minlen = ETH_ALEN },
1524 #endif
1525         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1526         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1527         [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1528         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1529 };
1530
1531 static struct genl_family dp_vport_genl_family = {
1532         .id = GENL_ID_GENERATE,
1533         .hdrsize = sizeof(struct ovs_header),
1534         .name = OVS_VPORT_FAMILY,
1535         .version = 1,
1536         .maxattr = OVS_VPORT_ATTR_MAX
1537 };
1538
1539 struct genl_multicast_group dp_vport_multicast_group = {
1540         .name = OVS_VPORT_MCGROUP
1541 };
1542
1543 /* Called with RTNL lock or RCU read lock. */
1544 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1545                                    u32 pid, u32 seq, u32 flags, u8 cmd)
1546 {
1547         struct ovs_header *ovs_header;
1548         struct nlattr *nla;
1549         int err;
1550
1551         ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
1552                                  flags, cmd);
1553         if (!ovs_header)
1554                 return -EMSGSIZE;
1555
1556         ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1557
1558         NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
1559         NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport_get_type(vport));
1560         NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport_get_name(vport));
1561         NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
1562
1563         nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats));
1564         if (!nla)
1565                 goto nla_put_failure;
1566
1567         vport_get_stats(vport, nla_data(nla));
1568
1569         NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));
1570
1571         err = vport_get_options(vport, skb);
1572         if (err == -EMSGSIZE)
1573                 goto error;
1574
1575         return genlmsg_end(skb, ovs_header);
1576
1577 nla_put_failure:
1578         err = -EMSGSIZE;
1579 error:
1580         genlmsg_cancel(skb, ovs_header);
1581         return err;
1582 }
1583
1584 /* Called with RTNL lock or RCU read lock. */
1585 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
1586                                          u32 seq, u8 cmd)
1587 {
1588         struct sk_buff *skb;
1589         int retval;
1590
1591         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1592         if (!skb)
1593                 return ERR_PTR(-ENOMEM);
1594
1595         retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
1596         if (retval < 0) {
1597                 kfree_skb(skb);
1598                 return ERR_PTR(retval);
1599         }
1600         return skb;
1601 }
1602
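     /* On kernels whose netlink code lacks NLA_NUL_STRING, the name attribute
      * is validated by hand here; where the policy above can enforce it
      * (HAVE_NLA_NUL_STRING), CHECK_NUL_STRING() is expected to be a no-op. */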
1603 static int ovs_vport_cmd_validate(struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1604 {
1605         return CHECK_NUL_STRING(a[OVS_VPORT_ATTR_NAME], IFNAMSIZ - 1);
1606 }
1607
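     /* Resolve the vport addressed by a request: OVS_VPORT_ATTR_NAME takes
      * precedence over OVS_VPORT_ATTR_PORT_NO, and a lookup by name is global,
      * so the dp_ifindex in the header is not consulted in that case. */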
1608 /* Called with RTNL lock or RCU read lock. */
1609 static struct vport *lookup_vport(struct ovs_header *ovs_header,
1610                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1611 {
1612         struct datapath *dp;
1613         struct vport *vport;
1614
1615         if (a[OVS_VPORT_ATTR_NAME]) {
1616                 vport = vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
1617                 if (!vport)
1618                         return ERR_PTR(-ENODEV);
1619                 return vport;
1620         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1621                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1622
1623                 if (port_no >= DP_MAX_PORTS)
1624                         return ERR_PTR(-EFBIG);
1625
1626                 dp = get_dp(ovs_header->dp_ifindex);
1627                 if (!dp)
1628                         return ERR_PTR(-ENODEV);
1629
1630                 vport = get_vport_protected(dp, port_no);
1631                 if (!vport)
1632                         return ERR_PTR(-ENOENT);
1633                 return vport;
1634         } else
1635                 return ERR_PTR(-EINVAL);
1636 }
1637
1638 /* Called with RTNL lock. */
1639 static int change_vport(struct vport *vport, struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1640 {
1641         int err = 0;
1642
1643         if (a[OVS_VPORT_ATTR_STATS])
1644                 vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
1645
1646         if (a[OVS_VPORT_ATTR_ADDRESS])
1647                 err = vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS]));
1648
1649         return err;
1650 }
1651
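     /*
      * Illustration only (not part of this module): a minimal userspace
      * sketch, assuming libnl-3, of the request that ovs_vport_cmd_new()
      * expects.  It carries the three mandatory attributes; "dp_ifindex",
      * "eth0" and OVS_VPORT_TYPE_NETDEV are placeholder choices and error
      * handling is omitted.
      *
      *   struct nl_sock *sk = nl_socket_alloc();
      *   struct nl_msg *msg = nlmsg_alloc();
      *   struct ovs_header *oh;
      *   int family;
      *
      *   genl_connect(sk);
      *   family = genl_ctrl_resolve(sk, OVS_VPORT_FAMILY);
      *
      *   oh = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
      *                    sizeof(*oh), 0, OVS_VPORT_CMD_NEW, 1);
      *   oh->dp_ifindex = dp_ifindex;
      *
      *   nla_put_string(msg, OVS_VPORT_ATTR_NAME, "eth0");
      *   nla_put_u32(msg, OVS_VPORT_ATTR_TYPE, OVS_VPORT_TYPE_NETDEV);
      *   nla_put_u32(msg, OVS_VPORT_ATTR_UPCALL_PID,
      *               nl_socket_get_local_port(sk));
      *
      *   nl_send_auto(sk, msg);
      */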
1652 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1653 {
1654         struct nlattr **a = info->attrs;
1655         struct ovs_header *ovs_header = info->userhdr;
1656         struct vport_parms parms;
1657         struct sk_buff *reply;
1658         struct vport *vport;
1659         struct datapath *dp;
1660         u32 port_no;
1661         int err;
1662
1663         err = -EINVAL;
1664         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1665             !a[OVS_VPORT_ATTR_UPCALL_PID])
1666                 goto exit;
1667
1668         err = ovs_vport_cmd_validate(a);
1669         if (err)
1670                 goto exit;
1671
1672         rtnl_lock();
1673         dp = get_dp(ovs_header->dp_ifindex);
1674         err = -ENODEV;
1675         if (!dp)
1676                 goto exit_unlock;
1677
1678         if (a[OVS_VPORT_ATTR_PORT_NO]) {
1679                 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1680
1681                 err = -EFBIG;
1682                 if (port_no >= DP_MAX_PORTS)
1683                         goto exit_unlock;
1684
1685                 vport = get_vport_protected(dp, port_no);
1686                 err = -EBUSY;
1687                 if (vport)
1688                         goto exit_unlock;
1689         } else {
1690                 for (port_no = 1; ; port_no++) {
1691                         if (port_no >= DP_MAX_PORTS) {
1692                                 err = -EFBIG;
1693                                 goto exit_unlock;
1694                         }
1695                         vport = get_vport_protected(dp, port_no);
1696                         if (!vport)
1697                                 break;
1698                 }
1699         }
1700
1701         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1702         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1703         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1704         parms.dp = dp;
1705         parms.port_no = port_no;
1706         parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
1707
1708         vport = new_vport(&parms);
1709         err = PTR_ERR(vport);
1710         if (IS_ERR(vport))
1711                 goto exit_unlock;
1712
1713         dp_sysfs_add_if(vport);
1714
1715         err = change_vport(vport, a);
1716         if (!err) {
1717                 reply = ovs_vport_cmd_build_info(vport, info->snd_pid,
1718                                                  info->snd_seq, OVS_VPORT_CMD_NEW);
1719                 if (IS_ERR(reply))
1720                         err = PTR_ERR(reply);
1721         }
1722         if (err) {
1723                 dp_detach_port(vport);
1724                 goto exit_unlock;
1725         }
1726         genl_notify(reply, genl_info_net(info), info->snd_pid,
1727                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1728
1729
1730 exit_unlock:
1731         rtnl_unlock();
1732 exit:
1733         return err;
1734 }
1735
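     /* Unlike creation, OVS_VPORT_ATTR_UPCALL_PID is optional here: when
      * supplied it replaces the Netlink PID that receives upcalls for this
      * vport, otherwise the existing value is left alone. */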
1736 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1737 {
1738         struct nlattr **a = info->attrs;
1739         struct sk_buff *reply;
1740         struct vport *vport;
1741         int err;
1742
1743         err = ovs_vport_cmd_validate(a);
1744         if (err)
1745                 goto exit;
1746
1747         rtnl_lock();
1748         vport = lookup_vport(info->userhdr, a);
1749         err = PTR_ERR(vport);
1750         if (IS_ERR(vport))
1751                 goto exit_unlock;
1752
1753         err = 0;
1754         if (a[OVS_VPORT_ATTR_OPTIONS])
1755                 err = vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
1756         if (!err)
1757                 err = change_vport(vport, a);
1758         if (!err && a[OVS_VPORT_ATTR_UPCALL_PID])
1759                 vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
1760
1761         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1762                                          OVS_VPORT_CMD_NEW);
1763         if (IS_ERR(reply)) {
1764                 err = PTR_ERR(reply);
1765                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1766                                 dp_vport_multicast_group.id, err);
1767                 goto exit_unlock;
1768         }
1769
1770         genl_notify(reply, genl_info_net(info), info->snd_pid,
1771                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1772
1773 exit_unlock:
1774         rtnl_unlock();
1775 exit:
1776         return err;
1777 }
1778
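     /* The local port (OVSP_LOCAL) exists for the lifetime of its datapath and
      * cannot be removed on its own, so such requests fail with -EINVAL. */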
1779 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1780 {
1781         struct nlattr **a = info->attrs;
1782         struct sk_buff *reply;
1783         struct vport *vport;
1784         int err;
1785
1786         err = ovs_vport_cmd_validate(a);
1787         if (err)
1788                 goto exit;
1789
1790         rtnl_lock();
1791         vport = lookup_vport(info->userhdr, a);
1792         err = PTR_ERR(vport);
1793         if (IS_ERR(vport))
1794                 goto exit_unlock;
1795
1796         if (vport->port_no == OVSP_LOCAL) {
1797                 err = -EINVAL;
1798                 goto exit_unlock;
1799         }
1800
1801         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1802                                          OVS_VPORT_CMD_DEL);
1803         err = PTR_ERR(reply);
1804         if (IS_ERR(reply))
1805                 goto exit_unlock;
1806
1807         dp_detach_port(vport);
1808
1809         genl_notify(reply, genl_info_net(info), info->snd_pid,
1810                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1811
1812 exit_unlock:
1813         rtnl_unlock();
1814 exit:
1815         return err;
1816 }
1817
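     /* Read-only lookup: the RCU read lock is enough here, so RTNL is not
      * taken. */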
1818 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
1819 {
1820         struct nlattr **a = info->attrs;
1821         struct ovs_header *ovs_header = info->userhdr;
1822         struct sk_buff *reply;
1823         struct vport *vport;
1824         int err;
1825
1826         err = ovs_vport_cmd_validate(a);
1827         if (err)
1828                 goto exit;
1829
1830         rcu_read_lock();
1831         vport = lookup_vport(ovs_header, a);
1832         err = PTR_ERR(vport);
1833         if (IS_ERR(vport))
1834                 goto exit_unlock;
1835
1836         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1837                                          OVS_VPORT_CMD_NEW);
1838         err = PTR_ERR(reply);
1839         if (IS_ERR(reply))
1840                 goto exit_unlock;
1841
1842         rcu_read_unlock();
1843
1844         return genlmsg_reply(reply, info);
1845
1846 exit_unlock:
1847         rcu_read_unlock();
1848 exit:
1849         return err;
1850 }
1851
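     /* Emit one OVS_VPORT_CMD_NEW record per vport of the requested datapath.
      * cb->args[0] holds the next port number to visit, so a multi-part dump
      * resumes where the previous pass ran out of skb space. */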
1852 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1853 {
1854         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1855         struct datapath *dp;
1856         u32 port_no;
1857         int retval;
1858
1859         dp = get_dp(ovs_header->dp_ifindex);
1860         if (!dp)
1861                 return -ENODEV;
1862
1863         rcu_read_lock();
1864         for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
1865                 struct vport *vport;
1866
1867                 vport = get_vport_protected(dp, port_no);
1868                 if (!vport)
1869                         continue;
1870
1871                 if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
1872                                             cb->nlh->nlmsg_seq, NLM_F_MULTI,
1873                                             OVS_VPORT_CMD_NEW) < 0)
1874                         break;
1875         }
1876         rcu_read_unlock();
1877
1878         cb->args[0] = port_no;
1879         retval = skb->len;
1880
1881         return retval;
1882 }
1883
1884 static struct genl_ops dp_vport_genl_ops[] = {
1885         { .cmd = OVS_VPORT_CMD_NEW,
1886           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1887           .policy = vport_policy,
1888           .doit = ovs_vport_cmd_new
1889         },
1890         { .cmd = OVS_VPORT_CMD_DEL,
1891           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1892           .policy = vport_policy,
1893           .doit = ovs_vport_cmd_del
1894         },
1895         { .cmd = OVS_VPORT_CMD_GET,
1896           .flags = 0,               /* OK for unprivileged users. */
1897           .policy = vport_policy,
1898           .doit = ovs_vport_cmd_get,
1899           .dumpit = ovs_vport_cmd_dump
1900         },
1901         { .cmd = OVS_VPORT_CMD_SET,
1902           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1903           .policy = vport_policy,
1904           .doit = ovs_vport_cmd_set,
1905         },
1906 };
1907
1908 struct genl_family_and_ops {
1909         struct genl_family *family;
1910         struct genl_ops *ops;
1911         int n_ops;
1912         struct genl_multicast_group *group;
1913 };
1914
1915 static const struct genl_family_and_ops dp_genl_families[] = {
1916         { &dp_datapath_genl_family,
1917           dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
1918           &dp_datapath_multicast_group },
1919         { &dp_vport_genl_family,
1920           dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
1921           &dp_vport_multicast_group },
1922         { &dp_flow_genl_family,
1923           dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
1924           &dp_flow_multicast_group },
1925         { &dp_packet_genl_family,
1926           dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
1927           NULL },
1928 };
1929
1930 static void dp_unregister_genl(int n_families)
1931 {
1932         int i;
1933
1934         for (i = 0; i < n_families; i++)
1935                 genl_unregister_family(dp_genl_families[i].family);
1936 }
1937
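     /* Register each Generic Netlink family and, where present, its multicast
      * group.  On failure only the families registered so far are torn down;
      * unregistering a family also drops any multicast group attached to
      * it. */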
1938 static int dp_register_genl(void)
1939 {
1940         int n_registered;
1941         int err;
1942         int i;
1943
1944         n_registered = 0;
1945         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
1946                 const struct genl_family_and_ops *f = &dp_genl_families[i];
1947
1948                 err = genl_register_family_with_ops(f->family, f->ops,
1949                                                     f->n_ops);
1950                 if (err)
1951                         goto error;
1952                 n_registered++;
1953
1954                 if (f->group) {
1955                         err = genl_register_mc_group(f->family, f->group);
1956                         if (err)
1957                                 goto error;
1958                 }
1959         }
1960
1961         return 0;
1962
1963 error:
1964         dp_unregister_genl(n_registered);
1965         return err;
1966 }
1967
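     /* Module init: bring up the tunnel, flow and vport subsystems, hook the
      * netdevice notifier, then publish the Generic Netlink families.  The
      * error labels unwind whatever completed, in reverse order. */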
1968 static int __init dp_init(void)
1969 {
1970         struct sk_buff *dummy_skb;
1971         int err;
1972
1973         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
1974
1975         pr_info("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
1976
1977         err = tnl_init();
1978         if (err)
1979                 goto error;
1980
1981         err = flow_init();
1982         if (err)
1983                 goto error_tnl_exit;
1984
1985         err = vport_init();
1986         if (err)
1987                 goto error_flow_exit;
1988
1989         err = register_netdevice_notifier(&dp_device_notifier);
1990         if (err)
1991                 goto error_vport_exit;
1992
1993         err = dp_register_genl();
1994         if (err < 0)
1995                 goto error_unreg_notifier;
1996
1997         return 0;
1998
1999 error_unreg_notifier:
2000         unregister_netdevice_notifier(&dp_device_notifier);
2001 error_vport_exit:
2002         vport_exit();
2003 error_flow_exit:
2004         flow_exit();
2005 error_tnl_exit:
2006         tnl_exit();
2007 error:
2008         return err;
2009 }
2010
2011 static void dp_cleanup(void)
2012 {
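             /* Wait for outstanding call_rcu() callbacks (e.g. deferred flow
              * frees) to complete before the subsystems go away. */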
2013         rcu_barrier();
2014         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2015         unregister_netdevice_notifier(&dp_device_notifier);
2016         vport_exit();
2017         flow_exit();
2018         tnl_exit();
2019 }
2020
2021 module_init(dp_init);
2022 module_exit(dp_cleanup);
2023
2024 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2025 MODULE_LICENSE("GPL");