datapath: Convert kernel priority actions into match/set.
[sliver-openvswitch.git] / datapath / datapath.c
1 /*
2  * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
3  * Distributed under the terms of the GNU GPL version 2.
4  *
5  * Significant portions of this file may be copied from parts of the Linux
6  * kernel, by Linus Torvalds and others.
7  */
8
9 /* Functions for managing the dp interface/device. */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/if_arp.h>
16 #include <linux/if_vlan.h>
17 #include <linux/in.h>
18 #include <linux/ip.h>
19 #include <linux/jhash.h>
20 #include <linux/delay.h>
21 #include <linux/time.h>
22 #include <linux/etherdevice.h>
23 #include <linux/genetlink.h>
24 #include <linux/kernel.h>
25 #include <linux/kthread.h>
26 #include <linux/mutex.h>
27 #include <linux/percpu.h>
28 #include <linux/rcupdate.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/version.h>
32 #include <linux/ethtool.h>
33 #include <linux/wait.h>
34 #include <asm/system.h>
35 #include <asm/div64.h>
36 #include <asm/bug.h>
37 #include <linux/highmem.h>
38 #include <linux/netfilter_bridge.h>
39 #include <linux/netfilter_ipv4.h>
40 #include <linux/inetdevice.h>
41 #include <linux/list.h>
42 #include <linux/openvswitch.h>
43 #include <linux/rculist.h>
44 #include <linux/dmi.h>
45 #include <net/inet_ecn.h>
46 #include <net/genetlink.h>
47
48 #include "checksum.h"
49 #include "datapath.h"
50 #include "actions.h"
51 #include "flow.h"
52 #include "vlan.h"
53 #include "tunnel.h"
54 #include "vport-internal_dev.h"
55
56 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
57     LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)
58 #error Kernels before 2.6.18 or after 3.1 are not supported by this version of Open vSwitch.
59 #endif
60
61 int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
62 EXPORT_SYMBOL(dp_ioctl_hook);
63
64 /**
65  * DOC: Locking:
66  *
67  * Writes to device state (add/remove datapath, port, set operations on vports,
68  * etc.) are protected by RTNL.
69  *
70  * Writes to other state (flow table modifications, set miscellaneous datapath
71  * parameters, etc.) are protected by genl_mutex.  The RTNL lock nests inside
72  * genl_mutex.
73  *
74  * Reads are protected by RCU.
75  *
76  * There are a few special cases (mostly stats) that have their own
77  * synchronization but they nest under all of the above and don't interact with
78  * each other.
79  */
80
81 /* Global list of datapaths to enable dumping them all out.
82  * Protected by genl_mutex.
83  */
84 static LIST_HEAD(dps);
85
86 static struct vport *new_vport(const struct vport_parms *);
87 static int queue_gso_packets(int dp_ifindex, struct sk_buff *,
88                              const struct dp_upcall_info *);
89 static int queue_userspace_packet(int dp_ifindex, struct sk_buff *,
90                                   const struct dp_upcall_info *);
91
92 /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
93 struct datapath *get_dp(int dp_ifindex)
94 {
95         struct datapath *dp = NULL;
96         struct net_device *dev;
97
98         rcu_read_lock();
99         dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
100         if (dev) {
101                 struct vport *vport = internal_dev_get_vport(dev);
102                 if (vport)
103                         dp = vport->dp;
104         }
105         rcu_read_unlock();
106
107         return dp;
108 }
109 EXPORT_SYMBOL_GPL(get_dp);
110
111 /* Must be called with genl_mutex. */
112 static struct flow_table *get_table_protected(struct datapath *dp)
113 {
114         return rcu_dereference_protected(dp->table, lockdep_genl_is_held());
115 }
116
117 /* Must be called with rcu_read_lock or RTNL lock. */
118 static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
119 {
120         return rcu_dereference_rtnl(dp->ports[port_no]);
121 }
122
123 /* Must be called with rcu_read_lock or RTNL lock. */
124 const char *dp_name(const struct datapath *dp)
125 {
126         return vport_get_name(rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]));
127 }
128
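/* Returns the ifindex of the datapath's local port, or 0 if the local port is
 * missing.  Takes the RCU read lock internally. */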
129 static int get_dpifindex(struct datapath *dp)
130 {
131         struct vport *local;
132         int ifindex;
133
134         rcu_read_lock();
135
136         local = get_vport_protected(dp, OVSP_LOCAL);
137         if (local)
138                 ifindex = vport_get_ifindex(local);
139         else
140                 ifindex = 0;
141
142         rcu_read_unlock();
143
144         return ifindex;
145 }
146
147 static inline size_t br_nlmsg_size(void)
148 {
149         return NLMSG_ALIGN(sizeof(struct ifinfomsg))
150                + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
151                + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
152                + nla_total_size(4) /* IFLA_MASTER */
153                + nla_total_size(4) /* IFLA_MTU */
154                + nla_total_size(1); /* IFLA_OPERSTATE */
155 }
156
157 /* Caller must hold RTNL lock. */
158 static int dp_fill_ifinfo(struct sk_buff *skb,
159                           const struct vport *port,
160                           int event, unsigned int flags)
161 {
162         struct datapath *dp = port->dp;
163         int ifindex = vport_get_ifindex(port);
164         struct ifinfomsg *hdr;
165         struct nlmsghdr *nlh;
166
167         if (ifindex < 0)
168                 return ifindex;
169
170         nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
171         if (nlh == NULL)
172                 return -EMSGSIZE;
173
174         hdr = nlmsg_data(nlh);
175         hdr->ifi_family = AF_BRIDGE;
176         hdr->__ifi_pad = 0;
177         hdr->ifi_type = ARPHRD_ETHER;
178         hdr->ifi_index = ifindex;
179         hdr->ifi_flags = vport_get_flags(port);
180         hdr->ifi_change = 0;
181
182         NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
183         NLA_PUT_U32(skb, IFLA_MASTER, get_dpifindex(dp));
184         NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
185 #ifdef IFLA_OPERSTATE
186         NLA_PUT_U8(skb, IFLA_OPERSTATE,
187                    vport_is_running(port)
188                         ? vport_get_operstate(port)
189                         : IF_OPER_DOWN);
190 #endif
191
192         NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));
193
194         return nlmsg_end(skb, nlh);
195
196 nla_put_failure:
197         nlmsg_cancel(skb, nlh);
198         return -EMSGSIZE;
199 }
200
201 /* Caller must hold RTNL lock. */
202 static void dp_ifinfo_notify(int event, struct vport *port)
203 {
204         struct sk_buff *skb;
205         int err = -ENOBUFS;
206
207         skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
208         if (skb == NULL)
209                 goto errout;
210
211         err = dp_fill_ifinfo(skb, port, event, 0);
212         if (err < 0) {
213                 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
214                 WARN_ON(err == -EMSGSIZE);
215                 kfree_skb(skb);
216                 goto errout;
217         }
218         rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
219         return;
220 errout:
221         if (err < 0)
222                 rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
223 }
224
225 static void release_dp(struct kobject *kobj)
226 {
227         struct datapath *dp = container_of(kobj, struct datapath, ifobj);
228         kfree(dp);
229 }
230
231 static struct kobj_type dp_ktype = {
232         .release = release_dp
233 };
234
235 static void destroy_dp_rcu(struct rcu_head *rcu)
236 {
237         struct datapath *dp = container_of(rcu, struct datapath, rcu);
238
239         flow_tbl_destroy(dp->table);
240         free_percpu(dp->stats_percpu);
241         kobject_put(&dp->ifobj);
242 }
243
244 /* Called with RTNL lock and genl_lock. */
245 static struct vport *new_vport(const struct vport_parms *parms)
246 {
247         struct vport *vport;
248
249         vport = vport_add(parms);
250         if (!IS_ERR(vport)) {
251                 struct datapath *dp = parms->dp;
252
253                 rcu_assign_pointer(dp->ports[parms->port_no], vport);
254                 list_add(&vport->node, &dp->port_list);
255
256                 dp_ifinfo_notify(RTM_NEWLINK, vport);
257         }
258
259         return vport;
260 }
261
262 /* Called with RTNL lock. */
263 void dp_detach_port(struct vport *p)
264 {
265         ASSERT_RTNL();
266
267         if (p->port_no != OVSP_LOCAL)
268                 dp_sysfs_del_if(p);
269         dp_ifinfo_notify(RTM_DELLINK, p);
270
271         /* First drop references to device. */
272         list_del(&p->node);
273         rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
274
275         /* Then destroy it. */
276         vport_del(p);
277 }
278
279 /* Must be called with rcu_read_lock. */
280 void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
281 {
282         struct datapath *dp = p->dp;
283         struct sw_flow *flow;
284         struct dp_stats_percpu *stats;
285         u64 *stats_counter;
286         int error;
287
288         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
289         OVS_CB(skb)->vport = p;
290
291         if (!OVS_CB(skb)->flow) {
292                 struct sw_flow_key key;
293                 int key_len;
294
295                 /* Extract flow from 'skb' into 'key'. */
296                 error = flow_extract(skb, p->port_no, &key, &key_len);
297                 if (unlikely(error)) {
298                         kfree_skb(skb);
299                         return;
300                 }
301
302                 /* Look up flow. */
303                 flow = flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
304                 if (unlikely(!flow)) {
305                         struct dp_upcall_info upcall;
306
307                         upcall.cmd = OVS_PACKET_CMD_MISS;
308                         upcall.key = &key;
309                         upcall.userdata = NULL;
310                         upcall.pid = p->upcall_pid;
311                         dp_upcall(dp, skb, &upcall);
312                         consume_skb(skb);
313                         stats_counter = &stats->n_missed;
314                         goto out;
315                 }
316
317                 OVS_CB(skb)->flow = flow;
318         }
319
320         stats_counter = &stats->n_hit;
321         flow_used(OVS_CB(skb)->flow, skb);
322         execute_actions(dp, skb);
323
324 out:
325         /* Update datapath statistics. */
326
327         write_seqcount_begin(&stats->seqlock);
328         (*stats_counter)++;
329         write_seqcount_end(&stats->seqlock);
330 }
331
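/* Copies 'skb' into 'to', computing the checksum that CHECKSUM_PARTIAL left
 * incomplete and storing it at the skb's checksum offset in the copy. */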
332 static void copy_and_csum_skb(struct sk_buff *skb, void *to)
333 {
334         u16 csum_start, csum_offset;
335         __wsum csum;
336
337         get_skb_csum_pointers(skb, &csum_start, &csum_offset);
338         csum_start -= skb_headroom(skb);
339
340         skb_copy_bits(skb, 0, to, csum_start);
341
342         csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
343                                       skb->len - csum_start, 0);
344         *(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
345 }
346
347 static struct genl_family dp_packet_genl_family = {
348         .id = GENL_ID_GENERATE,
349         .hdrsize = sizeof(struct ovs_header),
350         .name = OVS_PACKET_FAMILY,
351         .version = OVS_PACKET_VERSION,
352         .maxattr = OVS_PACKET_ATTR_MAX
353 };
354
355 int dp_upcall(struct datapath *dp, struct sk_buff *skb,
356               const struct dp_upcall_info *upcall_info)
357 {
358         struct dp_stats_percpu *stats;
359         int dp_ifindex;
360         int err;
361
362         if (upcall_info->pid == 0) {
363                 err = -ENOTCONN;
364                 goto err;
365         }
366
367         dp_ifindex = get_dpifindex(dp);
368         if (!dp_ifindex) {
369                 err = -ENODEV;
370                 goto err;
371         }
372
373         forward_ip_summed(skb, true);
374
375         if (!skb_is_gso(skb))
376                 err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
377         else
378                 err = queue_gso_packets(dp_ifindex, skb, upcall_info);
379         if (err)
380                 goto err;
381
382         return 0;
383
384 err:
385         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
386
387         write_seqcount_begin(&stats->seqlock);
388         stats->n_lost++;
389         write_seqcount_end(&stats->seqlock);
390
391         return err;
392 }
393
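/* Segments the GSO packet 'skb' and queues each segment to userspace via
 * queue_userspace_packet().  For UDP fragmentation offload, segments after the
 * first are reported with a flow key marked as a later fragment. */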
394 static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
395                              const struct dp_upcall_info *upcall_info)
396 {
397         struct dp_upcall_info later_info;
398         struct sw_flow_key later_key;
399         struct sk_buff *segs, *nskb;
400         int err;
401
402         segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
403         if (IS_ERR(segs))
404                 return PTR_ERR(segs);
405
406         /* Queue all of the segments. */
407         skb = segs;
408         do {
409                 err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
410                 if (err)
411                         break;
412
413                 if (skb == segs && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
414                         /* The initial flow key extracted by flow_extract() in
415                          * this case is for the first fragment, so we need to
416                          * properly mark later fragments.
417                          */
418                         later_key = *upcall_info->key;
419                         later_key.ip.tos_frag &= ~OVS_FRAG_TYPE_MASK;
420                         later_key.ip.tos_frag |= OVS_FRAG_TYPE_LATER;
421
422                         later_info = *upcall_info;
423                         later_info.key = &later_key;
424                         upcall_info = &later_info;
425                 }
426         } while ((skb = skb->next));
427
428         /* Free all of the segments. */
429         skb = segs;
430         do {
431                 nskb = skb->next;
432                 if (err)
433                         kfree_skb(skb);
434                 else
435                         consume_skb(skb);
436         } while ((skb = nskb));
437         return err;
438 }
439
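/* Wraps 'skb' and the flow key from 'upcall_info' in an OVS_PACKET_* Netlink
 * message and unicasts it to the userspace process given by
 * upcall_info->pid. */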
440 static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
441                                   const struct dp_upcall_info *upcall_info)
442 {
443         struct ovs_header *upcall;
444         struct sk_buff *user_skb; /* to be queued to userspace */
445         struct nlattr *nla;
446         unsigned int len;
447         int err;
448
449         err = vlan_deaccel_tag(skb);
450         if (unlikely(err))
451                 return err;
452
453         if (nla_attr_size(skb->len) > USHRT_MAX)
454                 return -EFBIG;
455
456         len = sizeof(struct ovs_header);
457         len += nla_total_size(skb->len);
458         len += nla_total_size(FLOW_BUFSIZE);
459         if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
460                 len += nla_total_size(8);
461
462         user_skb = genlmsg_new(len, GFP_ATOMIC);
463         if (!user_skb)
464                 return -ENOMEM;
465
466         upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
467                              0, upcall_info->cmd);
468         upcall->dp_ifindex = dp_ifindex;
469
470         nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
471         flow_to_nlattrs(upcall_info->key, user_skb);
472         nla_nest_end(user_skb, nla);
473
474         if (upcall_info->userdata)
475                 nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
476                             nla_get_u64(upcall_info->userdata));
477
478         nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
479         if (skb->ip_summed == CHECKSUM_PARTIAL)
480                 copy_and_csum_skb(skb, nla_data(nla));
481         else
482                 skb_copy_bits(skb, 0, nla_data(nla), skb->len);
483
484         return genlmsg_unicast(&init_net, user_skb, upcall_info->pid);
485 }
486
487 /* Called with genl_mutex. */
488 static int flush_flows(int dp_ifindex)
489 {
490         struct flow_table *old_table;
491         struct flow_table *new_table;
492         struct datapath *dp;
493
494         dp = get_dp(dp_ifindex);
495         if (!dp)
496                 return -ENODEV;
497
498         old_table = get_table_protected(dp);
499         new_table = flow_tbl_alloc(TBL_MIN_BUCKETS);
500         if (!new_table)
501                 return -ENOMEM;
502
503         rcu_assign_pointer(dp->table, new_table);
504
505         flow_tbl_deferred_destroy(old_table);
506         return 0;
507 }
508
509 static int validate_actions(const struct nlattr *attr,
510                                 const struct sw_flow_key *key, int depth);
511
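/* Validates a nested OVS_ACTION_ATTR_SAMPLE attribute: a 32-bit probability is
 * required and the nested action list is validated recursively against
 * 'key'. */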
512 static int validate_sample(const struct nlattr *attr,
513                                 const struct sw_flow_key *key, int depth)
514 {
515         const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
516         const struct nlattr *probability, *actions;
517         const struct nlattr *a;
518         int rem;
519
520         memset(attrs, 0, sizeof(attrs));
521         nla_for_each_nested (a, attr, rem) {
522                 int type = nla_type(a);
523                 if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
524                         return -EINVAL;
525                 attrs[type] = a;
526         }
527         if (rem)
528                 return -EINVAL;
529
530         probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
531         if (!probability || nla_len(probability) != sizeof(u32))
532                 return -EINVAL;
533
534         actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
535         if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
536                 return -EINVAL;
537         return validate_actions(actions, key, depth + 1);
538 }
539
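/* Validates the key attribute nested inside a set or push action, checking
 * that it is consistent with the flow key 'flow_key' of the packets the
 * actions will apply to. */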
540 static int validate_action_key(const struct nlattr *a,
541                                 const struct sw_flow_key *flow_key)
542 {
543         int act_type = nla_type(a);
544         const struct nlattr *ovs_key = nla_data(a);
545         int key_type = nla_type(ovs_key);
546
547         /* There can be only one key in an action. */
548         if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
549                 return -EINVAL;
550
551         if (key_type > OVS_KEY_ATTR_MAX ||
552             nla_len(ovs_key) != ovs_key_lens[key_type])
553                 return -EINVAL;
554
555 #define ACTION(act, key)        (((act) << 8) | (key))
556
557         switch (ACTION(act_type, key_type)) {
558         const struct ovs_key_ipv4 *ipv4_key;
559         const struct ovs_key_8021q *q_key;
560
561         case ACTION(OVS_ACTION_ATTR_SET, OVS_KEY_ATTR_PRIORITY):
562         case ACTION(OVS_ACTION_ATTR_SET, OVS_KEY_ATTR_TUN_ID):
563         case ACTION(OVS_ACTION_ATTR_SET, OVS_KEY_ATTR_ETHERNET):
564                 break;
565
566         case ACTION(OVS_ACTION_ATTR_PUSH, OVS_KEY_ATTR_8021Q):
567                 q_key = nla_data(ovs_key);
568                 if (q_key->q_tpid != htons(ETH_P_8021Q))
569                         return -EINVAL;
570
571                 if (q_key->q_tci & htons(VLAN_TAG_PRESENT))
572                         return -EINVAL;
573                 break;
574
575         case ACTION(OVS_ACTION_ATTR_SET, OVS_KEY_ATTR_IPV4):
576                 if (flow_key->eth.type != htons(ETH_P_IP))
577                         return -EINVAL;
578
579                 if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst)
580                         return -EINVAL;
581
582                 ipv4_key = nla_data(ovs_key);
583                 if (ipv4_key->ipv4_proto != flow_key->ip.proto)
584                         return -EINVAL;
585
586                 if (ipv4_key->ipv4_tos & INET_ECN_MASK)
587                         return -EINVAL;
588
589                 if (ipv4_key->ipv4_frag !=
590                     (flow_key->ip.tos_frag & OVS_FRAG_TYPE_MASK))
591                         return -EINVAL;
592
593                 break;
594
595         case ACTION(OVS_ACTION_ATTR_SET, OVS_KEY_ATTR_TCP):
596                 if (flow_key->ip.proto != IPPROTO_TCP)
597                         return -EINVAL;
598
599                 if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
600                         return -EINVAL;
601
602                 break;
603
604         case ACTION(OVS_ACTION_ATTR_SET, OVS_KEY_ATTR_UDP):
605                 if (flow_key->ip.proto != IPPROTO_UDP)
606                         return -EINVAL;
607
608                 if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
609                         return -EINVAL;
610                 break;
611
612         default:
613                 return -EINVAL;
614         }
615 #undef ACTION
616         return 0;
617 }
618
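/* Validates an OVS_ACTION_ATTR_USERSPACE attribute; a nonzero Netlink PID is
 * required. */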
619 static int validate_userspace(const struct nlattr *attr)
620 {
621         static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] =
622         {
623                 [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
624                 [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
625         };
626         struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
627         int error;
628
629         error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, attr, userspace_policy);
630         if (error)
631                 return error;
632
633         if (!a[OVS_USERSPACE_ATTR_PID] || !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
634                 return -EINVAL;
635
636         return 0;
637 }
638
639 static int validate_actions(const struct nlattr *attr,
640                                 const struct sw_flow_key *key,  int depth)
641 {
642         const struct nlattr *a;
643         int rem, err;
644
645         if (depth >= SAMPLE_ACTION_DEPTH)
646                 return -EOVERFLOW;
647
648         nla_for_each_nested(a, attr, rem) {
649                 /* Expected argument lengths, (u32)-1 for variable length. */
650                 static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
651                         [OVS_ACTION_ATTR_OUTPUT] = 4,
652                         [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
653                         [OVS_ACTION_ATTR_PUSH] = (u32)-1,
654                         [OVS_ACTION_ATTR_POP] = 2,
655                         [OVS_ACTION_ATTR_SET] = (u32)-1,
656                         [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
657                 };
658                 int type = nla_type(a);
659
660                 if (type > OVS_ACTION_ATTR_MAX ||
661                     (action_lens[type] != nla_len(a) &&
662                      action_lens[type] != (u32)-1))
663                         return -EINVAL;
664
665                 switch (type) {
666                 case OVS_ACTION_ATTR_UNSPEC:
667                         return -EINVAL;
668
669                 case OVS_ACTION_ATTR_USERSPACE:
670                         err = validate_userspace(a);
671                         if (err)
672                                 return err;
673                         break;
674
675                 case OVS_ACTION_ATTR_OUTPUT:
676                         if (nla_get_u32(a) >= DP_MAX_PORTS)
677                                 return -EINVAL;
678                         break;
679
680
681                 case OVS_ACTION_ATTR_POP:
682                         if (nla_get_u16(a) != OVS_KEY_ATTR_8021Q)
683                                 return -EINVAL;
684                         break;
685
686                 case OVS_ACTION_ATTR_SET:
687                 case OVS_ACTION_ATTR_PUSH:
688                         err = validate_action_key(a, key);
689                         if (err)
690                                 return err;
691                         break;
692
693                 case OVS_ACTION_ATTR_SAMPLE:
694                         err = validate_sample(a, key, depth);
695                         if (err)
696                                 return err;
697                         break;
698
699                 default:
700                         return -EINVAL;
701                 }
702         }
703
704         if (rem > 0)
705                 return -EINVAL;
706
707         return 0;
708 }
709
710 static void clear_stats(struct sw_flow *flow)
711 {
712         flow->used = 0;
713         flow->tcp_flags = 0;
714         flow->packet_count = 0;
715         flow->byte_count = 0;
716 }
717
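/* Handles OVS_PACKET_CMD_EXECUTE: reconstructs a packet and a temporary flow
 * from the request attributes, validates the actions, and executes them as if
 * the packet had arrived on the given datapath. */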
718 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
719 {
720         struct ovs_header *ovs_header = info->userhdr;
721         struct nlattr **a = info->attrs;
722         struct sw_flow_actions *acts;
723         struct sk_buff *packet;
724         struct sw_flow *flow;
725         struct datapath *dp;
726         struct ethhdr *eth;
727         int len;
728         int err;
729         int key_len;
730
731         err = -EINVAL;
732         if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
733             !a[OVS_PACKET_ATTR_ACTIONS] ||
734             nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
735                 goto err;
736
737         len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
738         packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
739         err = -ENOMEM;
740         if (!packet)
741                 goto err;
742         skb_reserve(packet, NET_IP_ALIGN);
743
744         memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);
745
746         skb_reset_mac_header(packet);
747         eth = eth_hdr(packet);
748
749         /* Normally, setting the skb 'protocol' field would be handled by a
750          * call to eth_type_trans(), but it assumes there's a sending
751          * device, which we may not have. */
752         if (ntohs(eth->h_proto) >= 1536)
753                 packet->protocol = eth->h_proto;
754         else
755                 packet->protocol = htons(ETH_P_802_2);
756
757         /* Build an sw_flow for sending this packet. */
758         flow = flow_alloc();
759         err = PTR_ERR(flow);
760         if (IS_ERR(flow))
761                 goto err_kfree_skb;
762
763         err = flow_extract(packet, -1, &flow->key, &key_len);
764         if (err)
765                 goto err_flow_put;
766
767         err = flow_metadata_from_nlattrs(&flow->key.phy.priority,
768                                          &flow->key.phy.in_port,
769                                          &flow->key.phy.tun_id,
770                                          a[OVS_PACKET_ATTR_KEY]);
771         if (err)
772                 goto err_flow_put;
773
774         err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0);
775         if (err)
776                 goto err_flow_put;
777
778         flow->hash = flow_hash(&flow->key, key_len);
779
780         acts = flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
781         err = PTR_ERR(acts);
782         if (IS_ERR(acts))
783                 goto err_flow_put;
784         rcu_assign_pointer(flow->sf_acts, acts);
785
786         OVS_CB(packet)->flow = flow;
787         packet->priority = flow->key.phy.priority;
788
789         rcu_read_lock();
790         dp = get_dp(ovs_header->dp_ifindex);
791         err = -ENODEV;
792         if (!dp)
793                 goto err_unlock;
794
795         if (flow->key.phy.in_port < DP_MAX_PORTS)
796                 OVS_CB(packet)->vport = get_vport_protected(dp,
797                                                         flow->key.phy.in_port);
798
799         local_bh_disable();
800         err = execute_actions(dp, packet);
801         local_bh_enable();
802         rcu_read_unlock();
803
804         flow_put(flow);
805         return err;
806
807 err_unlock:
808         rcu_read_unlock();
809 err_flow_put:
810         flow_put(flow);
811 err_kfree_skb:
812         kfree_skb(packet);
813 err:
814         return err;
815 }
816
817 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
818         [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
819         [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
820         [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
821 };
822
823 static struct genl_ops dp_packet_genl_ops[] = {
824         { .cmd = OVS_PACKET_CMD_EXECUTE,
825           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
826           .policy = packet_policy,
827           .doit = ovs_packet_cmd_execute
828         }
829 };
830
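/* Fills 'stats' with the datapath's flow count and the summed per-CPU hit,
 * missed, and lost counters, each read consistently under its seqcount. */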
831 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
832 {
833         int i;
834         struct flow_table *table = get_table_protected(dp);
835
836         stats->n_flows = flow_tbl_count(table);
837
838         stats->n_hit = stats->n_missed = stats->n_lost = 0;
839         for_each_possible_cpu(i) {
840                 const struct dp_stats_percpu *percpu_stats;
841                 struct dp_stats_percpu local_stats;
842                 unsigned seqcount;
843
844                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
845
846                 do {
847                         seqcount = read_seqcount_begin(&percpu_stats->seqlock);
848                         local_stats = *percpu_stats;
849                 } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
850
851                 stats->n_hit += local_stats.n_hit;
852                 stats->n_missed += local_stats.n_missed;
853                 stats->n_lost += local_stats.n_lost;
854         }
855 }
856
857 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
858         [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
859         [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
860         [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
861 };
862
863 static struct genl_family dp_flow_genl_family = {
864         .id = GENL_ID_GENERATE,
865         .hdrsize = sizeof(struct ovs_header),
866         .name = OVS_FLOW_FAMILY,
867         .version = OVS_FLOW_VERSION,
868         .maxattr = OVS_FLOW_ATTR_MAX
869 };
870
871 static struct genl_multicast_group dp_flow_multicast_group = {
872         .name = OVS_FLOW_MCGROUP
873 };
874
875 /* Called with genl_lock. */
876 static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
877                                   struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd)
878 {
879         const int skb_orig_len = skb->len;
880         const struct sw_flow_actions *sf_acts;
881         struct ovs_flow_stats stats;
882         struct ovs_header *ovs_header;
883         struct nlattr *nla;
884         unsigned long used;
885         u8 tcp_flags;
886         int err;
887
888         sf_acts = rcu_dereference_protected(flow->sf_acts,
889                                             lockdep_genl_is_held());
890
891         ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
892         if (!ovs_header)
893                 return -EMSGSIZE;
894
895         ovs_header->dp_ifindex = get_dpifindex(dp);
896
897         nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
898         if (!nla)
899                 goto nla_put_failure;
900         err = flow_to_nlattrs(&flow->key, skb);
901         if (err)
902                 goto error;
903         nla_nest_end(skb, nla);
904
905         spin_lock_bh(&flow->lock);
906         used = flow->used;
907         stats.n_packets = flow->packet_count;
908         stats.n_bytes = flow->byte_count;
909         tcp_flags = flow->tcp_flags;
910         spin_unlock_bh(&flow->lock);
911
912         if (used)
913                 NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, flow_used_time(used));
914
915         if (stats.n_packets)
916                 NLA_PUT(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats);
917
918         if (tcp_flags)
919                 NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);
920
921         /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
922          * this is the first flow to be dumped into 'skb'.  This is unusual for
923          * Netlink but individual action lists can be longer than
924          * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
925          * The userspace caller can always fetch the actions separately if it
926          * really wants them.  (Most userspace callers in fact don't care.)
927          *
928          * This can only fail for dump operations because the skb is always
929          * properly sized for single flows.
930          */
931         err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
932                       sf_acts->actions);
933         if (err < 0 && skb_orig_len)
934                 goto error;
935
936         return genlmsg_end(skb, ovs_header);
937
938 nla_put_failure:
939         err = -EMSGSIZE;
940 error:
941         genlmsg_cancel(skb, ovs_header);
942         return err;
943 }
944
945 static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
946 {
947         const struct sw_flow_actions *sf_acts;
948         int len;
949
950         sf_acts = rcu_dereference_protected(flow->sf_acts,
951                                             lockdep_genl_is_held());
952
953         len = nla_total_size(FLOW_BUFSIZE); /* OVS_FLOW_ATTR_KEY */
954         len += nla_total_size(sf_acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
955         len += nla_total_size(sizeof(struct ovs_flow_stats)); /* OVS_FLOW_ATTR_STATS */
956         len += nla_total_size(1); /* OVS_FLOW_ATTR_TCP_FLAGS */
957         len += nla_total_size(8); /* OVS_FLOW_ATTR_USED */
958         return genlmsg_new(NLMSG_ALIGN(sizeof(struct ovs_header)) + len, GFP_KERNEL);
959 }
960
961 static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow, struct datapath *dp,
962                                                u32 pid, u32 seq, u8 cmd)
963 {
964         struct sk_buff *skb;
965         int retval;
966
967         skb = ovs_flow_cmd_alloc_info(flow);
968         if (!skb)
969                 return ERR_PTR(-ENOMEM);
970
971         retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
972         BUG_ON(retval < 0);
973         return skb;
974 }
975
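/* Handles OVS_FLOW_CMD_NEW and OVS_FLOW_CMD_SET: looks up the flow by its key,
 * creating it or replacing its actions as appropriate, and notifies the flow
 * multicast group. */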
976 static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
977 {
978         struct nlattr **a = info->attrs;
979         struct ovs_header *ovs_header = info->userhdr;
980         struct sw_flow_key key;
981         struct sw_flow *flow;
982         struct sk_buff *reply;
983         struct datapath *dp;
984         struct flow_table *table;
985         int error;
986         int key_len;
987
988         /* Extract key. */
989         error = -EINVAL;
990         if (!a[OVS_FLOW_ATTR_KEY])
991                 goto error;
992         error = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
993         if (error)
994                 goto error;
995
996         /* Validate actions. */
997         if (a[OVS_FLOW_ATTR_ACTIONS]) {
998                 error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key,  0);
999                 if (error)
1000                         goto error;
1001         } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
1002                 error = -EINVAL;
1003                 goto error;
1004         }
1005
1006         dp = get_dp(ovs_header->dp_ifindex);
1007         error = -ENODEV;
1008         if (!dp)
1009                 goto error;
1010
1011         table = get_table_protected(dp);
1012         flow = flow_tbl_lookup(table, &key, key_len);
1013         if (!flow) {
1014                 struct sw_flow_actions *acts;
1015
1016                 /* Bail out if we're not allowed to create a new flow. */
1017                 error = -ENOENT;
1018                 if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
1019                         goto error;
1020
1021                 /* Expand table, if necessary, to make room. */
1022                 if (flow_tbl_need_to_expand(table)) {
1023                         struct flow_table *new_table;
1024
1025                         new_table = flow_tbl_expand(table);
1026                         if (!IS_ERR(new_table)) {
1027                                 rcu_assign_pointer(dp->table, new_table);
1028                                 flow_tbl_deferred_destroy(table);
1029                                 table = get_table_protected(dp);
1030                         }
1031                 }
1032
1033                 /* Allocate flow. */
1034                 flow = flow_alloc();
1035                 if (IS_ERR(flow)) {
1036                         error = PTR_ERR(flow);
1037                         goto error;
1038                 }
1039                 flow->key = key;
1040                 clear_stats(flow);
1041
1042                 /* Obtain actions. */
1043                 acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
1044                 error = PTR_ERR(acts);
1045                 if (IS_ERR(acts))
1046                         goto error_free_flow;
1047                 rcu_assign_pointer(flow->sf_acts, acts);
1048
1049                 /* Put flow in bucket. */
1050                 flow->hash = flow_hash(&key, key_len);
1051                 flow_tbl_insert(table, flow);
1052
1053                 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
1054                                                 info->snd_seq, OVS_FLOW_CMD_NEW);
1055         } else {
1056                 /* We found a matching flow. */
1057                 struct sw_flow_actions *old_acts;
1058
1059                 /* Bail out if we're not allowed to modify an existing flow.
1060                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1061                  * because Generic Netlink treats the latter as a dump
1062                  * request.  We also accept NLM_F_EXCL in case that bug ever
1063                  * gets fixed.
1064                  */
1065                 error = -EEXIST;
1066                 if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
1067                     info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
1068                         goto error;
1069
1070                 /* Update actions. */
1071                 old_acts = rcu_dereference_protected(flow->sf_acts,
1072                                                      lockdep_genl_is_held());
1073                 if (a[OVS_FLOW_ATTR_ACTIONS] &&
1074                     (old_acts->actions_len != nla_len(a[OVS_FLOW_ATTR_ACTIONS]) ||
1075                      memcmp(old_acts->actions, nla_data(a[OVS_FLOW_ATTR_ACTIONS]),
1076                             old_acts->actions_len))) {
1077                         struct sw_flow_actions *new_acts;
1078
1079                         new_acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
1080                         error = PTR_ERR(new_acts);
1081                         if (IS_ERR(new_acts))
1082                                 goto error;
1083
1084                         rcu_assign_pointer(flow->sf_acts, new_acts);
1085                         flow_deferred_free_acts(old_acts);
1086                 }
1087
1088                 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
1089                                                 info->snd_seq, OVS_FLOW_CMD_NEW);
1090
1091                 /* Clear stats. */
1092                 if (a[OVS_FLOW_ATTR_CLEAR]) {
1093                         spin_lock_bh(&flow->lock);
1094                         clear_stats(flow);
1095                         spin_unlock_bh(&flow->lock);
1096                 }
1097         }
1098
1099         if (!IS_ERR(reply))
1100                 genl_notify(reply, genl_info_net(info), info->snd_pid,
1101                             dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1102         else
1103                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1104                                 dp_flow_multicast_group.id, PTR_ERR(reply));
1105         return 0;
1106
1107 error_free_flow:
1108         flow_put(flow);
1109 error:
1110         return error;
1111 }
1112
1113 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1114 {
1115         struct nlattr **a = info->attrs;
1116         struct ovs_header *ovs_header = info->userhdr;
1117         struct sw_flow_key key;
1118         struct sk_buff *reply;
1119         struct sw_flow *flow;
1120         struct datapath *dp;
1121         struct flow_table *table;
1122         int err;
1123         int key_len;
1124
1125         if (!a[OVS_FLOW_ATTR_KEY])
1126                 return -EINVAL;
1127         err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1128         if (err)
1129                 return err;
1130
1131         dp = get_dp(ovs_header->dp_ifindex);
1132         if (!dp)
1133                 return -ENODEV;
1134
1135         table = get_table_protected(dp);
1136         flow = flow_tbl_lookup(table, &key, key_len);
1137         if (!flow)
1138                 return -ENOENT;
1139
1140         reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, OVS_FLOW_CMD_NEW);
1141         if (IS_ERR(reply))
1142                 return PTR_ERR(reply);
1143
1144         return genlmsg_reply(reply, info);
1145 }
1146
1147 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1148 {
1149         struct nlattr **a = info->attrs;
1150         struct ovs_header *ovs_header = info->userhdr;
1151         struct sw_flow_key key;
1152         struct sk_buff *reply;
1153         struct sw_flow *flow;
1154         struct datapath *dp;
1155         struct flow_table *table;
1156         int err;
1157         int key_len;
1158
1159         if (!a[OVS_FLOW_ATTR_KEY])
1160                 return flush_flows(ovs_header->dp_ifindex);
1161         err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1162         if (err)
1163                 return err;
1164
1165         dp = get_dp(ovs_header->dp_ifindex);
1166         if (!dp)
1167                 return -ENODEV;
1168
1169         table = get_table_protected(dp);
1170         flow = flow_tbl_lookup(table, &key, key_len);
1171         if (!flow)
1172                 return -ENOENT;
1173
1174         reply = ovs_flow_cmd_alloc_info(flow);
1175         if (!reply)
1176                 return -ENOMEM;
1177
1178         flow_tbl_remove(table, flow);
1179
1180         err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
1181                                      info->snd_seq, 0, OVS_FLOW_CMD_DEL);
1182         BUG_ON(err < 0);
1183
1184         flow_deferred_free(flow);
1185
1186         genl_notify(reply, genl_info_net(info), info->snd_pid,
1187                     dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1188         return 0;
1189 }
1190
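/* Dump callback for OVS_FLOW_CMD_GET: walks the flow table, resuming from the
 * bucket and object indexes saved in cb->args[]. */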
1191 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1192 {
1193         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1194         struct datapath *dp;
1195
1196         dp = get_dp(ovs_header->dp_ifindex);
1197         if (!dp)
1198                 return -ENODEV;
1199
1200         for (;;) {
1201                 struct sw_flow *flow;
1202                 u32 bucket, obj;
1203
1204                 bucket = cb->args[0];
1205                 obj = cb->args[1];
1206                 flow = flow_tbl_next(get_table_protected(dp), &bucket, &obj);
1207                 if (!flow)
1208                         break;
1209
1210                 if (ovs_flow_cmd_fill_info(flow, dp, skb, NETLINK_CB(cb->skb).pid,
1211                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1212                                            OVS_FLOW_CMD_NEW) < 0)
1213                         break;
1214
1215                 cb->args[0] = bucket;
1216                 cb->args[1] = obj;
1217         }
1218         return skb->len;
1219 }
1220
1221 static struct genl_ops dp_flow_genl_ops[] = {
1222         { .cmd = OVS_FLOW_CMD_NEW,
1223           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1224           .policy = flow_policy,
1225           .doit = ovs_flow_cmd_new_or_set
1226         },
1227         { .cmd = OVS_FLOW_CMD_DEL,
1228           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1229           .policy = flow_policy,
1230           .doit = ovs_flow_cmd_del
1231         },
1232         { .cmd = OVS_FLOW_CMD_GET,
1233           .flags = 0,               /* OK for unprivileged users. */
1234           .policy = flow_policy,
1235           .doit = ovs_flow_cmd_get,
1236           .dumpit = ovs_flow_cmd_dump
1237         },
1238         { .cmd = OVS_FLOW_CMD_SET,
1239           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1240           .policy = flow_policy,
1241           .doit = ovs_flow_cmd_new_or_set,
1242         },
1243 };
1244
1245 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1246 #ifdef HAVE_NLA_NUL_STRING
1247         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1248 #endif
1249         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1250 };
1251
1252 static struct genl_family dp_datapath_genl_family = {
1253         .id = GENL_ID_GENERATE,
1254         .hdrsize = sizeof(struct ovs_header),
1255         .name = OVS_DATAPATH_FAMILY,
1256         .version = OVS_DATAPATH_VERSION,
1257         .maxattr = OVS_DP_ATTR_MAX
1258 };
1259
1260 static struct genl_multicast_group dp_datapath_multicast_group = {
1261         .name = OVS_DATAPATH_MCGROUP
1262 };
1263
1264 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1265                                 u32 pid, u32 seq, u32 flags, u8 cmd)
1266 {
1267         struct ovs_header *ovs_header;
1268         struct nlattr *nla;
1269         int err;
1270
1271         ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
1272                                    flags, cmd);
1273         if (!ovs_header)
1274                 goto error;
1275
1276         ovs_header->dp_ifindex = get_dpifindex(dp);
1277
1278         rcu_read_lock();
1279         err = nla_put_string(skb, OVS_DP_ATTR_NAME, dp_name(dp));
1280         rcu_read_unlock();
1281         if (err)
1282                 goto nla_put_failure;
1283
1284         nla = nla_reserve(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats));
1285         if (!nla)
1286                 goto nla_put_failure;
1287         get_dp_stats(dp, nla_data(nla));
1288
1289         return genlmsg_end(skb, ovs_header);
1290
1291 nla_put_failure:
1292         genlmsg_cancel(skb, ovs_header);
1293 error:
1294         return -EMSGSIZE;
1295 }
1296
1297 static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
1298                                              u32 seq, u8 cmd)
1299 {
1300         struct sk_buff *skb;
1301         int retval;
1302
1303         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1304         if (!skb)
1305                 return ERR_PTR(-ENOMEM);
1306
1307         retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
1308         if (retval < 0) {
1309                 kfree_skb(skb);
1310                 return ERR_PTR(retval);
1311         }
1312         return skb;
1313 }
1314
1315 static int ovs_dp_cmd_validate(struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1316 {
1317         return CHECK_NUL_STRING(a[OVS_DP_ATTR_NAME], IFNAMSIZ - 1);
1318 }
1319
1320 /* Called with genl_mutex and optionally with RTNL lock also. */
1321 static struct datapath *lookup_datapath(struct ovs_header *ovs_header, struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1322 {
1323         struct datapath *dp;
1324
1325         if (!a[OVS_DP_ATTR_NAME])
1326                 dp = get_dp(ovs_header->dp_ifindex);
1327         else {
1328                 struct vport *vport;
1329
1330                 rcu_read_lock();
1331                 vport = vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
1332                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1333                 rcu_read_unlock();
1334         }
1335         return dp ? dp : ERR_PTR(-ENODEV);
1336 }
1337
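/* Handles OVS_DP_CMD_NEW: allocates the datapath, its flow table and per-CPU
 * stats, and creates the OVSP_LOCAL internal vport that backs the datapath's
 * network device. */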
1338 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1339 {
1340         struct nlattr **a = info->attrs;
1341         struct vport_parms parms;
1342         struct sk_buff *reply;
1343         struct datapath *dp;
1344         struct vport *vport;
1345         int err;
1346
1347         err = -EINVAL;
1348         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1349                 goto err;
1350
1351         err = ovs_dp_cmd_validate(a);
1352         if (err)
1353                 goto err;
1354
1355         rtnl_lock();
1356         err = -ENODEV;
1357         if (!try_module_get(THIS_MODULE))
1358                 goto err_unlock_rtnl;
1359
1360         err = -ENOMEM;
1361         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1362         if (dp == NULL)
1363                 goto err_put_module;
1364         INIT_LIST_HEAD(&dp->port_list);
1365
1366         /* Initialize kobject for bridge.  This will be added as
1367          * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
1368         dp->ifobj.kset = NULL;
1369         kobject_init(&dp->ifobj, &dp_ktype);
1370
1371         /* Allocate table. */
1372         err = -ENOMEM;
1373         rcu_assign_pointer(dp->table, flow_tbl_alloc(TBL_MIN_BUCKETS));
1374         if (!dp->table)
1375                 goto err_free_dp;
1376
1377         dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1378         if (!dp->stats_percpu) {
1379                 err = -ENOMEM;
1380                 goto err_destroy_table;
1381         }
1382
1383         /* Set up our datapath device. */
1384         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1385         parms.type = OVS_VPORT_TYPE_INTERNAL;
1386         parms.options = NULL;
1387         parms.dp = dp;
1388         parms.port_no = OVSP_LOCAL;
1389         parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
1390
1391         vport = new_vport(&parms);
1392         if (IS_ERR(vport)) {
1393                 err = PTR_ERR(vport);
1394                 if (err == -EBUSY)
1395                         err = -EEXIST;
1396
1397                 goto err_destroy_percpu;
1398         }
1399
1400         reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
1401         err = PTR_ERR(reply);
1402         if (IS_ERR(reply))
1403                 goto err_destroy_local_port;
1404
1405         list_add_tail(&dp->list_node, &dps);
1406         dp_sysfs_add_dp(dp);
1407
1408         rtnl_unlock();
1409
1410         genl_notify(reply, genl_info_net(info), info->snd_pid,
1411                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1412         return 0;
1413
1414 err_destroy_local_port:
1415         dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
1416 err_destroy_percpu:
1417         free_percpu(dp->stats_percpu);
1418 err_destroy_table:
1419         flow_tbl_destroy(get_table_protected(dp));
1420 err_free_dp:
1421         kfree(dp);
1422 err_put_module:
1423         module_put(THIS_MODULE);
1424 err_unlock_rtnl:
1425         rtnl_unlock();
1426 err:
1427         return err;
1428 }
1429
1430 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1431 {
1432         struct vport *vport, *next_vport;
1433         struct sk_buff *reply;
1434         struct datapath *dp;
1435         int err;
1436
1437         err = ovs_dp_cmd_validate(info->attrs);
1438         if (err)
1439                 goto exit;
1440
1441         rtnl_lock();
1442         dp = lookup_datapath(info->userhdr, info->attrs);
1443         err = PTR_ERR(dp);
1444         if (IS_ERR(dp))
1445                 goto exit_unlock;
1446
1447         reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_DEL);
1448         err = PTR_ERR(reply);
1449         if (IS_ERR(reply))
1450                 goto exit_unlock;
1451
1452         list_for_each_entry_safe (vport, next_vport, &dp->port_list, node)
1453                 if (vport->port_no != OVSP_LOCAL)
1454                         dp_detach_port(vport);
1455
1456         dp_sysfs_del_dp(dp);
1457         list_del(&dp->list_node);
1458         dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
1459
1460         /* rtnl_unlock() will wait until all the references to devices that
1461          * are pending unregistration have been dropped.  We do it here to
1462          * ensure that any internal devices (which contain DP pointers) are
1463          * fully destroyed before freeing the datapath.
1464          */
1465         rtnl_unlock();
1466
1467         call_rcu(&dp->rcu, destroy_dp_rcu);
1468         module_put(THIS_MODULE);
1469
1470         genl_notify(reply, genl_info_net(info), info->snd_pid,
1471                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1472
1473         return 0;
1474
1475 exit_unlock:
1476         rtnl_unlock();
1477 exit:
1478         return err;
1479 }
1480
1481 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1482 {
1483         struct sk_buff *reply;
1484         struct datapath *dp;
1485         int err;
1486
1487         err = ovs_dp_cmd_validate(info->attrs);
1488         if (err)
1489                 return err;
1490
1491         dp = lookup_datapath(info->userhdr, info->attrs);
1492         if (IS_ERR(dp))
1493                 return PTR_ERR(dp);
1494
1495         reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
1496         if (IS_ERR(reply)) {
1497                 err = PTR_ERR(reply);
1498                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1499                                 dp_datapath_multicast_group.id, err);
1500                 return 0;
1501         }
1502
1503         genl_notify(reply, genl_info_net(info), info->snd_pid,
1504                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1505         return 0;
1506 }
1507
1508 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1509 {
1510         struct sk_buff *reply;
1511         struct datapath *dp;
1512         int err;
1513
1514         err = ovs_dp_cmd_validate(info->attrs);
1515         if (err)
1516                 return err;
1517
1518         dp = lookup_datapath(info->userhdr, info->attrs);
1519         if (IS_ERR(dp))
1520                 return PTR_ERR(dp);
1521
1522         reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
1523         if (IS_ERR(reply))
1524                 return PTR_ERR(reply);
1525
1526         return genlmsg_reply(reply, info);
1527 }
1528
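/* Dump callback for OVS_DP_CMD_GET: iterates the global datapath list,
 * resuming from the index saved in cb->args[0]. */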
1529 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1530 {
1531         struct datapath *dp;
1532         int skip = cb->args[0];
1533         int i = 0;
1534
1535         list_for_each_entry (dp, &dps, list_node) {
1536                 if (i < skip)
1537                         continue;
1538                 if (ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
1539                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1540                                          OVS_DP_CMD_NEW) < 0)
1541                         break;
1542                 i++;
1543         }
1544
1545         cb->args[0] = i;
1546
1547         return skb->len;
1548 }
1549
1550 static struct genl_ops dp_datapath_genl_ops[] = {
1551         { .cmd = OVS_DP_CMD_NEW,
1552           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1553           .policy = datapath_policy,
1554           .doit = ovs_dp_cmd_new
1555         },
1556         { .cmd = OVS_DP_CMD_DEL,
1557           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1558           .policy = datapath_policy,
1559           .doit = ovs_dp_cmd_del
1560         },
1561         { .cmd = OVS_DP_CMD_GET,
1562           .flags = 0,               /* OK for unprivileged users. */
1563           .policy = datapath_policy,
1564           .doit = ovs_dp_cmd_get,
1565           .dumpit = ovs_dp_cmd_dump
1566         },
1567         { .cmd = OVS_DP_CMD_SET,
1568           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1569           .policy = datapath_policy,
1570           .doit = ovs_dp_cmd_set,
1571         },
1572 };
1573
1574 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
1575 #ifdef HAVE_NLA_NUL_STRING
1576         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1577         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
1578         [OVS_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
1579 #else
1580         [OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct ovs_vport_stats) },
1581         [OVS_VPORT_ATTR_ADDRESS] = { .minlen = ETH_ALEN },
1582 #endif
1583         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1584         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1585         [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1586         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1587 };
1588
1589 static struct genl_family dp_vport_genl_family = {
1590         .id = GENL_ID_GENERATE,
1591         .hdrsize = sizeof(struct ovs_header),
1592         .name = OVS_VPORT_FAMILY,
1593         .version = OVS_VPORT_VERSION,
1594         .maxattr = OVS_VPORT_ATTR_MAX
1595 };
1596
1597 struct genl_multicast_group dp_vport_multicast_group = {
1598         .name = OVS_VPORT_MCGROUP
1599 };
1600
1601 /* Called with RTNL lock or RCU read lock. */
1602 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1603                                    u32 pid, u32 seq, u32 flags, u8 cmd)
1604 {
1605         struct ovs_header *ovs_header;
1606         struct nlattr *nla;
1607         int err;
1608
1609         ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
1610                                  flags, cmd);
1611         if (!ovs_header)
1612                 return -EMSGSIZE;
1613
1614         ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1615
1616         NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
1617         NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport_get_type(vport));
1618         NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport_get_name(vport));
1619         NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
1620
1621         nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats));
1622         if (!nla)
1623                 goto nla_put_failure;
1624
1625         vport_get_stats(vport, nla_data(nla));
1626
1627         NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));
1628
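             /* -EMSGSIZE means the skb ran out of room, so the message must
              * be abandoned.  Any other error from vport_get_options() is
              * ignored here, so a reply without options can still go out. */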
1629         err = vport_get_options(vport, skb);
1630         if (err == -EMSGSIZE)
1631                 goto error;
1632
1633         return genlmsg_end(skb, ovs_header);
1634
1635 nla_put_failure:
1636         err = -EMSGSIZE;
1637 error:
1638         genlmsg_cancel(skb, ovs_header);
1639         return err;
1640 }
1641
1642 /* Called with RTNL lock or RCU read lock. */
1643 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
1644                                          u32 seq, u8 cmd)
1645 {
1646         struct sk_buff *skb;
1647         int retval;
1648
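             /* GFP_ATOMIC: per the comment above, this may run under the RCU
              * read lock, where sleeping allocations are not allowed. */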
1649         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1650         if (!skb)
1651                 return ERR_PTR(-ENOMEM);
1652
1653         retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
1654         if (retval < 0) {
1655                 kfree_skb(skb);
1656                 return ERR_PTR(retval);
1657         }
1658         return skb;
1659 }
1660
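     /* CHECK_NUL_STRING is a compat shim: where the netlink policy already
      * enforces NLA_NUL_STRING it should be a no-op, and elsewhere it checks
      * by hand that the name is null-terminated and fits in IFNAMSIZ - 1
      * bytes. */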
1661 static int ovs_vport_cmd_validate(struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1662 {
1663         return CHECK_NUL_STRING(a[OVS_VPORT_ATTR_NAME], IFNAMSIZ - 1);
1664 }
1665
1666 /* Called with RTNL lock or RCU read lock. */
1667 static struct vport *lookup_vport(struct ovs_header *ovs_header,
1668                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1669 {
1670         struct datapath *dp;
1671         struct vport *vport;
1672
1673         if (a[OVS_VPORT_ATTR_NAME]) {
1674                 vport = vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
1675                 if (!vport)
1676                         return ERR_PTR(-ENODEV);
1677                 return vport;
1678         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1679                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1680
1681                 if (port_no >= DP_MAX_PORTS)
1682                         return ERR_PTR(-EFBIG);
1683
1684                 dp = get_dp(ovs_header->dp_ifindex);
1685                 if (!dp)
1686                         return ERR_PTR(-ENODEV);
1687
1688                 vport = get_vport_protected(dp, port_no);
1689                 if (!vport)
1690                         return ERR_PTR(-ENOENT);
1691                 return vport;
1692         } else
1693                 return ERR_PTR(-EINVAL);
1694 }
1695
1696 /* Called with RTNL lock. */
1697 static int change_vport(struct vport *vport, struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1698 {
1699         int err = 0;
1700
1701         if (a[OVS_VPORT_ATTR_STATS])
1702                 vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
1703
1704         if (a[OVS_VPORT_ATTR_ADDRESS])
1705                 err = vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS]));
1706
1707         return err;
1708 }
1709
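     /* Creates a new vport.  A well-formed request carries at least
      * OVS_VPORT_ATTR_NAME, OVS_VPORT_ATTR_TYPE, and
      * OVS_VPORT_ATTR_UPCALL_PID.  If OVS_VPORT_ATTR_PORT_NO is given, the
      * port is created at exactly that number (-EBUSY if it is taken);
      * otherwise the lowest free port number above 0 is used, since port 0
      * (OVSP_LOCAL) is reserved for the datapath's local port. */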
1710 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1711 {
1712         struct nlattr **a = info->attrs;
1713         struct ovs_header *ovs_header = info->userhdr;
1714         struct vport_parms parms;
1715         struct sk_buff *reply;
1716         struct vport *vport;
1717         struct datapath *dp;
1718         u32 port_no;
1719         int err;
1720
1721         err = -EINVAL;
1722         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1723             !a[OVS_VPORT_ATTR_UPCALL_PID])
1724                 goto exit;
1725
1726         err = ovs_vport_cmd_validate(a);
1727         if (err)
1728                 goto exit;
1729
1730         rtnl_lock();
1731         dp = get_dp(ovs_header->dp_ifindex);
1732         err = -ENODEV;
1733         if (!dp)
1734                 goto exit_unlock;
1735
1736         if (a[OVS_VPORT_ATTR_PORT_NO]) {
1737                 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1738
1739                 err = -EFBIG;
1740                 if (port_no >= DP_MAX_PORTS)
1741                         goto exit_unlock;
1742
1743                 vport = get_vport_protected(dp, port_no);
1744                 err = -EBUSY;
1745                 if (vport)
1746                         goto exit_unlock;
1747         } else {
1748                 for (port_no = 1; ; port_no++) {
1749                         if (port_no >= DP_MAX_PORTS) {
1750                                 err = -EFBIG;
1751                                 goto exit_unlock;
1752                         }
1753                         vport = get_vport_protected(dp, port_no);
1754                         if (!vport)
1755                                 break;
1756                 }
1757         }
1758
1759         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1760         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1761         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1762         parms.dp = dp;
1763         parms.port_no = port_no;
1764         parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
1765
1766         vport = new_vport(&parms);
1767         err = PTR_ERR(vport);
1768         if (IS_ERR(vport))
1769                 goto exit_unlock;
1770
1771         dp_sysfs_add_if(vport);
1772
1773         err = change_vport(vport, a);
1774         if (!err) {
1775                 reply = ovs_vport_cmd_build_info(vport, info->snd_pid,
1776                                                  info->snd_seq, OVS_VPORT_CMD_NEW);
1777                 if (IS_ERR(reply))
1778                         err = PTR_ERR(reply);
1779         }
1780         if (err) {
1781                 dp_detach_port(vport);
1782                 goto exit_unlock;
1783         }
1784         genl_notify(reply, genl_info_net(info), info->snd_pid,
1785                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1786
1788 exit_unlock:
1789         rtnl_unlock();
1790 exit:
1791         return err;
1792 }
1793
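     /* Modifies an existing vport under RTNL.  The type is immutable (a
      * mismatched OVS_VPORT_ATTR_TYPE fails with -EINVAL); options, stats,
      * Ethernet address, and upcall PID may all be updated. */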
1794 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1795 {
1796         struct nlattr **a = info->attrs;
1797         struct sk_buff *reply;
1798         struct vport *vport;
1799         int err;
1800
1801         err = ovs_vport_cmd_validate(a);
1802         if (err)
1803                 goto exit;
1804
1805         rtnl_lock();
1806         vport = lookup_vport(info->userhdr, a);
1807         err = PTR_ERR(vport);
1808         if (IS_ERR(vport))
1809                 goto exit_unlock;
1810
1811         err = 0;
1812         if (a[OVS_VPORT_ATTR_TYPE] && nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport_get_type(vport))
1813                 err = -EINVAL;
1814         if (!err && a[OVS_VPORT_ATTR_OPTIONS])
1815                 err = vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
1816         if (!err)
1817                 err = change_vport(vport, a);
1818         if (!err && a[OVS_VPORT_ATTR_UPCALL_PID])
1819                 vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
1820
1821         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1822                                          OVS_VPORT_CMD_NEW);
1823         if (IS_ERR(reply)) {
1824                 err = PTR_ERR(reply);
1825                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1826                                 dp_vport_multicast_group.id, err);
1827                 goto exit_unlock;
1828         }
1829
1830         genl_notify(reply, genl_info_net(info), info->snd_pid,
1831                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1832
1833 exit_unlock:
1834         rtnl_unlock();
1835 exit:
1836         return err;
1837 }
1838
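     /* Detaches a vport from its datapath.  The local port (OVSP_LOCAL)
      * cannot be removed this way; it only goes away with the datapath
      * itself.  The notification is built before dp_detach_port(), while the
      * port's attributes are still readable. */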
1839 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1840 {
1841         struct nlattr **a = info->attrs;
1842         struct sk_buff *reply;
1843         struct vport *vport;
1844         int err;
1845
1846         err = ovs_vport_cmd_validate(a);
1847         if (err)
1848                 goto exit;
1849
1850         rtnl_lock();
1851         vport = lookup_vport(info->userhdr, a);
1852         err = PTR_ERR(vport);
1853         if (IS_ERR(vport))
1854                 goto exit_unlock;
1855
1856         if (vport->port_no == OVSP_LOCAL) {
1857                 err = -EINVAL;
1858                 goto exit_unlock;
1859         }
1860
1861         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1862                                          OVS_VPORT_CMD_DEL);
1863         err = PTR_ERR(reply);
1864         if (IS_ERR(reply))
1865                 goto exit_unlock;
1866
1867         dp_detach_port(vport);
1868
1869         genl_notify(reply, genl_info_net(info), info->snd_pid,
1870                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1871
1872 exit_unlock:
1873         rtnl_unlock();
1874 exit:
1875         return err;
1876 }
1877
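     /* Read-only lookup, so the RCU read lock suffices; RTNL is not taken. */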
1878 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
1879 {
1880         struct nlattr **a = info->attrs;
1881         struct ovs_header *ovs_header = info->userhdr;
1882         struct sk_buff *reply;
1883         struct vport *vport;
1884         int err;
1885
1886         err = ovs_vport_cmd_validate(a);
1887         if (err)
1888                 goto exit;
1889
1890         rcu_read_lock();
1891         vport = lookup_vport(ovs_header, a);
1892         err = PTR_ERR(vport);
1893         if (IS_ERR(vport))
1894                 goto exit_unlock;
1895
1896         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1897                                          OVS_VPORT_CMD_NEW);
1898         err = PTR_ERR(reply);
1899         if (IS_ERR(reply))
1900                 goto exit_unlock;
1901
1902         rcu_read_unlock();
1903
1904         return genlmsg_reply(reply, info);
1905
1906 exit_unlock:
1907         rcu_read_unlock();
1908 exit:
1909         return err;
1910 }
1911
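     /* Dump handler for the vports of one datapath.  cb->args[0] holds the
      * next port number to examine, so a dump that fills its skb resumes
      * where it left off; unused port numbers are simply skipped. */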
1912 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1913 {
1914         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1915         struct datapath *dp;
1916         u32 port_no;
1917         int retval;
1918
1919         dp = get_dp(ovs_header->dp_ifindex);
1920         if (!dp)
1921                 return -ENODEV;
1922
1923         rcu_read_lock();
1924         for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
1925                 struct vport *vport;
1926
1927                 vport = get_vport_protected(dp, port_no);
1928                 if (!vport)
1929                         continue;
1930
1931                 if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
1932                                             cb->nlh->nlmsg_seq, NLM_F_MULTI,
1933                                             OVS_VPORT_CMD_NEW) < 0)
1934                         break;
1935         }
1936         rcu_read_unlock();
1937
1938         cb->args[0] = port_no;
1939         retval = skb->len;
1940
1941         return retval;
1942 }
1943
1944 static struct genl_ops dp_vport_genl_ops[] = {
1945         { .cmd = OVS_VPORT_CMD_NEW,
1946           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1947           .policy = vport_policy,
1948           .doit = ovs_vport_cmd_new
1949         },
1950         { .cmd = OVS_VPORT_CMD_DEL,
1951           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1952           .policy = vport_policy,
1953           .doit = ovs_vport_cmd_del
1954         },
1955         { .cmd = OVS_VPORT_CMD_GET,
1956           .flags = 0,               /* OK for unprivileged users. */
1957           .policy = vport_policy,
1958           .doit = ovs_vport_cmd_get,
1959           .dumpit = ovs_vport_cmd_dump
1960         },
1961         { .cmd = OVS_VPORT_CMD_SET,
1962           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1963           .policy = vport_policy,
1964           .doit = ovs_vport_cmd_set,
1965         },
1966 };
1967
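     /* Ties a generic netlink family to its operations and optional
      * multicast group so that registration can be table-driven. */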
1968 struct genl_family_and_ops {
1969         struct genl_family *family;
1970         struct genl_ops *ops;
1971         int n_ops;
1972         struct genl_multicast_group *group;
1973 };
1974
1975 static const struct genl_family_and_ops dp_genl_families[] = {
1976         { &dp_datapath_genl_family,
1977           dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
1978           &dp_datapath_multicast_group },
1979         { &dp_vport_genl_family,
1980           dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
1981           &dp_vport_multicast_group },
1982         { &dp_flow_genl_family,
1983           dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
1984           &dp_flow_multicast_group },
1985         { &dp_packet_genl_family,
1986           dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
1987           NULL },
1988 };
1989
1990 static void dp_unregister_genl(int n_families)
1991 {
1992         int i;
1993
1994         for (i = 0; i < n_families; i++)
1995                 genl_unregister_family(dp_genl_families[i].family);
1996 }
1997
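     /* Registers each family (and multicast group, if any) in order,
      * unwinding on the first failure.  The unwind only unregisters
      * families; unregistering a family is expected to drop its multicast
      * groups as well. */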
1998 static int dp_register_genl(void)
1999 {
2000         int n_registered;
2001         int err;
2002         int i;
2003
2004         n_registered = 0;
2005         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2006                 const struct genl_family_and_ops *f = &dp_genl_families[i];
2007
2008                 err = genl_register_family_with_ops(f->family, f->ops,
2009                                                     f->n_ops);
2010                 if (err)
2011                         goto error;
2012                 n_registered++;
2013
2014                 if (f->group) {
2015                         err = genl_register_mc_group(f->family, f->group);
2016                         if (err)
2017                                 goto error;
2018                 }
2019         }
2020
2021         return 0;
2022
2023 error:
2024         dp_unregister_genl(n_registered);
2025         return err;
2026 }
2027
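     /* Module init: subsystems come up in dependency order (tunnels, flow
      * table, vports, the netdev notifier, then generic netlink) and are
      * torn down in reverse on failure. */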
2028 static int __init dp_init(void)
2029 {
2030         struct sk_buff *dummy_skb;
2031         int err;
2032
2033         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
2034
2035         pr_info("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
2036
2037         err = tnl_init();
2038         if (err)
2039                 goto error;
2040
2041         err = flow_init();
2042         if (err)
2043                 goto error_tnl_exit;
2044
2045         err = vport_init();
2046         if (err)
2047                 goto error_flow_exit;
2048
2049         err = register_netdevice_notifier(&dp_device_notifier);
2050         if (err)
2051                 goto error_vport_exit;
2052
2053         err = dp_register_genl();
2054         if (err < 0)
2055                 goto error_unreg_notifier;
2056
2057         return 0;
2058
2059 error_unreg_notifier:
2060         unregister_netdevice_notifier(&dp_device_notifier);
2061 error_vport_exit:
2062         vport_exit();
2063 error_flow_exit:
2064         flow_exit();
2065 error_tnl_exit:
2066         tnl_exit();
2067 error:
2068         return err;
2069 }
2070
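     /* Module exit.  rcu_barrier() waits for outstanding call_rcu()
      * callbacks (e.g. deferred flow frees) to finish before the code they
      * point into is unloaded. */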
2071 static void dp_cleanup(void)
2072 {
2073         rcu_barrier();
2074         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2075         unregister_netdevice_notifier(&dp_device_notifier);
2076         vport_exit();
2077         flow_exit();
2078         tnl_exit();
2079 }
2080
2081 module_init(dp_init);
2082 module_exit(dp_cleanup);
2083
2084 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2085 MODULE_LICENSE("GPL");