[sliver-openvswitch.git] / datapath / datapath.c
1 /*
2  * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
3  * Distributed under the terms of the GNU GPL version 2.
4  *
5  * Significant portions of this file may be copied from parts of the Linux
6  * kernel, by Linus Torvalds and others.
7  */
8
9 /* Functions for managing the dp interface/device. */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/if_arp.h>
16 #include <linux/if_vlan.h>
17 #include <linux/in.h>
18 #include <linux/ip.h>
19 #include <linux/jhash.h>
20 #include <linux/delay.h>
21 #include <linux/time.h>
22 #include <linux/etherdevice.h>
23 #include <linux/genetlink.h>
24 #include <linux/kernel.h>
25 #include <linux/kthread.h>
26 #include <linux/mutex.h>
27 #include <linux/percpu.h>
28 #include <linux/rcupdate.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/version.h>
32 #include <linux/ethtool.h>
33 #include <linux/wait.h>
34 #include <asm/system.h>
35 #include <asm/div64.h>
36 #include <asm/bug.h>
37 #include <linux/highmem.h>
38 #include <linux/netfilter_bridge.h>
39 #include <linux/netfilter_ipv4.h>
40 #include <linux/inetdevice.h>
41 #include <linux/list.h>
42 #include <linux/rculist.h>
43 #include <linux/dmi.h>
44 #include <net/inet_ecn.h>
45 #include <net/genetlink.h>
46
47 #include "openvswitch/datapath-protocol.h"
48 #include "checksum.h"
49 #include "datapath.h"
50 #include "actions.h"
51 #include "flow.h"
52 #include "table.h"
53 #include "vlan.h"
54 #include "vport-internal_dev.h"
55
56 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
57     LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
58 #error Kernels before 2.6.18 or after 3.0 are not supported by this version of Open vSwitch.
59 #endif
60
61 int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
62 EXPORT_SYMBOL(dp_ioctl_hook);
63
64 /**
65  * DOC: Locking:
66  *
67  * Writes to device state (add/remove datapath, port, set operations on vports,
68  * etc.) are protected by RTNL.
69  *
70  * Writes to other state (flow table modifications, set miscellaneous datapath
71  * parameters such as drop frags, etc.) are protected by genl_mutex.  The RTNL
72  * lock nests inside genl_mutex.
73  *
74  * Reads are protected by RCU.
75  *
76  * There are a few special cases (mostly stats) that have their own
77  * synchronization but they nest under all of above and don't interact with
78  * each other.
79  */
80
81 /* Global list of datapaths to enable dumping them all out.
82  * Protected by genl_mutex.
83  */
84 static LIST_HEAD(dps);
85
86 static struct vport *new_vport(const struct vport_parms *);
87 static int queue_userspace_packets(struct datapath *, struct sk_buff *,
88                                  const struct dp_upcall_info *);
89
90 /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
91 struct datapath *get_dp(int dp_ifindex)
92 {
93         struct datapath *dp = NULL;
94         struct net_device *dev;
95
96         rcu_read_lock();
97         dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
98         if (dev) {
99                 struct vport *vport = internal_dev_get_vport(dev);
100                 if (vport)
101                         dp = vport->dp;
102         }
103         rcu_read_unlock();
104
105         return dp;
106 }
107 EXPORT_SYMBOL_GPL(get_dp);
108
109 /* Must be called with genl_mutex. */
110 static struct tbl *get_table_protected(struct datapath *dp)
111 {
112         return rcu_dereference_protected(dp->table, lockdep_genl_is_held());
113 }
114
115 /* Must be called with rcu_read_lock or RTNL lock. */
116 static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
117 {
118         return rcu_dereference_rtnl(dp->ports[port_no]);
119 }
120
121 /* Must be called with rcu_read_lock or RTNL lock. */
122 const char *dp_name(const struct datapath *dp)
123 {
124         return vport_get_name(rcu_dereference_rtnl(dp->ports[ODPP_LOCAL]));
125 }
126
127 static inline size_t br_nlmsg_size(void)
128 {
129         return NLMSG_ALIGN(sizeof(struct ifinfomsg))
130                + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
131                + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
132                + nla_total_size(4) /* IFLA_MASTER */
133                + nla_total_size(4) /* IFLA_MTU */
134                + nla_total_size(4) /* IFLA_LINK */
135                + nla_total_size(1); /* IFLA_OPERSTATE */
136 }
137
138 /* Caller must hold RTNL lock. */
139 static int dp_fill_ifinfo(struct sk_buff *skb,
140                           const struct vport *port,
141                           int event, unsigned int flags)
142 {
143         struct datapath *dp = port->dp;
144         int ifindex = vport_get_ifindex(port);
145         int iflink = vport_get_iflink(port);
146         struct ifinfomsg *hdr;
147         struct nlmsghdr *nlh;
148
149         if (ifindex < 0)
150                 return ifindex;
151
152         if (iflink < 0)
153                 return iflink;
154
155         nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
156         if (nlh == NULL)
157                 return -EMSGSIZE;
158
159         hdr = nlmsg_data(nlh);
160         hdr->ifi_family = AF_BRIDGE;
161         hdr->__ifi_pad = 0;
162         hdr->ifi_type = ARPHRD_ETHER;
163         hdr->ifi_index = ifindex;
164         hdr->ifi_flags = vport_get_flags(port);
165         hdr->ifi_change = 0;
166
167         NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
168         NLA_PUT_U32(skb, IFLA_MASTER,
169                 vport_get_ifindex(get_vport_protected(dp, ODPP_LOCAL)));
170         NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
171 #ifdef IFLA_OPERSTATE
172         NLA_PUT_U8(skb, IFLA_OPERSTATE,
173                    vport_is_running(port)
174                         ? vport_get_operstate(port)
175                         : IF_OPER_DOWN);
176 #endif
177
178         NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));
179
180         if (ifindex != iflink)
181                 NLA_PUT_U32(skb, IFLA_LINK, iflink);
182
183         return nlmsg_end(skb, nlh);
184
185 nla_put_failure:
186         nlmsg_cancel(skb, nlh);
187         return -EMSGSIZE;
188 }
189
190 /* Caller must hold RTNL lock. */
191 static void dp_ifinfo_notify(int event, struct vport *port)
192 {
193         struct sk_buff *skb;
194         int err = -ENOBUFS;
195
196         skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
197         if (skb == NULL)
198                 goto errout;
199
200         err = dp_fill_ifinfo(skb, port, event, 0);
201         if (err < 0) {
202                 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
203                 WARN_ON(err == -EMSGSIZE);
204                 kfree_skb(skb);
205                 goto errout;
206         }
207         rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
208         return;
209 errout:
210         if (err < 0)
211                 rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
212 }
213
214 static void release_dp(struct kobject *kobj)
215 {
216         struct datapath *dp = container_of(kobj, struct datapath, ifobj);
217         kfree(dp);
218 }
219
220 static struct kobj_type dp_ktype = {
221         .release = release_dp
222 };
223
224 static void destroy_dp_rcu(struct rcu_head *rcu)
225 {
226         struct datapath *dp = container_of(rcu, struct datapath, rcu);
227
228         tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
229         free_percpu(dp->stats_percpu);
230         kobject_put(&dp->ifobj);
231 }
232
233 /* Called with RTNL lock and genl_lock. */
234 static struct vport *new_vport(const struct vport_parms *parms)
235 {
236         struct vport *vport;
237
238         vport = vport_add(parms);
239         if (!IS_ERR(vport)) {
240                 struct datapath *dp = parms->dp;
241
242                 rcu_assign_pointer(dp->ports[parms->port_no], vport);
243                 list_add(&vport->node, &dp->port_list);
244
245                 dp_ifinfo_notify(RTM_NEWLINK, vport);
246         }
247
248         return vport;
249 }
250
251 /* Called with RTNL lock. */
252 int dp_detach_port(struct vport *p)
253 {
254         ASSERT_RTNL();
255
256         if (p->port_no != ODPP_LOCAL)
257                 dp_sysfs_del_if(p);
258         dp_ifinfo_notify(RTM_DELLINK, p);
259
260         /* First drop references to device. */
261         list_del(&p->node);
262         rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
263
264         /* Then destroy it. */
265         return vport_del(p);
266 }
267
268 /* Must be called with rcu_read_lock. */
269 void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
270 {
271         struct datapath *dp = p->dp;
272         struct dp_stats_percpu *stats;
273         int stats_counter_off;
274         int error;
275
276         OVS_CB(skb)->vport = p;
277
278         if (!OVS_CB(skb)->flow) {
279                 struct sw_flow_key key;
280                 struct tbl_node *flow_node;
281                 int key_len;
282                 bool is_frag;
283
284                 /* Extract flow from 'skb' into 'key'. */
285                 error = flow_extract(skb, p->port_no, &key, &key_len, &is_frag);
286                 if (unlikely(error)) {
287                         kfree_skb(skb);
288                         return;
289                 }
290
291                 if (is_frag && dp->drop_frags) {
292                         consume_skb(skb);
293                         stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
294                         goto out;
295                 }
296
297                 /* Look up flow. */
298                 flow_node = tbl_lookup(rcu_dereference(dp->table), &key, key_len,
299                                        flow_hash(&key, key_len), flow_cmp);
300                 if (unlikely(!flow_node)) {
301                         struct dp_upcall_info upcall;
302
303                         upcall.cmd = ODP_PACKET_CMD_MISS;
304                         upcall.key = &key;
305                         upcall.userdata = 0;
306                         upcall.sample_pool = 0;
307                         upcall.actions = NULL;
308                         upcall.actions_len = 0;
309                         dp_upcall(dp, skb, &upcall);
310                         stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
311                         goto out;
312                 }
313
314                 OVS_CB(skb)->flow = flow_cast(flow_node);
315         }
316
317         stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
318         flow_used(OVS_CB(skb)->flow, skb);
319         execute_actions(dp, skb);
320
321 out:
322         /* Update datapath statistics. */
323         local_bh_disable();
324         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
325
326         write_seqcount_begin(&stats->seqlock);
327         (*(u64 *)((u8 *)stats + stats_counter_off))++;
328         write_seqcount_end(&stats->seqlock);
329
330         local_bh_enable();
331 }
332
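/* Copies 'skb' into 'to' while completing its partial checksum: the bytes
 * before the checksum start are copied verbatim, the remainder is copied and
 * summed, and the folded result is written at the checksum offset. */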
333 static void copy_and_csum_skb(struct sk_buff *skb, void *to)
334 {
335         u16 csum_start, csum_offset;
336         __wsum csum;
337
338         get_skb_csum_pointers(skb, &csum_start, &csum_offset);
339         csum_start -= skb_headroom(skb);
340
341         skb_copy_bits(skb, 0, to, csum_start);
342
343         csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
344                                       skb->len - csum_start, 0);
345         *(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
346 }
347
348 static struct genl_family dp_packet_genl_family = {
349         .id = GENL_ID_GENERATE,
350         .hdrsize = sizeof(struct odp_header),
351         .name = ODP_PACKET_FAMILY,
352         .version = 1,
353         .maxattr = ODP_PACKET_ATTR_MAX
354 };
355
356 /* Generic Netlink multicast groups for upcalls.
357  *
358  * We really want three unique multicast groups per datapath, but we can't even
359  * get one, because genl_register_mc_group() takes genl_lock, which is also
360  * held during Generic Netlink message processing, so trying to acquire
361  * multicast groups during ODP_DP_NEW processing deadlocks.  Instead, we
362  * preallocate a few groups and use them round-robin for datapaths.  Collision
363  * isn't fatal--multicast listeners should check that the family is the one
364  * that they want and discard others--but it wastes time and memory to receive
365  * unwanted messages.
366  */
367 #define PACKET_N_MC_GROUPS 16
368 static struct genl_multicast_group packet_mc_groups[PACKET_N_MC_GROUPS];
369
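/* Returns the Generic Netlink multicast group ID used for upcalls of type
 * 'cmd' on 'dp', chosen from the preallocated groups by hashing the
 * datapath's ifindex together with the command. */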
370 static u32 packet_mc_group(struct datapath *dp, u8 cmd)
371 {
372         u32 idx;
373         BUILD_BUG_ON_NOT_POWER_OF_2(PACKET_N_MC_GROUPS);
374
375         idx = jhash_2words(dp->dp_ifindex, cmd, 0) & (PACKET_N_MC_GROUPS - 1);
376         return packet_mc_groups[idx].id;
377 }
378
379 static int packet_register_mc_groups(void)
380 {
381         int i;
382
383         for (i = 0; i < PACKET_N_MC_GROUPS; i++) {
384                 struct genl_multicast_group *group = &packet_mc_groups[i];
385                 int error;
386
387                 sprintf(group->name, "packet%d", i);
388                 error = genl_register_mc_group(&dp_packet_genl_family, group);
389                 if (error)
390                         return error;
391         }
392         return 0;
393 }
394
395 int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
396 {
397         struct dp_stats_percpu *stats;
398         int err;
399
400         WARN_ON_ONCE(skb_shared(skb));
401
402         forward_ip_summed(skb, true);
403
404         /* Break apart GSO packets into their component pieces.  Otherwise
405          * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
406         if (skb_is_gso(skb)) {
407                 struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
408
409                 if (IS_ERR(nskb)) {
410                         kfree_skb(skb);
411                         err = PTR_ERR(nskb);
412                         goto err;
413                 }
414                 consume_skb(skb);
415                 skb = nskb;
416         }
417
418         err = queue_userspace_packets(dp, skb, upcall_info);
419         if (err)
420                 goto err;
421
422         return 0;
423
424 err:
425         local_bh_disable();
426         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
427
428         write_seqcount_begin(&stats->seqlock);
429         stats->n_lost++;
430         write_seqcount_end(&stats->seqlock);
431
432         local_bh_enable();
433
434         return err;
435 }
436
437 /* Send each packet in the 'skb' list to userspace for 'dp' as directed by
438  * 'upcall_info'.  There will be only one packet unless we broke up a GSO
439  * packet.
440  */
441 static int queue_userspace_packets(struct datapath *dp, struct sk_buff *skb,
442                                  const struct dp_upcall_info *upcall_info)
443 {
444         u32 group = packet_mc_group(dp, upcall_info->cmd);
445         struct sk_buff *nskb;
446         int err;
447
448         do {
449                 struct odp_header *upcall;
450                 struct sk_buff *user_skb; /* to be queued to userspace */
451                 struct nlattr *nla;
452                 unsigned int len;
453
454                 nskb = skb->next;
455                 skb->next = NULL;
456
457                 err = vlan_deaccel_tag(skb);
458                 if (unlikely(err))
459                         goto err_kfree_skbs;
460
461                 if (nla_attr_size(skb->len) > USHRT_MAX) {
462                         err = -EFBIG;
                            goto err_kfree_skbs;
                    }
463
464                 len = sizeof(struct odp_header);
465                 len += nla_total_size(skb->len);
466                 len += nla_total_size(FLOW_BUFSIZE);
467                 if (upcall_info->userdata)
468                         len += nla_total_size(8);
469                 if (upcall_info->sample_pool)
470                         len += nla_total_size(4);
471                 if (upcall_info->actions_len)
472                         len += nla_total_size(upcall_info->actions_len);
473
474                 user_skb = genlmsg_new(len, GFP_ATOMIC);
475                 if (!user_skb) {
476                         netlink_set_err(INIT_NET_GENL_SOCK, 0, group, -ENOBUFS);
477                         err = -ENOBUFS;
478                         goto err_kfree_skbs;
                    }
479
480                 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd);
481                 upcall->dp_ifindex = dp->dp_ifindex;
482
483                 nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_KEY);
484                 flow_to_nlattrs(upcall_info->key, user_skb);
485                 nla_nest_end(user_skb, nla);
486
487                 if (upcall_info->userdata)
488                         nla_put_u64(user_skb, ODP_PACKET_ATTR_USERDATA, upcall_info->userdata);
489                 if (upcall_info->sample_pool)
490                         nla_put_u32(user_skb, ODP_PACKET_ATTR_SAMPLE_POOL, upcall_info->sample_pool);
491                 if (upcall_info->actions_len) {
492                         const struct nlattr *actions = upcall_info->actions;
493                         u32 actions_len = upcall_info->actions_len;
494
495                         nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_ACTIONS);
496                         memcpy(__skb_put(user_skb, actions_len), actions, actions_len);
497                         nla_nest_end(user_skb, nla);
498                 }
499
500                 nla = __nla_reserve(user_skb, ODP_PACKET_ATTR_PACKET, skb->len);
501                 if (skb->ip_summed == CHECKSUM_PARTIAL)
502                         copy_and_csum_skb(skb, nla_data(nla));
503                 else
504                         skb_copy_bits(skb, 0, nla_data(nla), skb->len);
505
506                 err = genlmsg_multicast(user_skb, 0, group, GFP_ATOMIC);
507                 if (err)
508                         goto err_kfree_skbs;
509
510                 consume_skb(skb);
511                 skb = nskb;
512         } while (skb);
513         return 0;
514
515 err_kfree_skbs:
516         kfree_skb(skb);
517         while ((skb = nskb) != NULL) {
518                 nskb = skb->next;
519                 kfree_skb(skb);
520         }
521         return err;
522 }
523
524 /* Called with genl_mutex. */
525 static int flush_flows(int dp_ifindex)
526 {
527         struct tbl *old_table;
528         struct tbl *new_table;
529         struct datapath *dp;
530
531         dp = get_dp(dp_ifindex);
532         if (!dp)
533                 return -ENODEV;
534
535         old_table = get_table_protected(dp);
536         new_table = tbl_create(TBL_MIN_BUCKETS);
537         if (!new_table)
538                 return -ENOMEM;
539
540         rcu_assign_pointer(dp->table, new_table);
541
542         tbl_deferred_destroy(old_table, flow_free_tbl);
543
544         return 0;
545 }
546
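/* Verifies that 'attr' holds a well-formed list of datapath actions: each
 * action must have a known type, the exact attribute length that type
 * requires, and, where applicable, an argument within the valid range. */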
547 static int validate_actions(const struct nlattr *attr)
548 {
549         const struct nlattr *a;
550         int rem;
551
552         nla_for_each_nested(a, attr, rem) {
553                 static const u32 action_lens[ODP_ACTION_ATTR_MAX + 1] = {
554                         [ODP_ACTION_ATTR_OUTPUT] = 4,
555                         [ODP_ACTION_ATTR_USERSPACE] = 8,
556                         [ODP_ACTION_ATTR_SET_DL_TCI] = 2,
557                         [ODP_ACTION_ATTR_STRIP_VLAN] = 0,
558                         [ODP_ACTION_ATTR_SET_DL_SRC] = ETH_ALEN,
559                         [ODP_ACTION_ATTR_SET_DL_DST] = ETH_ALEN,
560                         [ODP_ACTION_ATTR_SET_NW_SRC] = 4,
561                         [ODP_ACTION_ATTR_SET_NW_DST] = 4,
562                         [ODP_ACTION_ATTR_SET_NW_TOS] = 1,
563                         [ODP_ACTION_ATTR_SET_TP_SRC] = 2,
564                         [ODP_ACTION_ATTR_SET_TP_DST] = 2,
565                         [ODP_ACTION_ATTR_SET_TUNNEL] = 8,
566                         [ODP_ACTION_ATTR_SET_PRIORITY] = 4,
567                         [ODP_ACTION_ATTR_POP_PRIORITY] = 0,
568                 };
569                 int type = nla_type(a);
570
571                 if (type > ODP_ACTION_ATTR_MAX || nla_len(a) != action_lens[type])
572                         return -EINVAL;
573
574                 switch (type) {
575                 case ODP_ACTION_ATTR_UNSPEC:
576                         return -EINVAL;
577
578                 case ODP_ACTION_ATTR_USERSPACE:
579                 case ODP_ACTION_ATTR_STRIP_VLAN:
580                 case ODP_ACTION_ATTR_SET_DL_SRC:
581                 case ODP_ACTION_ATTR_SET_DL_DST:
582                 case ODP_ACTION_ATTR_SET_NW_SRC:
583                 case ODP_ACTION_ATTR_SET_NW_DST:
584                 case ODP_ACTION_ATTR_SET_TP_SRC:
585                 case ODP_ACTION_ATTR_SET_TP_DST:
586                 case ODP_ACTION_ATTR_SET_TUNNEL:
587                 case ODP_ACTION_ATTR_SET_PRIORITY:
588                 case ODP_ACTION_ATTR_POP_PRIORITY:
589                         /* No validation needed. */
590                         break;
591
592                 case ODP_ACTION_ATTR_OUTPUT:
593                         if (nla_get_u32(a) >= DP_MAX_PORTS)
594                                 return -EINVAL;
595                         break;
596
597                 case ODP_ACTION_ATTR_SET_DL_TCI:
598                         if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
599                                 return -EINVAL;
600                         break;
601
602                 case ODP_ACTION_ATTR_SET_NW_TOS:
603                         if (nla_get_u8(a) & INET_ECN_MASK)
604                                 return -EINVAL;
605                         break;
606
607                 default:
608                         return -EOPNOTSUPP;
609                 }
610         }
611
612         if (rem > 0)
613                 return -EINVAL;
614
615         return 0;
616 }
617 static void clear_stats(struct sw_flow *flow)
618 {
619         flow->used = 0;
620         flow->tcp_flags = 0;
621         flow->packet_count = 0;
622         flow->byte_count = 0;
623 }
624
625 /* Called with genl_mutex. */
626 static int expand_table(struct datapath *dp)
627 {
628         struct tbl *old_table = get_table_protected(dp);
629         struct tbl *new_table;
630
631         new_table = tbl_expand(old_table);
632         if (IS_ERR(new_table)) {
633                 if (PTR_ERR(new_table) != -ENOSPC)
634                         return PTR_ERR(new_table);
635         } else {
636                 rcu_assign_pointer(dp->table, new_table);
637                 tbl_deferred_destroy(old_table, NULL);
638         }
639
640         return 0;
641 }
642
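/* ODP_PACKET_CMD_EXECUTE handler: rebuilds a packet from the supplied
 * attributes, extracts its flow key, and executes the given actions on the
 * datapath named by the Generic Netlink header. */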
643 static int odp_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
644 {
645         struct odp_header *odp_header = info->userhdr;
646         struct nlattr **a = info->attrs;
647         struct sw_flow_actions *acts;
648         struct sk_buff *packet;
649         struct sw_flow *flow;
650         struct datapath *dp;
651         struct ethhdr *eth;
652         bool is_frag;
653         int len;
654         int err;
655         int key_len;
656
657         err = -EINVAL;
658         if (!a[ODP_PACKET_ATTR_PACKET] || !a[ODP_PACKET_ATTR_KEY] ||
659             !a[ODP_PACKET_ATTR_ACTIONS] ||
660             nla_len(a[ODP_PACKET_ATTR_PACKET]) < ETH_HLEN)
661                 goto err;
662
663         err = validate_actions(a[ODP_PACKET_ATTR_ACTIONS]);
664         if (err)
665                 goto err;
666
667         len = nla_len(a[ODP_PACKET_ATTR_PACKET]);
668         packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
669         err = -ENOMEM;
670         if (!packet)
671                 goto err;
672         skb_reserve(packet, NET_IP_ALIGN);
673
674         memcpy(__skb_put(packet, len), nla_data(a[ODP_PACKET_ATTR_PACKET]), len);
675
676         skb_reset_mac_header(packet);
677         eth = eth_hdr(packet);
678
679         /* Normally, setting the skb 'protocol' field would be handled by a
680          * call to eth_type_trans(), but it assumes there's a sending
681          * device, which we may not have. */
682         if (ntohs(eth->h_proto) >= 1536)
683                 packet->protocol = eth->h_proto;
684         else
685                 packet->protocol = htons(ETH_P_802_2);
686
687         /* Build an sw_flow for sending this packet. */
688         flow = flow_alloc();
689         err = PTR_ERR(flow);
690         if (IS_ERR(flow))
691                 goto err_kfree_skb;
692
693         err = flow_extract(packet, -1, &flow->key, &key_len, &is_frag);
694         if (err)
695                 goto err_flow_put;
696         flow->tbl_node.hash = flow_hash(&flow->key, key_len);
697
698         err = flow_metadata_from_nlattrs(&flow->key.eth.in_port,
699                                          &flow->key.eth.tun_id,
700                                          a[ODP_PACKET_ATTR_KEY]);
701         if (err)
702                 goto err_flow_put;
703
704         acts = flow_actions_alloc(a[ODP_PACKET_ATTR_ACTIONS]);
705         err = PTR_ERR(acts);
706         if (IS_ERR(acts))
707                 goto err_flow_put;
708         rcu_assign_pointer(flow->sf_acts, acts);
709
710         OVS_CB(packet)->flow = flow;
711
712         rcu_read_lock();
713         dp = get_dp(odp_header->dp_ifindex);
714         err = -ENODEV;
715         if (!dp)
716                 goto err_unlock;
717         err = execute_actions(dp, packet);
718         rcu_read_unlock();
719
720         flow_put(flow);
721         return err;
722
723 err_unlock:
724         rcu_read_unlock();
725 err_flow_put:
726         flow_put(flow);
727 err_kfree_skb:
728         kfree_skb(packet);
729 err:
730         return err;
731 }
732
733 static const struct nla_policy packet_policy[ODP_PACKET_ATTR_MAX + 1] = {
734         [ODP_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
735         [ODP_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
736         [ODP_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
737 };
738
739 static struct genl_ops dp_packet_genl_ops[] = {
740         { .cmd = ODP_PACKET_CMD_EXECUTE,
741           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
742           .policy = packet_policy,
743           .doit = odp_packet_cmd_execute
744         }
745 };
746
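/* Sums the per-CPU datapath counters into 'stats', rereading each CPU's
 * snapshot under its seqcount until a consistent copy is obtained. */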
747 static void get_dp_stats(struct datapath *dp, struct odp_stats *stats)
748 {
749         int i;
750
751         stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0;
752         for_each_possible_cpu(i) {
753                 const struct dp_stats_percpu *percpu_stats;
754                 struct dp_stats_percpu local_stats;
755                 unsigned seqcount;
756
757                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
758
759                 do {
760                         seqcount = read_seqcount_begin(&percpu_stats->seqlock);
761                         local_stats = *percpu_stats;
762                 } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
763
764                 stats->n_frags += local_stats.n_frags;
765                 stats->n_hit += local_stats.n_hit;
766                 stats->n_missed += local_stats.n_missed;
767                 stats->n_lost += local_stats.n_lost;
768         }
769 }
770
771 /* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports.
772  * Called with RTNL lock.
773  */
774 int dp_min_mtu(const struct datapath *dp)
775 {
776         struct vport *p;
777         int mtu = 0;
778
779         ASSERT_RTNL();
780
781         list_for_each_entry (p, &dp->port_list, node) {
782                 int dev_mtu;
783
784                 /* Skip any internal ports, since that's what we're trying to
785                  * set. */
786                 if (is_internal_vport(p))
787                         continue;
788
789                 dev_mtu = vport_get_mtu(p);
790                 if (!dev_mtu)
791                         continue;
792                 if (!mtu || dev_mtu < mtu)
793                         mtu = dev_mtu;
794         }
795
796         return mtu ? mtu : ETH_DATA_LEN;
797 }
798
799 /* Sets the MTU of each of the datapath's internal devices to the minimum of its ports.
800  * Called with RTNL lock.
801  */
802 void set_internal_devs_mtu(const struct datapath *dp)
803 {
804         struct vport *p;
805         int mtu;
806
807         ASSERT_RTNL();
808
809         mtu = dp_min_mtu(dp);
810
811         list_for_each_entry (p, &dp->port_list, node) {
812                 if (is_internal_vport(p))
813                         vport_set_mtu(p, mtu);
814         }
815 }
816
817 static const struct nla_policy flow_policy[ODP_FLOW_ATTR_MAX + 1] = {
818         [ODP_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
819         [ODP_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
820         [ODP_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
821 };
822
823 static struct genl_family dp_flow_genl_family = {
824         .id = GENL_ID_GENERATE,
825         .hdrsize = sizeof(struct odp_header),
826         .name = ODP_FLOW_FAMILY,
827         .version = 1,
828         .maxattr = ODP_FLOW_ATTR_MAX
829 };
830
831 static struct genl_multicast_group dp_flow_multicast_group = {
832         .name = ODP_FLOW_MCGROUP
833 };
834
835 /* Called with genl_lock. */
836 static int odp_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
837                                   struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd)
838 {
839         const int skb_orig_len = skb->len;
840         const struct sw_flow_actions *sf_acts;
841         struct odp_flow_stats stats;
842         struct odp_header *odp_header;
843         struct nlattr *nla;
844         unsigned long used;
845         u8 tcp_flags;
846         int err;
847
848         sf_acts = rcu_dereference_protected(flow->sf_acts,
849                                             lockdep_genl_is_held());
850
851         odp_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
852         if (!odp_header)
853                 return -EMSGSIZE;
854
855         odp_header->dp_ifindex = dp->dp_ifindex;
856
857         nla = nla_nest_start(skb, ODP_FLOW_ATTR_KEY);
858         if (!nla)
859                 goto nla_put_failure;
860         err = flow_to_nlattrs(&flow->key, skb);
861         if (err)
862                 goto error;
863         nla_nest_end(skb, nla);
864
865         spin_lock_bh(&flow->lock);
866         used = flow->used;
867         stats.n_packets = flow->packet_count;
868         stats.n_bytes = flow->byte_count;
869         tcp_flags = flow->tcp_flags;
870         spin_unlock_bh(&flow->lock);
871
872         if (used)
873                 NLA_PUT_U64(skb, ODP_FLOW_ATTR_USED, flow_used_time(used));
874
875         if (stats.n_packets)
876                 NLA_PUT(skb, ODP_FLOW_ATTR_STATS, sizeof(struct odp_flow_stats), &stats);
877
878         if (tcp_flags)
879                 NLA_PUT_U8(skb, ODP_FLOW_ATTR_TCP_FLAGS, tcp_flags);
880
881         /* If ODP_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
882          * this is the first flow to be dumped into 'skb'.  This is unusual for
883          * Netlink but individual action lists can be longer than
884          * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
885          * The userspace caller can always fetch the actions separately if it
886          * really wants them.  (Most userspace callers in fact don't care.)
887          *
888          * This can only fail for dump operations because the skb is always
889          * properly sized for single flows.
890          */
891         err = nla_put(skb, ODP_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
892                       sf_acts->actions);
893         if (err < 0 && skb_orig_len)
894                 goto error;
895
896         return genlmsg_end(skb, odp_header);
897
898 nla_put_failure:
899         err = -EMSGSIZE;
900 error:
901         genlmsg_cancel(skb, odp_header);
902         return err;
903 }
904
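/* Called with genl_lock.  Allocates a reply skb sized to hold the key,
 * actions, statistics, TCP flags, and last-used time of 'flow'. */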
905 static struct sk_buff *odp_flow_cmd_alloc_info(struct sw_flow *flow)
906 {
907         const struct sw_flow_actions *sf_acts;
908         int len;
909
910         sf_acts = rcu_dereference_protected(flow->sf_acts,
911                                             lockdep_genl_is_held());
912
913         len = nla_total_size(FLOW_BUFSIZE); /* ODP_FLOW_ATTR_KEY */
914         len += nla_total_size(sf_acts->actions_len); /* ODP_FLOW_ATTR_ACTIONS */
915         len += nla_total_size(sizeof(struct odp_flow_stats)); /* ODP_FLOW_ATTR_STATS */
916         len += nla_total_size(1); /* ODP_FLOW_ATTR_TCP_FLAGS */
917         len += nla_total_size(8); /* ODP_FLOW_ATTR_USED */
918         return genlmsg_new(NLMSG_ALIGN(sizeof(struct odp_header)) + len, GFP_KERNEL);
919 }
920
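/* Called with genl_lock.  Allocates a reply skb and fills it with a flow
 * message for 'flow'; returns an ERR_PTR on allocation failure. */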
921 static struct sk_buff *odp_flow_cmd_build_info(struct sw_flow *flow, struct datapath *dp,
922                                                u32 pid, u32 seq, u8 cmd)
923 {
924         struct sk_buff *skb;
925         int retval;
926
927         skb = odp_flow_cmd_alloc_info(flow);
928         if (!skb)
929                 return ERR_PTR(-ENOMEM);
930
931         retval = odp_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
932         BUG_ON(retval < 0);
933         return skb;
934 }
935
936 static int odp_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
937 {
938         struct nlattr **a = info->attrs;
939         struct odp_header *odp_header = info->userhdr;
940         struct tbl_node *flow_node;
941         struct sw_flow_key key;
942         struct sw_flow *flow;
943         struct sk_buff *reply;
944         struct datapath *dp;
945         struct tbl *table;
946         u32 hash;
947         int error;
948         int key_len;
949
950         /* Extract key. */
951         error = -EINVAL;
952         if (!a[ODP_FLOW_ATTR_KEY])
953                 goto error;
954         error = flow_from_nlattrs(&key, &key_len, a[ODP_FLOW_ATTR_KEY]);
955         if (error)
956                 goto error;
957
958         /* Validate actions. */
959         if (a[ODP_FLOW_ATTR_ACTIONS]) {
960                 error = validate_actions(a[ODP_FLOW_ATTR_ACTIONS]);
961                 if (error)
962                         goto error;
963         } else if (info->genlhdr->cmd == ODP_FLOW_CMD_NEW) {
964                 error = -EINVAL;
965                 goto error;
966         }
967
968         dp = get_dp(odp_header->dp_ifindex);
969         error = -ENODEV;
970         if (!dp)
971                 goto error;
972
973         hash = flow_hash(&key, key_len);
974         table = get_table_protected(dp);
975         flow_node = tbl_lookup(table, &key, key_len, hash, flow_cmp);
976         if (!flow_node) {
977                 struct sw_flow_actions *acts;
978
979                 /* Bail out if we're not allowed to create a new flow. */
980                 error = -ENOENT;
981                 if (info->genlhdr->cmd == ODP_FLOW_CMD_SET)
982                         goto error;
983
984                 /* Expand table, if necessary, to make room. */
985                 if (tbl_count(table) >= tbl_n_buckets(table)) {
986                         error = expand_table(dp);
987                         if (error)
988                                 goto error;
989                         table = get_table_protected(dp);
990                 }
991
992                 /* Allocate flow. */
993                 flow = flow_alloc();
994                 if (IS_ERR(flow)) {
995                         error = PTR_ERR(flow);
996                         goto error;
997                 }
998                 flow->key = key;
999                 clear_stats(flow);
1000
1001                 /* Obtain actions. */
1002                 acts = flow_actions_alloc(a[ODP_FLOW_ATTR_ACTIONS]);
1003                 error = PTR_ERR(acts);
1004                 if (IS_ERR(acts))
1005                         goto error_free_flow;
1006                 rcu_assign_pointer(flow->sf_acts, acts);
1007
1008                 /* Put flow in bucket. */
1009                 error = tbl_insert(table, &flow->tbl_node, hash);
1010                 if (error)
1011                         goto error_free_flow;
1012
1013                 reply = odp_flow_cmd_build_info(flow, dp, info->snd_pid,
1014                                                 info->snd_seq, ODP_FLOW_CMD_NEW);
1015         } else {
1016                 /* We found a matching flow. */
1017                 struct sw_flow_actions *old_acts;
1018
1019                 /* Bail out if we're not allowed to modify an existing flow.
1020                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1021                  * because Generic Netlink treats the latter as a dump
1022                  * request.  We also accept NLM_F_EXCL in case that bug ever
1023                  * gets fixed.
1024                  */
1025                 error = -EEXIST;
1026                 if (info->genlhdr->cmd == ODP_FLOW_CMD_NEW &&
1027                     info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
1028                         goto error;
1029
1030                 /* Update actions. */
1031                 flow = flow_cast(flow_node);
1032                 old_acts = rcu_dereference_protected(flow->sf_acts,
1033                                                      lockdep_genl_is_held());
1034                 if (a[ODP_FLOW_ATTR_ACTIONS] &&
1035                     (old_acts->actions_len != nla_len(a[ODP_FLOW_ATTR_ACTIONS]) ||
1036                      memcmp(old_acts->actions, nla_data(a[ODP_FLOW_ATTR_ACTIONS]),
1037                             old_acts->actions_len))) {
1038                         struct sw_flow_actions *new_acts;
1039
1040                         new_acts = flow_actions_alloc(a[ODP_FLOW_ATTR_ACTIONS]);
1041                         error = PTR_ERR(new_acts);
1042                         if (IS_ERR(new_acts))
1043                                 goto error;
1044
1045                         rcu_assign_pointer(flow->sf_acts, new_acts);
1046                         flow_deferred_free_acts(old_acts);
1047                 }
1048
1049                 reply = odp_flow_cmd_build_info(flow, dp, info->snd_pid,
1050                                                 info->snd_seq, ODP_FLOW_CMD_NEW);
1051
1052                 /* Clear stats. */
1053                 if (a[ODP_FLOW_ATTR_CLEAR]) {
1054                         spin_lock_bh(&flow->lock);
1055                         clear_stats(flow);
1056                         spin_unlock_bh(&flow->lock);
1057                 }
1058         }
1059
1060         if (!IS_ERR(reply))
1061                 genl_notify(reply, genl_info_net(info), info->snd_pid,
1062                             dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1063         else
1064                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1065                                 dp_flow_multicast_group.id, PTR_ERR(reply));
1066         return 0;
1067
1068 error_free_flow:
1069         flow_put(flow);
1070 error:
1071         return error;
1072 }
1073
1074 static int odp_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1075 {
1076         struct nlattr **a = info->attrs;
1077         struct odp_header *odp_header = info->userhdr;
1078         struct sw_flow_key key;
1079         struct tbl_node *flow_node;
1080         struct sk_buff *reply;
1081         struct sw_flow *flow;
1082         struct datapath *dp;
1083         struct tbl *table;
1084         int err;
1085         int key_len;
1086
1087         if (!a[ODP_FLOW_ATTR_KEY])
1088                 return -EINVAL;
1089         err = flow_from_nlattrs(&key, &key_len, a[ODP_FLOW_ATTR_KEY]);
1090         if (err)
1091                 return err;
1092
1093         dp = get_dp(odp_header->dp_ifindex);
1094         if (!dp)
1095                 return -ENODEV;
1096
1097         table = get_table_protected(dp);
1098         flow_node = tbl_lookup(table, &key, key_len, flow_hash(&key, key_len),
1099                                flow_cmp);
1100         if (!flow_node)
1101                 return -ENOENT;
1102
1103         flow = flow_cast(flow_node);
1104         reply = odp_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, ODP_FLOW_CMD_NEW);
1105         if (IS_ERR(reply))
1106                 return PTR_ERR(reply);
1107
1108         return genlmsg_reply(reply, info);
1109 }
1110
1111 static int odp_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1112 {
1113         struct nlattr **a = info->attrs;
1114         struct odp_header *odp_header = info->userhdr;
1115         struct sw_flow_key key;
1116         struct tbl_node *flow_node;
1117         struct sk_buff *reply;
1118         struct sw_flow *flow;
1119         struct datapath *dp;
1120         struct tbl *table;
1121         int err;
1122         int key_len;
1123
1124         if (!a[ODP_FLOW_ATTR_KEY])
1125                 return flush_flows(odp_header->dp_ifindex);
1126         err = flow_from_nlattrs(&key, &key_len, a[ODP_FLOW_ATTR_KEY]);
1127         if (err)
1128                 return err;
1129
1130         dp = get_dp(odp_header->dp_ifindex);
1131         if (!dp)
1132                 return -ENODEV;
1133
1134         table = get_table_protected(dp);
1135         flow_node = tbl_lookup(table, &key, key_len, flow_hash(&key, key_len),
1136                                flow_cmp);
1137         if (!flow_node)
1138                 return -ENOENT;
1139         flow = flow_cast(flow_node);
1140
1141         reply = odp_flow_cmd_alloc_info(flow);
1142         if (!reply)
1143                 return -ENOMEM;
1144
1145         err = tbl_remove(table, flow_node);
1146         if (err) {
1147                 kfree_skb(reply);
1148                 return err;
1149         }
1150
1151         err = odp_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
1152                                      info->snd_seq, 0, ODP_FLOW_CMD_DEL);
1153         BUG_ON(err < 0);
1154
1155         flow_deferred_free(flow);
1156
1157         genl_notify(reply, genl_info_net(info), info->snd_pid,
1158                     dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1159         return 0;
1160 }
1161
1162 static int odp_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1163 {
1164         struct odp_header *odp_header = genlmsg_data(nlmsg_data(cb->nlh));
1165         struct datapath *dp;
1166
1167         dp = get_dp(odp_header->dp_ifindex);
1168         if (!dp)
1169                 return -ENODEV;
1170
1171         for (;;) {
1172                 struct tbl_node *flow_node;
1173                 struct sw_flow *flow;
1174                 u32 bucket, obj;
1175
1176                 bucket = cb->args[0];
1177                 obj = cb->args[1];
1178                 flow_node = tbl_next(get_table_protected(dp), &bucket, &obj);
1179                 if (!flow_node)
1180                         break;
1181
1182                 flow = flow_cast(flow_node);
1183                 if (odp_flow_cmd_fill_info(flow, dp, skb, NETLINK_CB(cb->skb).pid,
1184                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1185                                            ODP_FLOW_CMD_NEW) < 0)
1186                         break;
1187
1188                 cb->args[0] = bucket;
1189                 cb->args[1] = obj;
1190         }
1191         return skb->len;
1192 }
1193
1194 static struct genl_ops dp_flow_genl_ops[] = {
1195         { .cmd = ODP_FLOW_CMD_NEW,
1196           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1197           .policy = flow_policy,
1198           .doit = odp_flow_cmd_new_or_set
1199         },
1200         { .cmd = ODP_FLOW_CMD_DEL,
1201           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1202           .policy = flow_policy,
1203           .doit = odp_flow_cmd_del
1204         },
1205         { .cmd = ODP_FLOW_CMD_GET,
1206           .flags = 0,               /* OK for unprivileged users. */
1207           .policy = flow_policy,
1208           .doit = odp_flow_cmd_get,
1209           .dumpit = odp_flow_cmd_dump
1210         },
1211         { .cmd = ODP_FLOW_CMD_SET,
1212           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1213           .policy = flow_policy,
1214           .doit = odp_flow_cmd_new_or_set,
1215         },
1216 };
1217
1218 static const struct nla_policy datapath_policy[ODP_DP_ATTR_MAX + 1] = {
1219 #ifdef HAVE_NLA_NUL_STRING
1220         [ODP_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1221 #endif
1222         [ODP_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 },
1223         [ODP_DP_ATTR_SAMPLING] = { .type = NLA_U32 },
1224 };
1225
1226 static struct genl_family dp_datapath_genl_family = {
1227         .id = GENL_ID_GENERATE,
1228         .hdrsize = sizeof(struct odp_header),
1229         .name = ODP_DATAPATH_FAMILY,
1230         .version = 1,
1231         .maxattr = ODP_DP_ATTR_MAX
1232 };
1233
1234 static struct genl_multicast_group dp_datapath_multicast_group = {
1235         .name = ODP_DATAPATH_MCGROUP
1236 };
1237
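/* Serializes 'dp' into a datapath Generic Netlink message on 'skb': its
 * name, statistics, fragment handling policy, sampling probability, and the
 * multicast groups used for its upcalls. */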
1238 static int odp_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1239                                 u32 pid, u32 seq, u32 flags, u8 cmd)
1240 {
1241         struct odp_header *odp_header;
1242         struct nlattr *nla;
1243         int err;
1244
1245         odp_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
1246                                    flags, cmd);
1247         if (!odp_header)
1248                 goto error;
1249
1250         odp_header->dp_ifindex = dp->dp_ifindex;
1251
1252         rcu_read_lock();
1253         err = nla_put_string(skb, ODP_DP_ATTR_NAME, dp_name(dp));
1254         rcu_read_unlock();
1255         if (err)
1256                 goto nla_put_failure;
1257
1258         nla = nla_reserve(skb, ODP_DP_ATTR_STATS, sizeof(struct odp_stats));
1259         if (!nla)
1260                 goto nla_put_failure;
1261         get_dp_stats(dp, nla_data(nla));
1262
1263         NLA_PUT_U32(skb, ODP_DP_ATTR_IPV4_FRAGS,
1264                     dp->drop_frags ? ODP_DP_FRAG_DROP : ODP_DP_FRAG_ZERO);
1265
1266         if (dp->sflow_probability)
1267                 NLA_PUT_U32(skb, ODP_DP_ATTR_SAMPLING, dp->sflow_probability);
1268
1269         nla = nla_nest_start(skb, ODP_DP_ATTR_MCGROUPS);
1270         if (!nla)
1271                 goto nla_put_failure;
1272         NLA_PUT_U32(skb, ODP_PACKET_CMD_MISS, packet_mc_group(dp, ODP_PACKET_CMD_MISS));
1273         NLA_PUT_U32(skb, ODP_PACKET_CMD_ACTION, packet_mc_group(dp, ODP_PACKET_CMD_ACTION));
1274         NLA_PUT_U32(skb, ODP_PACKET_CMD_SAMPLE, packet_mc_group(dp, ODP_PACKET_CMD_SAMPLE));
1275         nla_nest_end(skb, nla);
1276
1277         return genlmsg_end(skb, odp_header);
1278
1279 nla_put_failure:
1280         genlmsg_cancel(skb, odp_header);
1281 error:
1282         return -EMSGSIZE;
1283 }
1284
1285 static struct sk_buff *odp_dp_cmd_build_info(struct datapath *dp, u32 pid,
1286                                              u32 seq, u8 cmd)
1287 {
1288         struct sk_buff *skb;
1289         int retval;
1290
1291         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1292         if (!skb)
1293                 return ERR_PTR(-ENOMEM);
1294
1295         retval = odp_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
1296         if (retval < 0) {
1297                 kfree_skb(skb);
1298                 return ERR_PTR(retval);
1299         }
1300         return skb;
1301 }
1302
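/* Validates datapath attributes beyond what the Netlink policy expresses:
 * the fragment handling mode must be a recognized value and the name, if
 * present, must be NUL-terminated. */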
1303 static int odp_dp_cmd_validate(struct nlattr *a[ODP_DP_ATTR_MAX + 1])
1304 {
1305         if (a[ODP_DP_ATTR_IPV4_FRAGS]) {
1306                 u32 frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]);
1307
1308                 if (frags != ODP_DP_FRAG_ZERO && frags != ODP_DP_FRAG_DROP)
1309                         return -EINVAL;
1310         }
1311
1312         return CHECK_NUL_STRING(a[ODP_DP_ATTR_NAME], IFNAMSIZ - 1);
1313 }
1314
1315 /* Called with genl_mutex and optionally with RTNL lock also. */
1316 static struct datapath *lookup_datapath(struct odp_header *odp_header, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
1317 {
1318         struct datapath *dp;
1319
1320         if (!a[ODP_DP_ATTR_NAME])
1321                 dp = get_dp(odp_header->dp_ifindex);
1322         else {
1323                 struct vport *vport;
1324
1325                 rcu_read_lock();
1326                 vport = vport_locate(nla_data(a[ODP_DP_ATTR_NAME]));
1327                 dp = vport && vport->port_no == ODPP_LOCAL ? vport->dp : NULL;
1328                 rcu_read_unlock();
1329         }
1330         return dp ? dp : ERR_PTR(-ENODEV);
1331 }
1332
1333 /* Called with genl_mutex. */
1334 static void change_datapath(struct datapath *dp, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
1335 {
1336         if (a[ODP_DP_ATTR_IPV4_FRAGS])
1337                 dp->drop_frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]) == ODP_DP_FRAG_DROP;
1338         if (a[ODP_DP_ATTR_SAMPLING])
1339                 dp->sflow_probability = nla_get_u32(a[ODP_DP_ATTR_SAMPLING]);
1340 }
1341
1342 static int odp_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1343 {
1344         struct nlattr **a = info->attrs;
1345         struct vport_parms parms;
1346         struct sk_buff *reply;
1347         struct datapath *dp;
1348         struct vport *vport;
1349         int err;
1350
1351         err = -EINVAL;
1352         if (!a[ODP_DP_ATTR_NAME])
1353                 goto err;
1354
1355         err = odp_dp_cmd_validate(a);
1356         if (err)
1357                 goto err;
1358
1359         rtnl_lock();
1360         err = -ENODEV;
1361         if (!try_module_get(THIS_MODULE))
1362                 goto err_unlock_rtnl;
1363
1364         err = -ENOMEM;
1365         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1366         if (dp == NULL)
1367                 goto err_put_module;
1368         INIT_LIST_HEAD(&dp->port_list);
1369
1370         /* Initialize kobject for bridge.  This will be added as
1371          * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
1372         dp->ifobj.kset = NULL;
1373         kobject_init(&dp->ifobj, &dp_ktype);
1374
1375         /* Allocate table. */
1376         err = -ENOMEM;
1377         rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
1378         if (!dp->table)
1379                 goto err_free_dp;
1380
1381         /* Set up our datapath device. */
1382         parms.name = nla_data(a[ODP_DP_ATTR_NAME]);
1383         parms.type = ODP_VPORT_TYPE_INTERNAL;
1384         parms.options = NULL;
1385         parms.dp = dp;
1386         parms.port_no = ODPP_LOCAL;
1387         vport = new_vport(&parms);
1388         if (IS_ERR(vport)) {
1389                 err = PTR_ERR(vport);
1390                 if (err == -EBUSY)
1391                         err = -EEXIST;
1392
1393                 goto err_destroy_table;
1394         }
1395         dp->dp_ifindex = vport_get_ifindex(vport);
1396
1397         dp->drop_frags = 0;
1398         dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1399         if (!dp->stats_percpu) {
1400                 err = -ENOMEM;
1401                 goto err_destroy_local_port;
1402         }
1403
1404         change_datapath(dp, a);
1405
1406         reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_NEW);
1407         err = PTR_ERR(reply);
1408         if (IS_ERR(reply))
1409                 goto err_destroy_local_port;
1410
1411         list_add_tail(&dp->list_node, &dps);
1412         dp_sysfs_add_dp(dp);
1413
1414         rtnl_unlock();
1415
1416         genl_notify(reply, genl_info_net(info), info->snd_pid,
1417                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1418         return 0;
1419
1420 err_destroy_local_port:
1421         dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
1422 err_destroy_table:
1423         tbl_destroy(get_table_protected(dp), NULL);
1424 err_free_dp:
1425         kfree(dp);
1426 err_put_module:
1427         module_put(THIS_MODULE);
1428 err_unlock_rtnl:
1429         rtnl_unlock();
1430 err:
1431         return err;
1432 }
1433
1434 static int odp_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1435 {
1436         struct vport *vport, *next_vport;
1437         struct sk_buff *reply;
1438         struct datapath *dp;
1439         int err;
1440
1441         err = odp_dp_cmd_validate(info->attrs);
1442         if (err)
1443                 goto exit;
1444
1445         rtnl_lock();
1446         dp = lookup_datapath(info->userhdr, info->attrs);
1447         err = PTR_ERR(dp);
1448         if (IS_ERR(dp))
1449                 goto exit_unlock;
1450
1451         reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_DEL);
1452         err = PTR_ERR(reply);
1453         if (IS_ERR(reply))
1454                 goto exit_unlock;
1455
1456         list_for_each_entry_safe (vport, next_vport, &dp->port_list, node)
1457                 if (vport->port_no != ODPP_LOCAL)
1458                         dp_detach_port(vport);
1459
1460         dp_sysfs_del_dp(dp);
1461         list_del(&dp->list_node);
1462         dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
1463
1464         /* rtnl_unlock() will wait until all the references to devices that
1465          * are pending unregistration have been dropped.  We do it here to
1466          * ensure that any internal devices (which contain DP pointers) are
1467          * fully destroyed before freeing the datapath.
1468          */
1469         rtnl_unlock();
1470
1471         call_rcu(&dp->rcu, destroy_dp_rcu);
1472         module_put(THIS_MODULE);
1473
1474         genl_notify(reply, genl_info_net(info), info->snd_pid,
1475                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1476
1477         return 0;
1478
1479 exit_unlock:
1480         rtnl_unlock();
1481 exit:
1482         return err;
1483 }
1484
1485 static int odp_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1486 {
1487         struct sk_buff *reply;
1488         struct datapath *dp;
1489         int err;
1490
1491         err = odp_dp_cmd_validate(info->attrs);
1492         if (err)
1493                 return err;
1494
1495         dp = lookup_datapath(info->userhdr, info->attrs);
1496         if (IS_ERR(dp))
1497                 return PTR_ERR(dp);
1498
1499         change_datapath(dp, info->attrs);
1500
1501         reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_NEW);
1502         if (IS_ERR(reply)) {
1503                 err = PTR_ERR(reply);
1504                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1505                                 dp_datapath_multicast_group.id, err);
1506                 return 0;
1507         }
1508
1509         genl_notify(reply, genl_info_net(info), info->snd_pid,
1510                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1511         return 0;
1512 }
1513
1514 static int odp_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1515 {
1516         struct sk_buff *reply;
1517         struct datapath *dp;
1518         int err;
1519
1520         err = odp_dp_cmd_validate(info->attrs);
1521         if (err)
1522                 return err;
1523
1524         dp = lookup_datapath(info->userhdr, info->attrs);
1525         if (IS_ERR(dp))
1526                 return PTR_ERR(dp);
1527
1528         reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_NEW);
1529         if (IS_ERR(reply))
1530                 return PTR_ERR(reply);
1531
1532         return genlmsg_reply(reply, info);
1533 }
1534
1535 static int odp_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1536 {
1537         struct datapath *dp;
1538         int skip = cb->args[0];
1539         int i = 0;
1540
1541         list_for_each_entry (dp, &dps, list_node) {
1542                 if (i >= skip &&
1543                     odp_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
1544                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1545                                          ODP_DP_CMD_NEW) < 0)
1546                         break;
1547
1548                 i++;
1549         }
1550
1551         cb->args[0] = i;
1552
1553         return skb->len;
1554 }
1555
1556 static struct genl_ops dp_datapath_genl_ops[] = {
1557         { .cmd = ODP_DP_CMD_NEW,
1558           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1559           .policy = datapath_policy,
1560           .doit = odp_dp_cmd_new
1561         },
1562         { .cmd = ODP_DP_CMD_DEL,
1563           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1564           .policy = datapath_policy,
1565           .doit = odp_dp_cmd_del
1566         },
1567         { .cmd = ODP_DP_CMD_GET,
1568           .flags = 0,               /* OK for unprivileged users. */
1569           .policy = datapath_policy,
1570           .doit = odp_dp_cmd_get,
1571           .dumpit = odp_dp_cmd_dump
1572         },
1573         { .cmd = ODP_DP_CMD_SET,
1574           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1575           .policy = datapath_policy,
1576           .doit = odp_dp_cmd_set,
1577         },
1578 };
1579
1580 static const struct nla_policy vport_policy[ODP_VPORT_ATTR_MAX + 1] = {
1581 #ifdef HAVE_NLA_NUL_STRING
1582         [ODP_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1583         [ODP_VPORT_ATTR_STATS] = { .len = sizeof(struct rtnl_link_stats64) },
1584         [ODP_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
1585 #else
1586         [ODP_VPORT_ATTR_STATS] = { .minlen = sizeof(struct rtnl_link_stats64) },
1587         [ODP_VPORT_ATTR_ADDRESS] = { .minlen = ETH_ALEN },
1588 #endif
1589         [ODP_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1590         [ODP_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1591         [ODP_VPORT_ATTR_MTU] = { .type = NLA_U32 },
1592         [ODP_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1593 };
1594
1595 static struct genl_family dp_vport_genl_family = {
1596         .id = GENL_ID_GENERATE,
1597         .hdrsize = sizeof(struct odp_header),
1598         .name = ODP_VPORT_FAMILY,
1599         .version = 1,
1600         .maxattr = ODP_VPORT_ATTR_MAX
1601 };
1602
1603 static struct genl_multicast_group dp_vport_multicast_group = {
1604         .name = ODP_VPORT_MCGROUP
1605 };
1606
1607 /* Called with RTNL lock or RCU read lock. */
1608 static int odp_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1609                                    u32 pid, u32 seq, u32 flags, u8 cmd)
1610 {
1611         struct odp_header *odp_header;
1612         struct nlattr *nla;
1613         int ifindex, iflink;
1614         int mtu;
1615         int err;
1616
1617         odp_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
1618                                  flags, cmd);
1619         if (!odp_header)
1620                 return -EMSGSIZE;
1621
1622         odp_header->dp_ifindex = vport->dp->dp_ifindex;
1623
1624         NLA_PUT_U32(skb, ODP_VPORT_ATTR_PORT_NO, vport->port_no);
1625         NLA_PUT_U32(skb, ODP_VPORT_ATTR_TYPE, vport_get_type(vport));
1626         NLA_PUT_STRING(skb, ODP_VPORT_ATTR_NAME, vport_get_name(vport));
1627
1628         nla = nla_reserve(skb, ODP_VPORT_ATTR_STATS, sizeof(struct rtnl_link_stats64));
1629         if (!nla)
1630                 goto nla_put_failure;
1631         if (vport_get_stats(vport, nla_data(nla)))
1632                 __skb_trim(skb, skb->len - nla->nla_len);
1633
1634         NLA_PUT(skb, ODP_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));
1635
1636         mtu = vport_get_mtu(vport);
1637         if (mtu)
1638                 NLA_PUT_U32(skb, ODP_VPORT_ATTR_MTU, mtu);
1639
1640         err = vport_get_options(vport, skb);
1641         if (err == -EMSGSIZE)
1642                 goto error;
1643
1644         ifindex = vport_get_ifindex(vport);
1645         if (ifindex > 0)
1646                 NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFINDEX, ifindex);
1647
1648         iflink = vport_get_iflink(vport);
1649         if (iflink > 0)
1650                 NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFLINK, iflink);
1651
1652         return genlmsg_end(skb, odp_header);
1653
1654 nla_put_failure:
1655         err = -EMSGSIZE;
1656 error:
1657         genlmsg_cancel(skb, odp_header);
1658         return err;
1659 }
1660
1661 /* Called with RTNL lock or RCU read lock. */
1662 static struct sk_buff *odp_vport_cmd_build_info(struct vport *vport, u32 pid,
1663                                                 u32 seq, u8 cmd)
1664 {
1665         struct sk_buff *skb;
1666         int retval;
1667
1668         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1669         if (!skb)
1670                 return ERR_PTR(-ENOMEM);
1671
1672         retval = odp_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
1673         if (retval < 0) {
1674                 kfree_skb(skb);
1675                 return ERR_PTR(retval);
1676         }
1677         return skb;
1678 }
1679
1680 static int odp_vport_cmd_validate(struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
1681 {
1682         return CHECK_NUL_STRING(a[ODP_VPORT_ATTR_NAME], IFNAMSIZ - 1);
1683 }
1684
1685 /* Called with RTNL lock or RCU read lock. */
1686 static struct vport *lookup_vport(struct odp_header *odp_header,
1687                                   struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
1688 {
1689         struct datapath *dp;
1690         struct vport *vport;
1691
1692         if (a[ODP_VPORT_ATTR_NAME]) {
1693                 vport = vport_locate(nla_data(a[ODP_VPORT_ATTR_NAME]));
1694                 if (!vport)
1695                         return ERR_PTR(-ENODEV);
1696                 return vport;
1697         } else if (a[ODP_VPORT_ATTR_PORT_NO]) {
1698                 u32 port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);
1699
1700                 if (port_no >= DP_MAX_PORTS)
1701                         return ERR_PTR(-EFBIG);
1702
1703                 dp = get_dp(odp_header->dp_ifindex);
1704                 if (!dp)
1705                         return ERR_PTR(-ENODEV);
1706
1707                 vport = get_vport_protected(dp, port_no);
1708                 if (!vport)
1709                         return ERR_PTR(-ENOENT);
1710                 return vport;
1711         } else
1712                 return ERR_PTR(-EINVAL);
1713 }
1714
1715 /* Called with RTNL lock. */
1716 static int change_vport(struct vport *vport, struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
1717 {
1718         int err = 0;
1719         if (a[ODP_VPORT_ATTR_STATS])
1720                 err = vport_set_stats(vport, nla_data(a[ODP_VPORT_ATTR_STATS]));
1721         if (!err && a[ODP_VPORT_ATTR_ADDRESS])
1722                 err = vport_set_addr(vport, nla_data(a[ODP_VPORT_ATTR_ADDRESS]));
1723         if (!err && a[ODP_VPORT_ATTR_MTU])
1724                 err = vport_set_mtu(vport, nla_get_u32(a[ODP_VPORT_ATTR_MTU]));
1725         return err;
1726 }
1727
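/* Handler for ODP_VPORT_CMD_NEW: creates a vport on the given datapath at
 * the requested port number (or the first free one), applies any optional
 * attributes, and notifies dp_vport_multicast_group.  Takes RTNL. */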
1728 static int odp_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1729 {
1730         struct nlattr **a = info->attrs;
1731         struct odp_header *odp_header = info->userhdr;
1732         struct vport_parms parms;
1733         struct sk_buff *reply;
1734         struct vport *vport;
1735         struct datapath *dp;
1736         u32 port_no;
1737         int err;
1738
1739         err = -EINVAL;
1740         if (!a[ODP_VPORT_ATTR_NAME] || !a[ODP_VPORT_ATTR_TYPE])
1741                 goto exit;
1742
1743         err = odp_vport_cmd_validate(a);
1744         if (err)
1745                 goto exit;
1746
1747         rtnl_lock();
1748         dp = get_dp(odp_header->dp_ifindex);
1749         err = -ENODEV;
1750         if (!dp)
1751                 goto exit_unlock;
1752
1753         if (a[ODP_VPORT_ATTR_PORT_NO]) {
1754                 port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);
1755
1756                 err = -EFBIG;
1757                 if (port_no >= DP_MAX_PORTS)
1758                         goto exit_unlock;
1759
1760                 vport = get_vport_protected(dp, port_no);
1761                 err = -EBUSY;
1762                 if (vport)
1763                         goto exit_unlock;
1764         } else {
1765                 for (port_no = 1; ; port_no++) {
1766                         if (port_no >= DP_MAX_PORTS) {
1767                                 err = -EFBIG;
1768                                 goto exit_unlock;
1769                         }
1770                         vport = get_vport_protected(dp, port_no);
1771                         if (!vport)
1772                                 break;
1773                 }
1774         }
1775
1776         parms.name = nla_data(a[ODP_VPORT_ATTR_NAME]);
1777         parms.type = nla_get_u32(a[ODP_VPORT_ATTR_TYPE]);
1778         parms.options = a[ODP_VPORT_ATTR_OPTIONS];
1779         parms.dp = dp;
1780         parms.port_no = port_no;
1781
1782         vport = new_vport(&parms);
1783         err = PTR_ERR(vport);
1784         if (IS_ERR(vport))
1785                 goto exit_unlock;
1786
1787         set_internal_devs_mtu(dp);
1788         dp_sysfs_add_if(vport);
1789
1790         err = change_vport(vport, a);
1791         if (!err) {
1792                 reply = odp_vport_cmd_build_info(vport, info->snd_pid,
1793                                                  info->snd_seq, ODP_VPORT_CMD_NEW);
1794                 if (IS_ERR(reply))
1795                         err = PTR_ERR(reply);
1796         }
1797         if (err) {
1798                 dp_detach_port(vport);
1799                 goto exit_unlock;
1800         }
1801         genl_notify(reply, genl_info_net(info), info->snd_pid,
1802                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1803
1804
1805 exit_unlock:
1806         rtnl_unlock();
1807 exit:
1808         return err;
1809 }
1810
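/* Handler for ODP_VPORT_CMD_SET: looks up the vport by name or port number
 * and updates its options, stats, address, and/or MTU under RTNL, then
 * notifies dp_vport_multicast_group. */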
1811 static int odp_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1812 {
1813         struct nlattr **a = info->attrs;
1814         struct sk_buff *reply;
1815         struct vport *vport;
1816         int err;
1817
1818         err = odp_vport_cmd_validate(a);
1819         if (err)
1820                 goto exit;
1821
1822         rtnl_lock();
1823         vport = lookup_vport(info->userhdr, a);
1824         err = PTR_ERR(vport);
1825         if (IS_ERR(vport))
1826                 goto exit_unlock;
1827
1828         err = 0;
1829         if (a[ODP_VPORT_ATTR_OPTIONS])
1830                 err = vport_set_options(vport, a[ODP_VPORT_ATTR_OPTIONS]);
1831         if (!err)
1832                 err = change_vport(vport, a);
1833
1834         reply = odp_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1835                                          ODP_VPORT_CMD_NEW);
1836         if (IS_ERR(reply)) {
1837                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1838                                 dp_vport_multicast_group.id, PTR_ERR(reply));
1839                 rtnl_unlock();
1840                 return 0;
1841         }
1842
1843         genl_notify(reply, genl_info_net(info), info->snd_pid,
1844                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1845
1846 exit_unlock:
1847         rtnl_unlock();
1848 exit:
1849         return err;
1850 }
1851
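/* Handler for ODP_VPORT_CMD_DEL: detaches the given vport (the local port
 * ODPP_LOCAL cannot be removed this way) and notifies
 * dp_vport_multicast_group. */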
1852 static int odp_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1853 {
1854         struct nlattr **a = info->attrs;
1855         struct sk_buff *reply;
1856         struct vport *vport;
1857         int err;
1858
1859         err = odp_vport_cmd_validate(a);
1860         if (err)
1861                 goto exit;
1862
1863         rtnl_lock();
1864         vport = lookup_vport(info->userhdr, a);
1865         err = PTR_ERR(vport);
1866         if (IS_ERR(vport))
1867                 goto exit_unlock;
1868
1869         if (vport->port_no == ODPP_LOCAL) {
1870                 err = -EINVAL;
1871                 goto exit_unlock;
1872         }
1873
1874         reply = odp_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1875                                          ODP_VPORT_CMD_DEL);
1876         err = PTR_ERR(reply);
1877         if (IS_ERR(reply))
1878                 goto exit_unlock;
1879
1880         err = dp_detach_port(vport);
1881
1882         genl_notify(reply, genl_info_net(info), info->snd_pid,
1883                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1884
1885 exit_unlock:
1886         rtnl_unlock();
1887 exit:
1888         return err;
1889 }
1890
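/* Handler for ODP_VPORT_CMD_GET: replies to the caller with a single
 * ODP_VPORT_CMD_NEW message describing the requested vport.  Read-only, so
 * only the RCU read lock is taken. */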
1891 static int odp_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
1892 {
1893         struct nlattr **a = info->attrs;
1894         struct odp_header *odp_header = info->userhdr;
1895         struct sk_buff *reply;
1896         struct vport *vport;
1897         int err;
1898
1899         err = odp_vport_cmd_validate(a);
1900         if (err)
1901                 goto exit;
1902
1903         rcu_read_lock();
1904         vport = lookup_vport(odp_header, a);
1905         err = PTR_ERR(vport);
1906         if (IS_ERR(vport))
1907                 goto exit_unlock;
1908
1909         reply = odp_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1910                                          ODP_VPORT_CMD_NEW);
1911         err = PTR_ERR(reply);
1912         if (IS_ERR(reply))
1913                 goto exit_unlock;
1914
1915         rcu_read_unlock();
1916
1917         return genlmsg_reply(reply, info);
1918
1919 exit_unlock:
1920         rcu_read_unlock();
1921 exit:
1922         return err;
1923 }
1924
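/* Dump callback for ODP_VPORT_CMD_GET: iterates over the port numbers of
 * one datapath, resuming from cb->args[0] on each invocation. */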
1925 static int odp_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1926 {
1927         struct odp_header *odp_header = genlmsg_data(nlmsg_data(cb->nlh));
1928         struct datapath *dp;
1929         u32 port_no;
1930         int retval;
1931
1932         dp = get_dp(odp_header->dp_ifindex);
1933         if (!dp)
1934                 return -ENODEV;
1935
1936         rcu_read_lock();
1937         for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
1938                 struct vport *vport;
1939
1940                 vport = get_vport_protected(dp, port_no);
1941                 if (!vport)
1942                         continue;
1943
1944                 if (odp_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
1945                                             cb->nlh->nlmsg_seq, NLM_F_MULTI,
1946                                             ODP_VPORT_CMD_NEW) < 0)
1947                         break;
1948         }
1949         rcu_read_unlock();
1950
1951         cb->args[0] = port_no;
1952         retval = skb->len;
1953
1954         return retval;
1955 }
1956
1957 static struct genl_ops dp_vport_genl_ops[] = {
1958         { .cmd = ODP_VPORT_CMD_NEW,
1959           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1960           .policy = vport_policy,
1961           .doit = odp_vport_cmd_new
1962         },
1963         { .cmd = ODP_VPORT_CMD_DEL,
1964           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1965           .policy = vport_policy,
1966           .doit = odp_vport_cmd_del
1967         },
1968         { .cmd = ODP_VPORT_CMD_GET,
1969           .flags = 0,               /* OK for unprivileged users. */
1970           .policy = vport_policy,
1971           .doit = odp_vport_cmd_get,
1972           .dumpit = odp_vport_cmd_dump
1973         },
1974         { .cmd = ODP_VPORT_CMD_SET,
1975           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1976           .policy = vport_policy,
1977           .doit = odp_vport_cmd_set,
1978         },
1979 };
1980
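/* Ties together a generic netlink family, its operations, and its optional
 * multicast group so that they can be registered and torn down as a unit. */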
1981 struct genl_family_and_ops {
1982         struct genl_family *family;
1983         struct genl_ops *ops;
1984         int n_ops;
1985         struct genl_multicast_group *group;
1986 };
1987
1988 static const struct genl_family_and_ops dp_genl_families[] = {
1989         { &dp_datapath_genl_family,
1990           dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
1991           &dp_datapath_multicast_group },
1992         { &dp_vport_genl_family,
1993           dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
1994           &dp_vport_multicast_group },
1995         { &dp_flow_genl_family,
1996           dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
1997           &dp_flow_multicast_group },
1998         { &dp_packet_genl_family,
1999           dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
2000           NULL },
2001 };
2002
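/* Unregisters the first n_families entries of dp_genl_families, both for
 * module cleanup and for unwinding a partially failed dp_register_genl(). */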
2003 static void dp_unregister_genl(int n_families)
2004 {
2005         int i;
2006
2007         for (i = 0; i < n_families; i++) {
2008                 genl_unregister_family(dp_genl_families[i].family);
2009         }
2010 }
2011
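/* Registers every generic netlink family in dp_genl_families along with its
 * multicast group, undoing any partial registration on failure. */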
2012 static int dp_register_genl(void)
2013 {
2014         int n_registered;
2015         int err;
2016         int i;
2017
2018         n_registered = 0;
2019         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2020                 const struct genl_family_and_ops *f = &dp_genl_families[i];
2021
2022                 err = genl_register_family_with_ops(f->family, f->ops,
2023                                                     f->n_ops);
2024                 if (err)
2025                         goto error;
2026                 n_registered++;
2027
2028                 if (f->group) {
2029                         err = genl_register_mc_group(f->family, f->group);
2030                         if (err)
2031                                 goto error;
2032                 }
2033         }
2034
2035         err = packet_register_mc_groups();
2036         if (err)
2037                 goto error;
2038         return 0;
2039
2040 error:
2041         dp_unregister_genl(n_registered);
2042         return err;
2043 }
2044
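/* Module initialization: sets up the flow and vport subsystems, the netdev
 * notifier, and the generic netlink families, in that order, unwinding on
 * failure. */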
2045 static int __init dp_init(void)
2046 {
2047         struct sk_buff *dummy_skb;
2048         int err;
2049
2050         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
2051
2052         pr_info("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
2053
2054         err = flow_init();
2055         if (err)
2056                 goto error;
2057
2058         err = vport_init();
2059         if (err)
2060                 goto error_flow_exit;
2061
2062         err = register_netdevice_notifier(&dp_device_notifier);
2063         if (err)
2064                 goto error_vport_exit;
2065
2066         err = dp_register_genl();
2067         if (err < 0)
2068                 goto error_unreg_notifier;
2069
2070         return 0;
2071
2072 error_unreg_notifier:
2073         unregister_netdevice_notifier(&dp_device_notifier);
2074 error_vport_exit:
2075         vport_exit();
2076 error_flow_exit:
2077         flow_exit();
2078 error:
2079         return err;
2080 }
2081
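/* Module cleanup: waits for outstanding RCU callbacks, then tears down
 * everything dp_init() set up, in reverse order. */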
2082 static void dp_cleanup(void)
2083 {
2084         rcu_barrier();
2085         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2086         unregister_netdevice_notifier(&dp_device_notifier);
2087         vport_exit();
2088         flow_exit();
2089 }
2090
2091 module_init(dp_init);
2092 module_exit(dp_cleanup);
2093
2094 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2095 MODULE_LICENSE("GPL");