datapath: Convert ODP_FLOW_* commands to use AF_NETLINK socket layer.
1 /*
2  * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
3  * Distributed under the terms of the GNU GPL version 2.
4  *
5  * Significant portions of this file may be copied from parts of the Linux
6  * kernel, by Linus Torvalds and others.
7  */
8
9 /* Functions for managing the dp interface/device. */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/if_arp.h>
16 #include <linux/if_vlan.h>
17 #include <linux/in.h>
18 #include <linux/ip.h>
19 #include <linux/jhash.h>
20 #include <linux/delay.h>
21 #include <linux/time.h>
22 #include <linux/etherdevice.h>
23 #include <linux/genetlink.h>
24 #include <linux/kernel.h>
25 #include <linux/kthread.h>
26 #include <linux/mutex.h>
27 #include <linux/percpu.h>
28 #include <linux/rcupdate.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/version.h>
32 #include <linux/ethtool.h>
33 #include <linux/wait.h>
34 #include <asm/system.h>
35 #include <asm/div64.h>
36 #include <asm/bug.h>
37 #include <linux/highmem.h>
38 #include <linux/netfilter_bridge.h>
39 #include <linux/netfilter_ipv4.h>
40 #include <linux/inetdevice.h>
41 #include <linux/list.h>
42 #include <linux/rculist.h>
43 #include <linux/dmi.h>
44 #include <net/inet_ecn.h>
45 #include <net/genetlink.h>
46
47 #include "openvswitch/datapath-protocol.h"
48 #include "checksum.h"
49 #include "datapath.h"
50 #include "actions.h"
51 #include "flow.h"
52 #include "loop_counter.h"
53 #include "table.h"
54 #include "vport-internal_dev.h"
55
56 int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
57 EXPORT_SYMBOL(dp_ioctl_hook);
58
59 /**
60  * DOC: Locking:
61  *
62  * Writes to device state (add/remove datapath, port, set operations on vports,
63  * etc.) are protected by RTNL.
64  *
65  * Writes to other state (flow table modifications, set miscellaneous datapath
66  * parameters such as drop frags, etc.) are protected by genl_mutex.  The RTNL
67  * lock nests inside genl_mutex.
68  *
69  * Reads are protected by RCU.
70  *
71  * There are a few special cases (mostly stats) that have their own
72  * synchronization, but they nest under all of the above and don't interact with
73  * each other.
74  */
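
/*
 * As an illustrative sketch (not code that is called anywhere in this file;
 * dp_idx and port_no are placeholders), a read-side lookup that relies only
 * on RCU might look like:
 *
 *	rcu_read_lock();
 *	dp = get_dp(dp_idx);
 *	if (dp)
 *		vport = get_vport_protected(dp, port_no);
 *	rcu_read_unlock();
 *
 * whereas code that adds or removes a port must hold RTNL (and, arriving via
 * Generic Netlink, already holds genl_mutex).
 */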
75
76 /* Protected by genl_mutex. */
77 static struct datapath __rcu *dps[256];
78
79 static struct vport *new_vport(const struct vport_parms *);
80
81 /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
82 struct datapath *get_dp(int dp_idx)
83 {
84         if (dp_idx < 0 || dp_idx >= ARRAY_SIZE(dps))
85                 return NULL;
86
87         return rcu_dereference_check(dps[dp_idx], rcu_read_lock_held() ||
88                                          lockdep_rtnl_is_held() ||
89                                          lockdep_genl_is_held());
90 }
91 EXPORT_SYMBOL_GPL(get_dp);
92
93 /* Must be called with genl_mutex. */
94 static struct tbl *get_table_protected(struct datapath *dp)
95 {
96         return rcu_dereference_protected(dp->table, lockdep_genl_is_held());
97 }
98
99 /* Must be called with rcu_read_lock or RTNL lock. */
100 static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
101 {
102         return rcu_dereference_rtnl(dp->ports[port_no]);
103 }
104
105 /* Must be called with rcu_read_lock or RTNL lock. */
106 const char *dp_name(const struct datapath *dp)
107 {
108         return vport_get_name(rcu_dereference_rtnl(dp->ports[ODPP_LOCAL]));
109 }
110
111 static inline size_t br_nlmsg_size(void)
112 {
113         return NLMSG_ALIGN(sizeof(struct ifinfomsg))
114                + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
115                + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
116                + nla_total_size(4) /* IFLA_MASTER */
117                + nla_total_size(4) /* IFLA_MTU */
118                + nla_total_size(4) /* IFLA_LINK */
119                + nla_total_size(1); /* IFLA_OPERSTATE */
120 }
121
122 /* Caller must hold RTNL lock. */
123 static int dp_fill_ifinfo(struct sk_buff *skb,
124                           const struct vport *port,
125                           int event, unsigned int flags)
126 {
127         struct datapath *dp = port->dp;
128         int ifindex = vport_get_ifindex(port);
129         int iflink = vport_get_iflink(port);
130         struct ifinfomsg *hdr;
131         struct nlmsghdr *nlh;
132
133         if (ifindex < 0)
134                 return ifindex;
135
136         if (iflink < 0)
137                 return iflink;
138
139         nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
140         if (nlh == NULL)
141                 return -EMSGSIZE;
142
143         hdr = nlmsg_data(nlh);
144         hdr->ifi_family = AF_BRIDGE;
145         hdr->__ifi_pad = 0;
146         hdr->ifi_type = ARPHRD_ETHER;
147         hdr->ifi_index = ifindex;
148         hdr->ifi_flags = vport_get_flags(port);
149         hdr->ifi_change = 0;
150
151         NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
152         NLA_PUT_U32(skb, IFLA_MASTER,
153                 vport_get_ifindex(get_vport_protected(dp, ODPP_LOCAL)));
154         NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
155 #ifdef IFLA_OPERSTATE
156         NLA_PUT_U8(skb, IFLA_OPERSTATE,
157                    vport_is_running(port)
158                         ? vport_get_operstate(port)
159                         : IF_OPER_DOWN);
160 #endif
161
162         NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));
163
164         if (ifindex != iflink)
165                 NLA_PUT_U32(skb, IFLA_LINK, iflink);
166
167         return nlmsg_end(skb, nlh);
168
169 nla_put_failure:
170         nlmsg_cancel(skb, nlh);
171         return -EMSGSIZE;
172 }
173
174 /* Caller must hold RTNL lock. */
175 static void dp_ifinfo_notify(int event, struct vport *port)
176 {
177         struct sk_buff *skb;
178         int err = -ENOBUFS;
179
180         skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
181         if (skb == NULL)
182                 goto errout;
183
184         err = dp_fill_ifinfo(skb, port, event, 0);
185         if (err < 0) {
186                 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
187                 WARN_ON(err == -EMSGSIZE);
188                 kfree_skb(skb);
189                 goto errout;
190         }
191         rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
192         return;
193 errout:
194         if (err < 0)
195                 rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
196 }
197
198 static void release_dp(struct kobject *kobj)
199 {
200         struct datapath *dp = container_of(kobj, struct datapath, ifobj);
201         kfree(dp);
202 }
203
204 static struct kobj_type dp_ktype = {
205         .release = release_dp
206 };
207
208 static void destroy_dp_rcu(struct rcu_head *rcu)
209 {
210         struct datapath *dp = container_of(rcu, struct datapath, rcu);
211
212         tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
213         free_percpu(dp->stats_percpu);
214         kobject_put(&dp->ifobj);
215 }
216
217 /* Called with RTNL lock and genl_lock. */
218 static struct vport *new_vport(const struct vport_parms *parms)
219 {
220         struct vport *vport;
221
222         vport = vport_add(parms);
223         if (!IS_ERR(vport)) {
224                 struct datapath *dp = parms->dp;
225
226                 rcu_assign_pointer(dp->ports[parms->port_no], vport);
227                 list_add(&vport->node, &dp->port_list);
228
229                 dp_ifinfo_notify(RTM_NEWLINK, vport);
230         }
231
232         return vport;
233 }
234
235 /* Called with RTNL lock. */
236 int dp_detach_port(struct vport *p)
237 {
238         ASSERT_RTNL();
239
240         if (p->port_no != ODPP_LOCAL)
241                 dp_sysfs_del_if(p);
242         dp_ifinfo_notify(RTM_DELLINK, p);
243
244         /* First drop references to device. */
245         list_del(&p->node);
246         rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
247
248         /* Then destroy it. */
249         return vport_del(p);
250 }
251
252 /* Must be called with rcu_read_lock. */
253 void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
254 {
255         struct datapath *dp = p->dp;
256         struct dp_stats_percpu *stats;
257         int stats_counter_off;
258         struct sw_flow_actions *acts;
259         struct loop_counter *loop;
260         int error;
261
262         OVS_CB(skb)->vport = p;
263
264         if (!OVS_CB(skb)->flow) {
265                 struct sw_flow_key key;
266                 struct tbl_node *flow_node;
267                 bool is_frag;
268
269                 /* Extract flow from 'skb' into 'key'. */
270                 error = flow_extract(skb, p->port_no, &key, &is_frag);
271                 if (unlikely(error)) {
272                         kfree_skb(skb);
273                         return;
274                 }
275
276                 if (is_frag && dp->drop_frags) {
277                         kfree_skb(skb);
278                         stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
279                         goto out;
280                 }
281
282                 /* Look up flow. */
283                 flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
284                                         flow_hash(&key), flow_cmp);
285                 if (unlikely(!flow_node)) {
286                         struct dp_upcall_info upcall;
287
288                         upcall.cmd = ODP_PACKET_CMD_MISS;
289                         upcall.key = &key;
290                         upcall.userdata = 0;
291                         upcall.sample_pool = 0;
292                         upcall.actions = NULL;
293                         upcall.actions_len = 0;
294                         dp_upcall(dp, skb, &upcall);
295                         stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
296                         goto out;
297                 }
298
299                 OVS_CB(skb)->flow = flow_cast(flow_node);
300         }
301
302         stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
303         flow_used(OVS_CB(skb)->flow, skb);
304
305         acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
306
307         /* Check whether we've looped too much. */
308         loop = loop_get_counter();
309         if (unlikely(++loop->count > MAX_LOOPS))
310                 loop->looping = true;
311         if (unlikely(loop->looping)) {
312                 loop_suppress(dp, acts);
313                 kfree_skb(skb);
314                 goto out_loop;
315         }
316
317         /* Execute actions. */
318         execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
319                         acts->actions_len);
320
321         /* Check whether sub-actions looped too much. */
322         if (unlikely(loop->looping))
323                 loop_suppress(dp, acts);
324
325 out_loop:
326         /* Decrement loop counter. */
327         if (!--loop->count)
328                 loop->looping = false;
329         loop_put_counter();
330
331 out:
332         /* Update datapath statistics. */
333         local_bh_disable();
334         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
335
336         write_seqcount_begin(&stats->seqlock);
337         (*(u64 *)((u8 *)stats + stats_counter_off))++;
338         write_seqcount_end(&stats->seqlock);
339
340         local_bh_enable();
341 }
342
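/* Copies 'skb' into the flat buffer 'to', computing and filling in the
 * checksum that the hardware would otherwise have completed for a
 * CHECKSUM_PARTIAL skb, so that userspace receives a fully checksummed
 * packet. */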
343 static void copy_and_csum_skb(struct sk_buff *skb, void *to)
344 {
345         u16 csum_start, csum_offset;
346         __wsum csum;
347
348         get_skb_csum_pointers(skb, &csum_start, &csum_offset);
349         csum_start -= skb_headroom(skb);
350         BUG_ON(csum_start >= skb_headlen(skb));
351
352         skb_copy_bits(skb, 0, to, csum_start);
353
354         csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
355                                       skb->len - csum_start, 0);
356         *(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
357 }
358
359 static struct genl_family dp_packet_genl_family;
360 #define PACKET_N_MC_GROUPS 16
361
362 static int packet_mc_group(struct datapath *dp, u8 cmd)
363 {
364         BUILD_BUG_ON_NOT_POWER_OF_2(PACKET_N_MC_GROUPS);
365         return jhash_2words(dp->dp_idx, cmd, 0) & (PACKET_N_MC_GROUPS - 1);
366 }
367
368 /* Send each packet in the 'skb' list to userspace for 'dp' as directed by
369  * 'upcall_info'.  There will be only one packet unless we broke up a GSO
370  * packet.
371  */
372 static int queue_control_packets(struct datapath *dp, struct sk_buff *skb,
373                                  const struct dp_upcall_info *upcall_info)
374 {
375         u32 group = packet_mc_group(dp, upcall_info->cmd);
376         struct sk_buff *nskb;
377         int port_no;
378         int err;
379
380         if (OVS_CB(skb)->vport)
381                 port_no = OVS_CB(skb)->vport->port_no;
382         else
383                 port_no = ODPP_LOCAL;
384
385         do {
386                 struct odp_header *upcall;
387                 struct sk_buff *user_skb; /* to be queued to userspace */
388                 struct nlattr *nla;
389                 unsigned int len;
390
391                 nskb = skb->next;
392                 skb->next = NULL;
393
394                 len = sizeof(struct odp_header);
395                 len += nla_total_size(4); /* ODP_PACKET_ATTR_TYPE. */
396                 len += nla_total_size(skb->len);
397                 len += nla_total_size(FLOW_BUFSIZE);
398                 if (upcall_info->userdata)
399                         len += nla_total_size(8);
400                 if (upcall_info->sample_pool)
401                         len += nla_total_size(4);
402                 if (upcall_info->actions_len)
403                         len += nla_total_size(upcall_info->actions_len);
404
405                 user_skb = genlmsg_new(len, GFP_ATOMIC);
406                 if (!user_skb) {
                            err = -ENOBUFS;
407                         netlink_set_err(INIT_NET_GENL_SOCK, 0, group, -ENOBUFS);
408                         goto err_kfree_skbs;
409                 }
410
411                 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd);
412                 upcall->dp_idx = dp->dp_idx;
413
414                 nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_KEY);
415                 flow_to_nlattrs(upcall_info->key, user_skb);
416                 nla_nest_end(user_skb, nla);
417
418                 if (upcall_info->userdata)
419                         nla_put_u64(user_skb, ODP_PACKET_ATTR_USERDATA, upcall_info->userdata);
420                 if (upcall_info->sample_pool)
421                         nla_put_u32(user_skb, ODP_PACKET_ATTR_SAMPLE_POOL, upcall_info->sample_pool);
422                 if (upcall_info->actions_len) {
423                         const struct nlattr *actions = upcall_info->actions;
424                         u32 actions_len = upcall_info->actions_len;
425
426                         nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_ACTIONS);
427                         memcpy(__skb_put(user_skb, actions_len), actions, actions_len);
428                         nla_nest_end(user_skb, nla);
429                 }
430
431                 nla = __nla_reserve(user_skb, ODP_PACKET_ATTR_PACKET, skb->len);
432                 if (skb->ip_summed == CHECKSUM_PARTIAL)
433                         copy_and_csum_skb(skb, nla_data(nla));
434                 else
435                         skb_copy_bits(skb, 0, nla_data(nla), skb->len);
436
437                 err = genlmsg_multicast(user_skb, 0, group, GFP_ATOMIC);
438                 if (err)
439                         goto err_kfree_skbs;
440
441                 kfree_skb(skb);
442                 skb = nskb;
443         } while (skb);
444         return 0;
445
446 err_kfree_skbs:
447         kfree_skb(skb);
448         while ((skb = nskb) != NULL) {
449                 nskb = skb->next;
450                 kfree_skb(skb);
451         }
452         return err;
453 }
454
455 /* Generic Netlink multicast groups for upcalls.
456  *
457  * We really want three unique multicast groups per datapath, but we can't even
458  * get one, because genl_register_mc_group() takes genl_lock, which is also
459  * held during Generic Netlink message processing, so trying to acquire
460  * multicast groups during ODP_DP_NEW processing deadlocks.  Instead, we
461  * preallocate a few groups and hash each datapath and command onto one of them.  Collision
462  * isn't fatal--multicast listeners should check that the family is the one
463  * that they want and discard others--but it wastes time and memory to receive
464  * unwanted messages.
465  */
466 static struct genl_multicast_group packet_mc_groups[PACKET_N_MC_GROUPS];
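
/*
 * Illustrative sketch of how the groups are used (the local variables here
 * are placeholders; the packet_mc_group() calls and command numbers are the
 * ones used in this file): a given datapath's three upcall types map onto
 * the preallocated groups as
 *
 *	miss   = packet_mc_group(dp, ODP_PACKET_CMD_MISS);
 *	action = packet_mc_group(dp, ODP_PACKET_CMD_ACTION);
 *	sample = packet_mc_group(dp, ODP_PACKET_CMD_SAMPLE);
 *
 * Userspace learns these group numbers from the ODP_DP_ATTR_MCGROUPS nest
 * built by odp_dp_cmd_fill_info() rather than recomputing the hash itself.
 */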
467
468 static struct genl_family dp_packet_genl_family = {
469         .id = GENL_ID_GENERATE,
470         .hdrsize = sizeof(struct odp_header),
471         .name = ODP_PACKET_FAMILY,
472         .version = 1,
473         .maxattr = ODP_PACKET_ATTR_MAX
474 };
475
476 static int packet_register_mc_groups(void)
477 {
478         int i;
479
480         for (i = 0; i < PACKET_N_MC_GROUPS; i++) {
481                 struct genl_multicast_group *group = &packet_mc_groups[i];
482                 int error;
483
484                 sprintf(group->name, "packet%d", i);
485                 error = genl_register_mc_group(&dp_packet_genl_family, group);
486                 if (error)
487                         return error;
488         }
489         return 0;
490 }
491
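/* Sends 'skb' to userspace on the multicast group that packet_mc_group()
 * selects for 'dp' and 'upcall_info->cmd'.  GSO packets are segmented first
 * so that each piece fits in a normal-sized message.  Consumes 'skb' on both
 * success and failure. */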
492 int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
493 {
494         struct dp_stats_percpu *stats;
495         int err;
496
497         WARN_ON_ONCE(skb_shared(skb));
498
499         forward_ip_summed(skb);
500
501         err = vswitch_skb_checksum_setup(skb);
502         if (err)
503                 goto err_kfree_skb;
504
505         /* Break apart GSO packets into their component pieces.  Otherwise
506          * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
507         if (skb_is_gso(skb)) {
508                 struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
509
510                 kfree_skb(skb);
511                 skb = nskb;
512                 if (IS_ERR(skb)) {
513                         err = PTR_ERR(skb);
514                         goto err;
515                 }
516         }
517
518         return queue_control_packets(dp, skb, upcall_info);
519
520 err_kfree_skb:
521         kfree_skb(skb);
522 err:
523         local_bh_disable();
524         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
525
526         write_seqcount_begin(&stats->seqlock);
527         stats->n_lost++;
528         write_seqcount_end(&stats->seqlock);
529
530         local_bh_enable();
531
532         return err;
533 }
534
535 /* Called with genl_mutex. */
536 static int flush_flows(int dp_idx)
537 {
538         struct tbl *old_table;
539         struct tbl *new_table;
540         struct datapath *dp;
541
542         dp = get_dp(dp_idx);
543         if (!dp)
544                 return -ENODEV;
545
546         old_table = get_table_protected(dp);
547         new_table = tbl_create(TBL_MIN_BUCKETS);
548         if (!new_table)
549                 return -ENOMEM;
550
551         rcu_assign_pointer(dp->table, new_table);
552
553         tbl_deferred_destroy(old_table, flow_free_tbl);
554
555         return 0;
556 }
557
558 static int validate_actions(const struct nlattr *attr)
559 {
560         const struct nlattr *a;
561         int rem;
562
563         nla_for_each_nested(a, attr, rem) {
564                 static const u32 action_lens[ODPAT_MAX + 1] = {
565                         [ODPAT_OUTPUT] = 4,
566                         [ODPAT_CONTROLLER] = 8,
567                         [ODPAT_SET_DL_TCI] = 2,
568                         [ODPAT_STRIP_VLAN] = 0,
569                         [ODPAT_SET_DL_SRC] = ETH_ALEN,
570                         [ODPAT_SET_DL_DST] = ETH_ALEN,
571                         [ODPAT_SET_NW_SRC] = 4,
572                         [ODPAT_SET_NW_DST] = 4,
573                         [ODPAT_SET_NW_TOS] = 1,
574                         [ODPAT_SET_TP_SRC] = 2,
575                         [ODPAT_SET_TP_DST] = 2,
576                         [ODPAT_SET_TUNNEL] = 8,
577                         [ODPAT_SET_PRIORITY] = 4,
578                         [ODPAT_POP_PRIORITY] = 0,
579                         [ODPAT_DROP_SPOOFED_ARP] = 0,
580                 };
581                 int type = nla_type(a);
582
583                 if (type > ODPAT_MAX || nla_len(a) != action_lens[type])
584                         return -EINVAL;
585
586                 switch (type) {
587                 case ODPAT_UNSPEC:
588                         return -EINVAL;
589
590                 case ODPAT_CONTROLLER:
591                 case ODPAT_STRIP_VLAN:
592                 case ODPAT_SET_DL_SRC:
593                 case ODPAT_SET_DL_DST:
594                 case ODPAT_SET_NW_SRC:
595                 case ODPAT_SET_NW_DST:
596                 case ODPAT_SET_TP_SRC:
597                 case ODPAT_SET_TP_DST:
598                 case ODPAT_SET_TUNNEL:
599                 case ODPAT_SET_PRIORITY:
600                 case ODPAT_POP_PRIORITY:
601                 case ODPAT_DROP_SPOOFED_ARP:
602                         /* No validation needed. */
603                         break;
604
605                 case ODPAT_OUTPUT:
606                         if (nla_get_u32(a) >= DP_MAX_PORTS)
607                                 return -EINVAL;
608                         break;
609
610                 case ODPAT_SET_DL_TCI:
611                         if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
612                                 return -EINVAL;
613                         break;
614
615                 case ODPAT_SET_NW_TOS:
616                         if (nla_get_u8(a) & INET_ECN_MASK)
617                                 return -EINVAL;
618                         break;
619
620                 default:
621                         return -EOPNOTSUPP;
622                 }
623         }
624
625         if (rem > 0)
626                 return -EINVAL;
627
628         return 0;
629 }
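
/* As an illustrative example (this encoding is not constructed anywhere in
 * this file), an actions list that strips the VLAN tag and then outputs to
 * port 3 would be carried as a nested attribute containing
 *
 *	ODPAT_STRIP_VLAN   (empty payload)
 *	ODPAT_OUTPUT       (u32: 3)
 *
 * which satisfies the length table and the range checks above. */
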
630 static void clear_stats(struct sw_flow *flow)
631 {
632         flow->used = 0;
633         flow->tcp_flags = 0;
634         flow->packet_count = 0;
635         flow->byte_count = 0;
636 }
637
638 /* Called with genl_mutex. */
639 static int expand_table(struct datapath *dp)
640 {
641         struct tbl *old_table = get_table_protected(dp);
642         struct tbl *new_table;
643
644         new_table = tbl_expand(old_table);
645         if (IS_ERR(new_table))
646                 return PTR_ERR(new_table);
647
648         rcu_assign_pointer(dp->table, new_table);
649         tbl_deferred_destroy(old_table, NULL);
650
651         return 0;
652 }
653
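/* Handler for ODP_PACKET_CMD_EXECUTE: takes the Ethernet frame carried in
 * ODP_PACKET_ATTR_PACKET and executes the actions in ODP_PACKET_ATTR_ACTIONS
 * on it in the datapath identified by the header's dp_idx. */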
654 static int odp_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
655 {
656         struct odp_header *odp_header = info->userhdr;
657         struct nlattr **a = info->attrs;
658         struct sk_buff *packet;
659         struct sw_flow_key key;
660         struct datapath *dp;
661         struct ethhdr *eth;
662         bool is_frag;
663         int err;
664
665         err = -EINVAL;
666         if (!a[ODP_PACKET_ATTR_PACKET] || !a[ODP_PACKET_ATTR_ACTIONS] ||
667             nla_len(a[ODP_PACKET_ATTR_PACKET]) < ETH_HLEN)
668                 goto exit;
669
670         err = validate_actions(a[ODP_PACKET_ATTR_ACTIONS]);
671         if (err)
672                 goto exit;
673
674         packet = skb_clone(skb, GFP_KERNEL);
675         err = -ENOMEM;
676         if (!packet)
677                 goto exit;
678         packet->data = nla_data(a[ODP_PACKET_ATTR_PACKET]);
679         packet->len = nla_len(a[ODP_PACKET_ATTR_PACKET]);
680
681         skb_reset_mac_header(packet);
682         eth = eth_hdr(packet);
683
684         /* Normally, setting the skb 'protocol' field would be handled by a
685          * call to eth_type_trans(), but it assumes there's a sending
686          * device, which we may not have. */
687         if (ntohs(eth->h_proto) >= 1536)
688                 packet->protocol = eth->h_proto;
689         else
690                 packet->protocol = htons(ETH_P_802_2);
691
692         err = flow_extract(packet, -1, &key, &is_frag);
693         if (err)
694                 goto exit;
695
696         rcu_read_lock();
697         dp = get_dp(odp_header->dp_idx);
698         err = -ENODEV;
699         if (dp)
700                 err = execute_actions(dp, packet, &key,
701                                       nla_data(a[ODP_PACKET_ATTR_ACTIONS]),
702                                       nla_len(a[ODP_PACKET_ATTR_ACTIONS]));
703         rcu_read_unlock();
704
705 exit:
706         return err;
707 }
708
709 static const struct nla_policy packet_policy[ODP_PACKET_ATTR_MAX + 1] = {
710         [ODP_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
711         [ODP_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
712 };
713
714 static struct genl_ops dp_packet_genl_ops[] = {
715         { .cmd = ODP_PACKET_CMD_EXECUTE,
716           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
717           .policy = packet_policy,
718           .doit = odp_packet_cmd_execute
719         }
720 };
721
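/* Sums the per-CPU counters into 'stats', rereading each CPU's snapshot via
 * its seqcount so that a consistent copy is obtained without blocking the
 * datapath fast path. */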
722 static void get_dp_stats(struct datapath *dp, struct odp_stats *stats)
723 {
724         int i;
725
726         stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0;
727         for_each_possible_cpu(i) {
728                 const struct dp_stats_percpu *percpu_stats;
729                 struct dp_stats_percpu local_stats;
730                 unsigned seqcount;
731
732                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
733
734                 do {
735                         seqcount = read_seqcount_begin(&percpu_stats->seqlock);
736                         local_stats = *percpu_stats;
737                 } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
738
739                 stats->n_frags += local_stats.n_frags;
740                 stats->n_hit += local_stats.n_hit;
741                 stats->n_missed += local_stats.n_missed;
742                 stats->n_lost += local_stats.n_lost;
743         }
744 }
745
746 /* Returns the MTU of the dp pseudo-device: the minimum MTU of the datapath's
747  * non-internal ports, or ETH_DATA_LEN if it has none.  Called with RTNL lock.
748  */
749 int dp_min_mtu(const struct datapath *dp)
750 {
751         struct vport *p;
752         int mtu = 0;
753
754         ASSERT_RTNL();
755
756         list_for_each_entry (p, &dp->port_list, node) {
757                 int dev_mtu;
758
759                 /* Skip any internal ports, since that's what we're trying to
760                  * set. */
761                 if (is_internal_vport(p))
762                         continue;
763
764                 dev_mtu = vport_get_mtu(p);
765                 if (!mtu || dev_mtu < mtu)
766                         mtu = dev_mtu;
767         }
768
769         return mtu ? mtu : ETH_DATA_LEN;
770 }
771
772 /* Sets the MTU of the datapath's internal devices to the minimum MTU of its
773  * non-internal ports.  Called with RTNL lock.
774  */
775 void set_internal_devs_mtu(const struct datapath *dp)
776 {
777         struct vport *p;
778         int mtu;
779
780         ASSERT_RTNL();
781
782         mtu = dp_min_mtu(dp);
783
784         list_for_each_entry (p, &dp->port_list, node) {
785                 if (is_internal_vport(p))
786                         vport_set_mtu(p, mtu);
787         }
788 }
789
790 static const struct nla_policy flow_policy[ODP_FLOW_ATTR_MAX + 1] = {
791         [ODP_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
792         [ODP_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
793         [ODP_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
794 };
795
796 static struct genl_family dp_flow_genl_family = {
797         .id = GENL_ID_GENERATE,
798         .hdrsize = sizeof(struct odp_header),
799         .name = ODP_FLOW_FAMILY,
800         .version = 1,
801         .maxattr = ODP_FLOW_ATTR_MAX
802 };
803
804 static struct genl_multicast_group dp_flow_multicast_group = {
805         .name = ODP_FLOW_MCGROUP
806 };
807
808 /* Called with genl_lock. */
809 static int odp_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
810                                   struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd)
811 {
812         const int skb_orig_len = skb->len;
813         const struct sw_flow_actions *sf_acts;
814         struct odp_flow_stats stats;
815         struct odp_header *odp_header;
816         struct nlattr *nla;
817         unsigned long used;
818         u8 tcp_flags;
819         int nla_len;
820         int err;
821
822         sf_acts = rcu_dereference_protected(flow->sf_acts,
823                                             lockdep_genl_is_held());
824
825         odp_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
826         if (!odp_header)
827                 return -EMSGSIZE;
828
829         odp_header->dp_idx = dp->dp_idx;
830
831         nla = nla_nest_start(skb, ODP_FLOW_ATTR_KEY);
832         if (!nla)
833                 goto nla_put_failure;
834         err = flow_to_nlattrs(&flow->key, skb);
835         if (err)
836                 goto error;
837         nla_nest_end(skb, nla);
838
839         spin_lock_bh(&flow->lock);
840         used = flow->used;
841         stats.n_packets = flow->packet_count;
842         stats.n_bytes = flow->byte_count;
843         tcp_flags = flow->tcp_flags;
844         spin_unlock_bh(&flow->lock);
845
846         if (used)
847                 NLA_PUT_MSECS(skb, ODP_FLOW_ATTR_USED, used);
848
849         if (stats.n_packets)
850                 NLA_PUT(skb, ODP_FLOW_ATTR_STATS, sizeof(struct odp_flow_stats), &stats);
851
852         if (tcp_flags)
853                 NLA_PUT_U8(skb, ODP_FLOW_ATTR_TCP_FLAGS, tcp_flags);
854
855         /* If ODP_FLOW_ATTR_ACTIONS doesn't fit, and this is the first flow to
856          * be dumped into 'skb', then expand the skb.  This is unusual for
857          * Netlink but individual action lists can be longer than a page and
858          * thus entirely undumpable if we didn't do this. */
859         nla_len = nla_total_size(sf_acts->actions_len);
860         if (nla_len > skb_tailroom(skb) && !skb_orig_len) {
861                 int hdr_off = (unsigned char *)odp_header - skb->data;
862
863                 err = pskb_expand_head(skb, 0, nla_len - skb_tailroom(skb), GFP_KERNEL);
864                 if (err)
865                         goto error;
866
867                 odp_header = (struct odp_header *)(skb->data + hdr_off);
868         }
869         nla = nla_nest_start(skb, ODP_FLOW_ATTR_ACTIONS);
870         memcpy(__skb_put(skb, sf_acts->actions_len), sf_acts->actions, sf_acts->actions_len);
871         nla_nest_end(skb, nla);
872
873         return genlmsg_end(skb, odp_header);
874
875 nla_put_failure:
876         err = -EMSGSIZE;
877 error:
878         genlmsg_cancel(skb, odp_header);
879         return err;
880 }
881
882 static struct sk_buff *odp_flow_cmd_alloc_info(struct sw_flow *flow)
883 {
884         const struct sw_flow_actions *sf_acts;
885         int len;
886
887         sf_acts = rcu_dereference_protected(flow->sf_acts,
888                                             lockdep_genl_is_held());
889
890         len = nla_total_size(FLOW_BUFSIZE); /* ODP_FLOW_ATTR_KEY */
891         len += nla_total_size(sf_acts->actions_len); /* ODP_FLOW_ATTR_ACTIONS */
892         len += nla_total_size(sizeof(struct odp_flow_stats)); /* ODP_FLOW_ATTR_STATS */
893         len += nla_total_size(1); /* ODP_FLOW_ATTR_TCP_FLAGS */
894         len += nla_total_size(8); /* ODP_FLOW_ATTR_USED */
895         return genlmsg_new(NLMSG_ALIGN(sizeof(struct odp_header)) + len, GFP_KERNEL);
896 }
897
898 static struct sk_buff *odp_flow_cmd_build_info(struct sw_flow *flow, struct datapath *dp,
899                                                u32 pid, u32 seq, u8 cmd)
900 {
901         struct sk_buff *skb;
902         int retval;
903
904         skb = odp_flow_cmd_alloc_info(flow);
905         if (!skb)
906                 return ERR_PTR(-ENOMEM);
907
908         retval = odp_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
909         BUG_ON(retval < 0);
910         return skb;
911 }
912
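/* Handler for ODP_FLOW_CMD_NEW and ODP_FLOW_CMD_SET: installs a new flow if
 * none matches the given key (CMD_NEW only), otherwise updates the existing
 * flow's actions and optionally clears its stats, then notifies the flow
 * multicast group with the resulting flow. */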
913 static int odp_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
914 {
915         struct nlattr **a = info->attrs;
916         struct odp_header *odp_header = info->userhdr;
917         struct tbl_node *flow_node;
918         struct sw_flow_key key;
919         struct sw_flow *flow;
920         struct sk_buff *reply;
921         struct datapath *dp;
922         struct tbl *table;
923         u32 hash;
924         int error;
925
926         /* Extract key. */
927         error = -EINVAL;
928         if (!a[ODP_FLOW_ATTR_KEY])
929                 goto error;
930         error = flow_from_nlattrs(&key, a[ODP_FLOW_ATTR_KEY]);
931         if (error)
932                 goto error;
933
934         /* Validate actions. */
935         if (a[ODP_FLOW_ATTR_ACTIONS]) {
936                 error = validate_actions(a[ODP_FLOW_ATTR_ACTIONS]);
937                 if (error)
938                         goto error;
939         } else if (info->genlhdr->cmd == ODP_FLOW_CMD_NEW) {
940                 error = -EINVAL;
941                 goto error;
942         }
943
944         dp = get_dp(odp_header->dp_idx);
945         error = -ENODEV;
946         if (!dp)
947                 goto error;
948
949         hash = flow_hash(&key);
950         table = get_table_protected(dp);
951         flow_node = tbl_lookup(table, &key, hash, flow_cmp);
952         if (!flow_node) {
953                 struct sw_flow_actions *acts;
954
955                 /* Bail out if we're not allowed to create a new flow. */
956                 error = -ENOENT;
957                 if (info->genlhdr->cmd == ODP_FLOW_CMD_SET)
958                         goto error;
959
960                 /* Expand table, if necessary, to make room. */
961                 if (tbl_count(table) >= tbl_n_buckets(table)) {
962                         error = expand_table(dp);
963                         if (error)
964                                 goto error;
965                         table = get_table_protected(dp);
966                 }
967
968                 /* Allocate flow. */
969                 flow = flow_alloc();
970                 if (IS_ERR(flow)) {
971                         error = PTR_ERR(flow);
972                         goto error;
973                 }
974                 flow->key = key;
975                 clear_stats(flow);
976
977                 /* Obtain actions. */
978                 acts = flow_actions_alloc(a[ODP_FLOW_ATTR_ACTIONS]);
979                 error = PTR_ERR(acts);
980                 if (IS_ERR(acts))
981                         goto error_free_flow;
982                 rcu_assign_pointer(flow->sf_acts, acts);
983
984                 /* Put flow in bucket. */
985                 error = tbl_insert(table, &flow->tbl_node, hash);
986                 if (error)
987                         goto error_free_flow;
988
989                 reply = odp_flow_cmd_build_info(flow, dp, info->snd_pid,
990                                                 info->snd_seq, ODP_FLOW_CMD_NEW);
991         } else {
992                 /* We found a matching flow. */
993                 struct sw_flow_actions *old_acts;
994
995                 /* Bail out if we're not allowed to modify an existing flow.
996                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
997                  * because Generic Netlink treats the latter as a dump
998                  * request.  We also accept NLM_F_EXCL in case that bug ever
999                  * gets fixed.
1000                  */
1001                 error = -EEXIST;
1002                 if (info->genlhdr->cmd == ODP_FLOW_CMD_NEW &&
1003                     info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
1004                         goto error;
1005
1006                 /* Update actions. */
1007                 flow = flow_cast(flow_node);
1008                 old_acts = rcu_dereference_protected(flow->sf_acts,
1009                                                      lockdep_genl_is_held());
1010                 if (a[ODP_FLOW_ATTR_ACTIONS] &&
1011                     (old_acts->actions_len != nla_len(a[ODP_FLOW_ATTR_ACTIONS]) ||
1012                      memcmp(old_acts->actions, nla_data(a[ODP_FLOW_ATTR_ACTIONS]),
1013                             old_acts->actions_len))) {
1014                         struct sw_flow_actions *new_acts;
1015
1016                         new_acts = flow_actions_alloc(a[ODP_FLOW_ATTR_ACTIONS]);
1017                         error = PTR_ERR(new_acts);
1018                         if (IS_ERR(new_acts))
1019                                 goto error;
1020
1021                         rcu_assign_pointer(flow->sf_acts, new_acts);
1022                         flow_deferred_free_acts(old_acts);
1023                 }
1024
1025                 reply = odp_flow_cmd_build_info(flow, dp, info->snd_pid,
1026                                                 info->snd_seq, ODP_FLOW_CMD_NEW);
1027
1028                 /* Clear stats. */
1029                 if (a[ODP_FLOW_ATTR_CLEAR]) {
1030                         spin_lock_bh(&flow->lock);
1031                         clear_stats(flow);
1032                         spin_unlock_bh(&flow->lock);
1033                 }
1034         }
1035
1036         if (!IS_ERR(reply))
1037                 genl_notify(reply, genl_info_net(info), info->snd_pid,
1038                             dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1039         else
1040                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1041                                 dp_flow_multicast_group.id, PTR_ERR(reply));
1042         return 0;
1043
1044 error_free_flow:
1045         flow_put(flow);
1046 error:
1047         return error;
1048 }
1049
1050 static int odp_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1051 {
1052         struct nlattr **a = info->attrs;
1053         struct odp_header *odp_header = info->userhdr;
1054         struct sw_flow_key key;
1055         struct tbl_node *flow_node;
1056         struct sk_buff *reply;
1057         struct sw_flow *flow;
1058         struct datapath *dp;
1059         struct tbl *table;
1060         int err;
1061
1062         if (!a[ODP_FLOW_ATTR_KEY])
1063                 return -EINVAL;
1064         err = flow_from_nlattrs(&key, a[ODP_FLOW_ATTR_KEY]);
1065         if (err)
1066                 return err;
1067
1068         dp = get_dp(odp_header->dp_idx);
1069         if (!dp)
1070                 return -ENODEV;
1071
1072         table = get_table_protected(dp);
1073         flow_node = tbl_lookup(table, &key, flow_hash(&key), flow_cmp);
1074         if (!flow_node)
1075                 return -ENOENT;
1076
1077         flow = flow_cast(flow_node);
1078         reply = odp_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, ODP_FLOW_CMD_NEW);
1079         if (IS_ERR(reply))
1080                 return PTR_ERR(reply);
1081
1082         return genlmsg_reply(reply, info);
1083 }
1084
1085 static int odp_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1086 {
1087         struct nlattr **a = info->attrs;
1088         struct odp_header *odp_header = info->userhdr;
1089         struct sw_flow_key key;
1090         struct tbl_node *flow_node;
1091         struct sk_buff *reply;
1092         struct sw_flow *flow;
1093         struct datapath *dp;
1094         struct tbl *table;
1095         int err;
1096
1097         if (!a[ODP_FLOW_ATTR_KEY])
1098                 return flush_flows(odp_header->dp_idx);
1099         err = flow_from_nlattrs(&key, a[ODP_FLOW_ATTR_KEY]);
1100         if (err)
1101                 return err;
1102
1103         dp = get_dp(odp_header->dp_idx);
1104         if (!dp)
1105                 return -ENODEV;
1106
1107         table = get_table_protected(dp);
1108         flow_node = tbl_lookup(table, &key, flow_hash(&key), flow_cmp);
1109         if (!flow_node)
1110                 return -ENOENT;
1111         flow = flow_cast(flow_node);
1112
1113         reply = odp_flow_cmd_alloc_info(flow);
1114         if (!reply)
1115                 return -ENOMEM;
1116
1117         err = tbl_remove(table, flow_node);
1118         if (err) {
1119                 kfree_skb(reply);
1120                 return err;
1121         }
1122
1123         err = odp_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
1124                                      info->snd_seq, 0, ODP_FLOW_CMD_DEL);
1125         BUG_ON(err < 0);
1126
1127         flow_deferred_free(flow);
1128
1129         genl_notify(reply, genl_info_net(info), info->snd_pid,
1130                     dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1131         return 0;
1132 }
1133
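/* Dump callback for ODP_FLOW_CMD_GET: walks the flow table, resuming from the
 * (bucket, object) cursor saved in cb->args[0] and cb->args[1] across
 * successive dump calls. */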
1134 static int odp_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1135 {
1136         struct odp_header *odp_header = genlmsg_data(nlmsg_data(cb->nlh));
1137         struct datapath *dp;
1138
1139         dp = get_dp(odp_header->dp_idx);
1140         if (!dp)
1141                 return -ENODEV;
1142
1143         for (;;) {
1144                 struct tbl_node *flow_node;
1145                 struct sw_flow *flow;
1146                 u32 bucket, obj;
1147
1148                 bucket = cb->args[0];
1149                 obj = cb->args[1];
1150                 flow_node = tbl_next(get_table_protected(dp), &bucket, &obj);
1151                 if (!flow_node)
1152                         break;
1153
1154                 flow = flow_cast(flow_node);
1155                 if (odp_flow_cmd_fill_info(flow, dp, skb, NETLINK_CB(cb->skb).pid,
1156                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1157                                            ODP_FLOW_CMD_NEW) < 0)
1158                         break;
1159
1160                 cb->args[0] = bucket;
1161                 cb->args[1] = obj;
1162         }
1163         return skb->len;
1164 }
1165
1166 static struct genl_ops dp_flow_genl_ops[] = {
1167         { .cmd = ODP_FLOW_CMD_NEW,
1168           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1169           .policy = flow_policy,
1170           .doit = odp_flow_cmd_new_or_set
1171         },
1172         { .cmd = ODP_FLOW_CMD_DEL,
1173           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1174           .policy = flow_policy,
1175           .doit = odp_flow_cmd_del
1176         },
1177         { .cmd = ODP_FLOW_CMD_GET,
1178           .flags = 0,               /* OK for unprivileged users. */
1179           .policy = flow_policy,
1180           .doit = odp_flow_cmd_get,
1181           .dumpit = odp_flow_cmd_dump
1182         },
1183         { .cmd = ODP_FLOW_CMD_SET,
1184           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1185           .policy = flow_policy,
1186           .doit = odp_flow_cmd_new_or_set,
1187         },
1188 };
1189
1190 static const struct nla_policy datapath_policy[ODP_DP_ATTR_MAX + 1] = {
1191 #ifdef HAVE_NLA_NUL_STRING
1192         [ODP_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1193 #endif
1194         [ODP_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 },
1195         [ODP_DP_ATTR_SAMPLING] = { .type = NLA_U32 },
1196 };
1197
1198 static struct genl_family dp_datapath_genl_family = {
1199         .id = GENL_ID_GENERATE,
1200         .hdrsize = sizeof(struct odp_header),
1201         .name = ODP_DATAPATH_FAMILY,
1202         .version = 1,
1203         .maxattr = ODP_DP_ATTR_MAX
1204 };
1205
1206 static struct genl_multicast_group dp_datapath_multicast_group = {
1207         .name = ODP_DATAPATH_MCGROUP
1208 };
1209
1210 static int odp_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1211                                 u32 pid, u32 seq, u32 flags, u8 cmd)
1212 {
1213         struct odp_header *odp_header;
1214         struct nlattr *nla;
1215         int err;
1216
1217         odp_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
1218                                    flags, cmd);
1219         if (!odp_header)
1220                 goto error;
1221
1222         odp_header->dp_idx = dp->dp_idx;
1223
1224         rcu_read_lock();
1225         err = nla_put_string(skb, ODP_DP_ATTR_NAME, dp_name(dp));
1226         rcu_read_unlock();
1227         if (err)
1228                 goto nla_put_failure;
1229
1230         nla = nla_reserve(skb, ODP_DP_ATTR_STATS, sizeof(struct odp_stats));
1231         if (!nla)
1232                 goto nla_put_failure;
1233         get_dp_stats(dp, nla_data(nla));
1234
1235         NLA_PUT_U32(skb, ODP_DP_ATTR_IPV4_FRAGS,
1236                     dp->drop_frags ? ODP_DP_FRAG_DROP : ODP_DP_FRAG_ZERO);
1237
1238         if (dp->sflow_probability)
1239                 NLA_PUT_U32(skb, ODP_DP_ATTR_SAMPLING, dp->sflow_probability);
1240
1241         nla = nla_nest_start(skb, ODP_DP_ATTR_MCGROUPS);
1242         if (!nla)
1243                 goto nla_put_failure;
1244         NLA_PUT_U32(skb, ODP_PACKET_CMD_MISS, packet_mc_group(dp, ODP_PACKET_CMD_MISS));
1245         NLA_PUT_U32(skb, ODP_PACKET_CMD_ACTION, packet_mc_group(dp, ODP_PACKET_CMD_ACTION));
1246         NLA_PUT_U32(skb, ODP_PACKET_CMD_SAMPLE, packet_mc_group(dp, ODP_PACKET_CMD_SAMPLE));
1247         nla_nest_end(skb, nla);
1248
1249         return genlmsg_end(skb, odp_header);
1250
1251 nla_put_failure:
1252         genlmsg_cancel(skb, odp_header);
1253 error:
1254         return -EMSGSIZE;
1255 }
1256
1257 static struct sk_buff *odp_dp_cmd_build_info(struct datapath *dp, u32 pid,
1258                                              u32 seq, u8 cmd)
1259 {
1260         struct sk_buff *skb;
1261         int retval;
1262
1263         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1264         if (!skb)
1265                 return ERR_PTR(-ENOMEM);
1266
1267         retval = odp_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
1268         if (retval < 0) {
1269                 kfree_skb(skb);
1270                 return ERR_PTR(retval);
1271         }
1272         return skb;
1273 }
1274
1275 static int odp_dp_cmd_validate(struct nlattr *a[ODP_DP_ATTR_MAX + 1])
1276 {
1277         if (a[ODP_DP_ATTR_IPV4_FRAGS]) {
1278                 u32 frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]);
1279
1280                 if (frags != ODP_DP_FRAG_ZERO && frags != ODP_DP_FRAG_DROP)
1281                         return -EINVAL;
1282         }
1283
1284         return VERIFY_NUL_STRING(a[ODP_DP_ATTR_NAME], IFNAMSIZ - 1);
1285 }
1286
1287 /* Called with genl_mutex and optionally with RTNL lock also. */
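/* Returns the datapath named by ODP_DP_ATTR_NAME (which must be the name of a
 * datapath's local port) if that attribute is present, otherwise the datapath
 * with the dp_idx given in 'odp_header'. */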
1288 static struct datapath *lookup_datapath(struct odp_header *odp_header, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
1289 {
1290         if (!a[ODP_DP_ATTR_NAME]) {
1291                 struct datapath *dp = get_dp(odp_header->dp_idx);
1292                 if (!dp)
1293                         return ERR_PTR(-ENODEV);
1294                 return dp;
1295         } else {
1296                 struct vport *vport;
1297                 int dp_idx;
1298
1299                 rcu_read_lock();
1300                 vport = vport_locate(nla_data(a[ODP_DP_ATTR_NAME]));
1301                 dp_idx = vport && vport->port_no == ODPP_LOCAL ? vport->dp->dp_idx : -1;
1302                 rcu_read_unlock();
1303
1304                 if (dp_idx < 0)
1305                         return ERR_PTR(-ENODEV);
1306                 return vport->dp;
1307         }
1308 }
1309
1310 /* Called with genl_mutex. */
1311 static void change_datapath(struct datapath *dp, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
1312 {
1313         if (a[ODP_DP_ATTR_IPV4_FRAGS])
1314                 dp->drop_frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]) == ODP_DP_FRAG_DROP;
1315         if (a[ODP_DP_ATTR_SAMPLING])
1316                 dp->sflow_probability = nla_get_u32(a[ODP_DP_ATTR_SAMPLING]);
1317 }
1318
1319 static int odp_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1320 {
1321         struct nlattr **a = info->attrs;
1322         struct odp_header *odp_header = info->userhdr;
1323         struct vport_parms parms;
1324         struct sk_buff *reply;
1325         struct datapath *dp;
1326         struct vport *vport;
1327         int dp_idx;
1328         int err;
1329
1330         err = -EINVAL;
1331         if (!a[ODP_DP_ATTR_NAME])
1332                 goto err;
1333
1334         err = odp_dp_cmd_validate(a);
1335         if (err)
1336                 goto err;
1337
1338         rtnl_lock();
1339         err = -ENODEV;
1340         if (!try_module_get(THIS_MODULE))
1341                 goto err_unlock_rtnl;
1342
1343         dp_idx = odp_header->dp_idx;
1344         if (dp_idx < 0) {
1345                 err = -EFBIG;
1346                 for (dp_idx = 0; dp_idx < ARRAY_SIZE(dps); dp_idx++) {
1347                         if (get_dp(dp_idx))
1348                                 continue;
1349                         err = 0;
1350                         break;
1351                 }
1352         } else if (dp_idx < ARRAY_SIZE(dps))
1353                 err = get_dp(dp_idx) ? -EBUSY : 0;
1354         else
1355                 err = -EINVAL;
1356         if (err)
1357                 goto err_put_module;
1358
1359         err = -ENOMEM;
1360         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1361         if (dp == NULL)
1362                 goto err_put_module;
1363         INIT_LIST_HEAD(&dp->port_list);
1364         dp->dp_idx = dp_idx;
1365
1366         /* Initialize kobject for bridge.  This will be added as
1367          * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
1368         dp->ifobj.kset = NULL;
1369         kobject_init(&dp->ifobj, &dp_ktype);
1370
1371         /* Allocate table. */
1372         err = -ENOMEM;
1373         rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
1374         if (!dp->table)
1375                 goto err_free_dp;
1376
1377         /* Set up our datapath device. */
1378         parms.name = nla_data(a[ODP_DP_ATTR_NAME]);
1379         parms.type = ODP_VPORT_TYPE_INTERNAL;
1380         parms.options = NULL;
1381         parms.dp = dp;
1382         parms.port_no = ODPP_LOCAL;
1383         vport = new_vport(&parms);
1384         if (IS_ERR(vport)) {
1385                 err = PTR_ERR(vport);
1386                 if (err == -EBUSY)
1387                         err = -EEXIST;
1388
1389                 goto err_destroy_table;
1390         }
1391
1392         dp->drop_frags = 0;
1393         dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1394         if (!dp->stats_percpu) {
1395                 err = -ENOMEM;
1396                 goto err_destroy_local_port;
1397         }
1398
1399         change_datapath(dp, a);
1400
1401         reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_NEW);
1402         err = PTR_ERR(reply);
1403         if (IS_ERR(reply))
1404                 goto err_destroy_local_port;
1405
1406         rcu_assign_pointer(dps[dp_idx], dp);
1407         dp_sysfs_add_dp(dp);
1408
1409         rtnl_unlock();
1410
1411         genl_notify(reply, genl_info_net(info), info->snd_pid,
1412                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1413         return 0;
1414
1415 err_destroy_local_port:
1416         dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
1417 err_destroy_table:
1418         tbl_destroy(get_table_protected(dp), NULL);
1419 err_free_dp:
1420         kfree(dp);
1421 err_put_module:
1422         module_put(THIS_MODULE);
1423 err_unlock_rtnl:
1424         rtnl_unlock();
1425 err:
1426         return err;
1427 }
1428
1429 static int odp_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1430 {
1431         struct vport *vport, *next_vport;
1432         struct sk_buff *reply;
1433         struct datapath *dp;
1434         int err;
1435
1436         err = odp_dp_cmd_validate(info->attrs);
1437         if (err)
1438                 goto exit;
1439
1440         rtnl_lock();
1441         dp = lookup_datapath(info->userhdr, info->attrs);
1442         err = PTR_ERR(dp);
1443         if (IS_ERR(dp))
1444                 goto exit_unlock;
1445
1446         reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_DEL);
1447         err = PTR_ERR(reply);
1448         if (IS_ERR(reply))
1449                 goto exit_unlock;
1450
1451         list_for_each_entry_safe (vport, next_vport, &dp->port_list, node)
1452                 if (vport->port_no != ODPP_LOCAL)
1453                         dp_detach_port(vport);
1454
1455         dp_sysfs_del_dp(dp);
1456         rcu_assign_pointer(dps[dp->dp_idx], NULL);
1457         dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
1458
1459         call_rcu(&dp->rcu, destroy_dp_rcu);
1460         module_put(THIS_MODULE);
1461
1462         genl_notify(reply, genl_info_net(info), info->snd_pid,
1463                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1464         err = 0;
1465
1466 exit_unlock:
1467         rtnl_unlock();
1468 exit:
1469         return err;
1470 }
1471
1472 static int odp_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1473 {
1474         struct sk_buff *reply;
1475         struct datapath *dp;
1476         int err;
1477
1478         err = odp_dp_cmd_validate(info->attrs);
1479         if (err)
1480                 return err;
1481
1482         dp = lookup_datapath(info->userhdr, info->attrs);
1483         if (IS_ERR(dp))
1484                 return PTR_ERR(dp);
1485
1486         change_datapath(dp, info->attrs);
1487
1488         reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_NEW);
1489         if (IS_ERR(reply)) {
1490                 err = PTR_ERR(reply);
1491                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1492                                 dp_datapath_multicast_group.id, err);
1493                 return 0;
1494         }
1495
1496         genl_notify(reply, genl_info_net(info), info->snd_pid,
1497                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1498         return 0;
1499 }
1500
1501 static int odp_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1502 {
1503         struct sk_buff *reply;
1504         struct datapath *dp;
1505         int err;
1506
1507         err = odp_dp_cmd_validate(info->attrs);
1508         if (err)
1509                 return err;
1510
1511         dp = lookup_datapath(info->userhdr, info->attrs);
1512         if (IS_ERR(dp))
1513                 return PTR_ERR(dp);
1514
1515         reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_NEW);
1516         if (IS_ERR(reply))
1517                 return PTR_ERR(reply);
1518
1519         return genlmsg_reply(reply, info);
1520 }
1521
1522 static int odp_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1523 {
1524         u32 dp_idx;
1525
1526         for (dp_idx = cb->args[0]; dp_idx < ARRAY_SIZE(dps); dp_idx++) {
1527                 struct datapath *dp = get_dp(dp_idx);
1528                 if (!dp)
1529                         continue;
1530                 if (odp_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
1531                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1532                                          ODP_DP_CMD_NEW) < 0)
1533                         break;
1534         }
1535
1536         cb->args[0] = dp_idx;
1537         return skb->len;
1538 }
1539
1540 static struct genl_ops dp_datapath_genl_ops[] = {
1541         { .cmd = ODP_DP_CMD_NEW,
1542           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1543           .policy = datapath_policy,
1544           .doit = odp_dp_cmd_new
1545         },
1546         { .cmd = ODP_DP_CMD_DEL,
1547           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1548           .policy = datapath_policy,
1549           .doit = odp_dp_cmd_del
1550         },
1551         { .cmd = ODP_DP_CMD_GET,
1552           .flags = 0,               /* OK for unprivileged users. */
1553           .policy = datapath_policy,
1554           .doit = odp_dp_cmd_get,
1555           .dumpit = odp_dp_cmd_dump
1556         },
1557         { .cmd = ODP_DP_CMD_SET,
1558           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1559           .policy = datapath_policy,
1560           .doit = odp_dp_cmd_set,
1561         },
1562 };
1563
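/*
 * Netlink attribute policy for ODP_VPORT_* commands.  Kernels without
 * NLA_NUL_STRING support (no HAVE_NLA_NUL_STRING) fall back to plain
 * length checks here; VERIFY_NUL_STRING() in odp_vport_cmd_validate()
 * covers the NUL-termination check in that case.
 */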
1564 static const struct nla_policy vport_policy[ODP_VPORT_ATTR_MAX + 1] = {
1565 #ifdef HAVE_NLA_NUL_STRING
1566         [ODP_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1567         [ODP_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1568         [ODP_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1569         [ODP_VPORT_ATTR_STATS] = { .len = sizeof(struct rtnl_link_stats64) },
1570         [ODP_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
1571 #else
1572         [ODP_VPORT_ATTR_STATS] = { .minlen = sizeof(struct rtnl_link_stats64) },
1573         [ODP_VPORT_ATTR_ADDRESS] = { .minlen = ETH_ALEN },
1574 #endif
1575         [ODP_VPORT_ATTR_MTU] = { .type = NLA_U32 },
1576         [ODP_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1577 };
1578
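/*
 * Generic netlink family for vport commands.  Every message carries a
 * struct odp_header (hdrsize) identifying the datapath the vport belongs
 * to.  Notifications go to dp_vport_multicast_group, registered below.
 */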
1579 static struct genl_family dp_vport_genl_family = {
1580         .id = GENL_ID_GENERATE,
1581         .hdrsize = sizeof(struct odp_header),
1582         .name = ODP_VPORT_FAMILY,
1583         .version = 1,
1584         .maxattr = ODP_VPORT_ATTR_MAX
1585 };
1586
1587 static struct genl_multicast_group dp_vport_multicast_group = {
1588         .name = ODP_VPORT_MCGROUP
1589 };
1590
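/*
 * Fills @skb with a single vport message for @cmd.  Returns the result of
 * genlmsg_end() on success or -EMSGSIZE if the skb ran out of room, which
 * lets odp_vport_cmd_dump() stop and resume in a later pass.  A failure
 * to read the stats is not fatal: the reserved attribute is simply
 * trimmed back off the skb.
 */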
1591 /* Called with RTNL lock or RCU read lock. */
1592 static int odp_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1593                                    u32 pid, u32 seq, u32 flags, u8 cmd)
1594 {
1595         struct odp_header *odp_header;
1596         struct nlattr *nla;
1597         int ifindex, iflink;
1598         int err;
1599
1600         odp_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
1601                                  flags, cmd);
1602         if (!odp_header)
1603                 return -EMSGSIZE;
1604
1605         odp_header->dp_idx = vport->dp->dp_idx;
1606
1607         NLA_PUT_U32(skb, ODP_VPORT_ATTR_PORT_NO, vport->port_no);
1608         NLA_PUT_U32(skb, ODP_VPORT_ATTR_TYPE, vport_get_type(vport));
1609         NLA_PUT_STRING(skb, ODP_VPORT_ATTR_NAME, vport_get_name(vport));
1610
1611         nla = nla_reserve(skb, ODP_VPORT_ATTR_STATS, sizeof(struct rtnl_link_stats64));
1612         if (!nla)
1613                 goto nla_put_failure;
1614         if (vport_get_stats(vport, nla_data(nla)))
1615                 __skb_trim(skb, skb->len - nla->nla_len);
1616
1617         NLA_PUT(skb, ODP_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));
1618
1619         NLA_PUT_U32(skb, ODP_VPORT_ATTR_MTU, vport_get_mtu(vport));
1620
1621         err = vport_get_options(vport, skb);
1622         if (err == -EMSGSIZE)
1623                 goto error;
1624
1625         ifindex = vport_get_ifindex(vport);
1626         if (ifindex > 0)
1627                 NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFINDEX, ifindex);
1628
1629         iflink = vport_get_iflink(vport);
1630         if (iflink > 0)
1631                 NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFLINK, iflink);
1632
1633         return genlmsg_end(skb, odp_header);
1634
1635 nla_put_failure:
1636         err = -EMSGSIZE;
1637 error:
1638         genlmsg_cancel(skb, odp_header);
1639         return err;
1640 }
1641
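/*
 * Allocates a fresh skb and fills it with one vport message.  GFP_ATOMIC
 * is used because the caller may hold the RCU read lock (see
 * odp_vport_cmd_get()).
 */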
1642 /* Called with RTNL lock or RCU read lock. */
1643 static struct sk_buff *odp_vport_cmd_build_info(struct vport *vport, u32 pid,
1644                                                 u32 seq, u8 cmd)
1645 {
1646         struct sk_buff *skb;
1647         int retval;
1648
1649         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1650         if (!skb)
1651                 return ERR_PTR(-ENOMEM);
1652
1653         retval = odp_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
1654         if (retval < 0) {
1655                 kfree_skb(skb);
1656                 return ERR_PTR(retval);
1657         }
1658         return skb;
1659 }
1660
1661 static int odp_vport_cmd_validate(struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
1662 {
1663         return VERIFY_NUL_STRING(a[ODP_VPORT_ATTR_NAME], IFNAMSIZ - 1);
1664 }
1665
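/*
 * Looks up a vport either by ODP_VPORT_ATTR_NAME or, failing that, by
 * ODP_VPORT_ATTR_PORT_NO within the datapath named in @odp_header.
 * Returns -EINVAL if neither attribute is present.
 */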
1666 /* Called with RTNL lock or RCU read lock. */
1667 static struct vport *lookup_vport(struct odp_header *odp_header,
1668                                   struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
1669 {
1670         struct datapath *dp;
1671         struct vport *vport;
1672
1673         if (a[ODP_VPORT_ATTR_NAME]) {
1674                 vport = vport_locate(nla_data(a[ODP_VPORT_ATTR_NAME]));
1675                 if (!vport)
1676                         return ERR_PTR(-ENODEV);
1677                 return vport;
1678         } else if (a[ODP_VPORT_ATTR_PORT_NO]) {
1679                 u32 port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);
1680
1681                 if (port_no >= DP_MAX_PORTS)
1682                         return ERR_PTR(-EFBIG);
1683
1684                 dp = get_dp(odp_header->dp_idx);
1685                 if (!dp)
1686                         return ERR_PTR(-ENODEV);
1687
1688                 vport = get_vport_protected(dp, port_no);
1689                 if (!vport)
1690                         return ERR_PTR(-ENOENT);
1691                 return vport;
1692         } else
1693                 return ERR_PTR(-EINVAL);
1694 }
1695
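/*
 * Applies the optional STATS, ADDRESS and MTU attributes to @vport,
 * stopping at the first vport_set_*() error.
 */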
1696 /* Called with RTNL lock. */
1697 static int change_vport(struct vport *vport, struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
1698 {
1699         int err = 0;
1700         if (a[ODP_VPORT_ATTR_STATS])
1701                 err = vport_set_stats(vport, nla_data(a[ODP_VPORT_ATTR_STATS]));
1702         if (!err && a[ODP_VPORT_ATTR_ADDRESS])
1703                 err = vport_set_addr(vport, nla_data(a[ODP_VPORT_ATTR_ADDRESS]));
1704         if (!err && a[ODP_VPORT_ATTR_MTU])
1705                 err = vport_set_mtu(vport, nla_get_u32(a[ODP_VPORT_ATTR_MTU]));
1706         return err;
1707 }
1708
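/*
 * ODP_VPORT_CMD_NEW handler.  Requires NAME and TYPE; if no PORT_NO is
 * given, the lowest free port number starting at 1 is picked (port 0 is
 * the datapath's local port, ODPP_LOCAL).  The new vport is created under
 * RTNL and change_vport() then applies any optional attributes; if that
 * or building the notification fails, the port is detached again before
 * returning.
 */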
1709 static int odp_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1710 {
1711         struct nlattr **a = info->attrs;
1712         struct odp_header *odp_header = info->userhdr;
1713         struct vport_parms parms;
1714         struct sk_buff *reply;
1715         struct vport *vport;
1716         struct datapath *dp;
1717         u32 port_no;
1718         int err;
1719
1720         err = -EINVAL;
1721         if (!a[ODP_VPORT_ATTR_NAME] || !a[ODP_VPORT_ATTR_TYPE])
1722                 goto exit;
1723
1724         err = odp_vport_cmd_validate(a);
1725         if (err)
1726                 goto exit;
1727
1728         rtnl_lock();
1729         dp = get_dp(odp_header->dp_idx);
1730         err = -ENODEV;
1731         if (!dp)
1732                 goto exit_unlock;
1733
1734         if (a[ODP_VPORT_ATTR_PORT_NO]) {
1735                 port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);
1736
1737                 err = -EFBIG;
1738                 if (port_no >= DP_MAX_PORTS)
1739                         goto exit_unlock;
1740
1741                 vport = get_vport_protected(dp, port_no);
1742                 err = -EBUSY;
1743                 if (vport)
1744                         goto exit_unlock;
1745         } else {
1746                 for (port_no = 1; ; port_no++) {
1747                         if (port_no >= DP_MAX_PORTS) {
1748                                 err = -EFBIG;
1749                                 goto exit_unlock;
1750                         }
1751                         vport = get_vport_protected(dp, port_no);
1752                         if (!vport)
1753                                 break;
1754                 }
1755         }
1756
1757         parms.name = nla_data(a[ODP_VPORT_ATTR_NAME]);
1758         parms.type = nla_get_u32(a[ODP_VPORT_ATTR_TYPE]);
1759         parms.options = a[ODP_VPORT_ATTR_OPTIONS];
1760         parms.dp = dp;
1761         parms.port_no = port_no;
1762
1763         vport = new_vport(&parms);
1764         err = PTR_ERR(vport);
1765         if (IS_ERR(vport))
1766                 goto exit_unlock;
1767
1768         set_internal_devs_mtu(dp);
1769         dp_sysfs_add_if(vport);
1770
1771         err = change_vport(vport, a);
1772         if (!err) {
1773                 reply = odp_vport_cmd_build_info(vport, info->snd_pid,
1774                                                  info->snd_seq, ODP_VPORT_CMD_NEW);
1775                 if (IS_ERR(reply))
1776                         err = PTR_ERR(reply);
1777         }
1778         if (err) {
1779                 dp_detach_port(vport);
1780                 goto exit_unlock;
1781         }
1782         genl_notify(reply, genl_info_net(info), info->snd_pid,
1783                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1784
1785
1786 exit_unlock:
1787         rtnl_unlock();
1788 exit:
1789         return err;
1790 }
1791
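/*
 * ODP_VPORT_CMD_SET handler.  Under RTNL, applies OPTIONS and then the
 * generic attributes via change_vport(), and broadcasts the resulting
 * vport state.  If the notification skb cannot be built, the error is
 * pushed to multicast listeners with netlink_set_err() and the command
 * itself still succeeds.
 */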
1792 static int odp_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1793 {
1794         struct nlattr **a = info->attrs;
1795         struct sk_buff *reply;
1796         struct vport *vport;
1797         int err;
1798
1799         err = odp_vport_cmd_validate(a);
1800         if (err)
1801                 goto exit;
1802
1803         rtnl_lock();
1804         vport = lookup_vport(info->userhdr, a);
1805         err = PTR_ERR(vport);
1806         if (IS_ERR(vport))
1807                 goto exit_unlock;
1808
1809         err = 0;
1810         if (a[ODP_VPORT_ATTR_OPTIONS])
1811                 err = vport_set_options(vport, a[ODP_VPORT_ATTR_OPTIONS]);
1812         if (!err)
1813                 err = change_vport(vport, a);
1814
1815         reply = odp_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1816                                          ODP_VPORT_CMD_NEW);
1817         if (IS_ERR(reply)) {
1818                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1819                                 dp_vport_multicast_group.id, PTR_ERR(reply));
1820                 err = 0;
1821                 goto exit_unlock;
1822         }
1823
1824         genl_notify(reply, genl_info_net(info), info->snd_pid,
1825                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1826
1827 exit_unlock:
1828         rtnl_unlock();
1829 exit:
1830         return err;
1831 }
1832
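/*
 * ODP_VPORT_CMD_DEL handler.  The datapath's local port (ODPP_LOCAL)
 * cannot be deleted through this command.  The notification is built
 * before dp_detach_port() so that it can still read the vport's state.
 */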
1833 static int odp_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1834 {
1835         struct nlattr **a = info->attrs;
1836         struct sk_buff *reply;
1837         struct vport *vport;
1838         int err;
1839
1840         err = odp_vport_cmd_validate(a);
1841         if (err)
1842                 goto exit;
1843
1844         rtnl_lock();
1845         vport = lookup_vport(info->userhdr, a);
1846         err = PTR_ERR(vport);
1847         if (IS_ERR(vport))
1848                 goto exit_unlock;
1849
1850         if (vport->port_no == ODPP_LOCAL) {
1851                 err = -EINVAL;
1852                 goto exit_unlock;
1853         }
1854
1855         reply = odp_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1856                                          ODP_VPORT_CMD_DEL);
1857         err = PTR_ERR(reply);
1858         if (IS_ERR(reply))
1859                 goto exit_unlock;
1860
1861         err = dp_detach_port(vport);
1862
1863         genl_notify(reply, genl_info_net(info), info->snd_pid,
1864                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1865
1866 exit_unlock:
1867         rtnl_unlock();
1868 exit:
1869         return err;
1870 }
1871
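/*
 * ODP_VPORT_CMD_GET handler: read-only, so an RCU read lock is enough.
 * Replies with a unicast ODP_VPORT_CMD_NEW message describing the vport.
 */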
1872 static int odp_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
1873 {
1874         struct nlattr **a = info->attrs;
1875         struct odp_header *odp_header = info->userhdr;
1876         struct sk_buff *reply;
1877         struct vport *vport;
1878         int err;
1879
1880         err = odp_vport_cmd_validate(a);
1881         if (err)
1882                 goto exit;
1883
1884         rcu_read_lock();
1885         vport = lookup_vport(odp_header, a);
1886         err = PTR_ERR(vport);
1887         if (IS_ERR(vport))
1888                 goto exit_unlock;
1889
1890         reply = odp_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1891                                          ODP_VPORT_CMD_NEW);
1892         err = PTR_ERR(reply);
1893         if (IS_ERR(reply))
1894                 goto exit_unlock;
1895
1896         err = genlmsg_reply(reply, info);
1897
1898 exit_unlock:
1899         rcu_read_unlock();
1900 exit:
1901         return err;
1902 }
1903
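/*
 * Dump callback for ODP_VPORT_CMD_GET.  Walks the ports of the datapath
 * named in the request header, resuming at cb->args[0], and stops early
 * once odp_vport_cmd_fill_info() reports that the skb is full.
 */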
1904 static int odp_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1905 {
1906         struct odp_header *odp_header = genlmsg_data(nlmsg_data(cb->nlh));
1907         struct datapath *dp;
1908         u32 port_no;
1909         int retval;
1910
1911         dp = get_dp(odp_header->dp_idx);
1912         if (!dp)
1913                 return -ENODEV;
1914
1915         rcu_read_lock();
1916         for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
1917                 struct vport *vport;
1918
1919                 vport = get_vport_protected(dp, port_no);
1920                 if (!vport)
1921                         continue;
1922
1923                 if (odp_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
1924                                             cb->nlh->nlmsg_seq, NLM_F_MULTI,
1925                                             ODP_VPORT_CMD_NEW) < 0)
1926                         break;
1927         }
1928         rcu_read_unlock();
1929
1930         cb->args[0] = port_no;
1931         retval = skb->len;
1932
1933         return retval;
1934 }
1935
1936 static struct genl_ops dp_vport_genl_ops[] = {
1937         { .cmd = ODP_VPORT_CMD_NEW,
1938           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1939           .policy = vport_policy,
1940           .doit = odp_vport_cmd_new
1941         },
1942         { .cmd = ODP_VPORT_CMD_DEL,
1943           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1944           .policy = vport_policy,
1945           .doit = odp_vport_cmd_del
1946         },
1947         { .cmd = ODP_VPORT_CMD_GET,
1948           .flags = 0,               /* OK for unprivileged users. */
1949           .policy = vport_policy,
1950           .doit = odp_vport_cmd_get,
1951           .dumpit = odp_vport_cmd_dump
1952         },
1953         { .cmd = ODP_VPORT_CMD_SET,
1954           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1955           .policy = vport_policy,
1956           .doit = odp_vport_cmd_set,
1957         },
1958 };
1959
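/*
 * Bundles a generic netlink family with its operations and (optional)
 * multicast group so that dp_register_genl() can register and roll back
 * everything from one table, dp_genl_families[].
 */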
1960 struct genl_family_and_ops {
1961         struct genl_family *family;
1962         struct genl_ops *ops;
1963         int n_ops;
1964         struct genl_multicast_group *group;
1965 };
1966
1967 static const struct genl_family_and_ops dp_genl_families[] = {
1968         { &dp_datapath_genl_family,
1969           dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
1970           &dp_datapath_multicast_group },
1971         { &dp_vport_genl_family,
1972           dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
1973           &dp_vport_multicast_group },
1974         { &dp_flow_genl_family,
1975           dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
1976           &dp_flow_multicast_group },
1977         { &dp_packet_genl_family,
1978           dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
1979           NULL },
1980 };
1981
1982 static void dp_unregister_genl(int n_families)
1983 {
1984         int i;
1985
1986         for (i = 0; i < n_families; i++) {
1987                 genl_unregister_family(dp_genl_families[i].family);
1988         }
1989 }
1990
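/*
 * Registers every family in dp_genl_families[] along with its multicast
 * group.  On any failure, the families registered so far are unregistered
 * again via dp_unregister_genl(n_registered); a multicast group
 * registration failure still counts its family as registered, so the
 * rollback covers it.
 */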
1991 static int dp_register_genl(void)
1992 {
1993         int n_registered;
1994         int err;
1995         int i;
1996
1997         n_registered = 0;
1998         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
1999                 const struct genl_family_and_ops *f = &dp_genl_families[i];
2000
2001                 err = genl_register_family_with_ops(f->family, f->ops,
2002                                                     f->n_ops);
2003                 if (err)
2004                         goto error;
2005                 n_registered++;
2006
2007                 if (f->group) {
2008                         err = genl_register_mc_group(f->family, f->group);
2009                         if (err)
2010                                 goto error;
2011                 }
2012         }
2013
2014         err = packet_register_mc_groups();
2015         if (err)
2016                 goto error;
2017         return 0;
2018
2019 error:
2020         dp_unregister_genl(n_registered);
2021         return err;
2022 }
2023
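/*
 * Module init: bring the pieces up in dependency order (flow and vport
 * subsystems, the netdev notifier, then the generic netlink families) and
 * unwind in reverse order on failure.
 */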
2024 static int __init dp_init(void)
2025 {
2026         struct sk_buff *dummy_skb;
2027         int err;
2028
2029         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
2030
2031         pr_info("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
2032
2033         err = flow_init();
2034         if (err)
2035                 goto error;
2036
2037         err = vport_init();
2038         if (err)
2039                 goto error_flow_exit;
2040
2041         err = register_netdevice_notifier(&dp_device_notifier);
2042         if (err)
2043                 goto error_vport_exit;
2044
2045         err = dp_register_genl();
2046         if (err < 0)
2047                 goto error_unreg_notifier;
2048
2049         return 0;
2050
2051 error_unreg_notifier:
2052         unregister_netdevice_notifier(&dp_device_notifier);
2053 error_vport_exit:
2054         vport_exit();
2055 error_flow_exit:
2056         flow_exit();
2057 error:
2058         return err;
2059 }
2060
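/*
 * Module exit: rcu_barrier() waits for all pending call_rcu() callbacks
 * queued by the datapath to finish, then everything registered in
 * dp_init() is unregistered in reverse order.
 */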
2061 static void dp_cleanup(void)
2062 {
2063         rcu_barrier();
2064         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2065         unregister_netdevice_notifier(&dp_device_notifier);
2066         vport_exit();
2067         flow_exit();
2068 }
2069
2070 module_init(dp_init);
2071 module_exit(dp_cleanup);
2072
2073 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2074 MODULE_LICENSE("GPL");