datapath: Dump flow actions only if there is room.
datapath/datapath.c
/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for managing the dp interface/device. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <asm/bug.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>
#include <net/genetlink.h>

#include "openvswitch/datapath-protocol.h"
#include "checksum.h"
#include "datapath.h"
#include "actions.h"
#include "flow.h"
#include "loop_counter.h"
#include "table.h"
#include "vport-internal_dev.h"

int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);

/**
 * DOC: Locking:
 *
 * Writes to device state (add/remove datapath, port, set operations on vports,
 * etc.) are protected by RTNL.
 *
 * Writes to other state (flow table modifications, set miscellaneous datapath
 * parameters such as drop frags, etc.) are protected by genl_mutex.  The RTNL
 * lock nests inside genl_mutex.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 */
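
/*
 * Added annotation (not part of the original source): genl_mutex is
 * already held around every Generic Netlink operation, so handlers that
 * must also touch device state take only RTNL explicitly, in the nesting
 * order described above.  For example, odp_dp_cmd_new() below does:
 *
 *         rtnl_lock();
 *         ...create the datapath and its local port...
 *         rtnl_unlock();
 */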

/* Global list of datapaths to enable dumping them all out.
 * Protected by genl_mutex.
 */
static LIST_HEAD(dps);

static struct vport *new_vport(const struct vport_parms *);
static int queue_control_packets(struct datapath *, struct sk_buff *,
                                 const struct dp_upcall_info *);

/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
struct datapath *get_dp(int dp_ifindex)
{
        struct datapath *dp = NULL;
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
        if (dev) {
                struct vport *vport = internal_dev_get_vport(dev);
                if (vport)
                        dp = vport->dp;
        }
        rcu_read_unlock();

        return dp;
}
EXPORT_SYMBOL_GPL(get_dp);

/* Must be called with genl_mutex. */
static struct tbl *get_table_protected(struct datapath *dp)
{
        return rcu_dereference_protected(dp->table, lockdep_genl_is_held());
}

/* Must be called with rcu_read_lock or RTNL lock. */
static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
{
        return rcu_dereference_rtnl(dp->ports[port_no]);
}

/* Must be called with rcu_read_lock or RTNL lock. */
const char *dp_name(const struct datapath *dp)
{
        return vport_get_name(rcu_dereference_rtnl(dp->ports[ODPP_LOCAL]));
}

static inline size_t br_nlmsg_size(void)
{
        return NLMSG_ALIGN(sizeof(struct ifinfomsg))
               + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
               + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
               + nla_total_size(4) /* IFLA_MASTER */
               + nla_total_size(4) /* IFLA_MTU */
               + nla_total_size(4) /* IFLA_LINK */
               + nla_total_size(1); /* IFLA_OPERSTATE */
}
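
/*
 * Added note: nla_total_size(n) is NLA_ALIGN(NLA_HDRLEN + n), i.e. the
 * attribute header plus an n-byte payload, padded to a 4-byte boundary,
 * so the sum above is an upper bound on what dp_fill_ifinfo() emits.
 */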

/* Caller must hold RTNL lock. */
static int dp_fill_ifinfo(struct sk_buff *skb,
                          const struct vport *port,
                          int event, unsigned int flags)
{
        struct datapath *dp = port->dp;
        int ifindex = vport_get_ifindex(port);
        int iflink = vport_get_iflink(port);
        struct ifinfomsg *hdr;
        struct nlmsghdr *nlh;

        if (ifindex < 0)
                return ifindex;

        if (iflink < 0)
                return iflink;

        nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        hdr = nlmsg_data(nlh);
        hdr->ifi_family = AF_BRIDGE;
        hdr->__ifi_pad = 0;
        hdr->ifi_type = ARPHRD_ETHER;
        hdr->ifi_index = ifindex;
        hdr->ifi_flags = vport_get_flags(port);
        hdr->ifi_change = 0;

        NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
        NLA_PUT_U32(skb, IFLA_MASTER,
                vport_get_ifindex(get_vport_protected(dp, ODPP_LOCAL)));
        NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
        NLA_PUT_U8(skb, IFLA_OPERSTATE,
                   vport_is_running(port)
                        ? vport_get_operstate(port)
                        : IF_OPER_DOWN);
#endif

        NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));

        if (ifindex != iflink)
                NLA_PUT_U32(skb, IFLA_LINK, iflink);

        return nlmsg_end(skb, nlh);

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

/* Caller must hold RTNL lock. */
static void dp_ifinfo_notify(int event, struct vport *port)
{
        struct sk_buff *skb;
        int err = -ENOBUFS;

        skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
        if (skb == NULL)
                goto errout;

        err = dp_fill_ifinfo(skb, port, event, 0);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in br_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }
        rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
        return;
errout:
        if (err < 0)
                rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
}

static void release_dp(struct kobject *kobj)
{
        struct datapath *dp = container_of(kobj, struct datapath, ifobj);
        kfree(dp);
}

static struct kobj_type dp_ktype = {
        .release = release_dp
};

static void destroy_dp_rcu(struct rcu_head *rcu)
{
        struct datapath *dp = container_of(rcu, struct datapath, rcu);

        tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
        free_percpu(dp->stats_percpu);
        kobject_put(&dp->ifobj);
}

/* Called with RTNL lock and genl_lock. */
static struct vport *new_vport(const struct vport_parms *parms)
{
        struct vport *vport;

        vport = vport_add(parms);
        if (!IS_ERR(vport)) {
                struct datapath *dp = parms->dp;

                rcu_assign_pointer(dp->ports[parms->port_no], vport);
                list_add(&vport->node, &dp->port_list);

                dp_ifinfo_notify(RTM_NEWLINK, vport);
        }

        return vport;
}

/* Called with RTNL lock. */
int dp_detach_port(struct vport *p)
{
        ASSERT_RTNL();

        if (p->port_no != ODPP_LOCAL)
                dp_sysfs_del_if(p);
        dp_ifinfo_notify(RTM_DELLINK, p);

        /* First drop references to device. */
        list_del(&p->node);
        rcu_assign_pointer(p->dp->ports[p->port_no], NULL);

        /* Then destroy it. */
        return vport_del(p);
}

/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
        struct datapath *dp = p->dp;
        struct dp_stats_percpu *stats;
        int stats_counter_off;
        struct sw_flow_actions *acts;
        struct loop_counter *loop;
        int error;

        OVS_CB(skb)->vport = p;

        if (!OVS_CB(skb)->flow) {
                struct sw_flow_key key;
                struct tbl_node *flow_node;
                bool is_frag;

                /* Extract flow from 'skb' into 'key'. */
                error = flow_extract(skb, p->port_no, &key, &is_frag);
                if (unlikely(error)) {
                        kfree_skb(skb);
                        return;
                }

                if (is_frag && dp->drop_frags) {
                        kfree_skb(skb);
                        stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
                        goto out;
                }

                /* Look up flow. */
                flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
                                       flow_hash(&key), flow_cmp);
                if (unlikely(!flow_node)) {
                        struct dp_upcall_info upcall;

                        upcall.cmd = ODP_PACKET_CMD_MISS;
                        upcall.key = &key;
                        upcall.userdata = 0;
                        upcall.sample_pool = 0;
                        upcall.actions = NULL;
                        upcall.actions_len = 0;
                        dp_upcall(dp, skb, &upcall);
                        stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
                        goto out;
                }

                OVS_CB(skb)->flow = flow_cast(flow_node);
        }

        stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
        flow_used(OVS_CB(skb)->flow, skb);

        acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);

        /* Check whether we've looped too much. */
        loop = loop_get_counter();
        if (unlikely(++loop->count > MAX_LOOPS))
                loop->looping = true;
        if (unlikely(loop->looping)) {
                loop_suppress(dp, acts);
                kfree_skb(skb);
                goto out_loop;
        }

        /* Execute actions. */
        execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
                        acts->actions_len);

        /* Check whether sub-actions looped too much. */
        if (unlikely(loop->looping))
                loop_suppress(dp, acts);

out_loop:
        /* Decrement loop counter. */
        if (!--loop->count)
                loop->looping = false;
        loop_put_counter();

out:
        /* Update datapath statistics. */
        local_bh_disable();
        stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

        write_seqcount_begin(&stats->seqlock);
        (*(u64 *)((u8 *)stats + stats_counter_off))++;
        write_seqcount_end(&stats->seqlock);

        local_bh_enable();
}
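
/*
 * Added note: the statistics update above runs with bottom halves
 * disabled and bumps a per-CPU seqcount; get_dp_stats() below reads the
 * counters locklessly, retrying whenever read_seqcount_retry() shows
 * that this writer ran concurrently.
 */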

static void copy_and_csum_skb(struct sk_buff *skb, void *to)
{
        u16 csum_start, csum_offset;
        __wsum csum;

        get_skb_csum_pointers(skb, &csum_start, &csum_offset);
        csum_start -= skb_headroom(skb);
        BUG_ON(csum_start >= skb_headlen(skb));

        skb_copy_bits(skb, 0, to, csum_start);

        csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
                                      skb->len - csum_start, 0);
        *(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
}
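
/*
 * Added example: for a CHECKSUM_PARTIAL TCP/IPv4 skb, csum_start marks
 * the start of the TCP header and csum_offset is
 * offsetof(struct tcphdr, check), so the function above copies the bytes
 * before the TCP header verbatim, checksums the rest while copying, and
 * folds the result into the checksum field of the copy.
 */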

static struct genl_family dp_packet_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct odp_header),
        .name = ODP_PACKET_FAMILY,
        .version = 1,
        .maxattr = ODP_PACKET_ATTR_MAX
};

/* Generic Netlink multicast groups for upcalls.
 *
 * We really want three unique multicast groups per datapath, but we can't even
 * get one, because genl_register_mc_group() takes genl_lock, which is also
 * held during Generic Netlink message processing, so trying to acquire
 * multicast groups during ODP_DP_NEW processing deadlocks.  Instead, we
 * preallocate a few groups and use them round-robin for datapaths.  Collision
 * isn't fatal--multicast listeners should check that the family is the one
 * that they want and discard others--but it wastes time and memory to receive
 * unwanted messages.
 */
#define PACKET_N_MC_GROUPS 16
static struct genl_multicast_group packet_mc_groups[PACKET_N_MC_GROUPS];

static u32 packet_mc_group(struct datapath *dp, u8 cmd)
{
        u32 idx;
        BUILD_BUG_ON_NOT_POWER_OF_2(PACKET_N_MC_GROUPS);

        idx = jhash_2words(dp->dp_ifindex, cmd, 0) & (PACKET_N_MC_GROUPS - 1);
        return packet_mc_groups[idx].id;
}

static int packet_register_mc_groups(void)
{
        int i;

        for (i = 0; i < PACKET_N_MC_GROUPS; i++) {
                struct genl_multicast_group *group = &packet_mc_groups[i];
                int error;

                sprintf(group->name, "packet%d", i);
                error = genl_register_mc_group(&dp_packet_genl_family, group);
                if (error)
                        return error;
        }
        return 0;
}

int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
{
        struct dp_stats_percpu *stats;
        int err;

        WARN_ON_ONCE(skb_shared(skb));

        forward_ip_summed(skb);

        err = vswitch_skb_checksum_setup(skb);
        if (err)
                goto err_kfree_skb;

        /* Break apart GSO packets into their component pieces.  Otherwise
         * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
        if (skb_is_gso(skb)) {
                struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);

                kfree_skb(skb);
                skb = nskb;
                if (IS_ERR(skb)) {
                        err = PTR_ERR(skb);
                        goto err;
                }
        }

        return queue_control_packets(dp, skb, upcall_info);

err_kfree_skb:
        kfree_skb(skb);
err:
        local_bh_disable();
        stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

        write_seqcount_begin(&stats->seqlock);
        stats->n_lost++;
        write_seqcount_end(&stats->seqlock);

        local_bh_enable();

        return err;
}

/* Send each packet in the 'skb' list to userspace for 'dp' as directed by
 * 'upcall_info'.  There will be only one packet unless we broke up a GSO
 * packet.
 */
static int queue_control_packets(struct datapath *dp, struct sk_buff *skb,
                                 const struct dp_upcall_info *upcall_info)
{
        u32 group = packet_mc_group(dp, upcall_info->cmd);
        struct sk_buff *nskb;
        int port_no;
        int err;

        if (OVS_CB(skb)->vport)
                port_no = OVS_CB(skb)->vport->port_no;
        else
                port_no = ODPP_LOCAL;

        do {
                struct odp_header *upcall;
                struct sk_buff *user_skb; /* to be queued to userspace */
                struct nlattr *nla;
                unsigned int len;

                nskb = skb->next;
                skb->next = NULL;

                len = sizeof(struct odp_header);
                len += nla_total_size(skb->len);
                len += nla_total_size(FLOW_BUFSIZE);
                if (upcall_info->userdata)
                        len += nla_total_size(8);
                if (upcall_info->sample_pool)
                        len += nla_total_size(4);
                if (upcall_info->actions_len)
                        len += nla_total_size(upcall_info->actions_len);

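                /* Added note: 'len' above is an upper bound on every
                 * attribute added to 'user_skb', so once genlmsg_new()
                 * succeeds, the unchecked genlmsg_put() and nla_put*()
                 * calls that follow cannot fail for lack of room. */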
                user_skb = genlmsg_new(len, GFP_ATOMIC);
                if (!user_skb) {
                        netlink_set_err(INIT_NET_GENL_SOCK, 0, group, -ENOBUFS);
                        goto err_kfree_skbs;
                }

                upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd);
                upcall->dp_ifindex = dp->dp_ifindex;

                nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_KEY);
                flow_to_nlattrs(upcall_info->key, user_skb);
                nla_nest_end(user_skb, nla);

                if (upcall_info->userdata)
                        nla_put_u64(user_skb, ODP_PACKET_ATTR_USERDATA, upcall_info->userdata);
                if (upcall_info->sample_pool)
                        nla_put_u32(user_skb, ODP_PACKET_ATTR_SAMPLE_POOL, upcall_info->sample_pool);
                if (upcall_info->actions_len) {
                        const struct nlattr *actions = upcall_info->actions;
                        u32 actions_len = upcall_info->actions_len;

                        nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_ACTIONS);
                        memcpy(__skb_put(user_skb, actions_len), actions, actions_len);
                        nla_nest_end(user_skb, nla);
                }

                nla = __nla_reserve(user_skb, ODP_PACKET_ATTR_PACKET, skb->len);
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        copy_and_csum_skb(skb, nla_data(nla));
                else
                        skb_copy_bits(skb, 0, nla_data(nla), skb->len);

                err = genlmsg_multicast(user_skb, 0, group, GFP_ATOMIC);
                if (err)
                        goto err_kfree_skbs;

                kfree_skb(skb);
                skb = nskb;
        } while (skb);
        return 0;

err_kfree_skbs:
        kfree_skb(skb);
        while ((skb = nskb) != NULL) {
                nskb = skb->next;
                kfree_skb(skb);
        }
        return err;
}

/* Called with genl_mutex. */
static int flush_flows(int dp_ifindex)
{
        struct tbl *old_table;
        struct tbl *new_table;
        struct datapath *dp;

        dp = get_dp(dp_ifindex);
        if (!dp)
                return -ENODEV;

        old_table = get_table_protected(dp);
        new_table = tbl_create(TBL_MIN_BUCKETS);
        if (!new_table)
                return -ENOMEM;

        rcu_assign_pointer(dp->table, new_table);

        tbl_deferred_destroy(old_table, flow_free_tbl);

        return 0;
}

static int validate_actions(const struct nlattr *attr)
{
        const struct nlattr *a;
        int rem;

        nla_for_each_nested(a, attr, rem) {
                static const u32 action_lens[ODP_ACTION_ATTR_MAX + 1] = {
                        [ODP_ACTION_ATTR_OUTPUT] = 4,
                        [ODP_ACTION_ATTR_CONTROLLER] = 8,
                        [ODP_ACTION_ATTR_SET_DL_TCI] = 2,
                        [ODP_ACTION_ATTR_STRIP_VLAN] = 0,
                        [ODP_ACTION_ATTR_SET_DL_SRC] = ETH_ALEN,
                        [ODP_ACTION_ATTR_SET_DL_DST] = ETH_ALEN,
                        [ODP_ACTION_ATTR_SET_NW_SRC] = 4,
                        [ODP_ACTION_ATTR_SET_NW_DST] = 4,
                        [ODP_ACTION_ATTR_SET_NW_TOS] = 1,
                        [ODP_ACTION_ATTR_SET_TP_SRC] = 2,
                        [ODP_ACTION_ATTR_SET_TP_DST] = 2,
                        [ODP_ACTION_ATTR_SET_TUNNEL] = 8,
                        [ODP_ACTION_ATTR_SET_PRIORITY] = 4,
                        [ODP_ACTION_ATTR_POP_PRIORITY] = 0,
                        [ODP_ACTION_ATTR_DROP_SPOOFED_ARP] = 0,
                };
                int type = nla_type(a);

                if (type > ODP_ACTION_ATTR_MAX || nla_len(a) != action_lens[type])
                        return -EINVAL;

                switch (type) {
                case ODP_ACTION_ATTR_UNSPEC:
                        return -EINVAL;

                case ODP_ACTION_ATTR_CONTROLLER:
                case ODP_ACTION_ATTR_STRIP_VLAN:
                case ODP_ACTION_ATTR_SET_DL_SRC:
                case ODP_ACTION_ATTR_SET_DL_DST:
                case ODP_ACTION_ATTR_SET_NW_SRC:
                case ODP_ACTION_ATTR_SET_NW_DST:
                case ODP_ACTION_ATTR_SET_TP_SRC:
                case ODP_ACTION_ATTR_SET_TP_DST:
                case ODP_ACTION_ATTR_SET_TUNNEL:
                case ODP_ACTION_ATTR_SET_PRIORITY:
                case ODP_ACTION_ATTR_POP_PRIORITY:
                case ODP_ACTION_ATTR_DROP_SPOOFED_ARP:
                        /* No validation needed. */
                        break;

                case ODP_ACTION_ATTR_OUTPUT:
                        if (nla_get_u32(a) >= DP_MAX_PORTS)
                                return -EINVAL;
                        break;

                case ODP_ACTION_ATTR_SET_DL_TCI:
                        if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
                                return -EINVAL;
                        break;

                case ODP_ACTION_ATTR_SET_NW_TOS:
                        if (nla_get_u8(a) & INET_ECN_MASK)
                                return -EINVAL;
                        break;

                default:
                        return -EOPNOTSUPP;
                }
        }

        if (rem > 0)
                return -EINVAL;

        return 0;
}
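
/*
 * Added example (illustrative only, not part of the original source): a
 * well-formed actions attribute, as validate_actions() expects it, is a
 * nest of fixed-size attributes.  Kernel-style construction of a single
 * "output to port 3" action would look like:
 *
 *         struct nlattr *start = nla_nest_start(skb, ODP_FLOW_ATTR_ACTIONS);
 *         nla_put_u32(skb, ODP_ACTION_ATTR_OUTPUT, 3);
 *         nla_nest_end(skb, start);
 *
 * i.e. exactly 4 bytes of payload carrying a port number below
 * DP_MAX_PORTS.
 */
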
static void clear_stats(struct sw_flow *flow)
{
        flow->used = 0;
        flow->tcp_flags = 0;
        flow->packet_count = 0;
        flow->byte_count = 0;
}

/* Called with genl_mutex. */
static int expand_table(struct datapath *dp)
{
        struct tbl *old_table = get_table_protected(dp);
        struct tbl *new_table;

        new_table = tbl_expand(old_table);
        if (IS_ERR(new_table))
                return PTR_ERR(new_table);

        rcu_assign_pointer(dp->table, new_table);
        tbl_deferred_destroy(old_table, NULL);

        return 0;
}

static int odp_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
        struct odp_header *odp_header = info->userhdr;
        struct nlattr **a = info->attrs;
        struct sk_buff *packet;
        struct sw_flow_key key;
        struct datapath *dp;
        struct ethhdr *eth;
        bool is_frag;
        int err;

        err = -EINVAL;
        if (!a[ODP_PACKET_ATTR_PACKET] || !a[ODP_PACKET_ATTR_ACTIONS] ||
            nla_len(a[ODP_PACKET_ATTR_PACKET]) < ETH_HLEN)
                goto exit;

        err = validate_actions(a[ODP_PACKET_ATTR_ACTIONS]);
        if (err)
                goto exit;

        packet = skb_clone(skb, GFP_KERNEL);
        err = -ENOMEM;
        if (!packet)
                goto exit;
        packet->data = nla_data(a[ODP_PACKET_ATTR_PACKET]);
        packet->len = nla_len(a[ODP_PACKET_ATTR_PACKET]);

        skb_reset_mac_header(packet);
        eth = eth_hdr(packet);

        /* Normally, setting the skb 'protocol' field would be handled by a
         * call to eth_type_trans(), but it assumes there's a sending
         * device, which we may not have. */
        if (ntohs(eth->h_proto) >= 1536)
                packet->protocol = eth->h_proto;
        else
                packet->protocol = htons(ETH_P_802_2);

        err = flow_extract(packet, -1, &key, &is_frag);
        if (err)
                goto exit;

        rcu_read_lock();
        dp = get_dp(odp_header->dp_ifindex);
        err = -ENODEV;
        if (dp)
                err = execute_actions(dp, packet, &key,
                                      nla_data(a[ODP_PACKET_ATTR_ACTIONS]),
                                      nla_len(a[ODP_PACKET_ATTR_ACTIONS]));
        rcu_read_unlock();

exit:
        return err;
}

static const struct nla_policy packet_policy[ODP_PACKET_ATTR_MAX + 1] = {
        [ODP_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
        [ODP_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static struct genl_ops dp_packet_genl_ops[] = {
        { .cmd = ODP_PACKET_CMD_EXECUTE,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = packet_policy,
          .doit = odp_packet_cmd_execute
        }
};

static void get_dp_stats(struct datapath *dp, struct odp_stats *stats)
{
        int i;

        stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0;
        for_each_possible_cpu(i) {
                const struct dp_stats_percpu *percpu_stats;
                struct dp_stats_percpu local_stats;
                unsigned seqcount;

                percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

                do {
                        seqcount = read_seqcount_begin(&percpu_stats->seqlock);
                        local_stats = *percpu_stats;
                } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));

                stats->n_frags += local_stats.n_frags;
                stats->n_hit += local_stats.n_hit;
                stats->n_missed += local_stats.n_missed;
                stats->n_lost += local_stats.n_lost;
        }
}

/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports.
 * Called with RTNL lock.
 */
int dp_min_mtu(const struct datapath *dp)
{
        struct vport *p;
        int mtu = 0;

        ASSERT_RTNL();

        list_for_each_entry (p, &dp->port_list, node) {
                int dev_mtu;

                /* Skip any internal ports, since that's what we're trying to
                 * set. */
                if (is_internal_vport(p))
                        continue;

                dev_mtu = vport_get_mtu(p);
                if (!mtu || dev_mtu < mtu)
                        mtu = dev_mtu;
        }

        return mtu ? mtu : ETH_DATA_LEN;
}

/* Sets the MTU of all datapath devices to the minimum of the ports.
 * Called with RTNL lock.
 */
void set_internal_devs_mtu(const struct datapath *dp)
{
        struct vport *p;
        int mtu;

        ASSERT_RTNL();

        mtu = dp_min_mtu(dp);

        list_for_each_entry (p, &dp->port_list, node) {
                if (is_internal_vport(p))
                        vport_set_mtu(p, mtu);
        }
}

static const struct nla_policy flow_policy[ODP_FLOW_ATTR_MAX + 1] = {
        [ODP_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
        [ODP_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [ODP_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
};

static struct genl_family dp_flow_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct odp_header),
        .name = ODP_FLOW_FAMILY,
        .version = 1,
        .maxattr = ODP_FLOW_ATTR_MAX
};

static struct genl_multicast_group dp_flow_multicast_group = {
        .name = ODP_FLOW_MCGROUP
};

/* Called with genl_lock. */
static int odp_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
                                  struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd)
{
        const int skb_orig_len = skb->len;
        const struct sw_flow_actions *sf_acts;
        struct odp_flow_stats stats;
        struct odp_header *odp_header;
        struct nlattr *nla;
        unsigned long used;
        u8 tcp_flags;
        int err;

        sf_acts = rcu_dereference_protected(flow->sf_acts,
                                            lockdep_genl_is_held());

        odp_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
        if (!odp_header)
                return -EMSGSIZE;

        odp_header->dp_ifindex = dp->dp_ifindex;

        nla = nla_nest_start(skb, ODP_FLOW_ATTR_KEY);
        if (!nla)
                goto nla_put_failure;
        err = flow_to_nlattrs(&flow->key, skb);
        if (err)
                goto error;
        nla_nest_end(skb, nla);

        spin_lock_bh(&flow->lock);
        used = flow->used;
        stats.n_packets = flow->packet_count;
        stats.n_bytes = flow->byte_count;
        tcp_flags = flow->tcp_flags;
        spin_unlock_bh(&flow->lock);

        if (used)
                NLA_PUT_U64(skb, ODP_FLOW_ATTR_USED, flow_used_time(used));

        if (stats.n_packets)
                NLA_PUT(skb, ODP_FLOW_ATTR_STATS, sizeof(struct odp_flow_stats), &stats);

        if (tcp_flags)
                NLA_PUT_U8(skb, ODP_FLOW_ATTR_TCP_FLAGS, tcp_flags);

        /* If ODP_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
         * this is the first flow to be dumped into 'skb'.  This is unusual
         * for Netlink, but individual action lists can be longer than
         * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
         * The userspace caller can always fetch the actions separately if it
         * really wants them.  (Most userspace callers in fact don't care.)
         *
         * This can only fail for dump operations because the skb is always
         * properly sized for single flows.
         */
        err = nla_put(skb, ODP_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
                      sf_acts->actions);
        if (err < 0 && skb_orig_len)
                goto error;

        return genlmsg_end(skb, odp_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, odp_header);
        return err;
}
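
/*
 * Added note: 'skb_orig_len' is nonzero only when 'skb' already holds
 * earlier messages, i.e. during a dump.  In that case a flow whose
 * actions do not fit is cancelled and retried in the next dump skb; if
 * it is the first flow in the skb, the actions are silently omitted as
 * the comment above explains.
 */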

static struct sk_buff *odp_flow_cmd_alloc_info(struct sw_flow *flow)
{
        const struct sw_flow_actions *sf_acts;
        int len;

        sf_acts = rcu_dereference_protected(flow->sf_acts,
                                            lockdep_genl_is_held());

        len = nla_total_size(FLOW_BUFSIZE); /* ODP_FLOW_ATTR_KEY */
        len += nla_total_size(sf_acts->actions_len); /* ODP_FLOW_ATTR_ACTIONS */
        len += nla_total_size(sizeof(struct odp_flow_stats)); /* ODP_FLOW_ATTR_STATS */
        len += nla_total_size(1); /* ODP_FLOW_ATTR_TCP_FLAGS */
        len += nla_total_size(8); /* ODP_FLOW_ATTR_USED */
        return genlmsg_new(NLMSG_ALIGN(sizeof(struct odp_header)) + len, GFP_KERNEL);
}

static struct sk_buff *odp_flow_cmd_build_info(struct sw_flow *flow, struct datapath *dp,
                                               u32 pid, u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = odp_flow_cmd_alloc_info(flow);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = odp_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
        BUG_ON(retval < 0);
        return skb;
}
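
/*
 * Added note: the BUG_ON() above is safe because
 * odp_flow_cmd_alloc_info() reserves room for every attribute that
 * odp_flow_cmd_fill_info() can emit, and skb_orig_len is zero for a
 * freshly allocated skb, so the fill cannot return -EMSGSIZE here.
 */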

static int odp_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct odp_header *odp_header = info->userhdr;
        struct tbl_node *flow_node;
        struct sw_flow_key key;
        struct sw_flow *flow;
        struct sk_buff *reply;
        struct datapath *dp;
        struct tbl *table;
        u32 hash;
        int error;

        /* Extract key. */
        error = -EINVAL;
        if (!a[ODP_FLOW_ATTR_KEY])
                goto error;
        error = flow_from_nlattrs(&key, a[ODP_FLOW_ATTR_KEY]);
        if (error)
                goto error;

        /* Validate actions. */
        if (a[ODP_FLOW_ATTR_ACTIONS]) {
                error = validate_actions(a[ODP_FLOW_ATTR_ACTIONS]);
                if (error)
                        goto error;
        } else if (info->genlhdr->cmd == ODP_FLOW_CMD_NEW) {
                error = -EINVAL;
                goto error;
        }

        dp = get_dp(odp_header->dp_ifindex);
        error = -ENODEV;
        if (!dp)
                goto error;

        hash = flow_hash(&key);
        table = get_table_protected(dp);
        flow_node = tbl_lookup(table, &key, hash, flow_cmp);
        if (!flow_node) {
                struct sw_flow_actions *acts;

                /* Bail out if we're not allowed to create a new flow. */
                error = -ENOENT;
                if (info->genlhdr->cmd == ODP_FLOW_CMD_SET)
                        goto error;

                /* Expand table, if necessary, to make room. */
                if (tbl_count(table) >= tbl_n_buckets(table)) {
                        error = expand_table(dp);
                        if (error)
                                goto error;
                        table = get_table_protected(dp);
                }

                /* Allocate flow. */
                flow = flow_alloc();
                if (IS_ERR(flow)) {
                        error = PTR_ERR(flow);
                        goto error;
                }
                flow->key = key;
                clear_stats(flow);

                /* Obtain actions. */
                acts = flow_actions_alloc(a[ODP_FLOW_ATTR_ACTIONS]);
                error = PTR_ERR(acts);
                if (IS_ERR(acts))
                        goto error_free_flow;
                rcu_assign_pointer(flow->sf_acts, acts);

                /* Put flow in bucket. */
                error = tbl_insert(table, &flow->tbl_node, hash);
                if (error)
                        goto error_free_flow;

                reply = odp_flow_cmd_build_info(flow, dp, info->snd_pid,
                                                info->snd_seq, ODP_FLOW_CMD_NEW);
        } else {
                /* We found a matching flow. */
                struct sw_flow_actions *old_acts;

                /* Bail out if we're not allowed to modify an existing flow.
                 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
                 * because Generic Netlink treats the latter as a dump
                 * request.  We also accept NLM_F_EXCL in case that bug ever
                 * gets fixed.
                 */
                error = -EEXIST;
                if (info->genlhdr->cmd == ODP_FLOW_CMD_NEW &&
                    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
                        goto error;

                /* Update actions. */
                flow = flow_cast(flow_node);
                old_acts = rcu_dereference_protected(flow->sf_acts,
                                                     lockdep_genl_is_held());
                if (a[ODP_FLOW_ATTR_ACTIONS] &&
                    (old_acts->actions_len != nla_len(a[ODP_FLOW_ATTR_ACTIONS]) ||
                     memcmp(old_acts->actions, nla_data(a[ODP_FLOW_ATTR_ACTIONS]),
                            old_acts->actions_len))) {
                        struct sw_flow_actions *new_acts;

                        new_acts = flow_actions_alloc(a[ODP_FLOW_ATTR_ACTIONS]);
                        error = PTR_ERR(new_acts);
                        if (IS_ERR(new_acts))
                                goto error;

                        rcu_assign_pointer(flow->sf_acts, new_acts);
                        flow_deferred_free_acts(old_acts);
                }

                reply = odp_flow_cmd_build_info(flow, dp, info->snd_pid,
                                                info->snd_seq, ODP_FLOW_CMD_NEW);

                /* Clear stats. */
                if (a[ODP_FLOW_ATTR_CLEAR]) {
                        spin_lock_bh(&flow->lock);
                        clear_stats(flow);
                        spin_unlock_bh(&flow->lock);
                }
        }

        if (!IS_ERR(reply))
                genl_notify(reply, genl_info_net(info), info->snd_pid,
                            dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
        else
                netlink_set_err(INIT_NET_GENL_SOCK, 0,
                                dp_flow_multicast_group.id, PTR_ERR(reply));
        return 0;

error_free_flow:
        flow_put(flow);
error:
        return error;
}

static int odp_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct odp_header *odp_header = info->userhdr;
        struct sw_flow_key key;
        struct tbl_node *flow_node;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct tbl *table;
        int err;

        if (!a[ODP_FLOW_ATTR_KEY])
                return -EINVAL;
        err = flow_from_nlattrs(&key, a[ODP_FLOW_ATTR_KEY]);
        if (err)
                return err;

        dp = get_dp(odp_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        table = get_table_protected(dp);
        flow_node = tbl_lookup(table, &key, flow_hash(&key), flow_cmp);
        if (!flow_node)
                return -ENOENT;

        flow = flow_cast(flow_node);
        reply = odp_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, ODP_FLOW_CMD_NEW);
        if (IS_ERR(reply))
                return PTR_ERR(reply);

        return genlmsg_reply(reply, info);
}

static int odp_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct odp_header *odp_header = info->userhdr;
        struct sw_flow_key key;
        struct tbl_node *flow_node;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct tbl *table;
        int err;

        if (!a[ODP_FLOW_ATTR_KEY])
                return flush_flows(odp_header->dp_ifindex);
        err = flow_from_nlattrs(&key, a[ODP_FLOW_ATTR_KEY]);
        if (err)
                return err;

        dp = get_dp(odp_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        table = get_table_protected(dp);
        flow_node = tbl_lookup(table, &key, flow_hash(&key), flow_cmp);
        if (!flow_node)
                return -ENOENT;
        flow = flow_cast(flow_node);

        reply = odp_flow_cmd_alloc_info(flow);
        if (!reply)
                return -ENOMEM;

        err = tbl_remove(table, flow_node);
        if (err) {
                kfree_skb(reply);
                return err;
        }

        err = odp_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
                                     info->snd_seq, 0, ODP_FLOW_CMD_DEL);
        BUG_ON(err < 0);

        flow_deferred_free(flow);

        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
        return 0;
}

static int odp_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct odp_header *odp_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct datapath *dp;

        dp = get_dp(odp_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        for (;;) {
                struct tbl_node *flow_node;
                struct sw_flow *flow;
                u32 bucket, obj;

                bucket = cb->args[0];
                obj = cb->args[1];
                flow_node = tbl_next(get_table_protected(dp), &bucket, &obj);
                if (!flow_node)
                        break;

                flow = flow_cast(flow_node);
                if (odp_flow_cmd_fill_info(flow, dp, skb, NETLINK_CB(cb->skb).pid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           ODP_FLOW_CMD_NEW) < 0)
                        break;

                cb->args[0] = bucket;
                cb->args[1] = obj;
        }
        return skb->len;
}
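
/*
 * Added note: Netlink calls this dump callback repeatedly, preserving
 * the (bucket, obj) cursor in cb->args[] between calls, so each call
 * resumes the table walk where the previous skb filled up; the dump
 * ends once a call adds no messages and thus returns a zero length.
 */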
1172
1173 static struct genl_ops dp_flow_genl_ops[] = {
1174         { .cmd = ODP_FLOW_CMD_NEW,
1175           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1176           .policy = flow_policy,
1177           .doit = odp_flow_cmd_new_or_set
1178         },
1179         { .cmd = ODP_FLOW_CMD_DEL,
1180           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1181           .policy = flow_policy,
1182           .doit = odp_flow_cmd_del
1183         },
1184         { .cmd = ODP_FLOW_CMD_GET,
1185           .flags = 0,               /* OK for unprivileged users. */
1186           .policy = flow_policy,
1187           .doit = odp_flow_cmd_get,
1188           .dumpit = odp_flow_cmd_dump
1189         },
1190         { .cmd = ODP_FLOW_CMD_SET,
1191           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1192           .policy = flow_policy,
1193           .doit = odp_flow_cmd_new_or_set,
1194         },
1195 };
1196
1197 static const struct nla_policy datapath_policy[ODP_DP_ATTR_MAX + 1] = {
1198 #ifdef HAVE_NLA_NUL_STRING
1199         [ODP_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1200 #endif
1201         [ODP_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 },
1202         [ODP_DP_ATTR_SAMPLING] = { .type = NLA_U32 },
1203 };
1204
1205 static struct genl_family dp_datapath_genl_family = {
1206         .id = GENL_ID_GENERATE,
1207         .hdrsize = sizeof(struct odp_header),
1208         .name = ODP_DATAPATH_FAMILY,
1209         .version = 1,
1210         .maxattr = ODP_DP_ATTR_MAX
1211 };
1212
1213 static struct genl_multicast_group dp_datapath_multicast_group = {
1214         .name = ODP_DATAPATH_MCGROUP
1215 };
1216
1217 static int odp_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1218                                 u32 pid, u32 seq, u32 flags, u8 cmd)
1219 {
1220         struct odp_header *odp_header;
1221         struct nlattr *nla;
1222         int err;
1223
1224         odp_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
1225                                    flags, cmd);
1226         if (!odp_header)
1227                 goto error;
1228
1229         odp_header->dp_ifindex = dp->dp_ifindex;
1230
1231         rcu_read_lock();
1232         err = nla_put_string(skb, ODP_DP_ATTR_NAME, dp_name(dp));
1233         rcu_read_unlock();
1234         if (err)
1235                 goto nla_put_failure;
1236
1237         nla = nla_reserve(skb, ODP_DP_ATTR_STATS, sizeof(struct odp_stats));
1238         if (!nla)
1239                 goto nla_put_failure;
1240         get_dp_stats(dp, nla_data(nla));
1241
1242         NLA_PUT_U32(skb, ODP_DP_ATTR_IPV4_FRAGS,
1243                     dp->drop_frags ? ODP_DP_FRAG_DROP : ODP_DP_FRAG_ZERO);
1244
1245         if (dp->sflow_probability)
1246                 NLA_PUT_U32(skb, ODP_DP_ATTR_SAMPLING, dp->sflow_probability);
1247
1248         nla = nla_nest_start(skb, ODP_DP_ATTR_MCGROUPS);
1249         if (!nla)
1250                 goto nla_put_failure;
1251         NLA_PUT_U32(skb, ODP_PACKET_CMD_MISS, packet_mc_group(dp, ODP_PACKET_CMD_MISS));
1252         NLA_PUT_U32(skb, ODP_PACKET_CMD_ACTION, packet_mc_group(dp, ODP_PACKET_CMD_ACTION));
1253         NLA_PUT_U32(skb, ODP_PACKET_CMD_SAMPLE, packet_mc_group(dp, ODP_PACKET_CMD_SAMPLE));
1254         nla_nest_end(skb, nla);
1255
1256         return genlmsg_end(skb, odp_header);
1257
1258 nla_put_failure:
1259         genlmsg_cancel(skb, odp_header);
1260 error:
1261         return -EMSGSIZE;
1262 }
1263
1264 static struct sk_buff *odp_dp_cmd_build_info(struct datapath *dp, u32 pid,
1265                                              u32 seq, u8 cmd)
1266 {
1267         struct sk_buff *skb;
1268         int retval;
1269
1270         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1271         if (!skb)
1272                 return ERR_PTR(-ENOMEM);
1273
1274         retval = odp_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
1275         if (retval < 0) {
1276                 kfree_skb(skb);
1277                 return ERR_PTR(retval);
1278         }
1279         return skb;
1280 }
1281
1282 static int odp_dp_cmd_validate(struct nlattr *a[ODP_DP_ATTR_MAX + 1])
1283 {
1284         if (a[ODP_DP_ATTR_IPV4_FRAGS]) {
1285                 u32 frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]);
1286
1287                 if (frags != ODP_DP_FRAG_ZERO && frags != ODP_DP_FRAG_DROP)
1288                         return -EINVAL;
1289         }
1290
1291         return VERIFY_NUL_STRING(a[ODP_DP_ATTR_NAME], IFNAMSIZ - 1);
1292 }
1293
1294 /* Called with genl_mutex and optionally with RTNL lock also. */
1295 static struct datapath *lookup_datapath(struct odp_header *odp_header, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
1296 {
1297         struct datapath *dp;
1298
1299         if (!a[ODP_DP_ATTR_NAME])
1300                 dp = get_dp(odp_header->dp_ifindex);
1301         else {
1302                 struct vport *vport;
1303
1304                 rcu_read_lock();
1305                 vport = vport_locate(nla_data(a[ODP_DP_ATTR_NAME]));
1306                 dp = vport && vport->port_no == ODPP_LOCAL ? vport->dp : NULL;
1307                 rcu_read_unlock();
1308         }
1309         return dp ? dp : ERR_PTR(-ENODEV);
1310 }
1311
1312 /* Called with genl_mutex. */
1313 static void change_datapath(struct datapath *dp, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
1314 {
1315         if (a[ODP_DP_ATTR_IPV4_FRAGS])
1316                 dp->drop_frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]) == ODP_DP_FRAG_DROP;
1317         if (a[ODP_DP_ATTR_SAMPLING])
1318                 dp->sflow_probability = nla_get_u32(a[ODP_DP_ATTR_SAMPLING]);
1319 }
1320
1321 static int odp_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1322 {
1323         struct nlattr **a = info->attrs;
1324         struct vport_parms parms;
1325         struct sk_buff *reply;
1326         struct datapath *dp;
1327         struct vport *vport;
1328         int err;
1329
1330         err = -EINVAL;
1331         if (!a[ODP_DP_ATTR_NAME])
1332                 goto err;
1333
1334         err = odp_dp_cmd_validate(a);
1335         if (err)
1336                 goto err;
1337
1338         rtnl_lock();
1339         err = -ENODEV;
1340         if (!try_module_get(THIS_MODULE))
1341                 goto err_unlock_rtnl;
1342
1343         err = -ENOMEM;
1344         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1345         if (dp == NULL)
1346                 goto err_put_module;
1347         INIT_LIST_HEAD(&dp->port_list);
1348
1349         /* Initialize kobject for bridge.  This will be added as
1350          * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
1351         dp->ifobj.kset = NULL;
1352         kobject_init(&dp->ifobj, &dp_ktype);
1353
1354         /* Allocate table. */
1355         err = -ENOMEM;
1356         rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
1357         if (!dp->table)
1358                 goto err_free_dp;
1359
1360         /* Set up our datapath device. */
1361         parms.name = nla_data(a[ODP_DP_ATTR_NAME]);
1362         parms.type = ODP_VPORT_TYPE_INTERNAL;
1363         parms.options = NULL;
1364         parms.dp = dp;
1365         parms.port_no = ODPP_LOCAL;
1366         vport = new_vport(&parms);
1367         if (IS_ERR(vport)) {
1368                 err = PTR_ERR(vport);
1369                 if (err == -EBUSY)
1370                         err = -EEXIST;
1371
1372                 goto err_destroy_table;
1373         }
1374         dp->dp_ifindex = vport_get_ifindex(vport);
1375
1376         dp->drop_frags = 0;
1377         dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1378         if (!dp->stats_percpu) {
1379                 err = -ENOMEM;
1380                 goto err_destroy_local_port;
1381         }
1382
1383         change_datapath(dp, a);
1384
1385         reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_NEW);
1386         err = PTR_ERR(reply);
1387         if (IS_ERR(reply))
1388                 goto err_destroy_local_port;
1389
1390         list_add_tail(&dp->list_node, &dps);
1391         dp_sysfs_add_dp(dp);
1392
1393         rtnl_unlock();
1394
1395         genl_notify(reply, genl_info_net(info), info->snd_pid,
1396                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1397         return 0;
1398
1399 err_destroy_local_port:
1400         dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
1401 err_destroy_table:
1402         tbl_destroy(get_table_protected(dp), NULL);
1403 err_free_dp:
1404         kfree(dp);
1405 err_put_module:
1406         module_put(THIS_MODULE);
1407 err_unlock_rtnl:
1408         rtnl_unlock();
1409 err:
1410         return err;
1411 }
1412
1413 static int odp_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1414 {
1415         struct vport *vport, *next_vport;
1416         struct sk_buff *reply;
1417         struct datapath *dp;
1418         int err;
1419
1420         err = odp_dp_cmd_validate(info->attrs);
1421         if (err)
1422                 goto exit;
1423
1424         rtnl_lock();
1425         dp = lookup_datapath(info->userhdr, info->attrs);
1426         err = PTR_ERR(dp);
1427         if (IS_ERR(dp))
1428                 goto exit_unlock;
1429
1430         reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_DEL);
1431         err = PTR_ERR(reply);
1432         if (IS_ERR(reply))
1433                 goto exit_unlock;
1434
1435         list_for_each_entry_safe (vport, next_vport, &dp->port_list, node)
1436                 if (vport->port_no != ODPP_LOCAL)
1437                         dp_detach_port(vport);
1438
1439         dp_sysfs_del_dp(dp);
1440         list_del(&dp->list_node);
1441         dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
1442
1443         call_rcu(&dp->rcu, destroy_dp_rcu);
1444         module_put(THIS_MODULE);
1445
1446         genl_notify(reply, genl_info_net(info), info->snd_pid,
1447                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1448         err = 0;
1449
1450 exit_unlock:
1451         rtnl_unlock();
1452 exit:
1453         return err;
1454 }
1455
1456 static int odp_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1457 {
1458         struct sk_buff *reply;
1459         struct datapath *dp;
1460         int err;
1461
1462         err = odp_dp_cmd_validate(info->attrs);
1463         if (err)
1464                 return err;
1465
1466         dp = lookup_datapath(info->userhdr, info->attrs);
1467         if (IS_ERR(dp))
1468                 return PTR_ERR(dp);
1469
1470         change_datapath(dp, info->attrs);
1471
1472         reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_NEW);
1473         if (IS_ERR(reply)) {
1474                 err = PTR_ERR(reply);
1475                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1476                                 dp_datapath_multicast_group.id, err);
1477                 return 0;
1478         }
1479
1480         genl_notify(reply, genl_info_net(info), info->snd_pid,
1481                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1482         return 0;
1483 }
1484
1485 static int odp_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1486 {
1487         struct sk_buff *reply;
1488         struct datapath *dp;
1489         int err;
1490
1491         err = odp_dp_cmd_validate(info->attrs);
1492         if (err)
1493                 return err;
1494
1495         dp = lookup_datapath(info->userhdr, info->attrs);
1496         if (IS_ERR(dp))
1497                 return PTR_ERR(dp);
1498
1499         reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_NEW);
1500         if (IS_ERR(reply))
1501                 return PTR_ERR(reply);
1502
1503         return genlmsg_reply(reply, info);
1504 }
1505
1506 static int odp_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1507 {
1508         struct datapath *dp;
1509         int skip = cb->args[0];
1510         int i = 0;
1511
1512         list_for_each_entry (dp, &dps, list_node) {
1513                 if (i < skip)
1514                         continue;
1515                 if (odp_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
1516                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1517                                          ODP_DP_CMD_NEW) < 0)
1518                         break;
1519                 i++;
1520         }
1521
1522         cb->args[0] = i;
1523
1524         return skb->len;
1525 }
1526
1527 static struct genl_ops dp_datapath_genl_ops[] = {
1528         { .cmd = ODP_DP_CMD_NEW,
1529           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1530           .policy = datapath_policy,
1531           .doit = odp_dp_cmd_new
1532         },
1533         { .cmd = ODP_DP_CMD_DEL,
1534           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1535           .policy = datapath_policy,
1536           .doit = odp_dp_cmd_del
1537         },
1538         { .cmd = ODP_DP_CMD_GET,
1539           .flags = 0,               /* OK for unprivileged users. */
1540           .policy = datapath_policy,
1541           .doit = odp_dp_cmd_get,
1542           .dumpit = odp_dp_cmd_dump
1543         },
1544         { .cmd = ODP_DP_CMD_SET,
1545           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1546           .policy = datapath_policy,
1547           .doit = odp_dp_cmd_set,
1548         },
1549 };

static const struct nla_policy vport_policy[ODP_VPORT_ATTR_MAX + 1] = {
#ifdef HAVE_NLA_NUL_STRING
        [ODP_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
        [ODP_VPORT_ATTR_STATS] = { .len = sizeof(struct rtnl_link_stats64) },
        [ODP_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
#else
        [ODP_VPORT_ATTR_STATS] = { .minlen = sizeof(struct rtnl_link_stats64) },
        [ODP_VPORT_ATTR_ADDRESS] = { .minlen = ETH_ALEN },
#endif
        /* The integer attributes must be validated on all kernels, so they
         * belong outside the HAVE_NLA_NUL_STRING conditional. */
        [ODP_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
        [ODP_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
        [ODP_VPORT_ATTR_MTU] = { .type = NLA_U32 },
        [ODP_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};
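
/*
 * For illustration only (not compiled here): a minimal sketch of a userspace
 * request that satisfies vport_policy, written against the libnl-3 API.
 * "dp_ifindex" and "type" stand in for values the caller already knows;
 * error handling is omitted.
 *
 *      struct nl_sock *sk = nl_socket_alloc();
 *      genl_connect(sk);
 *      int family = genl_ctrl_resolve(sk, ODP_VPORT_FAMILY);
 *
 *      struct nl_msg *msg = nlmsg_alloc();
 *      struct odp_header *hdr;
 *      hdr = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
 *                        sizeof(*hdr), 0, ODP_VPORT_CMD_NEW, 1);
 *      hdr->dp_ifindex = dp_ifindex;        // datapath to attach to
 *      nla_put_string(msg, ODP_VPORT_ATTR_NAME, "vport1");
 *      nla_put_u32(msg, ODP_VPORT_ATTR_TYPE, type);
 *      nl_send_auto(sk, msg);
 */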

static struct genl_family dp_vport_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct odp_header),
        .name = ODP_VPORT_FAMILY,
        .version = 1,
        .maxattr = ODP_VPORT_ATTR_MAX
};

static struct genl_multicast_group dp_vport_multicast_group = {
        .name = ODP_VPORT_MCGROUP
};

/* Called with RTNL lock or RCU read lock. */
static int odp_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                                   u32 pid, u32 seq, u32 flags, u8 cmd)
{
        struct odp_header *odp_header;
        struct nlattr *nla;
        int ifindex, iflink;
        int err;

        odp_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
                                 flags, cmd);
        if (!odp_header)
                return -EMSGSIZE;

        odp_header->dp_ifindex = vport->dp->dp_ifindex;

        NLA_PUT_U32(skb, ODP_VPORT_ATTR_PORT_NO, vport->port_no);
        NLA_PUT_U32(skb, ODP_VPORT_ATTR_TYPE, vport_get_type(vport));
        NLA_PUT_STRING(skb, ODP_VPORT_ATTR_NAME, vport_get_name(vport));

        nla = nla_reserve(skb, ODP_VPORT_ATTR_STATS, sizeof(struct rtnl_link_stats64));
        if (!nla)
                goto nla_put_failure;
        if (vport_get_stats(vport, nla_data(nla)))
                __skb_trim(skb, skb->len - nla->nla_len);

        NLA_PUT(skb, ODP_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));

        NLA_PUT_U32(skb, ODP_VPORT_ATTR_MTU, vport_get_mtu(vport));

        err = vport_get_options(vport, skb);
        if (err == -EMSGSIZE)
                goto error;

        ifindex = vport_get_ifindex(vport);
        if (ifindex > 0)
                NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFINDEX, ifindex);

        iflink = vport_get_iflink(vport);
        if (iflink > 0)
                NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFLINK, iflink);

        return genlmsg_end(skb, odp_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, odp_header);
        return err;
}
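
/*
 * For illustration only (not compiled here): a hypothetical userspace
 * receiver of the message built above could recover the attributes with
 * libnl-3's nla_parse(), reusing vport_policy for validation:
 *
 *      struct nlattr *attrs[ODP_VPORT_ATTR_MAX + 1];
 *      struct genlmsghdr *gnlh = nlmsg_data(nlh);
 *      struct odp_header *hdr = genlmsg_data(gnlh);
 *
 *      nla_parse(attrs, ODP_VPORT_ATTR_MAX,
 *                genlmsg_attrdata(gnlh, sizeof(*hdr)),
 *                genlmsg_attrlen(gnlh, sizeof(*hdr)), vport_policy);
 *      if (attrs[ODP_VPORT_ATTR_PORT_NO])
 *              port_no = nla_get_u32(attrs[ODP_VPORT_ATTR_PORT_NO]);
 */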

/* Called with RTNL lock or RCU read lock. */
static struct sk_buff *odp_vport_cmd_build_info(struct vport *vport, u32 pid,
                                                u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = odp_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
        if (retval < 0) {
                kfree_skb(skb);
                return ERR_PTR(retval);
        }
        return skb;
}

static int odp_vport_cmd_validate(struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
        return VERIFY_NUL_STRING(a[ODP_VPORT_ATTR_NAME], IFNAMSIZ - 1);
}

/* Looks up a vport by ODP_VPORT_ATTR_NAME if that attribute is present,
 * otherwise by ODP_VPORT_ATTR_PORT_NO within the datapath named in the
 * request header.  Called with RTNL lock or RCU read lock. */
static struct vport *lookup_vport(struct odp_header *odp_header,
                                  struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
        struct datapath *dp;
        struct vport *vport;

        if (a[ODP_VPORT_ATTR_NAME]) {
                vport = vport_locate(nla_data(a[ODP_VPORT_ATTR_NAME]));
                if (!vport)
                        return ERR_PTR(-ENODEV);
                return vport;
        } else if (a[ODP_VPORT_ATTR_PORT_NO]) {
                u32 port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

                if (port_no >= DP_MAX_PORTS)
                        return ERR_PTR(-EFBIG);

                dp = get_dp(odp_header->dp_ifindex);
                if (!dp)
                        return ERR_PTR(-ENODEV);

                vport = get_vport_protected(dp, port_no);
                if (!vport)
                        return ERR_PTR(-ENOENT);
                return vport;
        } else
                return ERR_PTR(-EINVAL);
}

/* Called with RTNL lock. */
static int change_vport(struct vport *vport, struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
        int err = 0;

        if (a[ODP_VPORT_ATTR_STATS])
                err = vport_set_stats(vport, nla_data(a[ODP_VPORT_ATTR_STATS]));
        if (!err && a[ODP_VPORT_ATTR_ADDRESS])
                err = vport_set_addr(vport, nla_data(a[ODP_VPORT_ATTR_ADDRESS]));
        if (!err && a[ODP_VPORT_ATTR_MTU])
                err = vport_set_mtu(vport, nla_get_u32(a[ODP_VPORT_ATTR_MTU]));
        return err;
}

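/* Handles ODP_VPORT_CMD_NEW: validates the request, chooses a port number
 * (the one supplied in ODP_VPORT_ATTR_PORT_NO, or else the lowest free one),
 * creates the vport, applies any optional attributes, and broadcasts the
 * result.  On any failure after creation, the new port is detached again. */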
static int odp_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct odp_header *odp_header = info->userhdr;
        struct vport_parms parms;
        struct sk_buff *reply;
        struct vport *vport;
        struct datapath *dp;
        u32 port_no;
        int err;

        err = -EINVAL;
        if (!a[ODP_VPORT_ATTR_NAME] || !a[ODP_VPORT_ATTR_TYPE])
                goto exit;

        err = odp_vport_cmd_validate(a);
        if (err)
                goto exit;

        rtnl_lock();
        dp = get_dp(odp_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto exit_unlock;

        if (a[ODP_VPORT_ATTR_PORT_NO]) {
                port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

                err = -EFBIG;
                if (port_no >= DP_MAX_PORTS)
                        goto exit_unlock;

                vport = get_vport_protected(dp, port_no);
                err = -EBUSY;
                if (vport)
                        goto exit_unlock;
        } else {
                /* No port number was requested: take the lowest free one,
                 * starting above the local port's slot. */
                for (port_no = 1; ; port_no++) {
                        if (port_no >= DP_MAX_PORTS) {
                                err = -EFBIG;
                                goto exit_unlock;
                        }
                        vport = get_vport_protected(dp, port_no);
                        if (!vport)
                                break;
                }
        }

        parms.name = nla_data(a[ODP_VPORT_ATTR_NAME]);
        parms.type = nla_get_u32(a[ODP_VPORT_ATTR_TYPE]);
        parms.options = a[ODP_VPORT_ATTR_OPTIONS];
        parms.dp = dp;
        parms.port_no = port_no;

        vport = new_vport(&parms);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        set_internal_devs_mtu(dp);
        dp_sysfs_add_if(vport);

        err = change_vport(vport, a);
        if (!err) {
                reply = odp_vport_cmd_build_info(vport, info->snd_pid,
                                                 info->snd_seq, ODP_VPORT_CMD_NEW);
                if (IS_ERR(reply))
                        err = PTR_ERR(reply);
        }
        if (err) {
                dp_detach_port(vport);
                goto exit_unlock;
        }
        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
        rtnl_unlock();
exit:
        return err;
}

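/* Handles ODP_VPORT_CMD_SET: under RTNL, updates an existing vport's
 * options, stats, Ethernet address, and/or MTU, then broadcasts the new
 * state to the vport multicast group. */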
static int odp_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        err = odp_vport_cmd_validate(a);
        if (err)
                goto exit;

        rtnl_lock();
        vport = lookup_vport(info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        err = 0;
        if (a[ODP_VPORT_ATTR_OPTIONS])
                err = vport_set_options(vport, a[ODP_VPORT_ATTR_OPTIONS]);
        if (!err)
                err = change_vport(vport, a);

        reply = odp_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
                                         ODP_VPORT_CMD_NEW);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                netlink_set_err(INIT_NET_GENL_SOCK, 0,
                                dp_vport_multicast_group.id, err);
                /* The failure has been reported to listeners; treat the
                 * request itself as successful, but RTNL must still be
                 * released below. */
                err = 0;
                goto exit_unlock;
        }

        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
        rtnl_unlock();
exit:
        return err;
}

static int odp_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        err = odp_vport_cmd_validate(a);
        if (err)
                goto exit;

        rtnl_lock();
        vport = lookup_vport(info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        /* The datapath's local port cannot be deleted. */
        if (vport->port_no == ODPP_LOCAL) {
                err = -EINVAL;
                goto exit_unlock;
        }

        reply = odp_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
                                         ODP_VPORT_CMD_DEL);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock;

        err = dp_detach_port(vport);

        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
        rtnl_unlock();
exit:
        return err;
}
static int odp_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct odp_header *odp_header = info->userhdr;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        err = odp_vport_cmd_validate(a);
        if (err)
                goto exit;

        rcu_read_lock();
        vport = lookup_vport(odp_header, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        reply = odp_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
                                         ODP_VPORT_CMD_NEW);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock;

        err = genlmsg_reply(reply, info);

exit_unlock:
        rcu_read_unlock();
exit:
        return err;
}

static int odp_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct odp_header *odp_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct datapath *dp;
        u32 port_no;

        dp = get_dp(odp_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        rcu_read_lock();
        for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
                struct vport *vport;

                vport = get_vport_protected(dp, port_no);
                if (!vport)
                        continue;

                if (odp_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                            ODP_VPORT_CMD_NEW) < 0)
                        break;
        }
        rcu_read_unlock();

        /* Resume from this port number on the next invocation. */
        cb->args[0] = port_no;

        return skb->len;
}

static struct genl_ops dp_vport_genl_ops[] = {
        { .cmd = ODP_VPORT_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = odp_vport_cmd_new
        },
        { .cmd = ODP_VPORT_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = odp_vport_cmd_del
        },
        { .cmd = ODP_VPORT_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = vport_policy,
          .doit = odp_vport_cmd_get,
          .dumpit = odp_vport_cmd_dump
        },
        { .cmd = ODP_VPORT_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = odp_vport_cmd_set
        },
};

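/* Binds a Generic Netlink family to its operations and optional multicast
 * group so that registration and cleanup can be driven from one table. */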
struct genl_family_and_ops {
        struct genl_family *family;
        struct genl_ops *ops;
        int n_ops;
        struct genl_multicast_group *group;
};

static const struct genl_family_and_ops dp_genl_families[] = {
        { &dp_datapath_genl_family,
          dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
          &dp_datapath_multicast_group },
        { &dp_vport_genl_family,
          dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
          &dp_vport_multicast_group },
        { &dp_flow_genl_family,
          dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
          &dp_flow_multicast_group },
        { &dp_packet_genl_family,
          dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
          NULL },
};

static void dp_unregister_genl(int n_families)
{
        int i;

        for (i = 0; i < n_families; i++)
                genl_unregister_family(dp_genl_families[i].family);
}

static int dp_register_genl(void)
{
        int n_registered;
        int err;
        int i;

        n_registered = 0;
        for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
                const struct genl_family_and_ops *f = &dp_genl_families[i];

                err = genl_register_family_with_ops(f->family, f->ops,
                                                    f->n_ops);
                if (err)
                        goto error;
                n_registered++;

                if (f->group) {
                        err = genl_register_mc_group(f->family, f->group);
                        if (err)
                                goto error;
                }
        }

        err = packet_register_mc_groups();
        if (err)
                goto error;
        return 0;

error:
        dp_unregister_genl(n_registered);
        return err;
}

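/* Module entry point: initializes the flow and vport subsystems, then
 * registers the netdevice notifier and the Generic Netlink families,
 * unwinding in reverse order if any step fails. */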
static int __init dp_init(void)
{
        struct sk_buff *dummy_skb;
        int err;

        BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

        pr_info("Open vSwitch %s, built "__DATE__" "__TIME__"\n",
                VERSION BUILDNR);

        err = flow_init();
        if (err)
                goto error;

        err = vport_init();
        if (err)
                goto error_flow_exit;

        err = register_netdevice_notifier(&dp_device_notifier);
        if (err)
                goto error_vport_exit;

        err = dp_register_genl();
        if (err)
                goto error_unreg_notifier;

        return 0;

error_unreg_notifier:
        unregister_netdevice_notifier(&dp_device_notifier);
error_vport_exit:
        vport_exit();
error_flow_exit:
        flow_exit();
error:
        return err;
}

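/* Module exit: tears everything down in the reverse order of dp_init().
 * rcu_barrier() waits for all outstanding call_rcu() callbacks to finish so
 * that no RCU-deferred work can touch module memory after unload. */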
static void dp_cleanup(void)
{
        rcu_barrier();
        dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
        unregister_netdevice_notifier(&dp_device_notifier);
        vport_exit();
        flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");