1 /*
2  * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
3  * Distributed under the terms of the GNU GPL version 2.
4  *
5  * Significant portions of this file may be copied from parts of the Linux
6  * kernel, by Linus Torvalds and others.
7  */
8
9 /* Functions for managing the dp interface/device. */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/if_arp.h>
16 #include <linux/if_vlan.h>
17 #include <linux/in.h>
18 #include <linux/ip.h>
19 #include <linux/jhash.h>
20 #include <linux/delay.h>
21 #include <linux/time.h>
22 #include <linux/etherdevice.h>
23 #include <linux/genetlink.h>
24 #include <linux/kernel.h>
25 #include <linux/kthread.h>
26 #include <linux/mutex.h>
27 #include <linux/percpu.h>
28 #include <linux/rcupdate.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/version.h>
32 #include <linux/ethtool.h>
33 #include <linux/wait.h>
34 #include <asm/system.h>
35 #include <asm/div64.h>
36 #include <asm/bug.h>
37 #include <linux/highmem.h>
38 #include <linux/netfilter_bridge.h>
39 #include <linux/netfilter_ipv4.h>
40 #include <linux/inetdevice.h>
41 #include <linux/list.h>
42 #include <linux/rculist.h>
43 #include <linux/dmi.h>
44 #include <net/inet_ecn.h>
45 #include <net/genetlink.h>
46
47 #include "openvswitch/datapath-protocol.h"
48 #include "checksum.h"
49 #include "datapath.h"
50 #include "actions.h"
51 #include "flow.h"
52 #include "loop_counter.h"
53 #include "table.h"
54 #include "vport-internal_dev.h"
55
56 int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
57 EXPORT_SYMBOL(dp_ioctl_hook);
58
59 /**
60  * DOC: Locking:
61  *
62  * Writes to device state (add/remove datapath, port, set operations on vports,
63  * etc.) are protected by RTNL.
64  *
65  * Writes to other state (flow table modifications, set miscellaneous datapath
66  * parameters such as drop frags, etc.) are protected by genl_mutex.  The RTNL
67  * lock nests inside genl_mutex.
68  *
69  * Reads are protected by RCU.
70  *
71  * There are a few special cases (mostly stats) that have their own
72  * synchronization but they nest under all of the above and don't interact with
73  * each other.
74  */
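
/* As an illustration only (a sketch of the intended nesting, not code taken
 * from any one function): a control-plane write path typically runs inside a
 * Generic Netlink handler, so genl_mutex is already held, and then takes RTNL
 * around the device manipulation itself:
 *
 *        rtnl_lock();                (genl_mutex already held by genetlink)
 *        vport = new_vport(&parms);  (device state modified under RTNL)
 *        rtnl_unlock();
 *
 * whereas the packet path (dp_process_received_packet()) relies on
 * rcu_read_lock() alone.
 */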
75
76 /* Global list of datapaths to enable dumping them all out.
77  * Protected by genl_mutex.
78  */
79 static LIST_HEAD(dps);
80
81 static struct vport *new_vport(const struct vport_parms *);
82 static int queue_control_packets(struct datapath *, struct sk_buff *,
83                                  const struct dp_upcall_info *);
84
85 /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
86 struct datapath *get_dp(int dp_ifindex)
87 {
88         struct datapath *dp = NULL;
89         struct net_device *dev;
90
91         rcu_read_lock();
92         dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
93         if (dev) {
94                 struct vport *vport = internal_dev_get_vport(dev);
95                 if (vport)
96                         dp = vport->dp;
97         }
98         rcu_read_unlock();
99
100         return dp;
101 }
102 EXPORT_SYMBOL_GPL(get_dp);
103
104 /* Must be called with genl_mutex. */
105 static struct tbl *get_table_protected(struct datapath *dp)
106 {
107         return rcu_dereference_protected(dp->table, lockdep_genl_is_held());
108 }
109
110 /* Must be called with rcu_read_lock or RTNL lock. */
111 static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
112 {
113         return rcu_dereference_rtnl(dp->ports[port_no]);
114 }
115
116 /* Must be called with rcu_read_lock or RTNL lock. */
117 const char *dp_name(const struct datapath *dp)
118 {
119         return vport_get_name(rcu_dereference_rtnl(dp->ports[ODPP_LOCAL]));
120 }
121
122 static inline size_t br_nlmsg_size(void)
123 {
124         return NLMSG_ALIGN(sizeof(struct ifinfomsg))
125                + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
126                + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
127                + nla_total_size(4) /* IFLA_MASTER */
128                + nla_total_size(4) /* IFLA_MTU */
129                + nla_total_size(4) /* IFLA_LINK */
130                + nla_total_size(1); /* IFLA_OPERSTATE */
131 }
132
133 /* Caller must hold RTNL lock. */
134 static int dp_fill_ifinfo(struct sk_buff *skb,
135                           const struct vport *port,
136                           int event, unsigned int flags)
137 {
138         struct datapath *dp = port->dp;
139         int ifindex = vport_get_ifindex(port);
140         int iflink = vport_get_iflink(port);
141         struct ifinfomsg *hdr;
142         struct nlmsghdr *nlh;
143
144         if (ifindex < 0)
145                 return ifindex;
146
147         if (iflink < 0)
148                 return iflink;
149
150         nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
151         if (nlh == NULL)
152                 return -EMSGSIZE;
153
154         hdr = nlmsg_data(nlh);
155         hdr->ifi_family = AF_BRIDGE;
156         hdr->__ifi_pad = 0;
157         hdr->ifi_type = ARPHRD_ETHER;
158         hdr->ifi_index = ifindex;
159         hdr->ifi_flags = vport_get_flags(port);
160         hdr->ifi_change = 0;
161
162         NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
163         NLA_PUT_U32(skb, IFLA_MASTER,
164                 vport_get_ifindex(get_vport_protected(dp, ODPP_LOCAL)));
165         NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
166 #ifdef IFLA_OPERSTATE
167         NLA_PUT_U8(skb, IFLA_OPERSTATE,
168                    vport_is_running(port)
169                         ? vport_get_operstate(port)
170                         : IF_OPER_DOWN);
171 #endif
172
173         NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));
174
175         if (ifindex != iflink)
176                 NLA_PUT_U32(skb, IFLA_LINK, iflink);
177
178         return nlmsg_end(skb, nlh);
179
180 nla_put_failure:
181         nlmsg_cancel(skb, nlh);
182         return -EMSGSIZE;
183 }
184
185 /* Caller must hold RTNL lock. */
186 static void dp_ifinfo_notify(int event, struct vport *port)
187 {
188         struct sk_buff *skb;
189         int err = -ENOBUFS;
190
191         skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
192         if (skb == NULL)
193                 goto errout;
194
195         err = dp_fill_ifinfo(skb, port, event, 0);
196         if (err < 0) {
197                 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
198                 WARN_ON(err == -EMSGSIZE);
199                 kfree_skb(skb);
200                 goto errout;
201         }
202         rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
203         return;
204 errout:
205         if (err < 0)
206                 rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
207 }
208
209 static void release_dp(struct kobject *kobj)
210 {
211         struct datapath *dp = container_of(kobj, struct datapath, ifobj);
212         kfree(dp);
213 }
214
215 static struct kobj_type dp_ktype = {
216         .release = release_dp
217 };
218
219 static void destroy_dp_rcu(struct rcu_head *rcu)
220 {
221         struct datapath *dp = container_of(rcu, struct datapath, rcu);
222
223         tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
224         free_percpu(dp->stats_percpu);
225         kobject_put(&dp->ifobj);
226 }
227
228 /* Called with RTNL lock and genl_lock. */
229 static struct vport *new_vport(const struct vport_parms *parms)
230 {
231         struct vport *vport;
232
233         vport = vport_add(parms);
234         if (!IS_ERR(vport)) {
235                 struct datapath *dp = parms->dp;
236
237                 rcu_assign_pointer(dp->ports[parms->port_no], vport);
238                 list_add(&vport->node, &dp->port_list);
239
240                 dp_ifinfo_notify(RTM_NEWLINK, vport);
241         }
242
243         return vport;
244 }
245
246 /* Called with RTNL lock. */
247 int dp_detach_port(struct vport *p)
248 {
249         ASSERT_RTNL();
250
251         if (p->port_no != ODPP_LOCAL)
252                 dp_sysfs_del_if(p);
253         dp_ifinfo_notify(RTM_DELLINK, p);
254
255         /* First drop references to device. */
256         list_del(&p->node);
257         rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
258
259         /* Then destroy it. */
260         return vport_del(p);
261 }
262
263 /* Must be called with rcu_read_lock. */
264 void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
265 {
266         struct datapath *dp = p->dp;
267         struct dp_stats_percpu *stats;
268         int stats_counter_off;
269         struct sw_flow_actions *acts;
270         struct loop_counter *loop;
271         int error;
272
273         OVS_CB(skb)->vport = p;
274
275         if (!OVS_CB(skb)->flow) {
276                 struct sw_flow_key key;
277                 struct tbl_node *flow_node;
278                 bool is_frag;
279
280                 /* Extract flow from 'skb' into 'key'. */
281                 error = flow_extract(skb, p->port_no, &key, &is_frag);
282                 if (unlikely(error)) {
283                         kfree_skb(skb);
284                         return;
285                 }
286
287                 if (is_frag && dp->drop_frags) {
288                         kfree_skb(skb);
289                         stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
290                         goto out;
291                 }
292
293                 /* Look up flow. */
294                 flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
295                                         flow_hash(&key), flow_cmp);
296                 if (unlikely(!flow_node)) {
297                         struct dp_upcall_info upcall;
298
299                         upcall.cmd = ODP_PACKET_CMD_MISS;
300                         upcall.key = &key;
301                         upcall.userdata = 0;
302                         upcall.sample_pool = 0;
303                         upcall.actions = NULL;
304                         upcall.actions_len = 0;
305                         dp_upcall(dp, skb, &upcall);
306                         stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
307                         goto out;
308                 }
309
310                 OVS_CB(skb)->flow = flow_cast(flow_node);
311         }
312
313         stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
314         flow_used(OVS_CB(skb)->flow, skb);
315
316         acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
317
318         /* Check whether we've looped too much. */
319         loop = loop_get_counter();
320         if (unlikely(++loop->count > MAX_LOOPS))
321                 loop->looping = true;
322         if (unlikely(loop->looping)) {
323                 loop_suppress(dp, acts);
324                 kfree_skb(skb);
325                 goto out_loop;
326         }
327
328         /* Execute actions. */
329         execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
330                         acts->actions_len);
331
332         /* Check whether sub-actions looped too much. */
333         if (unlikely(loop->looping))
334                 loop_suppress(dp, acts);
335
336 out_loop:
337         /* Decrement loop counter. */
338         if (!--loop->count)
339                 loop->looping = false;
340         loop_put_counter();
341
342 out:
343         /* Update datapath statistics. */
344         local_bh_disable();
345         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
346
347         write_seqcount_begin(&stats->seqlock);
348         (*(u64 *)((u8 *)stats + stats_counter_off))++;
349         write_seqcount_end(&stats->seqlock);
350
351         local_bh_enable();
352 }
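
/* A note on the statistics update above (explanatory comment only):
 * stats_counter_off is an offsetof() into struct dp_stats_percpu, so the
 * single increment at the "out" label bumps whichever of n_frags, n_missed
 * or n_hit the earlier code selected.  On a flow-table hit, for example, it
 * is equivalent to stats->n_hit++ done under the per-CPU seqcount.
 */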
353
354 static void copy_and_csum_skb(struct sk_buff *skb, void *to)
355 {
356         u16 csum_start, csum_offset;
357         __wsum csum;
358
359         get_skb_csum_pointers(skb, &csum_start, &csum_offset);
360         csum_start -= skb_headroom(skb);
361         BUG_ON(csum_start >= skb_headlen(skb));
362
363         skb_copy_bits(skb, 0, to, csum_start);
364
365         csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
366                                       skb->len - csum_start, 0);
367         *(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
368 }
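
/* copy_and_csum_skb() is used by queue_control_packets() when a packet still
 * carries CHECKSUM_PARTIAL: the pending checksum is computed and folded into
 * the copy handed to userspace, so listeners always see fully checksummed
 * packet data.
 */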
369
370 static struct genl_family dp_packet_genl_family = {
371         .id = GENL_ID_GENERATE,
372         .hdrsize = sizeof(struct odp_header),
373         .name = ODP_PACKET_FAMILY,
374         .version = 1,
375         .maxattr = ODP_PACKET_ATTR_MAX
376 };
377
378 /* Generic Netlink multicast groups for upcalls.
379  *
380  * We really want three unique multicast groups per datapath, but we can't even
381  * get one, because genl_register_mc_group() takes genl_lock, which is also
382  * held during Generic Netlink message processing, so trying to acquire
383  * multicast groups during ODP_DP_NEW processing deadlocks.  Instead, we
384  * preallocate a few groups and use them round-robin for datapaths.  Collision
385  * isn't fatal--multicast listeners should check that the family is the one
386  * that they want and discard others--but it wastes time and memory to receive
387  * unwanted messages.
388  */
389 #define PACKET_N_MC_GROUPS 16
390 static struct genl_multicast_group packet_mc_groups[PACKET_N_MC_GROUPS];
391
392 static u32 packet_mc_group(struct datapath *dp, u8 cmd)
393 {
394         u32 idx;
395         BUILD_BUG_ON_NOT_POWER_OF_2(PACKET_N_MC_GROUPS);
396
397         idx = jhash_2words(dp->dp_ifindex, cmd, 0) & (PACKET_N_MC_GROUPS - 1);
398         return packet_mc_groups[idx].id;
399 }
400
401 static int packet_register_mc_groups(void)
402 {
403         int i;
404
405         for (i = 0; i < PACKET_N_MC_GROUPS; i++) {
406                 struct genl_multicast_group *group = &packet_mc_groups[i];
407                 int error;
408
409                 sprintf(group->name, "packet%d", i);
410                 error = genl_register_mc_group(&dp_packet_genl_family, group);
411                 if (error)
412                         return error;
413         }
414         return 0;
415 }
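
/* Note (explanatory, not normative): userspace need not recompute
 * packet_mc_group() on its own.  The group chosen for each of
 * ODP_PACKET_CMD_MISS, ODP_PACKET_CMD_ACTION and ODP_PACKET_CMD_SAMPLE is
 * reported back in the nested ODP_DP_ATTR_MCGROUPS attribute of datapath
 * replies (see odp_dp_cmd_fill_info()), so a listener can subscribe to the
 * group numbers it is told about rather than deriving them.
 */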
416
417 int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
418 {
419         struct dp_stats_percpu *stats;
420         int err;
421
422         WARN_ON_ONCE(skb_shared(skb));
423
424         forward_ip_summed(skb);
425
426         err = vswitch_skb_checksum_setup(skb);
427         if (err)
428                 goto err_kfree_skb;
429
430         /* Break apart GSO packets into their component pieces.  Otherwise
431          * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
432         if (skb_is_gso(skb)) {
433                 struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
434
435                 kfree_skb(skb);
436                 skb = nskb;
437                 if (IS_ERR(skb)) {
438                         err = PTR_ERR(skb);
439                         goto err;
440                 }
441         }
442
443         return queue_control_packets(dp, skb, upcall_info);
444
445 err_kfree_skb:
446         kfree_skb(skb);
447 err:
448         local_bh_disable();
449         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
450
451         write_seqcount_begin(&stats->seqlock);
452         stats->n_lost++;
453         write_seqcount_end(&stats->seqlock);
454
455         local_bh_enable();
456
457         return err;
458 }
459
460 /* Send each packet in the 'skb' list to userspace for 'dp' as directed by
461  * 'upcall_info'.  There will be only one packet unless we broke up a GSO
462  * packet.
463  */
464 static int queue_control_packets(struct datapath *dp, struct sk_buff *skb,
465                                  const struct dp_upcall_info *upcall_info)
466 {
467         u32 group = packet_mc_group(dp, upcall_info->cmd);
468         struct sk_buff *nskb;
469         int port_no;
470         int err;
471
472         if (OVS_CB(skb)->vport)
473                 port_no = OVS_CB(skb)->vport->port_no;
474         else
475                 port_no = ODPP_LOCAL;
476
477         do {
478                 struct odp_header *upcall;
479                 struct sk_buff *user_skb; /* to be queued to userspace */
480                 struct nlattr *nla;
481                 unsigned int len;
482
483                 nskb = skb->next;
484                 skb->next = NULL;
485
486                 len = sizeof(struct odp_header);
487                 len += nla_total_size(skb->len);
488                 len += nla_total_size(FLOW_BUFSIZE);
489                 if (upcall_info->userdata)
490                         len += nla_total_size(8);
491                 if (upcall_info->sample_pool)
492                         len += nla_total_size(4);
493                 if (upcall_info->actions_len)
494                         len += nla_total_size(upcall_info->actions_len);
495
496                 user_skb = genlmsg_new(len, GFP_ATOMIC);
497                 if (!user_skb) {
498                         netlink_set_err(INIT_NET_GENL_SOCK, 0, group, -ENOBUFS);
499                         goto err_kfree_skbs;
500                 }
501
502                 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd);
503                 upcall->dp_ifindex = dp->dp_ifindex;
504
505                 nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_KEY);
506                 flow_to_nlattrs(upcall_info->key, user_skb);
507                 nla_nest_end(user_skb, nla);
508
509                 if (upcall_info->userdata)
510                         nla_put_u64(user_skb, ODP_PACKET_ATTR_USERDATA, upcall_info->userdata);
511                 if (upcall_info->sample_pool)
512                         nla_put_u32(user_skb, ODP_PACKET_ATTR_SAMPLE_POOL, upcall_info->sample_pool);
513                 if (upcall_info->actions_len) {
514                         const struct nlattr *actions = upcall_info->actions;
515                         u32 actions_len = upcall_info->actions_len;
516
517                         nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_ACTIONS);
518                         memcpy(__skb_put(user_skb, actions_len), actions, actions_len);
519                         nla_nest_end(user_skb, nla);
520                 }
521
522                 nla = __nla_reserve(user_skb, ODP_PACKET_ATTR_PACKET, skb->len);
523                 if (skb->ip_summed == CHECKSUM_PARTIAL)
524                         copy_and_csum_skb(skb, nla_data(nla));
525                 else
526                         skb_copy_bits(skb, 0, nla_data(nla), skb->len);
527
528                 err = genlmsg_multicast(user_skb, 0, group, GFP_ATOMIC);
529                 if (err)
530                         goto err_kfree_skbs;
531
532                 kfree_skb(skb);
533                 skb = nskb;
534         } while (skb);
535         return 0;
536
537 err_kfree_skbs:
538         kfree_skb(skb);
539         while ((skb = nskb) != NULL) {
540                 nskb = skb->next;
541                 kfree_skb(skb);
542         }
543         return err;
544 }
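
/* For reference, a sketch of the message queue_control_packets() builds for
 * each packet (attribute order as assembled above):
 *
 *        struct odp_header            .dp_ifindex = dp->dp_ifindex
 *        ODP_PACKET_ATTR_KEY          nested flow key (flow_to_nlattrs())
 *        ODP_PACKET_ATTR_USERDATA     u64, only if upcall_info->userdata is set
 *        ODP_PACKET_ATTR_SAMPLE_POOL  u32, only if upcall_info->sample_pool is set
 *        ODP_PACKET_ATTR_ACTIONS      nested copy of the actions, if any
 *        ODP_PACKET_ATTR_PACKET       the packet contents
 *
 * The result is multicast to the group returned by packet_mc_group(dp, cmd).
 */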
545
546 /* Called with genl_mutex. */
547 static int flush_flows(int dp_ifindex)
548 {
549         struct tbl *old_table;
550         struct tbl *new_table;
551         struct datapath *dp;
552
553         dp = get_dp(dp_ifindex);
554         if (!dp)
555                 return -ENODEV;
556
557         old_table = get_table_protected(dp);
558         new_table = tbl_create(TBL_MIN_BUCKETS);
559         if (!new_table)
560                 return -ENOMEM;
561
562         rcu_assign_pointer(dp->table, new_table);
563
564         tbl_deferred_destroy(old_table, flow_free_tbl);
565
566         return 0;
567 }
568
569 static int validate_actions(const struct nlattr *attr)
570 {
571         const struct nlattr *a;
572         int rem;
573
574         nla_for_each_nested(a, attr, rem) {
575                 static const u32 action_lens[ODP_ACTION_ATTR_MAX + 1] = {
576                         [ODP_ACTION_ATTR_OUTPUT] = 4,
577                         [ODP_ACTION_ATTR_CONTROLLER] = 8,
578                         [ODP_ACTION_ATTR_SET_DL_TCI] = 2,
579                         [ODP_ACTION_ATTR_STRIP_VLAN] = 0,
580                         [ODP_ACTION_ATTR_SET_DL_SRC] = ETH_ALEN,
581                         [ODP_ACTION_ATTR_SET_DL_DST] = ETH_ALEN,
582                         [ODP_ACTION_ATTR_SET_NW_SRC] = 4,
583                         [ODP_ACTION_ATTR_SET_NW_DST] = 4,
584                         [ODP_ACTION_ATTR_SET_NW_TOS] = 1,
585                         [ODP_ACTION_ATTR_SET_TP_SRC] = 2,
586                         [ODP_ACTION_ATTR_SET_TP_DST] = 2,
587                         [ODP_ACTION_ATTR_SET_TUNNEL] = 8,
588                         [ODP_ACTION_ATTR_SET_PRIORITY] = 4,
589                         [ODP_ACTION_ATTR_POP_PRIORITY] = 0,
590                         [ODP_ACTION_ATTR_DROP_SPOOFED_ARP] = 0,
591                 };
592                 int type = nla_type(a);
593
594                 if (type > ODP_ACTION_ATTR_MAX || nla_len(a) != action_lens[type])
595                         return -EINVAL;
596
597                 switch (type) {
598                 case ODP_ACTION_ATTR_UNSPEC:
599                         return -EINVAL;
600
601                 case ODP_ACTION_ATTR_CONTROLLER:
602                 case ODP_ACTION_ATTR_STRIP_VLAN:
603                 case ODP_ACTION_ATTR_SET_DL_SRC:
604                 case ODP_ACTION_ATTR_SET_DL_DST:
605                 case ODP_ACTION_ATTR_SET_NW_SRC:
606                 case ODP_ACTION_ATTR_SET_NW_DST:
607                 case ODP_ACTION_ATTR_SET_TP_SRC:
608                 case ODP_ACTION_ATTR_SET_TP_DST:
609                 case ODP_ACTION_ATTR_SET_TUNNEL:
610                 case ODP_ACTION_ATTR_SET_PRIORITY:
611                 case ODP_ACTION_ATTR_POP_PRIORITY:
612                 case ODP_ACTION_ATTR_DROP_SPOOFED_ARP:
613                         /* No validation needed. */
614                         break;
615
616                 case ODP_ACTION_ATTR_OUTPUT:
617                         if (nla_get_u32(a) >= DP_MAX_PORTS)
618                                 return -EINVAL;
619                         break;
620
621                 case ODP_ACTION_ATTR_SET_DL_TCI:
622                         if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
623                                 return -EINVAL;
624                         break;
625
626                 case ODP_ACTION_ATTR_SET_NW_TOS:
627                         if (nla_get_u8(a) & INET_ECN_MASK)
628                                 return -EINVAL;
629                         break;
630
631                 default:
632                         return -EOPNOTSUPP;
633                 }
634         }
635
636         if (rem > 0)
637                 return -EINVAL;
638
639         return 0;
640 }
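
/* Example (hand-written illustration, not produced by this code): the
 * simplest action list validate_actions() accepts is a single
 * ODP_ACTION_ATTR_OUTPUT attribute whose 4-byte payload is a port number
 * below DP_MAX_PORTS.  An empty nested list (no actions at all, i.e. drop)
 * is also accepted.
 */
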
641 static void clear_stats(struct sw_flow *flow)
642 {
643         flow->used = 0;
644         flow->tcp_flags = 0;
645         flow->packet_count = 0;
646         flow->byte_count = 0;
647 }
648
649 /* Called with genl_mutex. */
650 static int expand_table(struct datapath *dp)
651 {
652         struct tbl *old_table = get_table_protected(dp);
653         struct tbl *new_table;
654
655         new_table = tbl_expand(old_table);
656         if (IS_ERR(new_table))
657                 return PTR_ERR(new_table);
658
659         rcu_assign_pointer(dp->table, new_table);
660         tbl_deferred_destroy(old_table, NULL);
661
662         return 0;
663 }
664
665 static int odp_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
666 {
667         struct odp_header *odp_header = info->userhdr;
668         struct nlattr **a = info->attrs;
669         struct sk_buff *packet;
670         struct sw_flow_key key;
671         struct datapath *dp;
672         struct ethhdr *eth;
673         bool is_frag;
674         int err;
675
676         err = -EINVAL;
677         if (!a[ODP_PACKET_ATTR_PACKET] || !a[ODP_PACKET_ATTR_ACTIONS] ||
678             nla_len(a[ODP_PACKET_ATTR_PACKET]) < ETH_HLEN)
679                 goto exit;
680
681         err = validate_actions(a[ODP_PACKET_ATTR_ACTIONS]);
682         if (err)
683                 goto exit;
684
685         packet = skb_clone(skb, GFP_KERNEL);
686         err = -ENOMEM;
687         if (!packet)
688                 goto exit;
689         packet->data = nla_data(a[ODP_PACKET_ATTR_PACKET]);
690         packet->len = nla_len(a[ODP_PACKET_ATTR_PACKET]);
691
692         skb_reset_mac_header(packet);
693         eth = eth_hdr(packet);
694
695         /* Normally, setting the skb 'protocol' field would be handled by a
696          * call to eth_type_trans(), but it assumes there's a sending
697          * device, which we may not have. */
698         if (ntohs(eth->h_proto) >= 1536)
699                 packet->protocol = eth->h_proto;
700         else
701                 packet->protocol = htons(ETH_P_802_2);
702
703         err = flow_extract(packet, -1, &key, &is_frag);
704         if (err)
705                 goto exit;
706
707         rcu_read_lock();
708         dp = get_dp(odp_header->dp_ifindex);
709         err = -ENODEV;
710         if (dp)
711                 err = execute_actions(dp, packet, &key,
712                                       nla_data(a[ODP_PACKET_ATTR_ACTIONS]),
713                                       nla_len(a[ODP_PACKET_ATTR_ACTIONS]));
714         rcu_read_unlock();
715
716 exit:
717         return err;
718 }
719
720 static const struct nla_policy packet_policy[ODP_PACKET_ATTR_MAX + 1] = {
721         [ODP_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
722         [ODP_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
723 };
724
725 static struct genl_ops dp_packet_genl_ops[] = {
726         { .cmd = ODP_PACKET_CMD_EXECUTE,
727           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
728           .policy = packet_policy,
729           .doit = odp_packet_cmd_execute
730         }
731 };
732
733 static void get_dp_stats(struct datapath *dp, struct odp_stats *stats)
734 {
735         int i;
736
737         stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0;
738         for_each_possible_cpu(i) {
739                 const struct dp_stats_percpu *percpu_stats;
740                 struct dp_stats_percpu local_stats;
741                 unsigned seqcount;
742
743                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
744
745                 do {
746                         seqcount = read_seqcount_begin(&percpu_stats->seqlock);
747                         local_stats = *percpu_stats;
748                 } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
749
750                 stats->n_frags += local_stats.n_frags;
751                 stats->n_hit += local_stats.n_hit;
752                 stats->n_missed += local_stats.n_missed;
753                 stats->n_lost += local_stats.n_lost;
754         }
755 }
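
/* The read_seqcount_begin()/read_seqcount_retry() loop above pairs with the
 * write_seqcount_begin()/write_seqcount_end() updates performed with bottom
 * halves disabled in dp_process_received_packet() and dp_upcall(): a reader
 * that races with a writer simply retries, and neither side ever blocks.
 */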
756
757 /* MTU of the dp pseudo-device: ETH_DATA_LEN, or the minimum MTU of the
758  * datapath's non-internal ports if it has any.  Called with RTNL lock.
759  */
760 int dp_min_mtu(const struct datapath *dp)
761 {
762         struct vport *p;
763         int mtu = 0;
764
765         ASSERT_RTNL();
766
767         list_for_each_entry (p, &dp->port_list, node) {
768                 int dev_mtu;
769
770                 /* Skip any internal ports, since that's what we're trying to
771                  * set. */
772                 if (is_internal_vport(p))
773                         continue;
774
775                 dev_mtu = vport_get_mtu(p);
776                 if (!dev_mtu)
777                         continue;
778                 if (!mtu || dev_mtu < mtu)
779                         mtu = dev_mtu;
780         }
781
782         return mtu ? mtu : ETH_DATA_LEN;
783 }
784
785 /* Sets the MTU of the datapath's internal devices to the minimum MTU of
786  * its ports, as computed by dp_min_mtu().  Called with RTNL lock.
787  */
788 void set_internal_devs_mtu(const struct datapath *dp)
789 {
790         struct vport *p;
791         int mtu;
792
793         ASSERT_RTNL();
794
795         mtu = dp_min_mtu(dp);
796
797         list_for_each_entry (p, &dp->port_list, node) {
798                 if (is_internal_vport(p))
799                         vport_set_mtu(p, mtu);
800         }
801 }
802
803 static const struct nla_policy flow_policy[ODP_FLOW_ATTR_MAX + 1] = {
804         [ODP_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
805         [ODP_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
806         [ODP_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
807 };
808
809 static struct genl_family dp_flow_genl_family = {
810         .id = GENL_ID_GENERATE,
811         .hdrsize = sizeof(struct odp_header),
812         .name = ODP_FLOW_FAMILY,
813         .version = 1,
814         .maxattr = ODP_FLOW_ATTR_MAX
815 };
816
817 static struct genl_multicast_group dp_flow_multicast_group = {
818         .name = ODP_FLOW_MCGROUP
819 };
820
821 /* Called with genl_lock. */
822 static int odp_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
823                                   struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd)
824 {
825         const int skb_orig_len = skb->len;
826         const struct sw_flow_actions *sf_acts;
827         struct odp_flow_stats stats;
828         struct odp_header *odp_header;
829         struct nlattr *nla;
830         unsigned long used;
831         u8 tcp_flags;
832         int err;
833
834         sf_acts = rcu_dereference_protected(flow->sf_acts,
835                                             lockdep_genl_is_held());
836
837         odp_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
838         if (!odp_header)
839                 return -EMSGSIZE;
840
841         odp_header->dp_ifindex = dp->dp_ifindex;
842
843         nla = nla_nest_start(skb, ODP_FLOW_ATTR_KEY);
844         if (!nla)
845                 goto nla_put_failure;
846         err = flow_to_nlattrs(&flow->key, skb);
847         if (err)
848                 goto error;
849         nla_nest_end(skb, nla);
850
851         spin_lock_bh(&flow->lock);
852         used = flow->used;
853         stats.n_packets = flow->packet_count;
854         stats.n_bytes = flow->byte_count;
855         tcp_flags = flow->tcp_flags;
856         spin_unlock_bh(&flow->lock);
857
858         if (used)
859                 NLA_PUT_U64(skb, ODP_FLOW_ATTR_USED, flow_used_time(used));
860
861         if (stats.n_packets)
862                 NLA_PUT(skb, ODP_FLOW_ATTR_STATS, sizeof(struct odp_flow_stats), &stats);
863
864         if (tcp_flags)
865                 NLA_PUT_U8(skb, ODP_FLOW_ATTR_TCP_FLAGS, tcp_flags);
866
867         /* If ODP_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
868          * this is the first flow to be dumped into 'skb'.  This is unusual for
869          * Netlink but individual action lists can be longer than
870          * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
871          * The userspace caller can always fetch the actions separately if it
872          * really wants them.  (Most userspace callers in fact don't care.)
873          *
874          * This can only fail for dump operations because the skb is always
875          * properly sized for single flows.
876          */
877         err = nla_put(skb, ODP_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
878                       sf_acts->actions);
879         if (err < 0 && skb_orig_len)
880                 goto error;
881
882         return genlmsg_end(skb, odp_header);
883
884 nla_put_failure:
885         err = -EMSGSIZE;
886 error:
887         genlmsg_cancel(skb, odp_header);
888         return err;
889 }
890
891 static struct sk_buff *odp_flow_cmd_alloc_info(struct sw_flow *flow)
892 {
893         const struct sw_flow_actions *sf_acts;
894         int len;
895
896         sf_acts = rcu_dereference_protected(flow->sf_acts,
897                                             lockdep_genl_is_held());
898
899         len = nla_total_size(FLOW_BUFSIZE); /* ODP_FLOW_ATTR_KEY */
900         len += nla_total_size(sf_acts->actions_len); /* ODP_FLOW_ATTR_ACTIONS */
901         len += nla_total_size(sizeof(struct odp_flow_stats)); /* ODP_FLOW_ATTR_STATS */
902         len += nla_total_size(1); /* ODP_FLOW_ATTR_TCP_FLAGS */
903         len += nla_total_size(8); /* ODP_FLOW_ATTR_USED */
904         return genlmsg_new(NLMSG_ALIGN(sizeof(struct odp_header)) + len, GFP_KERNEL);
905 }
906
907 static struct sk_buff *odp_flow_cmd_build_info(struct sw_flow *flow, struct datapath *dp,
908                                                u32 pid, u32 seq, u8 cmd)
909 {
910         struct sk_buff *skb;
911         int retval;
912
913         skb = odp_flow_cmd_alloc_info(flow);
914         if (!skb)
915                 return ERR_PTR(-ENOMEM);
916
917         retval = odp_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
918         BUG_ON(retval < 0);
919         return skb;
920 }
921
922 static int odp_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
923 {
924         struct nlattr **a = info->attrs;
925         struct odp_header *odp_header = info->userhdr;
926         struct tbl_node *flow_node;
927         struct sw_flow_key key;
928         struct sw_flow *flow;
929         struct sk_buff *reply;
930         struct datapath *dp;
931         struct tbl *table;
932         u32 hash;
933         int error;
934
935         /* Extract key. */
936         error = -EINVAL;
937         if (!a[ODP_FLOW_ATTR_KEY])
938                 goto error;
939         error = flow_from_nlattrs(&key, a[ODP_FLOW_ATTR_KEY]);
940         if (error)
941                 goto error;
942
943         /* Validate actions. */
944         if (a[ODP_FLOW_ATTR_ACTIONS]) {
945                 error = validate_actions(a[ODP_FLOW_ATTR_ACTIONS]);
946                 if (error)
947                         goto error;
948         } else if (info->genlhdr->cmd == ODP_FLOW_CMD_NEW) {
949                 error = -EINVAL;
950                 goto error;
951         }
952
953         dp = get_dp(odp_header->dp_ifindex);
954         error = -ENODEV;
955         if (!dp)
956                 goto error;
957
958         hash = flow_hash(&key);
959         table = get_table_protected(dp);
960         flow_node = tbl_lookup(table, &key, hash, flow_cmp);
961         if (!flow_node) {
962                 struct sw_flow_actions *acts;
963
964                 /* Bail out if we're not allowed to create a new flow. */
965                 error = -ENOENT;
966                 if (info->genlhdr->cmd == ODP_FLOW_CMD_SET)
967                         goto error;
968
969                 /* Expand table, if necessary, to make room. */
970                 if (tbl_count(table) >= tbl_n_buckets(table)) {
971                         error = expand_table(dp);
972                         if (error)
973                                 goto error;
974                         table = get_table_protected(dp);
975                 }
976
977                 /* Allocate flow. */
978                 flow = flow_alloc();
979                 if (IS_ERR(flow)) {
980                         error = PTR_ERR(flow);
981                         goto error;
982                 }
983                 flow->key = key;
984                 clear_stats(flow);
985
986                 /* Obtain actions. */
987                 acts = flow_actions_alloc(a[ODP_FLOW_ATTR_ACTIONS]);
988                 error = PTR_ERR(acts);
989                 if (IS_ERR(acts))
990                         goto error_free_flow;
991                 rcu_assign_pointer(flow->sf_acts, acts);
992
993                 /* Put flow in bucket. */
994                 error = tbl_insert(table, &flow->tbl_node, hash);
995                 if (error)
996                         goto error_free_flow;
997
998                 reply = odp_flow_cmd_build_info(flow, dp, info->snd_pid,
999                                                 info->snd_seq, ODP_FLOW_CMD_NEW);
1000         } else {
1001                 /* We found a matching flow. */
1002                 struct sw_flow_actions *old_acts;
1003
1004                 /* Bail out if we're not allowed to modify an existing flow.
1005                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1006                  * because Generic Netlink treats the latter as a dump
1007                  * request.  We also accept NLM_F_EXCL in case that bug ever
1008                  * gets fixed.
1009                  */
1010                 error = -EEXIST;
1011                 if (info->genlhdr->cmd == ODP_FLOW_CMD_NEW &&
1012                     info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
1013                         goto error;
1014
1015                 /* Update actions. */
1016                 flow = flow_cast(flow_node);
1017                 old_acts = rcu_dereference_protected(flow->sf_acts,
1018                                                      lockdep_genl_is_held());
1019                 if (a[ODP_FLOW_ATTR_ACTIONS] &&
1020                     (old_acts->actions_len != nla_len(a[ODP_FLOW_ATTR_ACTIONS]) ||
1021                      memcmp(old_acts->actions, nla_data(a[ODP_FLOW_ATTR_ACTIONS]),
1022                             old_acts->actions_len))) {
1023                         struct sw_flow_actions *new_acts;
1024
1025                         new_acts = flow_actions_alloc(a[ODP_FLOW_ATTR_ACTIONS]);
1026                         error = PTR_ERR(new_acts);
1027                         if (IS_ERR(new_acts))
1028                                 goto error;
1029
1030                         rcu_assign_pointer(flow->sf_acts, new_acts);
1031                         flow_deferred_free_acts(old_acts);
1032                 }
1033
1034                 reply = odp_flow_cmd_build_info(flow, dp, info->snd_pid,
1035                                                 info->snd_seq, ODP_FLOW_CMD_NEW);
1036
1037                 /* Clear stats. */
1038                 if (a[ODP_FLOW_ATTR_CLEAR]) {
1039                         spin_lock_bh(&flow->lock);
1040                         clear_stats(flow);
1041                         spin_unlock_bh(&flow->lock);
1042                 }
1043         }
1044
1045         if (!IS_ERR(reply))
1046                 genl_notify(reply, genl_info_net(info), info->snd_pid,
1047                             dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1048         else
1049                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1050                                 dp_flow_multicast_group.id, PTR_ERR(reply));
1051         return 0;
1052
1053 error_free_flow:
1054         flow_put(flow);
1055 error:
1056         return error;
1057 }
1058
1059 static int odp_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1060 {
1061         struct nlattr **a = info->attrs;
1062         struct odp_header *odp_header = info->userhdr;
1063         struct sw_flow_key key;
1064         struct tbl_node *flow_node;
1065         struct sk_buff *reply;
1066         struct sw_flow *flow;
1067         struct datapath *dp;
1068         struct tbl *table;
1069         int err;
1070
1071         if (!a[ODP_FLOW_ATTR_KEY])
1072                 return -EINVAL;
1073         err = flow_from_nlattrs(&key, a[ODP_FLOW_ATTR_KEY]);
1074         if (err)
1075                 return err;
1076
1077         dp = get_dp(odp_header->dp_ifindex);
1078         if (!dp)
1079                 return -ENODEV;
1080
1081         table = get_table_protected(dp);
1082         flow_node = tbl_lookup(table, &key, flow_hash(&key), flow_cmp);
1083         if (!flow_node)
1084                 return -ENOENT;
1085
1086         flow = flow_cast(flow_node);
1087         reply = odp_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, ODP_FLOW_CMD_NEW);
1088         if (IS_ERR(reply))
1089                 return PTR_ERR(reply);
1090
1091         return genlmsg_reply(reply, info);
1092 }
1093
1094 static int odp_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1095 {
1096         struct nlattr **a = info->attrs;
1097         struct odp_header *odp_header = info->userhdr;
1098         struct sw_flow_key key;
1099         struct tbl_node *flow_node;
1100         struct sk_buff *reply;
1101         struct sw_flow *flow;
1102         struct datapath *dp;
1103         struct tbl *table;
1104         int err;
1105
1106         if (!a[ODP_FLOW_ATTR_KEY])
1107                 return flush_flows(odp_header->dp_ifindex);
1108         err = flow_from_nlattrs(&key, a[ODP_FLOW_ATTR_KEY]);
1109         if (err)
1110                 return err;
1111
1112         dp = get_dp(odp_header->dp_ifindex);
1113         if (!dp)
1114                 return -ENODEV;
1115
1116         table = get_table_protected(dp);
1117         flow_node = tbl_lookup(table, &key, flow_hash(&key), flow_cmp);
1118         if (!flow_node)
1119                 return -ENOENT;
1120         flow = flow_cast(flow_node);
1121
1122         reply = odp_flow_cmd_alloc_info(flow);
1123         if (!reply)
1124                 return -ENOMEM;
1125
1126         err = tbl_remove(table, flow_node);
1127         if (err) {
1128                 kfree_skb(reply);
1129                 return err;
1130         }
1131
1132         err = odp_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
1133                                      info->snd_seq, 0, ODP_FLOW_CMD_DEL);
1134         BUG_ON(err < 0);
1135
1136         flow_deferred_free(flow);
1137
1138         genl_notify(reply, genl_info_net(info), info->snd_pid,
1139                     dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1140         return 0;
1141 }
1142
1143 static int odp_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1144 {
1145         struct odp_header *odp_header = genlmsg_data(nlmsg_data(cb->nlh));
1146         struct datapath *dp;
1147
1148         dp = get_dp(odp_header->dp_ifindex);
1149         if (!dp)
1150                 return -ENODEV;
1151
1152         for (;;) {
1153                 struct tbl_node *flow_node;
1154                 struct sw_flow *flow;
1155                 u32 bucket, obj;
1156
1157                 bucket = cb->args[0];
1158                 obj = cb->args[1];
1159                 flow_node = tbl_next(get_table_protected(dp), &bucket, &obj);
1160                 if (!flow_node)
1161                         break;
1162
1163                 flow = flow_cast(flow_node);
1164                 if (odp_flow_cmd_fill_info(flow, dp, skb, NETLINK_CB(cb->skb).pid,
1165                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1166                                            ODP_FLOW_CMD_NEW) < 0)
1167                         break;
1168
1169                 cb->args[0] = bucket;
1170                 cb->args[1] = obj;
1171         }
1172         return skb->len;
1173 }
1174
1175 static struct genl_ops dp_flow_genl_ops[] = {
1176         { .cmd = ODP_FLOW_CMD_NEW,
1177           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1178           .policy = flow_policy,
1179           .doit = odp_flow_cmd_new_or_set
1180         },
1181         { .cmd = ODP_FLOW_CMD_DEL,
1182           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1183           .policy = flow_policy,
1184           .doit = odp_flow_cmd_del
1185         },
1186         { .cmd = ODP_FLOW_CMD_GET,
1187           .flags = 0,               /* OK for unprivileged users. */
1188           .policy = flow_policy,
1189           .doit = odp_flow_cmd_get,
1190           .dumpit = odp_flow_cmd_dump
1191         },
1192         { .cmd = ODP_FLOW_CMD_SET,
1193           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1194           .policy = flow_policy,
1195           .doit = odp_flow_cmd_new_or_set,
1196         },
1197 };
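
/* Informally (a sketch of the expected request shape, not a complete ABI
 * description): an ODP_FLOW_CMD_NEW request carries a struct odp_header
 * naming the datapath by dp_ifindex, a nested ODP_FLOW_ATTR_KEY in the form
 * flow_from_nlattrs() parses, and a nested ODP_FLOW_ATTR_ACTIONS list that
 * must pass validate_actions().  NLM_F_CREATE and NLM_F_EXCL are interpreted
 * as described in odp_flow_cmd_new_or_set() above.
 */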
1198
1199 static const struct nla_policy datapath_policy[ODP_DP_ATTR_MAX + 1] = {
1200 #ifdef HAVE_NLA_NUL_STRING
1201         [ODP_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1202 #endif
1203         [ODP_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 },
1204         [ODP_DP_ATTR_SAMPLING] = { .type = NLA_U32 },
1205 };
1206
1207 static struct genl_family dp_datapath_genl_family = {
1208         .id = GENL_ID_GENERATE,
1209         .hdrsize = sizeof(struct odp_header),
1210         .name = ODP_DATAPATH_FAMILY,
1211         .version = 1,
1212         .maxattr = ODP_DP_ATTR_MAX
1213 };
1214
1215 static struct genl_multicast_group dp_datapath_multicast_group = {
1216         .name = ODP_DATAPATH_MCGROUP
1217 };
1218
1219 static int odp_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1220                                 u32 pid, u32 seq, u32 flags, u8 cmd)
1221 {
1222         struct odp_header *odp_header;
1223         struct nlattr *nla;
1224         int err;
1225
1226         odp_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
1227                                    flags, cmd);
1228         if (!odp_header)
1229                 goto error;
1230
1231         odp_header->dp_ifindex = dp->dp_ifindex;
1232
1233         rcu_read_lock();
1234         err = nla_put_string(skb, ODP_DP_ATTR_NAME, dp_name(dp));
1235         rcu_read_unlock();
1236         if (err)
1237                 goto nla_put_failure;
1238
1239         nla = nla_reserve(skb, ODP_DP_ATTR_STATS, sizeof(struct odp_stats));
1240         if (!nla)
1241                 goto nla_put_failure;
1242         get_dp_stats(dp, nla_data(nla));
1243
1244         NLA_PUT_U32(skb, ODP_DP_ATTR_IPV4_FRAGS,
1245                     dp->drop_frags ? ODP_DP_FRAG_DROP : ODP_DP_FRAG_ZERO);
1246
1247         if (dp->sflow_probability)
1248                 NLA_PUT_U32(skb, ODP_DP_ATTR_SAMPLING, dp->sflow_probability);
1249
1250         nla = nla_nest_start(skb, ODP_DP_ATTR_MCGROUPS);
1251         if (!nla)
1252                 goto nla_put_failure;
1253         NLA_PUT_U32(skb, ODP_PACKET_CMD_MISS, packet_mc_group(dp, ODP_PACKET_CMD_MISS));
1254         NLA_PUT_U32(skb, ODP_PACKET_CMD_ACTION, packet_mc_group(dp, ODP_PACKET_CMD_ACTION));
1255         NLA_PUT_U32(skb, ODP_PACKET_CMD_SAMPLE, packet_mc_group(dp, ODP_PACKET_CMD_SAMPLE));
1256         nla_nest_end(skb, nla);
1257
1258         return genlmsg_end(skb, odp_header);
1259
1260 nla_put_failure:
1261         genlmsg_cancel(skb, odp_header);
1262 error:
1263         return -EMSGSIZE;
1264 }
1265
1266 static struct sk_buff *odp_dp_cmd_build_info(struct datapath *dp, u32 pid,
1267                                              u32 seq, u8 cmd)
1268 {
1269         struct sk_buff *skb;
1270         int retval;
1271
1272         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1273         if (!skb)
1274                 return ERR_PTR(-ENOMEM);
1275
1276         retval = odp_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
1277         if (retval < 0) {
1278                 kfree_skb(skb);
1279                 return ERR_PTR(retval);
1280         }
1281         return skb;
1282 }
1283
1284 static int odp_dp_cmd_validate(struct nlattr *a[ODP_DP_ATTR_MAX + 1])
1285 {
1286         if (a[ODP_DP_ATTR_IPV4_FRAGS]) {
1287                 u32 frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]);
1288
1289                 if (frags != ODP_DP_FRAG_ZERO && frags != ODP_DP_FRAG_DROP)
1290                         return -EINVAL;
1291         }
1292
1293         return VERIFY_NUL_STRING(a[ODP_DP_ATTR_NAME], IFNAMSIZ - 1);
1294 }
1295
1296 /* Called with genl_mutex and optionally with RTNL lock also. */
1297 static struct datapath *lookup_datapath(struct odp_header *odp_header, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
1298 {
1299         struct datapath *dp;
1300
1301         if (!a[ODP_DP_ATTR_NAME])
1302                 dp = get_dp(odp_header->dp_ifindex);
1303         else {
1304                 struct vport *vport;
1305
1306                 rcu_read_lock();
1307                 vport = vport_locate(nla_data(a[ODP_DP_ATTR_NAME]));
1308                 dp = vport && vport->port_no == ODPP_LOCAL ? vport->dp : NULL;
1309                 rcu_read_unlock();
1310         }
1311         return dp ? dp : ERR_PTR(-ENODEV);
1312 }
1313
1314 /* Called with genl_mutex. */
1315 static void change_datapath(struct datapath *dp, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
1316 {
1317         if (a[ODP_DP_ATTR_IPV4_FRAGS])
1318                 dp->drop_frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]) == ODP_DP_FRAG_DROP;
1319         if (a[ODP_DP_ATTR_SAMPLING])
1320                 dp->sflow_probability = nla_get_u32(a[ODP_DP_ATTR_SAMPLING]);
1321 }
1322
1323 static int odp_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1324 {
1325         struct nlattr **a = info->attrs;
1326         struct vport_parms parms;
1327         struct sk_buff *reply;
1328         struct datapath *dp;
1329         struct vport *vport;
1330         int err;
1331
1332         err = -EINVAL;
1333         if (!a[ODP_DP_ATTR_NAME])
1334                 goto err;
1335
1336         err = odp_dp_cmd_validate(a);
1337         if (err)
1338                 goto err;
1339
1340         rtnl_lock();
1341         err = -ENODEV;
1342         if (!try_module_get(THIS_MODULE))
1343                 goto err_unlock_rtnl;
1344
1345         err = -ENOMEM;
1346         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1347         if (dp == NULL)
1348                 goto err_put_module;
1349         INIT_LIST_HEAD(&dp->port_list);
1350
1351         /* Initialize kobject for bridge.  This will be added as
1352          * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
1353         dp->ifobj.kset = NULL;
1354         kobject_init(&dp->ifobj, &dp_ktype);
1355
1356         /* Allocate table. */
1357         err = -ENOMEM;
1358         rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
1359         if (!dp->table)
1360                 goto err_free_dp;
1361
1362         /* Set up our datapath device. */
1363         parms.name = nla_data(a[ODP_DP_ATTR_NAME]);
1364         parms.type = ODP_VPORT_TYPE_INTERNAL;
1365         parms.options = NULL;
1366         parms.dp = dp;
1367         parms.port_no = ODPP_LOCAL;
1368         vport = new_vport(&parms);
1369         if (IS_ERR(vport)) {
1370                 err = PTR_ERR(vport);
1371                 if (err == -EBUSY)
1372                         err = -EEXIST;
1373
1374                 goto err_destroy_table;
1375         }
1376         dp->dp_ifindex = vport_get_ifindex(vport);
1377
1378         dp->drop_frags = 0;
1379         dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1380         if (!dp->stats_percpu) {
1381                 err = -ENOMEM;
1382                 goto err_destroy_local_port;
1383         }
1384
1385         change_datapath(dp, a);
1386
1387         reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_NEW);
1388         err = PTR_ERR(reply);
1389         if (IS_ERR(reply))
1390                 goto err_destroy_local_port;
1391
1392         list_add_tail(&dp->list_node, &dps);
1393         dp_sysfs_add_dp(dp);
1394
1395         rtnl_unlock();
1396
1397         genl_notify(reply, genl_info_net(info), info->snd_pid,
1398                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1399         return 0;
1400
1401 err_destroy_local_port:
1402         dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
1403 err_destroy_table:
1404         tbl_destroy(get_table_protected(dp), NULL);
1405 err_free_dp:
1406         kfree(dp);
1407 err_put_module:
1408         module_put(THIS_MODULE);
1409 err_unlock_rtnl:
1410         rtnl_unlock();
1411 err:
1412         return err;
1413 }
1414
1415 static int odp_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1416 {
1417         struct vport *vport, *next_vport;
1418         struct sk_buff *reply;
1419         struct datapath *dp;
1420         int err;
1421
1422         err = odp_dp_cmd_validate(info->attrs);
1423         if (err)
1424                 goto exit;
1425
1426         rtnl_lock();
1427         dp = lookup_datapath(info->userhdr, info->attrs);
1428         err = PTR_ERR(dp);
1429         if (IS_ERR(dp))
1430                 goto exit_unlock;
1431
1432         reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_DEL);
1433         err = PTR_ERR(reply);
1434         if (IS_ERR(reply))
1435                 goto exit_unlock;
1436
1437         list_for_each_entry_safe (vport, next_vport, &dp->port_list, node)
1438                 if (vport->port_no != ODPP_LOCAL)
1439                         dp_detach_port(vport);
1440
1441         dp_sysfs_del_dp(dp);
1442         list_del(&dp->list_node);
1443         dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
1444
1445         call_rcu(&dp->rcu, destroy_dp_rcu);
1446         module_put(THIS_MODULE);
1447
1448         genl_notify(reply, genl_info_net(info), info->snd_pid,
1449                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1450         err = 0;
1451
1452 exit_unlock:
1453         rtnl_unlock();
1454 exit:
1455         return err;
1456 }
1457
1458 static int odp_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1459 {
1460         struct sk_buff *reply;
1461         struct datapath *dp;
1462         int err;
1463
1464         err = odp_dp_cmd_validate(info->attrs);
1465         if (err)
1466                 return err;
1467
1468         dp = lookup_datapath(info->userhdr, info->attrs);
1469         if (IS_ERR(dp))
1470                 return PTR_ERR(dp);
1471
1472         change_datapath(dp, info->attrs);
1473
1474         reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_NEW);
1475         if (IS_ERR(reply)) {
1476                 err = PTR_ERR(reply);
1477                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1478                                 dp_datapath_multicast_group.id, err);
1479                 return 0;
1480         }
1481
1482         genl_notify(reply, genl_info_net(info), info->snd_pid,
1483                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1484         return 0;
1485 }
1486
1487 static int odp_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1488 {
1489         struct sk_buff *reply;
1490         struct datapath *dp;
1491         int err;
1492
1493         err = odp_dp_cmd_validate(info->attrs);
1494         if (err)
1495                 return err;
1496
1497         dp = lookup_datapath(info->userhdr, info->attrs);
1498         if (IS_ERR(dp))
1499                 return PTR_ERR(dp);
1500
1501         reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_NEW);
1502         if (IS_ERR(reply))
1503                 return PTR_ERR(reply);
1504
1505         return genlmsg_reply(reply, info);
1506 }
1507
1508 static int odp_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1509 {
1510         struct datapath *dp;
1511         int skip = cb->args[0];
1512         int i = 0;
1513
1514         list_for_each_entry (dp, &dps, list_node) {
1515                 if (i < skip)
1516                         continue;
1517                 if (odp_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
1518                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1519                                          ODP_DP_CMD_NEW) < 0)
1520                         break;
1521                 i++;
1522         }
1523
1524         cb->args[0] = i;
1525
1526         return skb->len;
1527 }

static struct genl_ops dp_datapath_genl_ops[] = {
        { .cmd = ODP_DP_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = odp_dp_cmd_new
        },
        { .cmd = ODP_DP_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = odp_dp_cmd_del
        },
        { .cmd = ODP_DP_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = datapath_policy,
          .doit = odp_dp_cmd_get,
          .dumpit = odp_dp_cmd_dump
        },
        { .cmd = ODP_DP_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = odp_dp_cmd_set,
        },
};

static const struct nla_policy vport_policy[ODP_VPORT_ATTR_MAX + 1] = {
#ifdef HAVE_NLA_NUL_STRING
        [ODP_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
        [ODP_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
        [ODP_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
        [ODP_VPORT_ATTR_STATS] = { .len = sizeof(struct rtnl_link_stats64) },
        [ODP_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
#else
        [ODP_VPORT_ATTR_STATS] = { .minlen = sizeof(struct rtnl_link_stats64) },
        [ODP_VPORT_ATTR_ADDRESS] = { .minlen = ETH_ALEN },
#endif
        [ODP_VPORT_ATTR_MTU] = { .type = NLA_U32 },
        [ODP_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

static struct genl_family dp_vport_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct odp_header),
        .name = ODP_VPORT_FAMILY,
        .version = 1,
        .maxattr = ODP_VPORT_ATTR_MAX
};

static struct genl_multicast_group dp_vport_multicast_group = {
        .name = ODP_VPORT_MCGROUP
};

/* Called with RTNL lock or RCU read lock. */
static int odp_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                                   u32 pid, u32 seq, u32 flags, u8 cmd)
{
        struct odp_header *odp_header;
        struct nlattr *nla;
        int ifindex, iflink;
        int mtu;
        int err;

        odp_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
                                 flags, cmd);
        if (!odp_header)
                return -EMSGSIZE;

        odp_header->dp_ifindex = vport->dp->dp_ifindex;

        NLA_PUT_U32(skb, ODP_VPORT_ATTR_PORT_NO, vport->port_no);
        NLA_PUT_U32(skb, ODP_VPORT_ATTR_TYPE, vport_get_type(vport));
        NLA_PUT_STRING(skb, ODP_VPORT_ATTR_NAME, vport_get_name(vport));

        nla = nla_reserve(skb, ODP_VPORT_ATTR_STATS, sizeof(struct rtnl_link_stats64));
        if (!nla)
                goto nla_put_failure;
        if (vport_get_stats(vport, nla_data(nla)))
                __skb_trim(skb, skb->len - nla->nla_len);

        NLA_PUT(skb, ODP_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));

        mtu = vport_get_mtu(vport);
        if (mtu)
                NLA_PUT_U32(skb, ODP_VPORT_ATTR_MTU, mtu);

        err = vport_get_options(vport, skb);
        if (err == -EMSGSIZE)
                goto error;

        ifindex = vport_get_ifindex(vport);
        if (ifindex > 0)
                NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFINDEX, ifindex);

        iflink = vport_get_iflink(vport);
        if (iflink > 0)
                NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFLINK, iflink);

        return genlmsg_end(skb, odp_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, odp_header);
        return err;
}

/* Called with RTNL lock or RCU read lock. */
static struct sk_buff *odp_vport_cmd_build_info(struct vport *vport, u32 pid,
                                                u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = odp_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
        if (retval < 0) {
                kfree_skb(skb);
                return ERR_PTR(retval);
        }
        return skb;
}

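/* Performs validation of vport attributes beyond what the netlink policy
 * expresses; currently just the NUL-termination check on
 * ODP_VPORT_ATTR_NAME. */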
static int odp_vport_cmd_validate(struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
        return VERIFY_NUL_STRING(a[ODP_VPORT_ATTR_NAME], IFNAMSIZ - 1);
}

/* Called with RTNL lock or RCU read lock. */
static struct vport *lookup_vport(struct odp_header *odp_header,
                                  struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
        struct datapath *dp;
        struct vport *vport;

        if (a[ODP_VPORT_ATTR_NAME]) {
                vport = vport_locate(nla_data(a[ODP_VPORT_ATTR_NAME]));
                if (!vport)
                        return ERR_PTR(-ENODEV);
                return vport;
        } else if (a[ODP_VPORT_ATTR_PORT_NO]) {
                u32 port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

                if (port_no >= DP_MAX_PORTS)
                        return ERR_PTR(-EFBIG);

                dp = get_dp(odp_header->dp_ifindex);
                if (!dp)
                        return ERR_PTR(-ENODEV);

                vport = get_vport_protected(dp, port_no);
                if (!vport)
                        return ERR_PTR(-ENOENT);
                return vport;
        } else
                return ERR_PTR(-EINVAL);
}

/* Called with RTNL lock. */
static int change_vport(struct vport *vport, struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
        int err = 0;
        if (a[ODP_VPORT_ATTR_STATS])
                err = vport_set_stats(vport, nla_data(a[ODP_VPORT_ATTR_STATS]));
        if (!err && a[ODP_VPORT_ATTR_ADDRESS])
                err = vport_set_addr(vport, nla_data(a[ODP_VPORT_ATTR_ADDRESS]));
        if (!err && a[ODP_VPORT_ATTR_MTU])
                err = vport_set_mtu(vport, nla_get_u32(a[ODP_VPORT_ATTR_MTU]));
        return err;
}

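/* Handles ODP_VPORT_CMD_NEW: creates a vport on the given datapath, using the
 * requested port number or the first free one, then notifies the vport
 * multicast group.  Takes and releases the RTNL lock internally. */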
static int odp_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct odp_header *odp_header = info->userhdr;
        struct vport_parms parms;
        struct sk_buff *reply;
        struct vport *vport;
        struct datapath *dp;
        u32 port_no;
        int err;

        err = -EINVAL;
        if (!a[ODP_VPORT_ATTR_NAME] || !a[ODP_VPORT_ATTR_TYPE])
                goto exit;

        err = odp_vport_cmd_validate(a);
        if (err)
                goto exit;

        rtnl_lock();
        dp = get_dp(odp_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto exit_unlock;

        if (a[ODP_VPORT_ATTR_PORT_NO]) {
                port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

                err = -EFBIG;
                if (port_no >= DP_MAX_PORTS)
                        goto exit_unlock;

                vport = get_vport_protected(dp, port_no);
                err = -EBUSY;
                if (vport)
                        goto exit_unlock;
        } else {
                for (port_no = 1; ; port_no++) {
                        if (port_no >= DP_MAX_PORTS) {
                                err = -EFBIG;
                                goto exit_unlock;
                        }
                        vport = get_vport_protected(dp, port_no);
                        if (!vport)
                                break;
                }
        }

        parms.name = nla_data(a[ODP_VPORT_ATTR_NAME]);
        parms.type = nla_get_u32(a[ODP_VPORT_ATTR_TYPE]);
        parms.options = a[ODP_VPORT_ATTR_OPTIONS];
        parms.dp = dp;
        parms.port_no = port_no;

        vport = new_vport(&parms);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        set_internal_devs_mtu(dp);
        dp_sysfs_add_if(vport);

        err = change_vport(vport, a);
        if (!err) {
                reply = odp_vport_cmd_build_info(vport, info->snd_pid,
                                                 info->snd_seq, ODP_VPORT_CMD_NEW);
                if (IS_ERR(reply))
                        err = PTR_ERR(reply);
        }
        if (err) {
                dp_detach_port(vport);
                goto exit_unlock;
        }
        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
        rtnl_unlock();
exit:
        return err;
}

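/* Handles ODP_VPORT_CMD_SET: updates options, stats, Ethernet address and MTU
 * of an existing vport under RTNL and notifies the vport multicast group. */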
static int odp_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        err = odp_vport_cmd_validate(a);
        if (err)
                goto exit;

        rtnl_lock();
        vport = lookup_vport(info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        err = 0;
        if (a[ODP_VPORT_ATTR_OPTIONS])
                err = vport_set_options(vport, a[ODP_VPORT_ATTR_OPTIONS]);
        if (!err)
                err = change_vport(vport, a);

        reply = odp_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
                                         ODP_VPORT_CMD_NEW);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                netlink_set_err(INIT_NET_GENL_SOCK, 0,
                                dp_vport_multicast_group.id, err);
                err = 0;
                goto exit_unlock;
        }

        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
        rtnl_unlock();
exit:
        return err;
}

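/* Handles ODP_VPORT_CMD_DEL: detaches a vport from its datapath and notifies
 * the vport multicast group.  The local (ODPP_LOCAL) port cannot be removed
 * this way. */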
static int odp_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        err = odp_vport_cmd_validate(a);
        if (err)
                goto exit;

        rtnl_lock();
        vport = lookup_vport(info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        if (vport->port_no == ODPP_LOCAL) {
                err = -EINVAL;
                goto exit_unlock;
        }

        reply = odp_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
                                         ODP_VPORT_CMD_DEL);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock;

        err = dp_detach_port(vport);

        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
        rtnl_unlock();
exit:
        return err;
}

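/* Handles ODP_VPORT_CMD_GET: replies with the state of a single vport, looked
 * up by name or by datapath and port number, under the RCU read lock. */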
static int odp_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct odp_header *odp_header = info->userhdr;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        err = odp_vport_cmd_validate(a);
        if (err)
                goto exit;

        rcu_read_lock();
        vport = lookup_vport(odp_header, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        reply = odp_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
                                         ODP_VPORT_CMD_NEW);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock;

        err = genlmsg_reply(reply, info);

exit_unlock:
        rcu_read_unlock();
exit:
        return err;
}

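/* Dump callback for ODP_VPORT_CMD_GET: walks the ports of one datapath and
 * fills the dump skb, using cb->args[0] to resume at the next port number. */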
static int odp_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct odp_header *odp_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct datapath *dp;
        u32 port_no;
        int retval;

        dp = get_dp(odp_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        rcu_read_lock();
        for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
                struct vport *vport;

                vport = get_vport_protected(dp, port_no);
                if (!vport)
                        continue;

                if (odp_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                            ODP_VPORT_CMD_NEW) < 0)
                        break;
        }
        rcu_read_unlock();

        cb->args[0] = port_no;
        retval = skb->len;

        return retval;
}

static struct genl_ops dp_vport_genl_ops[] = {
        { .cmd = ODP_VPORT_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = odp_vport_cmd_new
        },
        { .cmd = ODP_VPORT_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = odp_vport_cmd_del
        },
        { .cmd = ODP_VPORT_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = vport_policy,
          .doit = odp_vport_cmd_get,
          .dumpit = odp_vport_cmd_dump
        },
        { .cmd = ODP_VPORT_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = odp_vport_cmd_set,
        },
};

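/* Pairs a generic netlink family with its operations and optional multicast
 * group so that dp_register_genl() can register them together. */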
struct genl_family_and_ops {
        struct genl_family *family;
        struct genl_ops *ops;
        int n_ops;
        struct genl_multicast_group *group;
};

static const struct genl_family_and_ops dp_genl_families[] = {
        { &dp_datapath_genl_family,
          dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
          &dp_datapath_multicast_group },
        { &dp_vport_genl_family,
          dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
          &dp_vport_multicast_group },
        { &dp_flow_genl_family,
          dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
          &dp_flow_multicast_group },
        { &dp_packet_genl_family,
          dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
          NULL },
};

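/* Unregisters the first n_families entries of dp_genl_families; used both on
 * module unload and to unwind a partially failed dp_register_genl(). */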
static void dp_unregister_genl(int n_families)
{
        int i;

        for (i = 0; i < n_families; i++) {
                genl_unregister_family(dp_genl_families[i].family);
        }
}

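/* Registers every generic netlink family in dp_genl_families along with its
 * multicast group, plus the packet multicast groups, unwinding all successful
 * registrations on failure. */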
static int dp_register_genl(void)
{
        int n_registered;
        int err;
        int i;

        n_registered = 0;
        for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
                const struct genl_family_and_ops *f = &dp_genl_families[i];

                err = genl_register_family_with_ops(f->family, f->ops,
                                                    f->n_ops);
                if (err)
                        goto error;
                n_registered++;

                if (f->group) {
                        err = genl_register_mc_group(f->family, f->group);
                        if (err)
                                goto error;
                }
        }

        err = packet_register_mc_groups();
        if (err)
                goto error;
        return 0;

error:
        dp_unregister_genl(n_registered);
        return err;
}

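/* Module init: sets up the flow and vport subsystems, the netdevice notifier
 * and the generic netlink families, unwinding in reverse order on failure. */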
static int __init dp_init(void)
{
        struct sk_buff *dummy_skb;
        int err;

        BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

        pr_info("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

        err = flow_init();
        if (err)
                goto error;

        err = vport_init();
        if (err)
                goto error_flow_exit;

        err = register_netdevice_notifier(&dp_device_notifier);
        if (err)
                goto error_vport_exit;

        err = dp_register_genl();
        if (err < 0)
                goto error_unreg_notifier;

        return 0;

error_unreg_notifier:
        unregister_netdevice_notifier(&dp_device_notifier);
error_vport_exit:
        vport_exit();
error_flow_exit:
        flow_exit();
error:
        return err;
}

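/* Module exit: waits for outstanding RCU callbacks, then tears down everything
 * dp_init() set up, in reverse order. */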
static void dp_cleanup(void)
{
        rcu_barrier();
        dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
        unregister_netdevice_notifier(&dp_device_notifier);
        vport_exit();
        flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");