datapath: Calculate flow hash after extracting metadata.
[sliver-openvswitch.git] / datapath / datapath.c
1 /*
2  * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
3  * Distributed under the terms of the GNU GPL version 2.
4  *
5  * Significant portions of this file may be copied from parts of the Linux
6  * kernel, by Linus Torvalds and others.
7  */
8
9 /* Functions for managing the dp interface/device. */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/if_arp.h>
16 #include <linux/if_vlan.h>
17 #include <linux/in.h>
18 #include <linux/ip.h>
19 #include <linux/jhash.h>
20 #include <linux/delay.h>
21 #include <linux/time.h>
22 #include <linux/etherdevice.h>
23 #include <linux/genetlink.h>
24 #include <linux/kernel.h>
25 #include <linux/kthread.h>
26 #include <linux/mutex.h>
27 #include <linux/percpu.h>
28 #include <linux/rcupdate.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/version.h>
32 #include <linux/ethtool.h>
33 #include <linux/wait.h>
34 #include <asm/system.h>
35 #include <asm/div64.h>
36 #include <asm/bug.h>
37 #include <linux/highmem.h>
38 #include <linux/netfilter_bridge.h>
39 #include <linux/netfilter_ipv4.h>
40 #include <linux/inetdevice.h>
41 #include <linux/list.h>
42 #include <linux/rculist.h>
43 #include <linux/dmi.h>
44 #include <net/inet_ecn.h>
45 #include <net/genetlink.h>
46
47 #include "openvswitch/datapath-protocol.h"
48 #include "checksum.h"
49 #include "datapath.h"
50 #include "actions.h"
51 #include "flow.h"
52 #include "table.h"
53 #include "vlan.h"
54 #include "vport-internal_dev.h"
55
56 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
57     LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
58 #error Kernels before 2.6.18 or after 3.0 are not supported by this version of Open vSwitch.
59 #endif
60
61 int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
62 EXPORT_SYMBOL(dp_ioctl_hook);
63
64 /**
65  * DOC: Locking:
66  *
67  * Writes to device state (add/remove datapath, port, set operations on vports,
68  * etc.) are protected by RTNL.
69  *
70  * Writes to other state (flow table modifications, set miscellaneous datapath
71  * parameters such as drop frags, etc.) are protected by genl_mutex.  The RTNL
72  * lock nests inside genl_mutex.
73  *
74  * Reads are protected by RCU.
75  *
76  * There are a few special cases (mostly stats) that have their own
77  * synchronization, but they nest under all of the above and don't interact with
78  * each other.
79  */
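
As a minimal sketch of the reader side described above (illustrative only, not part of the original file; the helper name is hypothetical, and tbl_count() is assumed to return the table's element count, as its use in get_dp_stats() below suggests), a flow count can be taken entirely under RCU, dereferencing dp->table the same way dp_process_received_packet() does:

        /* Illustrative reader: needs neither RTNL nor genl_mutex, only RCU. */
        static unsigned int example_flow_count(struct datapath *dp)
        {
                unsigned int n_flows;

                rcu_read_lock();
                n_flows = tbl_count(rcu_dereference(dp->table));
                rcu_read_unlock();

                return n_flows;
        }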
80
81 /* Global list of datapaths to enable dumping them all out.
82  * Protected by genl_mutex.
83  */
84 static LIST_HEAD(dps);
85
86 static struct vport *new_vport(const struct vport_parms *);
87 static int queue_userspace_packets(struct datapath *, struct sk_buff *,
88                                  const struct dp_upcall_info *);
89
90 /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
91 struct datapath *get_dp(int dp_ifindex)
92 {
93         struct datapath *dp = NULL;
94         struct net_device *dev;
95
96         rcu_read_lock();
97         dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
98         if (dev) {
99                 struct vport *vport = internal_dev_get_vport(dev);
100                 if (vport)
101                         dp = vport->dp;
102         }
103         rcu_read_unlock();
104
105         return dp;
106 }
107 EXPORT_SYMBOL_GPL(get_dp);
108
109 /* Must be called with genl_mutex. */
110 static struct tbl *get_table_protected(struct datapath *dp)
111 {
112         return rcu_dereference_protected(dp->table, lockdep_genl_is_held());
113 }
114
115 /* Must be called with rcu_read_lock or RTNL lock. */
116 static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
117 {
118         return rcu_dereference_rtnl(dp->ports[port_no]);
119 }
120
121 /* Must be called with rcu_read_lock or RTNL lock. */
122 const char *dp_name(const struct datapath *dp)
123 {
124         return vport_get_name(rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]));
125 }
126
127 static inline size_t br_nlmsg_size(void)
128 {
129         return NLMSG_ALIGN(sizeof(struct ifinfomsg))
130                + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
131                + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
132                + nla_total_size(4) /* IFLA_MASTER */
133                + nla_total_size(4) /* IFLA_MTU */
134                + nla_total_size(4) /* IFLA_LINK */
135                + nla_total_size(1); /* IFLA_OPERSTATE */
136 }
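
For the sizing above, nla_total_size(payload) is NLA_ALIGN(NLA_HDRLEN + payload): each attribute costs its 4-byte header plus the payload rounded up to a 4-byte boundary. A rough worked example for the terms summed by br_nlmsg_size(), assuming the usual NLA_HDRLEN of 4, IFNAMSIZ of 16 and MAX_ADDR_LEN of 32:

        /*
         * nla_total_size(IFNAMSIZ)     = NLA_ALIGN(4 + 16) = 20   IFLA_IFNAME
         * nla_total_size(MAX_ADDR_LEN) = NLA_ALIGN(4 + 32) = 36   IFLA_ADDRESS
         * nla_total_size(4)            = NLA_ALIGN(4 +  4) =  8   IFLA_MASTER/MTU/LINK
         * nla_total_size(1)            = NLA_ALIGN(4 +  1) =  8   IFLA_OPERSTATE
         */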
137
138 /* Caller must hold RTNL lock. */
139 static int dp_fill_ifinfo(struct sk_buff *skb,
140                           const struct vport *port,
141                           int event, unsigned int flags)
142 {
143         struct datapath *dp = port->dp;
144         int ifindex = vport_get_ifindex(port);
145         struct ifinfomsg *hdr;
146         struct nlmsghdr *nlh;
147
148         if (ifindex < 0)
149                 return ifindex;
150
151         nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
152         if (nlh == NULL)
153                 return -EMSGSIZE;
154
155         hdr = nlmsg_data(nlh);
156         hdr->ifi_family = AF_BRIDGE;
157         hdr->__ifi_pad = 0;
158         hdr->ifi_type = ARPHRD_ETHER;
159         hdr->ifi_index = ifindex;
160         hdr->ifi_flags = vport_get_flags(port);
161         hdr->ifi_change = 0;
162
163         NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
164         NLA_PUT_U32(skb, IFLA_MASTER,
165                 vport_get_ifindex(get_vport_protected(dp, OVSP_LOCAL)));
166         NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
167 #ifdef IFLA_OPERSTATE
168         NLA_PUT_U8(skb, IFLA_OPERSTATE,
169                    vport_is_running(port)
170                         ? vport_get_operstate(port)
171                         : IF_OPER_DOWN);
172 #endif
173
174         NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));
175
176         return nlmsg_end(skb, nlh);
177
178 nla_put_failure:
179         nlmsg_cancel(skb, nlh);
180         return -EMSGSIZE;
181 }
182
183 /* Caller must hold RTNL lock. */
184 static void dp_ifinfo_notify(int event, struct vport *port)
185 {
186         struct sk_buff *skb;
187         int err = -ENOBUFS;
188
189         skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
190         if (skb == NULL)
191                 goto errout;
192
193         err = dp_fill_ifinfo(skb, port, event, 0);
194         if (err < 0) {
195                 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
196                 WARN_ON(err == -EMSGSIZE);
197                 kfree_skb(skb);
198                 goto errout;
199         }
200         rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
201         return;
202 errout:
203         if (err < 0)
204                 rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
205 }
206
207 static void release_dp(struct kobject *kobj)
208 {
209         struct datapath *dp = container_of(kobj, struct datapath, ifobj);
210         kfree(dp);
211 }
212
213 static struct kobj_type dp_ktype = {
214         .release = release_dp
215 };
216
217 static void destroy_dp_rcu(struct rcu_head *rcu)
218 {
219         struct datapath *dp = container_of(rcu, struct datapath, rcu);
220
221         tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
222         free_percpu(dp->stats_percpu);
223         kobject_put(&dp->ifobj);
224 }
225
226 /* Called with RTNL lock and genl_lock. */
227 static struct vport *new_vport(const struct vport_parms *parms)
228 {
229         struct vport *vport;
230
231         vport = vport_add(parms);
232         if (!IS_ERR(vport)) {
233                 struct datapath *dp = parms->dp;
234
235                 rcu_assign_pointer(dp->ports[parms->port_no], vport);
236                 list_add(&vport->node, &dp->port_list);
237
238                 dp_ifinfo_notify(RTM_NEWLINK, vport);
239         }
240
241         return vport;
242 }
243
244 /* Called with RTNL lock. */
245 int dp_detach_port(struct vport *p)
246 {
247         ASSERT_RTNL();
248
249         if (p->port_no != OVSP_LOCAL)
250                 dp_sysfs_del_if(p);
251         dp_ifinfo_notify(RTM_DELLINK, p);
252
253         /* First drop references to device. */
254         list_del(&p->node);
255         rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
256
257         /* Then destroy it. */
258         return vport_del(p);
259 }
260
261 /* Must be called with rcu_read_lock. */
262 void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
263 {
264         struct datapath *dp = p->dp;
265         struct dp_stats_percpu *stats;
266         int stats_counter_off;
267         int error;
268
269         OVS_CB(skb)->vport = p;
270
271         if (!OVS_CB(skb)->flow) {
272                 struct sw_flow_key key;
273                 struct tbl_node *flow_node;
274                 int key_len;
275                 bool is_frag;
276
277                 /* Extract flow from 'skb' into 'key'. */
278                 error = flow_extract(skb, p->port_no, &key, &key_len, &is_frag);
279                 if (unlikely(error)) {
280                         kfree_skb(skb);
281                         return;
282                 }
283
284                 if (is_frag && dp->drop_frags) {
285                         consume_skb(skb);
286                         stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
287                         goto out;
288                 }
289
290                 /* Look up flow. */
291                 flow_node = tbl_lookup(rcu_dereference(dp->table), &key, key_len,
292                                        flow_hash(&key, key_len), flow_cmp);
293                 if (unlikely(!flow_node)) {
294                         struct dp_upcall_info upcall;
295
296                         upcall.cmd = OVS_PACKET_CMD_MISS;
297                         upcall.key = &key;
298                         upcall.userdata = 0;
299                         upcall.sample_pool = 0;
300                         upcall.actions = NULL;
301                         upcall.actions_len = 0;
302                         dp_upcall(dp, skb, &upcall);
303                         stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
304                         goto out;
305                 }
306
307                 OVS_CB(skb)->flow = flow_cast(flow_node);
308         }
309
310         stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
311         flow_used(OVS_CB(skb)->flow, skb);
312         execute_actions(dp, skb);
313
314 out:
315         /* Update datapath statistics. */
316         local_bh_disable();
317         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
318
319         write_seqcount_begin(&stats->seqlock);
320         (*(u64 *)((u8 *)stats + stats_counter_off))++;
321         write_seqcount_end(&stats->seqlock);
322
323         local_bh_enable();
324 }
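
The statistics update above folds three counters into one code path: stats_counter_off selects n_frags, n_missed, or n_hit, and the pointer arithmetic bumps that field of this CPU's dp_stats_percpu. The write_seqcount_begin()/end() pair matches the retry loop in get_dp_stats() further down, so readers never see a torn 64-bit counter. As a sketch, when stats_counter_off == offsetof(struct dp_stats_percpu, n_hit), the guarded increment amounts to:

        write_seqcount_begin(&stats->seqlock);
        stats->n_hit++;         /* what the offsetof() arithmetic expands to */
        write_seqcount_end(&stats->seqlock);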
325
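/* Copies 'skb' into the flat buffer 'to', completing its pending checksum on
 * the way: bytes before csum_start are copied verbatim, the remainder is
 * copied and summed, and the folded result is stored at csum_start +
 * csum_offset within 'to'. */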
326 static void copy_and_csum_skb(struct sk_buff *skb, void *to)
327 {
328         u16 csum_start, csum_offset;
329         __wsum csum;
330
331         get_skb_csum_pointers(skb, &csum_start, &csum_offset);
332         csum_start -= skb_headroom(skb);
333
334         skb_copy_bits(skb, 0, to, csum_start);
335
336         csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
337                                       skb->len - csum_start, 0);
338         *(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
339 }
340
341 static struct genl_family dp_packet_genl_family = {
342         .id = GENL_ID_GENERATE,
343         .hdrsize = sizeof(struct ovs_header),
344         .name = OVS_PACKET_FAMILY,
345         .version = 1,
346         .maxattr = OVS_PACKET_ATTR_MAX
347 };
348
349 /* Generic Netlink multicast groups for upcalls.
350  *
351  * We really want three unique multicast groups per datapath, but we can't even
352  * get one, because genl_register_mc_group() takes genl_lock, which is also
353  * held during Generic Netlink message processing, so trying to acquire
354  * multicast groups during OVS_DP_NEW processing deadlocks.  Instead, we
355  * preallocate a few groups and use them round-robin for datapaths.  Collision
356  * isn't fatal--multicast listeners should check that the family is the one
357  * that they want and discard others--but it wastes time and memory to receive
358  * unwanted messages.
359  */
360 #define PACKET_N_MC_GROUPS 16
361 static struct genl_multicast_group packet_mc_groups[PACKET_N_MC_GROUPS];
362
363 static u32 packet_mc_group(struct datapath *dp, u8 cmd)
364 {
365         u32 idx;
366         BUILD_BUG_ON_NOT_POWER_OF_2(PACKET_N_MC_GROUPS);
367
368         idx = jhash_2words(dp->dp_ifindex, cmd, 0) & (PACKET_N_MC_GROUPS - 1);
369         return packet_mc_groups[idx].id;
370 }
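
/* Example: a datapath's three upcall groups are packet_mc_group(dp, cmd) for
 * cmd in {OVS_PACKET_CMD_MISS, OVS_PACKET_CMD_ACTION, OVS_PACKET_CMD_SAMPLE},
 * i.e. jhash_2words(dp_ifindex, cmd, 0) masked down to one of the 16 groups
 * above.  Since two datapaths can hash to the same group, a listener is
 * expected to check the dp_ifindex carried in each message's ovs_header and
 * discard upcalls it did not ask for. */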
371
372 static int packet_register_mc_groups(void)
373 {
374         int i;
375
376         for (i = 0; i < PACKET_N_MC_GROUPS; i++) {
377                 struct genl_multicast_group *group = &packet_mc_groups[i];
378                 int error;
379
380                 sprintf(group->name, "packet%d", i);
381                 error = genl_register_mc_group(&dp_packet_genl_family, group);
382                 if (error)
383                         return error;
384         }
385         return 0;
386 }
387
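/* Queues 'skb' to userspace as directed by 'upcall_info', first segmenting it
 * if it is a GSO packet.  On failure the per-CPU n_lost counter is bumped and
 * an error returned; 'skb' is consumed in either case. */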
388 int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
389 {
390         struct dp_stats_percpu *stats;
391         int err;
392
393         WARN_ON_ONCE(skb_shared(skb));
394
395         forward_ip_summed(skb, true);
396
397         /* Break apart GSO packets into their component pieces.  Otherwise
398          * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
399         if (skb_is_gso(skb)) {
400                 struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
401
402                 if (IS_ERR(nskb)) {
403                         kfree_skb(skb);
404                         err = PTR_ERR(nskb);
405                         goto err;
406                 }
407                 consume_skb(skb);
408                 skb = nskb;
409         }
410
411         err = queue_userspace_packets(dp, skb, upcall_info);
412         if (err)
413                 goto err;
414
415         return 0;
416
417 err:
418         local_bh_disable();
419         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
420
421         write_seqcount_begin(&stats->seqlock);
422         stats->n_lost++;
423         write_seqcount_end(&stats->seqlock);
424
425         local_bh_enable();
426
427         return err;
428 }
429
430 /* Send each packet in the 'skb' list to userspace for 'dp' as directed by
431  * 'upcall_info'.  There will be only one packet unless we broke up a GSO
432  * packet.
433  */
434 static int queue_userspace_packets(struct datapath *dp, struct sk_buff *skb,
435                                  const struct dp_upcall_info *upcall_info)
436 {
437         u32 group = packet_mc_group(dp, upcall_info->cmd);
438         struct sk_buff *nskb;
439         int err;
440
441         do {
442                 struct ovs_header *upcall;
443                 struct sk_buff *user_skb; /* to be queued to userspace */
444                 struct nlattr *nla;
445                 unsigned int len;
446
447                 nskb = skb->next;
448                 skb->next = NULL;
449
450                 err = vlan_deaccel_tag(skb);
451                 if (unlikely(err))
452                         goto err_kfree_skbs;
453
454                 if (nla_attr_size(skb->len) > USHRT_MAX) {
455                         err = -EFBIG;
                            goto err_kfree_skbs;
                    }
456
457                 len = sizeof(struct ovs_header);
458                 len += nla_total_size(skb->len);
459                 len += nla_total_size(FLOW_BUFSIZE);
460                 if (upcall_info->userdata)
461                         len += nla_total_size(8);
462                 if (upcall_info->sample_pool)
463                         len += nla_total_size(4);
464                 if (upcall_info->actions_len)
465                         len += nla_total_size(upcall_info->actions_len);
466
467                 user_skb = genlmsg_new(len, GFP_ATOMIC);
468                 if (!user_skb) {
469                         netlink_set_err(INIT_NET_GENL_SOCK, 0, group, -ENOBUFS);
470                         err = -ENOBUFS;
                            goto err_kfree_skbs;
471                 }
472
473                 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd);
474                 upcall->dp_ifindex = dp->dp_ifindex;
475
476                 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
477                 flow_to_nlattrs(upcall_info->key, user_skb);
478                 nla_nest_end(user_skb, nla);
479
480                 if (upcall_info->userdata)
481                         nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA, upcall_info->userdata);
482                 if (upcall_info->sample_pool)
483                         nla_put_u32(user_skb, OVS_PACKET_ATTR_SAMPLE_POOL, upcall_info->sample_pool);
484                 if (upcall_info->actions_len) {
485                         const struct nlattr *actions = upcall_info->actions;
486                         u32 actions_len = upcall_info->actions_len;
487
488                         nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
489                         memcpy(__skb_put(user_skb, actions_len), actions, actions_len);
490                         nla_nest_end(user_skb, nla);
491                 }
492
493                 nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
494                 if (skb->ip_summed == CHECKSUM_PARTIAL)
495                         copy_and_csum_skb(skb, nla_data(nla));
496                 else
497                         skb_copy_bits(skb, 0, nla_data(nla), skb->len);
498
499                 err = genlmsg_multicast(user_skb, 0, group, GFP_ATOMIC);
500                 if (err)
501                         goto err_kfree_skbs;
502
503                 consume_skb(skb);
504                 skb = nskb;
505         } while (skb);
506         return 0;
507
508 err_kfree_skbs:
509         kfree_skb(skb);
510         while ((skb = nskb) != NULL) {
511                 nskb = skb->next;
512                 kfree_skb(skb);
513         }
514         return err;
515 }
516
517 /* Called with genl_mutex. */
518 static int flush_flows(int dp_ifindex)
519 {
520         struct tbl *old_table;
521         struct tbl *new_table;
522         struct datapath *dp;
523
524         dp = get_dp(dp_ifindex);
525         if (!dp)
526                 return -ENODEV;
527
528         old_table = get_table_protected(dp);
529         new_table = tbl_create(TBL_MIN_BUCKETS);
530         if (!new_table)
531                 return -ENOMEM;
532
533         rcu_assign_pointer(dp->table, new_table);
534
535         tbl_deferred_destroy(old_table, flow_free_tbl);
536
537         return 0;
538 }
539
540 static int validate_actions(const struct nlattr *attr)
541 {
542         const struct nlattr *a;
543         int rem;
544
545         nla_for_each_nested(a, attr, rem) {
546                 static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
547                         [OVS_ACTION_ATTR_OUTPUT] = 4,
548                         [OVS_ACTION_ATTR_USERSPACE] = 8,
549                         [OVS_ACTION_ATTR_SET_DL_TCI] = 2,
550                         [OVS_ACTION_ATTR_STRIP_VLAN] = 0,
551                         [OVS_ACTION_ATTR_SET_DL_SRC] = ETH_ALEN,
552                         [OVS_ACTION_ATTR_SET_DL_DST] = ETH_ALEN,
553                         [OVS_ACTION_ATTR_SET_NW_SRC] = 4,
554                         [OVS_ACTION_ATTR_SET_NW_DST] = 4,
555                         [OVS_ACTION_ATTR_SET_NW_TOS] = 1,
556                         [OVS_ACTION_ATTR_SET_TP_SRC] = 2,
557                         [OVS_ACTION_ATTR_SET_TP_DST] = 2,
558                         [OVS_ACTION_ATTR_SET_TUNNEL] = 8,
559                         [OVS_ACTION_ATTR_SET_PRIORITY] = 4,
560                         [OVS_ACTION_ATTR_POP_PRIORITY] = 0,
561                 };
562                 int type = nla_type(a);
563
564                 if (type > OVS_ACTION_ATTR_MAX || nla_len(a) != action_lens[type])
565                         return -EINVAL;
566
567                 switch (type) {
568                 case OVS_ACTION_ATTR_UNSPEC:
569                         return -EINVAL;
570
571                 case OVS_ACTION_ATTR_USERSPACE:
572                 case OVS_ACTION_ATTR_STRIP_VLAN:
573                 case OVS_ACTION_ATTR_SET_DL_SRC:
574                 case OVS_ACTION_ATTR_SET_DL_DST:
575                 case OVS_ACTION_ATTR_SET_NW_SRC:
576                 case OVS_ACTION_ATTR_SET_NW_DST:
577                 case OVS_ACTION_ATTR_SET_TP_SRC:
578                 case OVS_ACTION_ATTR_SET_TP_DST:
579                 case OVS_ACTION_ATTR_SET_TUNNEL:
580                 case OVS_ACTION_ATTR_SET_PRIORITY:
581                 case OVS_ACTION_ATTR_POP_PRIORITY:
582                         /* No validation needed. */
583                         break;
584
585                 case OVS_ACTION_ATTR_OUTPUT:
586                         if (nla_get_u32(a) >= DP_MAX_PORTS)
587                                 return -EINVAL;
588                         break;
589
590                 case OVS_ACTION_ATTR_SET_DL_TCI:
591                         if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
592                                 return -EINVAL;
593                         break;
594
595                 case OVS_ACTION_ATTR_SET_NW_TOS:
596                         if (nla_get_u8(a) & INET_ECN_MASK)
597                                 return -EINVAL;
598                         break;
599
600                 default:
601                         return -EOPNOTSUPP;
602                 }
603         }
604
605         if (rem > 0)
606                 return -EINVAL;
607
608         return 0;
609 }
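
As an illustration (a hypothetical sender, not code from this file; 'skb' and 'port_no' are assumed to exist and error checking is omitted), a nested action list that passes validate_actions() can be built with the standard netlink helpers; note the 4-byte OVS_ACTION_ATTR_OUTPUT payload and the zero-length OVS_ACTION_ATTR_STRIP_VLAN flag, matching action_lens[] above:

        struct nlattr *start;

        start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
        nla_put_u32(skb, OVS_ACTION_ATTR_OUTPUT, port_no); /* must be < DP_MAX_PORTS */
        nla_put_flag(skb, OVS_ACTION_ATTR_STRIP_VLAN);     /* nla_len == 0 */
        nla_nest_end(skb, start);
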
610 static void clear_stats(struct sw_flow *flow)
611 {
612         flow->used = 0;
613         flow->tcp_flags = 0;
614         flow->packet_count = 0;
615         flow->byte_count = 0;
616 }
617
618 /* Called with genl_mutex. */
619 static int expand_table(struct datapath *dp)
620 {
621         struct tbl *old_table = get_table_protected(dp);
622         struct tbl *new_table;
623
624         new_table = tbl_expand(old_table);
625         if (IS_ERR(new_table)) {
626                 if (PTR_ERR(new_table) != -ENOSPC)
627                         return PTR_ERR(new_table);
628         } else {
629                 rcu_assign_pointer(dp->table, new_table);
630                 tbl_deferred_destroy(old_table, NULL);
631         }
632
633         return 0;
634 }
635
636 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
637 {
638         struct ovs_header *ovs_header = info->userhdr;
639         struct nlattr **a = info->attrs;
640         struct sw_flow_actions *acts;
641         struct sk_buff *packet;
642         struct sw_flow *flow;
643         struct datapath *dp;
644         struct ethhdr *eth;
645         bool is_frag;
646         int len;
647         int err;
648         int key_len;
649
650         err = -EINVAL;
651         if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
652             !a[OVS_PACKET_ATTR_ACTIONS] ||
653             nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
654                 goto err;
655
656         err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS]);
657         if (err)
658                 goto err;
659
660         len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
661         packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
662         err = -ENOMEM;
663         if (!packet)
664                 goto err;
665         skb_reserve(packet, NET_IP_ALIGN);
666
667         memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);
668
669         skb_reset_mac_header(packet);
670         eth = eth_hdr(packet);
671
672         /* Normally, setting the skb 'protocol' field would be handled by a
673          * call to eth_type_trans(), but it assumes there's a sending
674          * device, which we may not have.  An h_proto of 1536 (0x600) or more is a
             * genuine EtherType; smaller values are 802.3 frame lengths, hence ETH_P_802_2. */
675         if (ntohs(eth->h_proto) >= 1536)
676                 packet->protocol = eth->h_proto;
677         else
678                 packet->protocol = htons(ETH_P_802_2);
679
680         /* Build an sw_flow for sending this packet. */
681         flow = flow_alloc();
682         err = PTR_ERR(flow);
683         if (IS_ERR(flow))
684                 goto err_kfree_skb;
685
686         err = flow_extract(packet, -1, &flow->key, &key_len, &is_frag);
687         if (err)
688                 goto err_flow_put;
689
690         err = flow_metadata_from_nlattrs(&flow->key.eth.in_port,
691                                          &flow->key.eth.tun_id,
692                                          a[OVS_PACKET_ATTR_KEY]);
693         if (err)
694                 goto err_flow_put;
695
696         flow->tbl_node.hash = flow_hash(&flow->key, key_len);
697
698         acts = flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
699         err = PTR_ERR(acts);
700         if (IS_ERR(acts))
701                 goto err_flow_put;
702         rcu_assign_pointer(flow->sf_acts, acts);
703
704         OVS_CB(packet)->flow = flow;
705
706         rcu_read_lock();
707         dp = get_dp(ovs_header->dp_ifindex);
708         err = -ENODEV;
709         if (!dp)
710                 goto err_unlock;
711         err = execute_actions(dp, packet);
712         rcu_read_unlock();
713
714         flow_put(flow);
715         return err;
716
717 err_unlock:
718         rcu_read_unlock();
719 err_flow_put:
720         flow_put(flow);
721 err_kfree_skb:
722         kfree_skb(packet);
723 err:
724         return err;
725 }
726
727 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
728         [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
729         [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
730         [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
731 };
732
733 static struct genl_ops dp_packet_genl_ops[] = {
734         { .cmd = OVS_PACKET_CMD_EXECUTE,
735           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
736           .policy = packet_policy,
737           .doit = ovs_packet_cmd_execute
738         }
739 };
740
741 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
742 {
743         int i;
744         struct tbl *table = get_table_protected(dp);
745
746         stats->n_flows = tbl_count(table);
747
748         stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0;
749         for_each_possible_cpu(i) {
750                 const struct dp_stats_percpu *percpu_stats;
751                 struct dp_stats_percpu local_stats;
752                 unsigned seqcount;
753
754                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
755
756                 do {
757                         seqcount = read_seqcount_begin(&percpu_stats->seqlock);
758                         local_stats = *percpu_stats;
759                 } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
760
761                 stats->n_frags += local_stats.n_frags;
762                 stats->n_hit += local_stats.n_hit;
763                 stats->n_missed += local_stats.n_missed;
764                 stats->n_lost += local_stats.n_lost;
765         }
766 }
767
768 /* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports.
769  * Called with RTNL lock.
770  */
771 int dp_min_mtu(const struct datapath *dp)
772 {
773         struct vport *p;
774         int mtu = 0;
775
776         ASSERT_RTNL();
777
778         list_for_each_entry (p, &dp->port_list, node) {
779                 int dev_mtu;
780
781                 /* Skip any internal ports, since that's what we're trying to
782                  * set. */
783                 if (is_internal_vport(p))
784                         continue;
785
786                 dev_mtu = vport_get_mtu(p);
787                 if (!dev_mtu)
788                         continue;
789                 if (!mtu || dev_mtu < mtu)
790                         mtu = dev_mtu;
791         }
792
793         return mtu ? mtu : ETH_DATA_LEN;
794 }
795
796 /* Sets the MTU of all the datapath's internal devices to the minimum of the ports.
797  * Called with RTNL lock.
798  */
799 void set_internal_devs_mtu(const struct datapath *dp)
800 {
801         struct vport *p;
802         int mtu;
803
804         ASSERT_RTNL();
805
806         mtu = dp_min_mtu(dp);
807
808         list_for_each_entry (p, &dp->port_list, node) {
809                 if (is_internal_vport(p))
810                         vport_set_mtu(p, mtu);
811         }
812 }
813
814 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
815         [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
816         [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
817         [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
818 };
819
820 static struct genl_family dp_flow_genl_family = {
821         .id = GENL_ID_GENERATE,
822         .hdrsize = sizeof(struct ovs_header),
823         .name = OVS_FLOW_FAMILY,
824         .version = 1,
825         .maxattr = OVS_FLOW_ATTR_MAX
826 };
827
828 static struct genl_multicast_group dp_flow_multicast_group = {
829         .name = OVS_FLOW_MCGROUP
830 };
831
832 /* Called with genl_lock. */
833 static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
834                                   struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd)
835 {
836         const int skb_orig_len = skb->len;
837         const struct sw_flow_actions *sf_acts;
838         struct ovs_flow_stats stats;
839         struct ovs_header *ovs_header;
840         struct nlattr *nla;
841         unsigned long used;
842         u8 tcp_flags;
843         int err;
844
845         sf_acts = rcu_dereference_protected(flow->sf_acts,
846                                             lockdep_genl_is_held());
847
848         ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
849         if (!ovs_header)
850                 return -EMSGSIZE;
851
852         ovs_header->dp_ifindex = dp->dp_ifindex;
853
854         nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
855         if (!nla)
856                 goto nla_put_failure;
857         err = flow_to_nlattrs(&flow->key, skb);
858         if (err)
859                 goto error;
860         nla_nest_end(skb, nla);
861
862         spin_lock_bh(&flow->lock);
863         used = flow->used;
864         stats.n_packets = flow->packet_count;
865         stats.n_bytes = flow->byte_count;
866         tcp_flags = flow->tcp_flags;
867         spin_unlock_bh(&flow->lock);
868
869         if (used)
870                 NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, flow_used_time(used));
871
872         if (stats.n_packets)
873                 NLA_PUT(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats);
874
875         if (tcp_flags)
876                 NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);
877
878         /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
879          * this is the first flow to be dumped into 'skb'.  This is unusual for
880          * Netlink but individual action lists can be longer than
881          * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
882          * The userspace caller can always fetch the actions separately if it
883          * really wants them.  (Most userspace callers in fact don't care.)
884          *
885          * This can only fail for dump operations because the skb is always
886          * properly sized for single flows.
887          */
888         err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
889                       sf_acts->actions);
890         if (err < 0 && skb_orig_len)
891                 goto error;
892
893         return genlmsg_end(skb, ovs_header);
894
895 nla_put_failure:
896         err = -EMSGSIZE;
897 error:
898         genlmsg_cancel(skb, ovs_header);
899         return err;
900 }
901
902 static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
903 {
904         const struct sw_flow_actions *sf_acts;
905         int len;
906
907         sf_acts = rcu_dereference_protected(flow->sf_acts,
908                                             lockdep_genl_is_held());
909
910         len = nla_total_size(FLOW_BUFSIZE); /* OVS_FLOW_ATTR_KEY */
911         len += nla_total_size(sf_acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
912         len += nla_total_size(sizeof(struct ovs_flow_stats)); /* OVS_FLOW_ATTR_STATS */
913         len += nla_total_size(1); /* OVS_FLOW_ATTR_TCP_FLAGS */
914         len += nla_total_size(8); /* OVS_FLOW_ATTR_USED */
915         return genlmsg_new(NLMSG_ALIGN(sizeof(struct ovs_header)) + len, GFP_KERNEL);
916 }
917
918 static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow, struct datapath *dp,
919                                                u32 pid, u32 seq, u8 cmd)
920 {
921         struct sk_buff *skb;
922         int retval;
923
924         skb = ovs_flow_cmd_alloc_info(flow);
925         if (!skb)
926                 return ERR_PTR(-ENOMEM);
927
928         retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
929         BUG_ON(retval < 0);
930         return skb;
931 }
932
933 static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
934 {
935         struct nlattr **a = info->attrs;
936         struct ovs_header *ovs_header = info->userhdr;
937         struct tbl_node *flow_node;
938         struct sw_flow_key key;
939         struct sw_flow *flow;
940         struct sk_buff *reply;
941         struct datapath *dp;
942         struct tbl *table;
943         u32 hash;
944         int error;
945         int key_len;
946
947         /* Extract key. */
948         error = -EINVAL;
949         if (!a[OVS_FLOW_ATTR_KEY])
950                 goto error;
951         error = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
952         if (error)
953                 goto error;
954
955         /* Validate actions. */
956         if (a[OVS_FLOW_ATTR_ACTIONS]) {
957                 error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS]);
958                 if (error)
959                         goto error;
960         } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
961                 error = -EINVAL;
962                 goto error;
963         }
964
965         dp = get_dp(ovs_header->dp_ifindex);
966         error = -ENODEV;
967         if (!dp)
968                 goto error;
969
970         hash = flow_hash(&key, key_len);
971         table = get_table_protected(dp);
972         flow_node = tbl_lookup(table, &key, key_len, hash, flow_cmp);
973         if (!flow_node) {
974                 struct sw_flow_actions *acts;
975
976                 /* Bail out if we're not allowed to create a new flow. */
977                 error = -ENOENT;
978                 if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
979                         goto error;
980
981                 /* Expand table, if necessary, to make room. */
982                 if (tbl_count(table) >= tbl_n_buckets(table)) {
983                         error = expand_table(dp);
984                         if (error)
985                                 goto error;
986                         table = get_table_protected(dp);
987                 }
988
989                 /* Allocate flow. */
990                 flow = flow_alloc();
991                 if (IS_ERR(flow)) {
992                         error = PTR_ERR(flow);
993                         goto error;
994                 }
995                 flow->key = key;
996                 clear_stats(flow);
997
998                 /* Obtain actions. */
999                 acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
1000                 error = PTR_ERR(acts);
1001                 if (IS_ERR(acts))
1002                         goto error_free_flow;
1003                 rcu_assign_pointer(flow->sf_acts, acts);
1004
1005                 /* Put flow in bucket. */
1006                 error = tbl_insert(table, &flow->tbl_node, hash);
1007                 if (error)
1008                         goto error_free_flow;
1009
1010                 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
1011                                                 info->snd_seq, OVS_FLOW_CMD_NEW);
1012         } else {
1013                 /* We found a matching flow. */
1014                 struct sw_flow_actions *old_acts;
1015
1016                 /* Bail out if we're not allowed to modify an existing flow.
1017                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1018                  * because Generic Netlink treats the latter as a dump
1019                  * request.  We also accept NLM_F_EXCL in case that bug ever
1020                  * gets fixed.
1021                  */
1022                 error = -EEXIST;
1023                 if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
1024                     info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
1025                         goto error;
1026
1027                 /* Update actions. */
1028                 flow = flow_cast(flow_node);
1029                 old_acts = rcu_dereference_protected(flow->sf_acts,
1030                                                      lockdep_genl_is_held());
1031                 if (a[OVS_FLOW_ATTR_ACTIONS] &&
1032                     (old_acts->actions_len != nla_len(a[OVS_FLOW_ATTR_ACTIONS]) ||
1033                      memcmp(old_acts->actions, nla_data(a[OVS_FLOW_ATTR_ACTIONS]),
1034                             old_acts->actions_len))) {
1035                         struct sw_flow_actions *new_acts;
1036
1037                         new_acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
1038                         error = PTR_ERR(new_acts);
1039                         if (IS_ERR(new_acts))
1040                                 goto error;
1041
1042                         rcu_assign_pointer(flow->sf_acts, new_acts);
1043                         flow_deferred_free_acts(old_acts);
1044                 }
1045
1046                 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
1047                                                 info->snd_seq, OVS_FLOW_CMD_NEW);
1048
1049                 /* Clear stats. */
1050                 if (a[OVS_FLOW_ATTR_CLEAR]) {
1051                         spin_lock_bh(&flow->lock);
1052                         clear_stats(flow);
1053                         spin_unlock_bh(&flow->lock);
1054                 }
1055         }
1056
1057         if (!IS_ERR(reply))
1058                 genl_notify(reply, genl_info_net(info), info->snd_pid,
1059                             dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1060         else
1061                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1062                                 dp_flow_multicast_group.id, PTR_ERR(reply));
1063         return 0;
1064
1065 error_free_flow:
1066         flow_put(flow);
1067 error:
1068         return error;
1069 }
1070
1071 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1072 {
1073         struct nlattr **a = info->attrs;
1074         struct ovs_header *ovs_header = info->userhdr;
1075         struct sw_flow_key key;
1076         struct tbl_node *flow_node;
1077         struct sk_buff *reply;
1078         struct sw_flow *flow;
1079         struct datapath *dp;
1080         struct tbl *table;
1081         int err;
1082         int key_len;
1083
1084         if (!a[OVS_FLOW_ATTR_KEY])
1085                 return -EINVAL;
1086         err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1087         if (err)
1088                 return err;
1089
1090         dp = get_dp(ovs_header->dp_ifindex);
1091         if (!dp)
1092                 return -ENODEV;
1093
1094         table = get_table_protected(dp);
1095         flow_node = tbl_lookup(table, &key, key_len, flow_hash(&key, key_len),
1096                                flow_cmp);
1097         if (!flow_node)
1098                 return -ENOENT;
1099
1100         flow = flow_cast(flow_node);
1101         reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, OVS_FLOW_CMD_NEW);
1102         if (IS_ERR(reply))
1103                 return PTR_ERR(reply);
1104
1105         return genlmsg_reply(reply, info);
1106 }
1107
1108 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1109 {
1110         struct nlattr **a = info->attrs;
1111         struct ovs_header *ovs_header = info->userhdr;
1112         struct sw_flow_key key;
1113         struct tbl_node *flow_node;
1114         struct sk_buff *reply;
1115         struct sw_flow *flow;
1116         struct datapath *dp;
1117         struct tbl *table;
1118         int err;
1119         int key_len;
1120
1121         if (!a[OVS_FLOW_ATTR_KEY])
1122                 return flush_flows(ovs_header->dp_ifindex);
1123         err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1124         if (err)
1125                 return err;
1126
1127         dp = get_dp(ovs_header->dp_ifindex);
1128         if (!dp)
1129                 return -ENODEV;
1130
1131         table = get_table_protected(dp);
1132         flow_node = tbl_lookup(table, &key, key_len, flow_hash(&key, key_len),
1133                                flow_cmp);
1134         if (!flow_node)
1135                 return -ENOENT;
1136         flow = flow_cast(flow_node);
1137
1138         reply = ovs_flow_cmd_alloc_info(flow);
1139         if (!reply)
1140                 return -ENOMEM;
1141
1142         err = tbl_remove(table, flow_node);
1143         if (err) {
1144                 kfree_skb(reply);
1145                 return err;
1146         }
1147
1148         err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
1149                                      info->snd_seq, 0, OVS_FLOW_CMD_DEL);
1150         BUG_ON(err < 0);
1151
1152         flow_deferred_free(flow);
1153
1154         genl_notify(reply, genl_info_net(info), info->snd_pid,
1155                     dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1156         return 0;
1157 }
1158
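/* Flow dumps are resumable: cb->args[0] and cb->args[1] carry the (bucket,
 * object) cursor from the previous pass and are only advanced after a flow
 * has been written into 'skb' successfully, so a flow that did not fit is
 * retried on the next call. */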
1159 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1160 {
1161         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1162         struct datapath *dp;
1163
1164         dp = get_dp(ovs_header->dp_ifindex);
1165         if (!dp)
1166                 return -ENODEV;
1167
1168         for (;;) {
1169                 struct tbl_node *flow_node;
1170                 struct sw_flow *flow;
1171                 u32 bucket, obj;
1172
1173                 bucket = cb->args[0];
1174                 obj = cb->args[1];
1175                 flow_node = tbl_next(get_table_protected(dp), &bucket, &obj);
1176                 if (!flow_node)
1177                         break;
1178
1179                 flow = flow_cast(flow_node);
1180                 if (ovs_flow_cmd_fill_info(flow, dp, skb, NETLINK_CB(cb->skb).pid,
1181                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1182                                            OVS_FLOW_CMD_NEW) < 0)
1183                         break;
1184
1185                 cb->args[0] = bucket;
1186                 cb->args[1] = obj;
1187         }
1188         return skb->len;
1189 }
1190
1191 static struct genl_ops dp_flow_genl_ops[] = {
1192         { .cmd = OVS_FLOW_CMD_NEW,
1193           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1194           .policy = flow_policy,
1195           .doit = ovs_flow_cmd_new_or_set
1196         },
1197         { .cmd = OVS_FLOW_CMD_DEL,
1198           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1199           .policy = flow_policy,
1200           .doit = ovs_flow_cmd_del
1201         },
1202         { .cmd = OVS_FLOW_CMD_GET,
1203           .flags = 0,               /* OK for unprivileged users. */
1204           .policy = flow_policy,
1205           .doit = ovs_flow_cmd_get,
1206           .dumpit = ovs_flow_cmd_dump
1207         },
1208         { .cmd = OVS_FLOW_CMD_SET,
1209           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1210           .policy = flow_policy,
1211           .doit = ovs_flow_cmd_new_or_set,
1212         },
1213 };
1214
1215 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1216 #ifdef HAVE_NLA_NUL_STRING
1217         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1218 #endif
1219         [OVS_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 },
1220         [OVS_DP_ATTR_SAMPLING] = { .type = NLA_U32 },
1221 };
1222
1223 static struct genl_family dp_datapath_genl_family = {
1224         .id = GENL_ID_GENERATE,
1225         .hdrsize = sizeof(struct ovs_header),
1226         .name = OVS_DATAPATH_FAMILY,
1227         .version = 1,
1228         .maxattr = OVS_DP_ATTR_MAX
1229 };
1230
1231 static struct genl_multicast_group dp_datapath_multicast_group = {
1232         .name = OVS_DATAPATH_MCGROUP
1233 };
1234
1235 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1236                                 u32 pid, u32 seq, u32 flags, u8 cmd)
1237 {
1238         struct ovs_header *ovs_header;
1239         struct nlattr *nla;
1240         int err;
1241
1242         ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
1243                                    flags, cmd);
1244         if (!ovs_header)
1245                 goto error;
1246
1247         ovs_header->dp_ifindex = dp->dp_ifindex;
1248
1249         rcu_read_lock();
1250         err = nla_put_string(skb, OVS_DP_ATTR_NAME, dp_name(dp));
1251         rcu_read_unlock();
1252         if (err)
1253                 goto nla_put_failure;
1254
1255         nla = nla_reserve(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats));
1256         if (!nla)
1257                 goto nla_put_failure;
1258         get_dp_stats(dp, nla_data(nla));
1259
1260         NLA_PUT_U32(skb, OVS_DP_ATTR_IPV4_FRAGS,
1261                     dp->drop_frags ? OVS_DP_FRAG_DROP : OVS_DP_FRAG_ZERO);
1262
1263         if (dp->sflow_probability)
1264                 NLA_PUT_U32(skb, OVS_DP_ATTR_SAMPLING, dp->sflow_probability);
1265
1266         nla = nla_nest_start(skb, OVS_DP_ATTR_MCGROUPS);
1267         if (!nla)
1268                 goto nla_put_failure;
1269         NLA_PUT_U32(skb, OVS_PACKET_CMD_MISS, packet_mc_group(dp, OVS_PACKET_CMD_MISS));
1270         NLA_PUT_U32(skb, OVS_PACKET_CMD_ACTION, packet_mc_group(dp, OVS_PACKET_CMD_ACTION));
1271         NLA_PUT_U32(skb, OVS_PACKET_CMD_SAMPLE, packet_mc_group(dp, OVS_PACKET_CMD_SAMPLE));
1272         nla_nest_end(skb, nla);
1273
1274         return genlmsg_end(skb, ovs_header);
1275
1276 nla_put_failure:
1277         genlmsg_cancel(skb, ovs_header);
1278 error:
1279         return -EMSGSIZE;
1280 }
1281
1282 static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
1283                                              u32 seq, u8 cmd)
1284 {
1285         struct sk_buff *skb;
1286         int retval;
1287
1288         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1289         if (!skb)
1290                 return ERR_PTR(-ENOMEM);
1291
1292         retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
1293         if (retval < 0) {
1294                 kfree_skb(skb);
1295                 return ERR_PTR(retval);
1296         }
1297         return skb;
1298 }
1299
1300 static int ovs_dp_cmd_validate(struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1301 {
1302         if (a[OVS_DP_ATTR_IPV4_FRAGS]) {
1303                 u32 frags = nla_get_u32(a[OVS_DP_ATTR_IPV4_FRAGS]);
1304
1305                 if (frags != OVS_DP_FRAG_ZERO && frags != OVS_DP_FRAG_DROP)
1306                         return -EINVAL;
1307         }
1308
1309         return CHECK_NUL_STRING(a[OVS_DP_ATTR_NAME], IFNAMSIZ - 1);
1310 }
1311
1312 /* Called with genl_mutex and optionally with RTNL lock also. */
1313 static struct datapath *lookup_datapath(struct ovs_header *ovs_header, struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1314 {
1315         struct datapath *dp;
1316
1317         if (!a[OVS_DP_ATTR_NAME])
1318                 dp = get_dp(ovs_header->dp_ifindex);
1319         else {
1320                 struct vport *vport;
1321
1322                 rcu_read_lock();
1323                 vport = vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
1324                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1325                 rcu_read_unlock();
1326         }
1327         return dp ? dp : ERR_PTR(-ENODEV);
1328 }
1329
1330 /* Called with genl_mutex. */
1331 static void change_datapath(struct datapath *dp, struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1332 {
1333         if (a[OVS_DP_ATTR_IPV4_FRAGS])
1334                 dp->drop_frags = nla_get_u32(a[OVS_DP_ATTR_IPV4_FRAGS]) == OVS_DP_FRAG_DROP;
1335         if (a[OVS_DP_ATTR_SAMPLING])
1336                 dp->sflow_probability = nla_get_u32(a[OVS_DP_ATTR_SAMPLING]);
1337 }
1338
1339 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1340 {
1341         struct nlattr **a = info->attrs;
1342         struct vport_parms parms;
1343         struct sk_buff *reply;
1344         struct datapath *dp;
1345         struct vport *vport;
1346         int err;
1347
1348         err = -EINVAL;
1349         if (!a[OVS_DP_ATTR_NAME])
1350                 goto err;
1351
1352         err = ovs_dp_cmd_validate(a);
1353         if (err)
1354                 goto err;
1355
1356         rtnl_lock();
1357         err = -ENODEV;
1358         if (!try_module_get(THIS_MODULE))
1359                 goto err_unlock_rtnl;
1360
1361         err = -ENOMEM;
1362         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1363         if (dp == NULL)
1364                 goto err_put_module;
1365         INIT_LIST_HEAD(&dp->port_list);
1366
1367         /* Initialize kobject for bridge.  This will be added as
1368          * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
1369         dp->ifobj.kset = NULL;
1370         kobject_init(&dp->ifobj, &dp_ktype);
1371
1372         /* Allocate table. */
1373         err = -ENOMEM;
1374         rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
1375         if (!dp->table)
1376                 goto err_free_dp;
1377
1378         /* Set up our datapath device. */
1379         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1380         parms.type = OVS_VPORT_TYPE_INTERNAL;
1381         parms.options = NULL;
1382         parms.dp = dp;
1383         parms.port_no = OVSP_LOCAL;
1384         vport = new_vport(&parms);
1385         if (IS_ERR(vport)) {
1386                 err = PTR_ERR(vport);
1387                 if (err == -EBUSY)
1388                         err = -EEXIST;
1389
1390                 goto err_destroy_table;
1391         }
1392         dp->dp_ifindex = vport_get_ifindex(vport);
1393
1394         dp->drop_frags = 0;
1395         dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1396         if (!dp->stats_percpu) {
1397                 err = -ENOMEM;
1398                 goto err_destroy_local_port;
1399         }
1400
1401         change_datapath(dp, a);
1402
1403         reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
1404         err = PTR_ERR(reply);
1405         if (IS_ERR(reply))
1406                 goto err_destroy_local_port;
1407
1408         list_add_tail(&dp->list_node, &dps);
1409         dp_sysfs_add_dp(dp);
1410
1411         rtnl_unlock();
1412
1413         genl_notify(reply, genl_info_net(info), info->snd_pid,
1414                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1415         return 0;
1416
1417 err_destroy_local_port:
1418         dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
1419 err_destroy_table:
1420         tbl_destroy(get_table_protected(dp), NULL);
1421 err_free_dp:
1422         kfree(dp);
1423 err_put_module:
1424         module_put(THIS_MODULE);
1425 err_unlock_rtnl:
1426         rtnl_unlock();
1427 err:
1428         return err;
1429 }
1430
1431 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1432 {
1433         struct vport *vport, *next_vport;
1434         struct sk_buff *reply;
1435         struct datapath *dp;
1436         int err;
1437
1438         err = ovs_dp_cmd_validate(info->attrs);
1439         if (err)
1440                 goto exit;
1441
1442         rtnl_lock();
1443         dp = lookup_datapath(info->userhdr, info->attrs);
1444         err = PTR_ERR(dp);
1445         if (IS_ERR(dp))
1446                 goto exit_unlock;
1447
1448         reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_DEL);
1449         err = PTR_ERR(reply);
1450         if (IS_ERR(reply))
1451                 goto exit_unlock;
1452
1453         list_for_each_entry_safe (vport, next_vport, &dp->port_list, node)
1454                 if (vport->port_no != OVSP_LOCAL)
1455                         dp_detach_port(vport);
1456
1457         dp_sysfs_del_dp(dp);
1458         list_del(&dp->list_node);
1459         dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
1460
1461         /* rtnl_unlock() will wait until all the references to devices that
1462          * are pending unregistration have been dropped.  We do it here to
1463          * ensure that any internal devices (which contain DP pointers) are
1464          * fully destroyed before freeing the datapath.
1465          */
1466         rtnl_unlock();
1467
1468         call_rcu(&dp->rcu, destroy_dp_rcu);
1469         module_put(THIS_MODULE);
1470
1471         genl_notify(reply, genl_info_net(info), info->snd_pid,
1472                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1473
1474         return 0;
1475
1476 exit_unlock:
1477         rtnl_unlock();
1478 exit:
1479         return err;
1480 }
1481
1482 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1483 {
1484         struct sk_buff *reply;
1485         struct datapath *dp;
1486         int err;
1487
1488         err = ovs_dp_cmd_validate(info->attrs);
1489         if (err)
1490                 return err;
1491
1492         dp = lookup_datapath(info->userhdr, info->attrs);
1493         if (IS_ERR(dp))
1494                 return PTR_ERR(dp);
1495
1496         change_datapath(dp, info->attrs);
1497
1498         reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
1499         if (IS_ERR(reply)) {
1500                 err = PTR_ERR(reply);
1501                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1502                                 dp_datapath_multicast_group.id, err);
1503                 return 0;
1504         }
1505
1506         genl_notify(reply, genl_info_net(info), info->snd_pid,
1507                     dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
1508         return 0;
1509 }
1510
1511 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1512 {
1513         struct sk_buff *reply;
1514         struct datapath *dp;
1515         int err;
1516
1517         err = ovs_dp_cmd_validate(info->attrs);
1518         if (err)
1519                 return err;
1520
1521         dp = lookup_datapath(info->userhdr, info->attrs);
1522         if (IS_ERR(dp))
1523                 return PTR_ERR(dp);
1524
1525         reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
1526         if (IS_ERR(reply))
1527                 return PTR_ERR(reply);
1528
1529         return genlmsg_reply(reply, info);
1530 }
1531
1532 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1533 {
1534         struct datapath *dp;
1535         int skip = cb->args[0];
1536         int i = 0;
1537
1538         list_for_each_entry (dp, &dps, list_node) {
1539                 if (i >= skip &&
1540                     ovs_dp_cmd_fill_info(dp, skb,
1541                                          NETLINK_CB(cb->skb).pid,
1542                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1543                                          OVS_DP_CMD_NEW) < 0)
1544                         break;
1545                 i++;
1546         }
1547
1548         cb->args[0] = i;
1549
1550         return skb->len;
1551 }
1552
1553 static struct genl_ops dp_datapath_genl_ops[] = {
1554         { .cmd = OVS_DP_CMD_NEW,
1555           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1556           .policy = datapath_policy,
1557           .doit = ovs_dp_cmd_new
1558         },
1559         { .cmd = OVS_DP_CMD_DEL,
1560           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1561           .policy = datapath_policy,
1562           .doit = ovs_dp_cmd_del
1563         },
1564         { .cmd = OVS_DP_CMD_GET,
1565           .flags = 0,               /* OK for unprivileged users. */
1566           .policy = datapath_policy,
1567           .doit = ovs_dp_cmd_get,
1568           .dumpit = ovs_dp_cmd_dump
1569         },
1570         { .cmd = OVS_DP_CMD_SET,
1571           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1572           .policy = datapath_policy,
1573           .doit = ovs_dp_cmd_set,
1574         },
1575 };
1576
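/* Kernels without NLA_NUL_STRING support cannot express string or exact
 * length checks in the policy, so that branch only enforces minimum lengths;
 * the NUL-string check on OVS_VPORT_ATTR_NAME is then done separately via
 * CHECK_NUL_STRING() in ovs_vport_cmd_validate(). */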
1577 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
1578 #ifdef HAVE_NLA_NUL_STRING
1579         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1580         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1581         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1582         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct rtnl_link_stats64) },
1583         [OVS_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
1584 #else
1585         [OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct rtnl_link_stats64) },
1586         [OVS_VPORT_ATTR_ADDRESS] = { .minlen = ETH_ALEN },
1587 #endif
1588         [OVS_VPORT_ATTR_MTU] = { .type = NLA_U32 },
1589         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1590 };
1591
1592 static struct genl_family dp_vport_genl_family = {
1593         .id = GENL_ID_GENERATE,
1594         .hdrsize = sizeof(struct ovs_header),
1595         .name = OVS_VPORT_FAMILY,
1596         .version = 1,
1597         .maxattr = OVS_VPORT_ATTR_MAX
1598 };
1599
1600 struct genl_multicast_group dp_vport_multicast_group = {
1601         .name = OVS_VPORT_MCGROUP
1602 };
1603
1604 /* Called with RTNL lock or RCU read lock. */
1605 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1606                                    u32 pid, u32 seq, u32 flags, u8 cmd)
1607 {
1608         struct ovs_header *ovs_header;
1609         struct nlattr *nla;
1610         int ifindex;
1611         int mtu;
1612         int err;
1613
1614         ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
1615                                  flags, cmd);
1616         if (!ovs_header)
1617                 return -EMSGSIZE;
1618
1619         ovs_header->dp_ifindex = vport->dp->dp_ifindex;
1620
1621         NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
1622         NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport_get_type(vport));
1623         NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport_get_name(vport));
1624
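        /* Reserve room for the stats attribute and fill it in place.  If the
         * vport cannot supply stats, trim the reserved attribute back off
         * instead of failing the whole message. */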
1625         nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS, sizeof(struct rtnl_link_stats64));
1626         if (!nla)
1627                 goto nla_put_failure;
1628         if (vport_get_stats(vport, nla_data(nla)))
1629                 __skb_trim(skb, skb->len - nla->nla_len);
1630
1631         NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));
1632
1633         mtu = vport_get_mtu(vport);
1634         if (mtu)
1635                 NLA_PUT_U32(skb, OVS_VPORT_ATTR_MTU, mtu);
1636
1637         err = vport_get_options(vport, skb);
1638         if (err == -EMSGSIZE)
1639                 goto error;
1640
1641         ifindex = vport_get_ifindex(vport);
1642         if (ifindex > 0)
1643                 NLA_PUT_U32(skb, OVS_VPORT_ATTR_IFINDEX, ifindex);
1644
1645         return genlmsg_end(skb, ovs_header);
1646
1647 nla_put_failure:
1648         err = -EMSGSIZE;
1649 error:
1650         genlmsg_cancel(skb, ovs_header);
1651         return err;
1652 }
1653
1654 /* Called with RTNL lock or RCU read lock. */
1655 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
1656                                          u32 seq, u8 cmd)
1657 {
1658         struct sk_buff *skb;
1659         int retval;
1660
1661         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1662         if (!skb)
1663                 return ERR_PTR(-ENOMEM);
1664
1665         retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
1666         if (retval < 0) {
1667                 kfree_skb(skb);
1668                 return ERR_PTR(retval);
1669         }
1670         return skb;
1671 }
1672
1673 static int ovs_vport_cmd_validate(struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1674 {
1675         return CHECK_NUL_STRING(a[OVS_VPORT_ATTR_NAME], IFNAMSIZ - 1);
1676 }
1677
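/* Resolve the target vport by name if OVS_VPORT_ATTR_NAME is supplied,
 * otherwise by OVS_VPORT_ATTR_PORT_NO within the datapath identified by
 * ovs_header->dp_ifindex. */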
1678 /* Called with RTNL lock or RCU read lock. */
1679 static struct vport *lookup_vport(struct ovs_header *ovs_header,
1680                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1681 {
1682         struct datapath *dp;
1683         struct vport *vport;
1684
1685         if (a[OVS_VPORT_ATTR_NAME]) {
1686                 vport = vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
1687                 if (!vport)
1688                         return ERR_PTR(-ENODEV);
1689                 return vport;
1690         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1691                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1692
1693                 if (port_no >= DP_MAX_PORTS)
1694                         return ERR_PTR(-EFBIG);
1695
1696                 dp = get_dp(ovs_header->dp_ifindex);
1697                 if (!dp)
1698                         return ERR_PTR(-ENODEV);
1699
1700                 vport = get_vport_protected(dp, port_no);
1701                 if (!vport)
1702                         return ERR_PTR(-ENOENT);
1703                 return vport;
1704         } else
1705                 return ERR_PTR(-EINVAL);
1706 }
1707
1708 /* Called with RTNL lock. */
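/* Apply the optional OVS_VPORT_ATTR_STATS, OVS_VPORT_ATTR_ADDRESS and
 * OVS_VPORT_ATTR_MTU attributes to a vport, stopping at the first error. */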
1709 static int change_vport(struct vport *vport, struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1710 {
1711         int err = 0;
1712         if (a[OVS_VPORT_ATTR_STATS])
1713                 err = vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
1714         if (!err && a[OVS_VPORT_ATTR_ADDRESS])
1715                 err = vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS]));
1716         if (!err && a[OVS_VPORT_ATTR_MTU])
1717                 err = vport_set_mtu(vport, nla_get_u32(a[OVS_VPORT_ATTR_MTU]));
1718         return err;
1719 }
1720
1721 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1722 {
1723         struct nlattr **a = info->attrs;
1724         struct ovs_header *ovs_header = info->userhdr;
1725         struct vport_parms parms;
1726         struct sk_buff *reply;
1727         struct vport *vport;
1728         struct datapath *dp;
1729         u32 port_no;
1730         int err;
1731
1732         err = -EINVAL;
1733         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE])
1734                 goto exit;
1735
1736         err = ovs_vport_cmd_validate(a);
1737         if (err)
1738                 goto exit;
1739
1740         rtnl_lock();
1741         dp = get_dp(ovs_header->dp_ifindex);
1742         err = -ENODEV;
1743         if (!dp)
1744                 goto exit_unlock;
1745
1746         if (a[OVS_VPORT_ATTR_PORT_NO]) {
1747                 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1748
1749                 err = -EFBIG;
1750                 if (port_no >= DP_MAX_PORTS)
1751                         goto exit_unlock;
1752
1753                 vport = get_vport_protected(dp, port_no);
1754                 err = -EBUSY;
1755                 if (vport)
1756                         goto exit_unlock;
1757         } else {
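                /* No port number requested: pick the lowest free one,
                 * starting at 1 since port 0 (OVSP_LOCAL) is the datapath's
                 * own internal port. */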
1758                 for (port_no = 1; ; port_no++) {
1759                         if (port_no >= DP_MAX_PORTS) {
1760                                 err = -EFBIG;
1761                                 goto exit_unlock;
1762                         }
1763                         vport = get_vport_protected(dp, port_no);
1764                         if (!vport)
1765                                 break;
1766                 }
1767         }
1768
1769         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1770         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1771         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1772         parms.dp = dp;
1773         parms.port_no = port_no;
1774
1775         vport = new_vport(&parms);
1776         err = PTR_ERR(vport);
1777         if (IS_ERR(vport))
1778                 goto exit_unlock;
1779
1780         set_internal_devs_mtu(dp);
1781         dp_sysfs_add_if(vport);
1782
1783         err = change_vport(vport, a);
1784         if (!err) {
1785                 reply = ovs_vport_cmd_build_info(vport, info->snd_pid,
1786                                                  info->snd_seq, OVS_VPORT_CMD_NEW);
1787                 if (IS_ERR(reply))
1788                         err = PTR_ERR(reply);
1789         }
1790         if (err) {
1791                 dp_detach_port(vport);
1792                 goto exit_unlock;
1793         }
1794         genl_notify(reply, genl_info_net(info), info->snd_pid,
1795                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1796
1797
1798 exit_unlock:
1799         rtnl_unlock();
1800 exit:
1801         return err;
1802 }
1803
1804 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1805 {
1806         struct nlattr **a = info->attrs;
1807         struct sk_buff *reply;
1808         struct vport *vport;
1809         int err;
1810
1811         err = ovs_vport_cmd_validate(a);
1812         if (err)
1813                 goto exit;
1814
1815         rtnl_lock();
1816         vport = lookup_vport(info->userhdr, a);
1817         err = PTR_ERR(vport);
1818         if (IS_ERR(vport))
1819                 goto exit_unlock;
1820
1821         err = 0;
1822         if (a[OVS_VPORT_ATTR_OPTIONS])
1823                 err = vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
1824         if (!err)
1825                 err = change_vport(vport, a);
1826
1827         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1828                                          OVS_VPORT_CMD_NEW);
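        /* As in ovs_dp_cmd_set(), the changes have already been applied; a
         * failure to build the notification is reported to the multicast
         * listeners rather than returned to the caller. */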
1829         if (IS_ERR(reply)) {
1830                 netlink_set_err(INIT_NET_GENL_SOCK, 0,
1831                                 dp_vport_multicast_group.id, PTR_ERR(reply));
1832                 err = 0;
1833                 goto exit_unlock;
1834         }
1835
1836         genl_notify(reply, genl_info_net(info), info->snd_pid,
1837                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1838
1839 exit_unlock:
1840         rtnl_unlock();
1841 exit:
1842         return err;
1843 }
1844
1845 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1846 {
1847         struct nlattr **a = info->attrs;
1848         struct sk_buff *reply;
1849         struct vport *vport;
1850         int err;
1851
1852         err = ovs_vport_cmd_validate(a);
1853         if (err)
1854                 goto exit;
1855
1856         rtnl_lock();
1857         vport = lookup_vport(info->userhdr, a);
1858         err = PTR_ERR(vport);
1859         if (IS_ERR(vport))
1860                 goto exit_unlock;
1861
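        /* The local port is created and destroyed together with the datapath
         * itself, so refuse to delete it on its own. */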
1862         if (vport->port_no == OVSP_LOCAL) {
1863                 err = -EINVAL;
1864                 goto exit_unlock;
1865         }
1866
1867         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1868                                          OVS_VPORT_CMD_DEL);
1869         err = PTR_ERR(reply);
1870         if (IS_ERR(reply))
1871                 goto exit_unlock;
1872
1873         err = dp_detach_port(vport);
1874
1875         genl_notify(reply, genl_info_net(info), info->snd_pid,
1876                     dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1877
1878 exit_unlock:
1879         rtnl_unlock();
1880 exit:
1881         return err;
1882 }
1883
1884 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
1885 {
1886         struct nlattr **a = info->attrs;
1887         struct ovs_header *ovs_header = info->userhdr;
1888         struct sk_buff *reply;
1889         struct vport *vport;
1890         int err;
1891
1892         err = ovs_vport_cmd_validate(a);
1893         if (err)
1894                 goto exit;
1895
1896         rcu_read_lock();
1897         vport = lookup_vport(ovs_header, a);
1898         err = PTR_ERR(vport);
1899         if (IS_ERR(vport))
1900                 goto exit_unlock;
1901
1902         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1903                                          OVS_VPORT_CMD_NEW);
1904         err = PTR_ERR(reply);
1905         if (IS_ERR(reply))
1906                 goto exit_unlock;
1907
1908         rcu_read_unlock();
1909
1910         return genlmsg_reply(reply, info);
1911
1912 exit_unlock:
1913         rcu_read_unlock();
1914 exit:
1915         return err;
1916 }
1917
1918 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1919 {
1920         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1921         struct datapath *dp;
1922         u32 port_no;
1923         int retval;
1924
1925         dp = get_dp(ovs_header->dp_ifindex);
1926         if (!dp)
1927                 return -ENODEV;
1928
1929         rcu_read_lock();
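        /* cb->args[0] holds the port number at which to resume on the next
         * invocation of this dump. */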
1930         for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
1931                 struct vport *vport;
1932
1933                 vport = get_vport_protected(dp, port_no);
1934                 if (!vport)
1935                         continue;
1936
1937                 if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
1938                                             cb->nlh->nlmsg_seq, NLM_F_MULTI,
1939                                             OVS_VPORT_CMD_NEW) < 0)
1940                         break;
1941         }
1942         rcu_read_unlock();
1943
1944         cb->args[0] = port_no;
1945         retval = skb->len;
1946
1947         return retval;
1948 }
1949
1950 static struct genl_ops dp_vport_genl_ops[] = {
1951         { .cmd = OVS_VPORT_CMD_NEW,
1952           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1953           .policy = vport_policy,
1954           .doit = ovs_vport_cmd_new
1955         },
1956         { .cmd = OVS_VPORT_CMD_DEL,
1957           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1958           .policy = vport_policy,
1959           .doit = ovs_vport_cmd_del
1960         },
1961         { .cmd = OVS_VPORT_CMD_GET,
1962           .flags = 0,               /* OK for unprivileged users. */
1963           .policy = vport_policy,
1964           .doit = ovs_vport_cmd_get,
1965           .dumpit = ovs_vport_cmd_dump
1966         },
1967         { .cmd = OVS_VPORT_CMD_SET,
1968           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1969           .policy = vport_policy,
1970           .doit = ovs_vport_cmd_set,
1971         },
1972 };
1973
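/* Bundles a Generic Netlink family with its operations and optional
 * multicast group so that they can be registered and unregistered as a
 * unit. */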
1974 struct genl_family_and_ops {
1975         struct genl_family *family;
1976         struct genl_ops *ops;
1977         int n_ops;
1978         struct genl_multicast_group *group;
1979 };
1980
1981 static const struct genl_family_and_ops dp_genl_families[] = {
1982         { &dp_datapath_genl_family,
1983           dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
1984           &dp_datapath_multicast_group },
1985         { &dp_vport_genl_family,
1986           dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
1987           &dp_vport_multicast_group },
1988         { &dp_flow_genl_family,
1989           dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
1990           &dp_flow_multicast_group },
1991         { &dp_packet_genl_family,
1992           dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
1993           NULL },
1994 };
1995
1996 static void dp_unregister_genl(int n_families)
1997 {
1998         int i;
1999
2000         for (i = 0; i < n_families; i++)
2001                 genl_unregister_family(dp_genl_families[i].family);
2002 }
2003
2004 static int dp_register_genl(void)
2005 {
2006         int n_registered;
2007         int err;
2008         int i;
2009
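        /* n_registered counts the families registered so far, so a failure
         * part way through unwinds only those. */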
2010         n_registered = 0;
2011         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2012                 const struct genl_family_and_ops *f = &dp_genl_families[i];
2013
2014                 err = genl_register_family_with_ops(f->family, f->ops,
2015                                                     f->n_ops);
2016                 if (err)
2017                         goto error;
2018                 n_registered++;
2019
2020                 if (f->group) {
2021                         err = genl_register_mc_group(f->family, f->group);
2022                         if (err)
2023                                 goto error;
2024                 }
2025         }
2026
2027         err = packet_register_mc_groups();
2028         if (err)
2029                 goto error;
2030         return 0;
2031
2032 error:
2033         dp_unregister_genl(n_registered);
2034         return err;
2035 }
2036
2037 static int __init dp_init(void)
2038 {
2039         struct sk_buff *dummy_skb;
2040         int err;
2041
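        /* struct ovs_skb_cb is stored in skb->cb; make sure it fits. */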
2042         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
2043
2044         printk(KERN_INFO "Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
2045
2046         err = flow_init();
2047         if (err)
2048                 goto error;
2049
2050         err = vport_init();
2051         if (err)
2052                 goto error_flow_exit;
2053
2054         err = register_netdevice_notifier(&dp_device_notifier);
2055         if (err)
2056                 goto error_vport_exit;
2057
2058         err = dp_register_genl();
2059         if (err < 0)
2060                 goto error_unreg_notifier;
2061
2062         return 0;
2063
2064 error_unreg_notifier:
2065         unregister_netdevice_notifier(&dp_device_notifier);
2066 error_vport_exit:
2067         vport_exit();
2068 error_flow_exit:
2069         flow_exit();
2070 error:
2071         return err;
2072 }
2073
2074 static void dp_cleanup(void)
2075 {
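        /* Wait for outstanding call_rcu() callbacks (e.g. destroy_dp_rcu())
         * to finish before the code they reference is unloaded. */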
2076         rcu_barrier();
2077         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2078         unregister_netdevice_notifier(&dp_device_notifier);
2079         vport_exit();
2080         flow_exit();
2081 }
2082
2083 module_init(dp_init);
2084 module_exit(dp_cleanup);
2085
2086 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2087 MODULE_LICENSE("GPL");