/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "checksum.h"
#include "datapath.h"
#include "flow.h"
#include "genl_exec.h"
#include "vlan.h"
#include "tunnel.h"
#include "vport-internal_dev.h"

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
    LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
#error Kernels before 2.6.18 or after 3.5 are not supported by this version of Open vSwitch.
#endif

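/* The delayed work declared below rehashes the flow table every
 * REHASH_FLOW_INTERVAL jiffies (ten minutes); it is presumably scheduled
 * at module init, and a periodic rehash with a fresh random basis blunts
 * hash-collision attacks on the flow table. */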
#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
static void rehash_flow_table(struct work_struct *work);
static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);

int ovs_net_id __read_mostly;

int (*ovs_dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(ovs_dp_ioctl_hook);

/**
 * DOC: Locking:
 *
 * Writes to device state (add/remove datapath, port, set operations on vports,
 * etc.) are protected by RTNL.
 *
 * Writes to other state (flow table modifications, set miscellaneous datapath
 * parameters, etc.) are protected by genl_mutex.  The RTNL lock nests inside
 * genl_mutex.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact with
 * each other.
 */
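
/*
 * Illustrative sketch of the pattern described above (not additional API):
 * readers walk RCU-protected state, while writers hold RTNL, e.g.:
 *
 *      rcu_read_lock();
 *      vport = ovs_vport_rcu(dp, OVSP_LOCAL);
 *      ...use vport...
 *      rcu_read_unlock();
 *
 *      rtnl_lock();
 *      ovs_dp_detach_port(vport);
 *      rtnl_unlock();
 */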

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
                             const struct dp_upcall_info *);
static int queue_userspace_packet(struct net *, int dp_ifindex,
                                  struct sk_buff *,
                                  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
static struct datapath *get_dp(struct net *net, int dp_ifindex)
{
        struct datapath *dp = NULL;
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, dp_ifindex);
        if (dev) {
                struct vport *vport = ovs_internal_dev_get_vport(dev);
                if (vport)
                        dp = vport->dp;
        }
        rcu_read_unlock();

        return dp;
}

/* Must be called with rcu_read_lock or RTNL lock. */
const char *ovs_dp_name(const struct datapath *dp)
{
        struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL);
        return vport->ops->get_name(vport);
}

static int get_dpifindex(struct datapath *dp)
{
        struct vport *local;
        int ifindex;

        rcu_read_lock();

        local = ovs_vport_rcu(dp, OVSP_LOCAL);
        if (local)
                ifindex = local->ops->get_ifindex(local);
        else
                ifindex = 0;

        rcu_read_unlock();

        return ifindex;
}

static size_t br_nlmsg_size(void)
{
        return NLMSG_ALIGN(sizeof(struct ifinfomsg))
               + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
               + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
               + nla_total_size(4) /* IFLA_MASTER */
               + nla_total_size(4) /* IFLA_MTU */
               + nla_total_size(1); /* IFLA_OPERSTATE */
}

/* Caller must hold RTNL lock. */
static int dp_fill_ifinfo(struct sk_buff *skb,
                          const struct vport *port,
                          int event, unsigned int flags)
{
        struct datapath *dp = port->dp;
        struct ifinfomsg *hdr;
        struct nlmsghdr *nlh;

        if (!port->ops->get_ifindex)
                return -ENODEV;

        nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        hdr = nlmsg_data(nlh);
        hdr->ifi_family = AF_BRIDGE;
        hdr->__ifi_pad = 0;
        hdr->ifi_type = ARPHRD_ETHER;
        hdr->ifi_index = port->ops->get_ifindex(port);
        hdr->ifi_flags = port->ops->get_dev_flags(port);
        hdr->ifi_change = 0;

        if (nla_put_string(skb, IFLA_IFNAME, port->ops->get_name(port)) ||
            nla_put_u32(skb, IFLA_MASTER, get_dpifindex(dp)) ||
            nla_put_u32(skb, IFLA_MTU, port->ops->get_mtu(port)) ||
#ifdef IFLA_OPERSTATE
            nla_put_u8(skb, IFLA_OPERSTATE,
                       port->ops->is_running(port) ?
                                port->ops->get_operstate(port) :
                                IF_OPER_DOWN) ||
#endif
            nla_put(skb, IFLA_ADDRESS, ETH_ALEN, port->ops->get_addr(port)))
                goto nla_put_failure;

        return nlmsg_end(skb, nlh);

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

/* Caller must hold RTNL lock. */
static void dp_ifinfo_notify(int event, struct vport *port)
{
        struct sk_buff *skb;
        int err;

        skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
        if (!skb) {
                err = -ENOBUFS;
                goto err;
        }

        err = dp_fill_ifinfo(skb, port, event, 0);
        if (err < 0) {
                if (err == -ENODEV) {
                        goto out;
                } else {
                        /* -EMSGSIZE implies BUG in br_nlmsg_size() */
                        WARN_ON(err == -EMSGSIZE);
                        goto err;
                }
        }

        rtnl_notify(skb, ovs_dp_get_net(port->dp), 0, RTNLGRP_LINK, NULL, GFP_KERNEL);

        return;
err:
        rtnl_set_sk_err(ovs_dp_get_net(port->dp), RTNLGRP_LINK, err);
out:
        kfree_skb(skb);
}

static void release_dp(struct kobject *kobj)
{
        struct datapath *dp = container_of(kobj, struct datapath, ifobj);
        kfree(dp);
}

static struct kobj_type dp_ktype = {
        .release = release_dp
};

static void destroy_dp_rcu(struct rcu_head *rcu)
{
        struct datapath *dp = container_of(rcu, struct datapath, rcu);

        ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
        free_percpu(dp->stats_percpu);
        release_net(ovs_dp_get_net(dp));
        kfree(dp->ports);
        kobject_put(&dp->ifobj);
}

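/* DP_VPORT_HASH_BUCKETS is a power of two, so the mask below is
 * equivalent to port_no % DP_VPORT_HASH_BUCKETS. */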
static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
                                            u16 port_no)
{
        return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}

struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
        struct vport *vport;
        struct hlist_node *n;
        struct hlist_head *head;

        head = vport_hash_bucket(dp, port_no);
        hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) {
                if (vport->port_no == port_no)
                        return vport;
        }
        return NULL;
}

/* Called with RTNL lock and genl_lock. */
static struct vport *new_vport(const struct vport_parms *parms)
{
        struct vport *vport;

        vport = ovs_vport_add(parms);
        if (!IS_ERR(vport)) {
                struct datapath *dp = parms->dp;
                struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

                hlist_add_head_rcu(&vport->dp_hash_node, head);
                dp_ifinfo_notify(RTM_NEWLINK, vport);
        }
        return vport;
}

/* Called with RTNL lock. */
void ovs_dp_detach_port(struct vport *p)
{
        ASSERT_RTNL();

        if (p->port_no != OVSP_LOCAL)
                ovs_dp_sysfs_del_if(p);

        dp_ifinfo_notify(RTM_DELLINK, p);

        /* First drop references to device. */
        hlist_del_rcu(&p->dp_hash_node);

        /* Then destroy it. */
        ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
        struct datapath *dp = p->dp;
        struct sw_flow *flow;
        struct dp_stats_percpu *stats;
        u64 *stats_counter;
        int error;

        stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

        if (!OVS_CB(skb)->flow) {
                struct sw_flow_key key;
                int key_len;

                /* Extract flow from 'skb' into 'key'. */
                error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
                if (unlikely(error)) {
                        kfree_skb(skb);
                        return;
                }

                /* Look up flow. */
                flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table),
                                           &key, key_len);
                if (unlikely(!flow)) {
                        struct dp_upcall_info upcall;

                        upcall.cmd = OVS_PACKET_CMD_MISS;
                        upcall.key = &key;
                        upcall.userdata = NULL;
                        upcall.pid = p->upcall_pid;
                        ovs_dp_upcall(dp, skb, &upcall);
                        consume_skb(skb);
                        stats_counter = &stats->n_missed;
                        goto out;
                }

                OVS_CB(skb)->flow = flow;
        }

        stats_counter = &stats->n_hit;
        ovs_flow_used(OVS_CB(skb)->flow, skb);
        ovs_execute_actions(dp, skb);

out:
        /* Update datapath statistics. */
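        /* The update_begin/update_end pair marks a writer section so
         * that the 64-bit counters read in get_dp_stats() stay
         * consistent on 32-bit SMP kernels. */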
        u64_stats_update_begin(&stats->sync);
        (*stats_counter)++;
        u64_stats_update_end(&stats->sync);
}

static struct genl_family dp_packet_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_PACKET_FAMILY,
        .version = OVS_PACKET_VERSION,
        .maxattr = OVS_PACKET_ATTR_MAX,
         SET_NETNSOK
};

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
                  const struct dp_upcall_info *upcall_info)
{
        struct dp_stats_percpu *stats;
        int dp_ifindex;
        int err;

        if (upcall_info->pid == 0) {
                err = -ENOTCONN;
                goto err;
        }

        dp_ifindex = get_dpifindex(dp);
        if (!dp_ifindex) {
                err = -ENODEV;
                goto err;
        }

        forward_ip_summed(skb, true);

        if (!skb_is_gso(skb))
                err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
        else
                err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
        if (err)
                goto err;

        return 0;

err:
        stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

        u64_stats_update_begin(&stats->sync);
        stats->n_lost++;
        u64_stats_update_end(&stats->sync);

        return err;
}

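/* Software-segments a GSO skb and queues each resulting segment to
 * userspace individually; for UDP fragments, segments after the first
 * get a flow key remarked as OVS_FRAG_TYPE_LATER. */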
static int queue_gso_packets(struct net *net, int dp_ifindex,
                             struct sk_buff *skb,
                             const struct dp_upcall_info *upcall_info)
{
        unsigned short gso_type = skb_shinfo(skb)->gso_type;
        struct dp_upcall_info later_info;
        struct sw_flow_key later_key;
        struct sk_buff *segs, *nskb;
        int err;

        segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
        if (IS_ERR(segs))
                return PTR_ERR(segs);

        /* Queue all of the segments. */
        skb = segs;
        do {
                err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
                if (err)
                        break;

                if (skb == segs && gso_type & SKB_GSO_UDP) {
                        /* The initial flow key extracted by ovs_flow_extract()
                         * in this case is for a first fragment, so we need to
                         * properly mark later fragments.
                         */
                        later_key = *upcall_info->key;
                        later_key.ip.frag = OVS_FRAG_TYPE_LATER;

                        later_info = *upcall_info;
                        later_info.key = &later_key;
                        upcall_info = &later_info;
                }
        } while ((skb = skb->next));

        /* Free all of the segments. */
        skb = segs;
        do {
                nskb = skb->next;
                if (err)
                        kfree_skb(skb);
                else
                        consume_skb(skb);
        } while ((skb = nskb));
        return err;
}

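/* Builds an OVS_PACKET Netlink message (flow key, optional userdata,
 * and the packet itself) and unicasts it to the Netlink pid chosen in
 * 'upcall_info'. */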
static int queue_userspace_packet(struct net *net, int dp_ifindex,
                                  struct sk_buff *skb,
                                  const struct dp_upcall_info *upcall_info)
{
        struct ovs_header *upcall;
        struct sk_buff *nskb = NULL;
        struct sk_buff *user_skb; /* to be queued to userspace */
        struct nlattr *nla;
        unsigned int len;
        int err;

        if (vlan_tx_tag_present(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return -ENOMEM;

                err = vlan_deaccel_tag(nskb);
                if (err)
                        return err;

                skb = nskb;
        }

        if (nla_attr_size(skb->len) > USHRT_MAX) {
                err = -EFBIG;
                goto out;
        }

        len = sizeof(struct ovs_header);
        len += nla_total_size(skb->len);
        len += nla_total_size(FLOW_BUFSIZE);
        if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
                len += nla_total_size(8);

        user_skb = genlmsg_new(len, GFP_ATOMIC);
        if (!user_skb) {
                err = -ENOMEM;
                goto out;
        }

        upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
                             0, upcall_info->cmd);
        upcall->dp_ifindex = dp_ifindex;

        nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
        ovs_flow_to_nlattrs(upcall_info->key, user_skb);
        nla_nest_end(user_skb, nla);

        if (upcall_info->userdata)
                nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
                            nla_get_u64(upcall_info->userdata));

        nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);

        skb_copy_and_csum_dev(skb, nla_data(nla));

        err = genlmsg_unicast(net, user_skb, upcall_info->pid);

out:
        kfree_skb(nskb);
        return err;
}

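/* Atomically replaces the datapath's flow table with a fresh, empty
 * one; the old table is torn down only after an RCU grace period, so
 * concurrent readers stay safe. */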
/* Called with genl_mutex. */
static int flush_flows(struct datapath *dp)
{
        struct flow_table *old_table;
        struct flow_table *new_table;

        old_table = genl_dereference(dp->table);
        new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
        if (!new_table)
                return -ENOMEM;

        rcu_assign_pointer(dp->table, new_table);

        ovs_flow_tbl_deferred_destroy(old_table);
        return 0;
}

static int validate_actions(const struct nlattr *attr,
                                const struct sw_flow_key *key, int depth);

static int validate_sample(const struct nlattr *attr,
                                const struct sw_flow_key *key, int depth)
{
        const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
        const struct nlattr *probability, *actions;
        const struct nlattr *a;
        int rem;

        memset(attrs, 0, sizeof(attrs));
        nla_for_each_nested(a, attr, rem) {
                int type = nla_type(a);
                if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
                        return -EINVAL;
                attrs[type] = a;
        }
        if (rem)
                return -EINVAL;

        probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
        if (!probability || nla_len(probability) != sizeof(u32))
                return -EINVAL;

        actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
        if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
                return -EINVAL;
        return validate_actions(actions, key, depth + 1);
}

static int validate_tp_port(const struct sw_flow_key *flow_key)
{
        if (flow_key->eth.type == htons(ETH_P_IP)) {
                if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
                        return 0;
        } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
                if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
                        return 0;
        }

        return -EINVAL;
}

static int validate_set(const struct nlattr *a,
                        const struct sw_flow_key *flow_key)
{
        const struct nlattr *ovs_key = nla_data(a);
        int key_type = nla_type(ovs_key);

        /* There can be only one key in an action */
        if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
                return -EINVAL;

        if (key_type > OVS_KEY_ATTR_MAX ||
            nla_len(ovs_key) != ovs_key_lens[key_type])
                return -EINVAL;

        switch (key_type) {
        const struct ovs_key_ipv4 *ipv4_key;

        case OVS_KEY_ATTR_PRIORITY:
        case OVS_KEY_ATTR_TUN_ID:
        case OVS_KEY_ATTR_ETHERNET:
                break;

        case OVS_KEY_ATTR_IPV4:
                if (flow_key->eth.type != htons(ETH_P_IP))
                        return -EINVAL;

                if (!flow_key->ip.proto)
                        return -EINVAL;

                ipv4_key = nla_data(ovs_key);
                if (ipv4_key->ipv4_proto != flow_key->ip.proto)
                        return -EINVAL;

                if (ipv4_key->ipv4_frag != flow_key->ip.frag)
                        return -EINVAL;

                break;

        case OVS_KEY_ATTR_TCP:
                if (flow_key->ip.proto != IPPROTO_TCP)
                        return -EINVAL;

                return validate_tp_port(flow_key);

        case OVS_KEY_ATTR_UDP:
                if (flow_key->ip.proto != IPPROTO_UDP)
                        return -EINVAL;

                return validate_tp_port(flow_key);

        default:
                return -EINVAL;
        }

        return 0;
}

static int validate_userspace(const struct nlattr *attr)
{
        static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
                [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
                [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
        };
        struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
        int error;

        error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
                                 attr, userspace_policy);
        if (error)
                return error;

        if (!a[OVS_USERSPACE_ATTR_PID] ||
            !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
                return -EINVAL;

        return 0;
}

static int validate_actions(const struct nlattr *attr,
                                const struct sw_flow_key *key, int depth)
{
        const struct nlattr *a;
        int rem, err;

        if (depth >= SAMPLE_ACTION_DEPTH)
                return -EOVERFLOW;

        nla_for_each_nested(a, attr, rem) {
                /* Expected argument lengths, (u32)-1 for variable length. */
                static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
                        [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
                        [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
                        [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
                        [OVS_ACTION_ATTR_POP_VLAN] = 0,
                        [OVS_ACTION_ATTR_SET] = (u32)-1,
                        [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
                };
                const struct ovs_action_push_vlan *vlan;
                int type = nla_type(a);

                if (type > OVS_ACTION_ATTR_MAX ||
                    (action_lens[type] != nla_len(a) &&
                     action_lens[type] != (u32)-1))
                        return -EINVAL;

                switch (type) {
                case OVS_ACTION_ATTR_UNSPEC:
                        return -EINVAL;

                case OVS_ACTION_ATTR_USERSPACE:
                        err = validate_userspace(a);
                        if (err)
                                return err;
                        break;

                case OVS_ACTION_ATTR_OUTPUT:
                        if (nla_get_u32(a) >= DP_MAX_PORTS)
                                return -EINVAL;
                        break;

                case OVS_ACTION_ATTR_POP_VLAN:
                        break;

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        vlan = nla_data(a);
                        if (vlan->vlan_tpid != htons(ETH_P_8021Q))
                                return -EINVAL;
                        if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
                                return -EINVAL;
                        break;

                case OVS_ACTION_ATTR_SET:
                        err = validate_set(a, key);
                        if (err)
                                return err;
                        break;

                case OVS_ACTION_ATTR_SAMPLE:
                        err = validate_sample(a, key, depth);
                        if (err)
                                return err;
                        break;

                default:
                        return -EINVAL;
                }
        }

        if (rem > 0)
                return -EINVAL;

        return 0;
}

static void clear_stats(struct sw_flow *flow)
{
        flow->used = 0;
        flow->tcp_flags = 0;
        flow->packet_count = 0;
        flow->byte_count = 0;
}

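/* Handles OVS_PACKET_CMD_EXECUTE: rebuilds the packet supplied by
 * userspace, validates its flow key and actions, then injects it
 * through the datapath's action execution path. */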
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
        struct ovs_header *ovs_header = info->userhdr;
        struct nlattr **a = info->attrs;
        struct sw_flow_actions *acts;
        struct sk_buff *packet;
        struct sw_flow *flow;
        struct datapath *dp;
        struct ethhdr *eth;
        int len;
        int err;
        int key_len;

        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
            !a[OVS_PACKET_ATTR_ACTIONS] ||
            nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
                goto err;

        len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
        packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
        err = -ENOMEM;
        if (!packet)
                goto err;
        skb_reserve(packet, NET_IP_ALIGN);

        memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);

        skb_reset_mac_header(packet);
        eth = eth_hdr(packet);

        /* Normally, setting the skb 'protocol' field would be handled by a
         * call to eth_type_trans(), but it assumes there's a sending
         * device, which we may not have. */
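        /* Per IEEE 802.3, h_proto values of 1536 (0x600) and above are
         * EtherTypes; smaller values are 802.3 frame lengths, so those
         * frames fall back to the generic ETH_P_802_2 protocol. */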
        if (ntohs(eth->h_proto) >= 1536)
                packet->protocol = eth->h_proto;
        else
                packet->protocol = htons(ETH_P_802_2);

        /* Build an sw_flow for sending this packet. */
        flow = ovs_flow_alloc();
        err = PTR_ERR(flow);
        if (IS_ERR(flow))
                goto err_kfree_skb;

        err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
        if (err)
                goto err_flow_put;

        err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority,
                                             &flow->key.phy.in_port,
                                             &flow->key.phy.tun_id,
                                             a[OVS_PACKET_ATTR_KEY]);
        if (err)
                goto err_flow_put;

        err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0);
        if (err)
                goto err_flow_put;

        flow->hash = ovs_flow_hash(&flow->key, key_len);

        acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
        err = PTR_ERR(acts);
        if (IS_ERR(acts))
                goto err_flow_put;
        rcu_assign_pointer(flow->sf_acts, acts);

        OVS_CB(packet)->flow = flow;
        packet->priority = flow->key.phy.priority;

        rcu_read_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto err_unlock;

        local_bh_disable();
        err = ovs_execute_actions(dp, packet);
        local_bh_enable();
        rcu_read_unlock();

        ovs_flow_put(flow);
        return err;

err_unlock:
        rcu_read_unlock();
err_flow_put:
        ovs_flow_put(flow);
err_kfree_skb:
        kfree_skb(packet);
err:
        return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
        [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static struct genl_ops dp_packet_genl_ops[] = {
        { .cmd = OVS_PACKET_CMD_EXECUTE,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = packet_policy,
          .doit = ovs_packet_cmd_execute
        }
};

static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
        int i;
        struct flow_table *table = genl_dereference(dp->table);

        stats->n_flows = ovs_flow_tbl_count(table);

        stats->n_hit = stats->n_missed = stats->n_lost = 0;
        for_each_possible_cpu(i) {
                const struct dp_stats_percpu *percpu_stats;
                struct dp_stats_percpu local_stats;
                unsigned int start;

                percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

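                /* Snapshot this CPU's counters; retry if a writer
                 * raced with us (seqcount-style read section). */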
                do {
                        start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
                        local_stats = *percpu_stats;
                } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

                stats->n_hit += local_stats.n_hit;
                stats->n_missed += local_stats.n_missed;
                stats->n_lost += local_stats.n_lost;
        }
}

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
};

static struct genl_family dp_flow_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_FLOW_FAMILY,
        .version = OVS_FLOW_VERSION,
        .maxattr = OVS_FLOW_ATTR_MAX,
         SET_NETNSOK
};

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
        .name = OVS_FLOW_MCGROUP
};

/* Called with genl_lock. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
                                  struct sk_buff *skb, u32 pid,
                                  u32 seq, u32 flags, u8 cmd)
{
        const int skb_orig_len = skb->len;
        const struct sw_flow_actions *sf_acts;
        struct ovs_flow_stats stats;
        struct ovs_header *ovs_header;
        struct nlattr *nla;
        unsigned long used;
        u8 tcp_flags;
        int err;

        sf_acts = rcu_dereference_protected(flow->sf_acts,
                                            lockdep_genl_is_held());

        ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = get_dpifindex(dp);

        nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
        if (!nla)
                goto nla_put_failure;
        err = ovs_flow_to_nlattrs(&flow->key, skb);
        if (err)
                goto error;
        nla_nest_end(skb, nla);

        spin_lock_bh(&flow->lock);
        used = flow->used;
        stats.n_packets = flow->packet_count;
        stats.n_bytes = flow->byte_count;
        tcp_flags = flow->tcp_flags;
        spin_unlock_bh(&flow->lock);

        if (used &&
            nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
                goto nla_put_failure;

        if (stats.n_packets &&
            nla_put(skb, OVS_FLOW_ATTR_STATS,
                    sizeof(struct ovs_flow_stats), &stats))
                goto nla_put_failure;

        if (tcp_flags &&
            nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
                goto nla_put_failure;

        /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
         * this is the first flow to be dumped into 'skb'.  This is unusual for
         * Netlink but individual action lists can be longer than
         * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
         * The userspace caller can always fetch the actions separately if it
         * really wants them.  (Most userspace callers in fact don't care.)
         *
         * This can only fail for dump operations because the skb is always
         * properly sized for single flows.
         */
        err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
                      sf_acts->actions);
        if (err < 0 && skb_orig_len)
                goto error;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}

static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
{
        const struct sw_flow_actions *sf_acts;
        int len;

        sf_acts = rcu_dereference_protected(flow->sf_acts,
                                            lockdep_genl_is_held());

        /* OVS_FLOW_ATTR_KEY */
        len = nla_total_size(FLOW_BUFSIZE);
        /* OVS_FLOW_ATTR_ACTIONS */
        len += nla_total_size(sf_acts->actions_len);
        /* OVS_FLOW_ATTR_STATS */
        len += nla_total_size(sizeof(struct ovs_flow_stats));
        /* OVS_FLOW_ATTR_TCP_FLAGS */
        len += nla_total_size(1);
        /* OVS_FLOW_ATTR_USED */
        len += nla_total_size(8);

        len += NLMSG_ALIGN(sizeof(struct ovs_header));

        return genlmsg_new(len, GFP_KERNEL);
}

static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
                                               struct datapath *dp,
                                               u32 pid, u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = ovs_flow_cmd_alloc_info(flow);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
        BUG_ON(retval < 0);
        return skb;
}

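/* Implements both OVS_FLOW_CMD_NEW and OVS_FLOW_CMD_SET: inserts a new
 * flow (expanding the table first if needed) or updates the actions of
 * an existing one, then notifies the flow multicast group. */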
static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sw_flow *flow;
        struct sk_buff *reply;
        struct datapath *dp;
        struct flow_table *table;
        int error;
        int key_len;

        /* Extract key. */
        error = -EINVAL;
        if (!a[OVS_FLOW_ATTR_KEY])
                goto error;
        error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (error)
                goto error;

        /* Validate actions. */
        if (a[OVS_FLOW_ATTR_ACTIONS]) {
                error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0);
                if (error)
                        goto error;
        } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
                error = -EINVAL;
                goto error;
        }

        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        error = -ENODEV;
        if (!dp)
                goto error;

        table = genl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow) {
                struct sw_flow_actions *acts;

                /* Bail out if we're not allowed to create a new flow. */
                error = -ENOENT;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
                        goto error;

                /* Expand table, if necessary, to make room. */
                if (ovs_flow_tbl_need_to_expand(table)) {
                        struct flow_table *new_table;

                        new_table = ovs_flow_tbl_expand(table);
                        if (!IS_ERR(new_table)) {
                                rcu_assign_pointer(dp->table, new_table);
                                ovs_flow_tbl_deferred_destroy(table);
                                table = genl_dereference(dp->table);
                        }
                }

                /* Allocate flow. */
                flow = ovs_flow_alloc();
                if (IS_ERR(flow)) {
                        error = PTR_ERR(flow);
                        goto error;
                }
                flow->key = key;
                clear_stats(flow);

                /* Obtain actions. */
                acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
                error = PTR_ERR(acts);
                if (IS_ERR(acts))
                        goto error_free_flow;
                rcu_assign_pointer(flow->sf_acts, acts);

                /* Put flow in bucket. */
                flow->hash = ovs_flow_hash(&key, key_len);
                ovs_flow_tbl_insert(table, flow);

                reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
                                                info->snd_seq,
                                                OVS_FLOW_CMD_NEW);
        } else {
                /* We found a matching flow. */
                struct sw_flow_actions *old_acts;
                struct nlattr *acts_attrs;

                /* Bail out if we're not allowed to modify an existing flow.
                 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
                 * because Generic Netlink treats the latter as a dump
                 * request.  We also accept NLM_F_EXCL in case that bug ever
                 * gets fixed.
                 */
                error = -EEXIST;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
                    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
                        goto error;

                /* Update actions. */
                old_acts = rcu_dereference_protected(flow->sf_acts,
                                                     lockdep_genl_is_held());
                acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
                if (acts_attrs &&
                   (old_acts->actions_len != nla_len(acts_attrs) ||
                   memcmp(old_acts->actions, nla_data(acts_attrs),
                          old_acts->actions_len))) {
                        struct sw_flow_actions *new_acts;

                        new_acts = ovs_flow_actions_alloc(acts_attrs);
                        error = PTR_ERR(new_acts);
                        if (IS_ERR(new_acts))
                                goto error;

                        rcu_assign_pointer(flow->sf_acts, new_acts);
                        ovs_flow_deferred_free_acts(old_acts);
                }

                reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
                                               info->snd_seq, OVS_FLOW_CMD_NEW);

                /* Clear stats. */
                if (a[OVS_FLOW_ATTR_CLEAR]) {
                        spin_lock_bh(&flow->lock);
                        clear_stats(flow);
                        spin_unlock_bh(&flow->lock);
                }
        }

        if (!IS_ERR(reply))
                genl_notify(reply, genl_info_net(info), info->snd_pid,
                           ovs_dp_flow_multicast_group.id, info->nlhdr,
                           GFP_KERNEL);
        else
                netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
                                ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
        return 0;

error_free_flow:
        ovs_flow_put(flow);
error:
        return error;
}

static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct flow_table *table;
        int err;
        int key_len;

        if (!a[OVS_FLOW_ATTR_KEY])
                return -EINVAL;
        err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (err)
                return err;

        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        table = genl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow)
                return -ENOENT;

        reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
                                        info->snd_seq, OVS_FLOW_CMD_NEW);
        if (IS_ERR(reply))
                return PTR_ERR(reply);

        return genlmsg_reply(reply, info);
}

static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct flow_table *table;
        int err;
        int key_len;

        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        if (!a[OVS_FLOW_ATTR_KEY])
                return flush_flows(dp);

        err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (err)
                return err;

        table = genl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow)
                return -ENOENT;

        reply = ovs_flow_cmd_alloc_info(flow);
        if (!reply)
                return -ENOMEM;

        ovs_flow_tbl_remove(table, flow);

        err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
                                     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
        BUG_ON(err < 0);

        ovs_flow_deferred_free(flow);

        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
        return 0;
}

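/* Dump callback: cb->args[0] and cb->args[1] store the (bucket, entry)
 * cursor so the dump can resume where the previous batch stopped. */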
1227 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1228 {
1229         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1230         struct datapath *dp;
1231         struct flow_table *table;
1232
1233         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1234         if (!dp)
1235                 return -ENODEV;
1236
1237         table = genl_dereference(dp->table);
1238
1239         for (;;) {
1240                 struct sw_flow *flow;
1241                 u32 bucket, obj;
1242
1243                 bucket = cb->args[0];
1244                 obj = cb->args[1];
1245                 flow = ovs_flow_tbl_next(table, &bucket, &obj);
1246                 if (!flow)
1247                         break;
1248
1249                 if (ovs_flow_cmd_fill_info(flow, dp, skb,
1250                                            NETLINK_CB(cb->skb).pid,
1251                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1252                                            OVS_FLOW_CMD_NEW) < 0)
1253                         break;
1254
1255                 cb->args[0] = bucket;
1256                 cb->args[1] = obj;
1257         }
1258         return skb->len;
1259 }
1260
1261 static struct genl_ops dp_flow_genl_ops[] = {
1262         { .cmd = OVS_FLOW_CMD_NEW,
1263           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1264           .policy = flow_policy,
1265           .doit = ovs_flow_cmd_new_or_set
1266         },
1267         { .cmd = OVS_FLOW_CMD_DEL,
1268           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1269           .policy = flow_policy,
1270           .doit = ovs_flow_cmd_del
1271         },
1272         { .cmd = OVS_FLOW_CMD_GET,
1273           .flags = 0,               /* OK for unprivileged users. */
1274           .policy = flow_policy,
1275           .doit = ovs_flow_cmd_get,
1276           .dumpit = ovs_flow_cmd_dump
1277         },
1278         { .cmd = OVS_FLOW_CMD_SET,
1279           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1280           .policy = flow_policy,
1281           .doit = ovs_flow_cmd_new_or_set,
1282         },
1283 };
1284
1285 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1286 #ifdef HAVE_NLA_NUL_STRING
1287         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1288 #endif
1289         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1290 };
1291
1292 static struct genl_family dp_datapath_genl_family = {
1293         .id = GENL_ID_GENERATE,
1294         .hdrsize = sizeof(struct ovs_header),
1295         .name = OVS_DATAPATH_FAMILY,
1296         .version = OVS_DATAPATH_VERSION,
1297         .maxattr = OVS_DP_ATTR_MAX,
1298          SET_NETNSOK
1299 };
1300
1301 static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
1302         .name = OVS_DATAPATH_MCGROUP
1303 };
1304
1305 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1306                                 u32 pid, u32 seq, u32 flags, u8 cmd)
1307 {
1308         struct ovs_header *ovs_header;
1309         struct ovs_dp_stats dp_stats;
1310         int err;
1311
1312         ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
1313                                    flags, cmd);
1314         if (!ovs_header)
1315                 goto error;
1316
1317         ovs_header->dp_ifindex = get_dpifindex(dp);
1318
1319         rcu_read_lock();
1320         err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1321         rcu_read_unlock();
1322         if (err)
1323                 goto nla_put_failure;
1324
1325         get_dp_stats(dp, &dp_stats);
1326         if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
1327                 goto nla_put_failure;
1328
1329         return genlmsg_end(skb, ovs_header);
1330
1331 nla_put_failure:
1332         genlmsg_cancel(skb, ovs_header);
1333 error:
1334         return -EMSGSIZE;
1335 }
1336
1337 static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
1338                                              u32 seq, u8 cmd)
1339 {
1340         struct sk_buff *skb;
1341         int retval;
1342
1343         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1344         if (!skb)
1345                 return ERR_PTR(-ENOMEM);
1346
1347         retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
1348         if (retval < 0) {
1349                 kfree_skb(skb);
1350                 return ERR_PTR(retval);
1351         }
1352         return skb;
1353 }
1354
1355 static int ovs_dp_cmd_validate(struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1356 {
1357         return CHECK_NUL_STRING(a[OVS_DP_ATTR_NAME], IFNAMSIZ - 1);
1358 }
1359
1360 /* Called with genl_mutex and optionally with RTNL lock also. */
1361 static struct datapath *lookup_datapath(struct net *net,
1362                                         struct ovs_header *ovs_header,
1363                                         struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1364 {
1365         struct datapath *dp;
1366
1367         if (!a[OVS_DP_ATTR_NAME])
1368                 dp = get_dp(net, ovs_header->dp_ifindex);
1369         else {
1370                 struct vport *vport;
1371
1372                 rcu_read_lock();
1373                 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1374                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1375                 rcu_read_unlock();
1376         }
1377         return dp ? dp : ERR_PTR(-ENODEV);
1378 }
1379
1380 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1381 {
1382         struct nlattr **a = info->attrs;
1383         struct vport_parms parms;
1384         struct sk_buff *reply;
1385         struct datapath *dp;
1386         struct vport *vport;
1387         struct ovs_net *ovs_net;
1388         int err, i;
1389
1390         err = -EINVAL;
1391         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1392                 goto err;
1393
1394         err = ovs_dp_cmd_validate(a);
1395         if (err)
1396                 goto err;
1397
1398         rtnl_lock();
1399
1400         err = -ENOMEM;
1401         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1402         if (dp == NULL)
1403                 goto err_unlock_rtnl;
1404
1405         /* Initialize kobject for bridge.  This will be added as
1406          * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
1407         dp->ifobj.kset = NULL;
1408         kobject_init(&dp->ifobj, &dp_ktype);
1409
1410         /* Allocate table. */
1411         err = -ENOMEM;
1412         rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
1413         if (!dp->table)
1414                 goto err_free_dp;
1415
1416         dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1417         if (!dp->stats_percpu) {
1418                 err = -ENOMEM;
1419                 goto err_destroy_table;
1420         }
1421         ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
1422
1423         dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1424                             GFP_KERNEL);
1425         if (!dp->ports) {
1426                 err = -ENOMEM;
1427                 goto err_destroy_percpu;
1428         }
1429
1430         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1431                 INIT_HLIST_HEAD(&dp->ports[i]);
1432
1433         /* Set up our datapath device. */
1434         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1435         parms.type = OVS_VPORT_TYPE_INTERNAL;
1436         parms.options = NULL;
1437         parms.dp = dp;
1438         parms.port_no = OVSP_LOCAL;
1439         parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
1440
1441         vport = new_vport(&parms);
1442         if (IS_ERR(vport)) {
1443                 err = PTR_ERR(vport);
1444                 if (err == -EBUSY)
1445                         err = -EEXIST;
1446
1447                 goto err_destroy_ports_array;
1448         }
1449
1450         reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
1451                                       info->snd_seq, OVS_DP_CMD_NEW);
1452         err = PTR_ERR(reply);
1453         if (IS_ERR(reply))
1454                 goto err_destroy_local_port;
1455
1456         ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1457         list_add_tail(&dp->list_node, &ovs_net->dps);
1458         ovs_dp_sysfs_add_dp(dp);
1459
1460         rtnl_unlock();
1461
1462         genl_notify(reply, genl_info_net(info), info->snd_pid,
1463                     ovs_dp_datapath_multicast_group.id, info->nlhdr,
1464                     GFP_KERNEL);
1465         return 0;
1466
1467 err_destroy_local_port:
1468         ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
1469 err_destroy_ports_array:
1470         kfree(dp->ports);
1471 err_destroy_percpu:
1472         free_percpu(dp->stats_percpu);
1473 err_destroy_table:
1474         ovs_flow_tbl_destroy(genl_dereference(dp->table));
1475 err_free_dp:
1476         kfree(dp);
1477 err_unlock_rtnl:
1478         rtnl_unlock();
1479 err:
1480         return err;
1481 }
1482
1483 /* Called with genl_mutex. */
1484 static void __dp_destroy(struct datapath *dp)
1485 {
1486         int i;
1487
1488         rtnl_lock();
1489
1490         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1491                 struct vport *vport;
1492                 struct hlist_node *node, *n;
1493
1494                 hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node)
1495                         if (vport->port_no != OVSP_LOCAL)
1496                                 ovs_dp_detach_port(vport);
1497         }
1498
1499         ovs_dp_sysfs_del_dp(dp);
1500         list_del(&dp->list_node);
1501         ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
1502
1503         /* rtnl_unlock() will wait until all the references to devices that
1504          * are pending unregistration have been dropped.  We do it here to
1505          * ensure that any internal devices (which contain DP pointers) are
1506          * fully destroyed before freeing the datapath.
1507          */
1508         rtnl_unlock();
1509
1510         call_rcu(&dp->rcu, destroy_dp_rcu);
1511 }
1512
1513 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1514 {
1515         struct sk_buff *reply;
1516         struct datapath *dp;
1517         int err;
1518
1519         err = ovs_dp_cmd_validate(info->attrs);
1520         if (err)
1521                 return err;
1522
1523         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1524         err = PTR_ERR(dp);
1525         if (IS_ERR(dp))
1526                 return err;
1527
1528         reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
1529                                       info->snd_seq, OVS_DP_CMD_DEL);
1530         err = PTR_ERR(reply);
1531         if (IS_ERR(reply))
1532                 return err;
1533
1534         __dp_destroy(dp);
1535
1536         genl_notify(reply, genl_info_net(info), info->snd_pid,
1537                     ovs_dp_datapath_multicast_group.id, info->nlhdr,
1538                     GFP_KERNEL);
1539
1540         return 0;
1541 }
1542
static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	err = ovs_dp_cmd_validate(info->attrs);
	if (err)
		return err;

	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp))
		return PTR_ERR(dp);

	reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
				ovs_dp_datapath_multicast_group.id, err);
		return 0;
	}

	genl_notify(reply, genl_info_net(info), info->snd_pid,
		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
		    GFP_KERNEL);

	return 0;
}

static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	err = ovs_dp_cmd_validate(info->attrs);
	if (err)
		return err;

	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp))
		return PTR_ERR(dp);

	reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	return genlmsg_reply(reply, info);
}

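/*
 * Dump callback for OVS_DP_CMD_GET requests carrying NLM_F_DUMP.  The
 * netlink core invokes this repeatedly; cb->args[0] persists across
 * calls and counts how many datapaths earlier passes already emitted,
 * so each pass resumes where the previous one stopped.  The dump ends
 * once a pass adds nothing to the skb.
 */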
static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct datapath *dp;
	int skip = cb->args[0];
	int i = 0;

	list_for_each_entry(dp, &ovs_net->dps, list_node) {
		if (i >= skip &&
		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 OVS_DP_CMD_NEW) < 0)
			break;
		i++;
	}

	cb->args[0] = i;

	return skb->len;
}

static struct genl_ops dp_datapath_genl_ops[] = {
	{ .cmd = OVS_DP_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_new
	},
	{ .cmd = OVS_DP_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_del
	},
	{ .cmd = OVS_DP_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_get,
	  .dumpit = ovs_dp_cmd_dump
	},
	{ .cmd = OVS_DP_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_set,
	},
};

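/*
 * Attribute policy for vport commands.  On kernels that predate
 * NLA_NUL_STRING (the !HAVE_NLA_NUL_STRING branch), the netlink core
 * cannot check NUL termination of the name, so only minimum lengths are
 * enforced here and ovs_vport_cmd_validate() below re-checks the string
 * via CHECK_NUL_STRING().
 */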
static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
#ifdef HAVE_NLA_NUL_STRING
	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
	[OVS_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
#else
	[OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct ovs_vport_stats) },
	[OVS_VPORT_ATTR_ADDRESS] = { .minlen = ETH_ALEN },
#endif
	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

static struct genl_family dp_vport_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_VPORT_FAMILY,
	.version = OVS_VPORT_VERSION,
	.maxattr = OVS_VPORT_ATTR_MAX,
	SET_NETNSOK
};

struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP
};

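/*
 * Serialization of one vport into an skb.  The resulting message
 * mirrors the attributes userspace supplies on OVS_VPORT_CMD_NEW:
 *
 *   struct ovs_header          { dp_ifindex }
 *   OVS_VPORT_ATTR_PORT_NO     u32
 *   OVS_VPORT_ATTR_TYPE        u32
 *   OVS_VPORT_ATTR_NAME        string
 *   OVS_VPORT_ATTR_UPCALL_PID  u32
 *   OVS_VPORT_ATTR_STATS       struct ovs_vport_stats
 *   OVS_VPORT_ATTR_ADDRESS     6-byte Ethernet address
 *   OVS_VPORT_ATTR_OPTIONS     nested, vport-type specific
 *
 * On overflow the half-built message is canceled and -EMSGSIZE
 * returned, which lets a dump retry the entry in the next skb.
 */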
/* Called with RTNL lock or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
				   u32 pid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_vport_stats vport_stats;
	int err;

	ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(vport->dp);

	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
	    nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid))
		goto nla_put_failure;

	ovs_vport_get_stats(vport, &vport_stats);
	if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
		    &vport_stats))
		goto nla_put_failure;

	if (nla_put(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN,
		    vport->ops->get_addr(vport)))
		goto nla_put_failure;

	err = ovs_vport_get_options(vport, skb);
	if (err == -EMSGSIZE)
		goto error;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

/* Called with RTNL lock or RCU read lock. */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
					 u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
	if (retval < 0) {
		kfree_skb(skb);
		return ERR_PTR(retval);
	}
	return skb;
}

static int ovs_vport_cmd_validate(struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
	return CHECK_NUL_STRING(a[OVS_VPORT_ATTR_NAME], IFNAMSIZ - 1);
}

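/*
 * Resolve the vport a request refers to.  OVS_VPORT_ATTR_NAME takes
 * precedence and is looked up per network namespace, with the optional
 * dp_ifindex only cross-checked against the match; OVS_VPORT_ATTR_PORT_NO
 * instead requires dp_ifindex to pick the datapath first.  Errors:
 * -ENODEV (no such device/datapath), -ENOENT (no port at that number),
 * -EFBIG (port number out of range), -EINVAL (neither attribute given).
 */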
/* Called with RTNL lock or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
				  struct ovs_header *ovs_header,
				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[OVS_VPORT_ATTR_NAME]) {
		vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
		if (!vport)
			return ERR_PTR(-ENODEV);
		if (ovs_header->dp_ifindex &&
		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
			return ERR_PTR(-ENODEV);
		return vport;
	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EFBIG);

		dp = get_dp(net, ovs_header->dp_ifindex);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = ovs_vport_rtnl_rcu(dp, port_no);
		if (!vport)
			return ERR_PTR(-ENOENT);
		return vport;
	} else
		return ERR_PTR(-EINVAL);
}

/* Called with RTNL lock. */
static int change_vport(struct vport *vport,
			struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
	int err = 0;

	if (a[OVS_VPORT_ATTR_STATS])
		ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));

	if (a[OVS_VPORT_ATTR_ADDRESS])
		err = ovs_vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS]));

	return err;
}

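/*
 * OVS_VPORT_CMD_NEW handler.  When userspace does not pick a port
 * number, the first free slot starting at 1 is used; port 0 is
 * OVSP_LOCAL, created together with the datapath.  Roughly what a
 * userspace request looks like (illustrative ovs-dpctl invocation, not
 * taken from this tree):
 *
 *   ovs-dpctl add-if dp0 eth1   # sends NAME/TYPE/UPCALL_PID, no PORT_NO
 */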
static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct vport *vport;
	struct datapath *dp;
	u32 port_no;
	int err;

	err = -EINVAL;
	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
	    !a[OVS_VPORT_ATTR_UPCALL_PID])
		goto exit;

	err = ovs_vport_cmd_validate(a);
	if (err)
		goto exit;

	rtnl_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock;

	if (a[OVS_VPORT_ATTR_PORT_NO]) {
		port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		err = -EFBIG;
		if (port_no >= DP_MAX_PORTS)
			goto exit_unlock;

		vport = ovs_vport_rtnl(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock;
			}
			vport = ovs_vport_rtnl(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;
	parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	ovs_dp_sysfs_add_if(vport);

	err = change_vport(vport, a);
	if (!err) {
		reply = ovs_vport_cmd_build_info(vport, info->snd_pid,
						 info->snd_seq,
						 OVS_VPORT_CMD_NEW);
		if (IS_ERR(reply))
			err = PTR_ERR(reply);
	}
	if (err) {
		ovs_dp_detach_port(vport);
		goto exit_unlock;
	}
	genl_notify(reply, genl_info_net(info), info->snd_pid,
		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
	rtnl_unlock();
exit:
	return err;
}

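/*
 * OVS_VPORT_CMD_SET handler.  A vport's type is fixed for its lifetime
 * (-EINVAL on mismatch); options, stats, address and upcall_pid may be
 * changed under RTNL.  As with the datapath set command, a reply
 * allocation failure is pushed to listeners through netlink_set_err()
 * instead of failing an operation that already took effect.
 */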
static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	err = ovs_vport_cmd_validate(a);
	if (err)
		goto exit;

	rtnl_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	err = 0;
	if (a[OVS_VPORT_ATTR_TYPE] &&
	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
		err = -EINVAL;

	if (!err && a[OVS_VPORT_ATTR_OPTIONS])
		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
	if (!err)
		err = change_vport(vport, a);
	else
		goto exit_unlock;
	if (!err && a[OVS_VPORT_ATTR_UPCALL_PID])
		vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
					 OVS_VPORT_CMD_NEW);
	if (IS_ERR(reply)) {
		netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
				ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
		goto exit_unlock;
	}

	genl_notify(reply, genl_info_net(info), info->snd_pid,
		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
	rtnl_unlock();
exit:
	return err;
}

static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	err = ovs_vport_cmd_validate(a);
	if (err)
		goto exit;

	rtnl_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	if (vport->port_no == OVSP_LOCAL) {
		err = -EINVAL;
		goto exit_unlock;
	}

	reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
					 OVS_VPORT_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	err = 0;	/* Don't leak PTR_ERR() of a valid reply pointer
			 * into the success return value. */
	ovs_dp_detach_port(vport);

	genl_notify(reply, genl_info_net(info), info->snd_pid,
		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
	rtnl_unlock();
exit:
	return err;
}

static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	err = ovs_vport_cmd_validate(a);
	if (err)
		goto exit;

	rcu_read_lock();
	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
					 OVS_VPORT_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	rcu_read_unlock();

	return genlmsg_reply(reply, info);

exit_unlock:
	rcu_read_unlock();
exit:
	return err;
}

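/*
 * Dump callback for the vports of one datapath.  Resumption state spans
 * two slots: cb->args[0] is the hash bucket to continue from and
 * cb->args[1] the number of entries already emitted within that bucket,
 * since a single bucket's chain may hold more entries than one skb.
 */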
static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	int bucket = cb->args[0], skip = cb->args[1];
	int i, j = 0;

	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp)
		return -ENODEV;

	rcu_read_lock();
	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;
		struct hlist_node *n;

		j = 0;
		hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) {
			if (j >= skip &&
			    ovs_vport_cmd_fill_info(vport, skb,
						    NETLINK_CB(cb->skb).pid,
						    cb->nlh->nlmsg_seq,
						    NLM_F_MULTI,
						    OVS_VPORT_CMD_NEW) < 0)
				goto out;

			j++;
		}
		skip = 0;
	}
out:
	rcu_read_unlock();

	cb->args[0] = i;
	cb->args[1] = j;

	return skb->len;
}

static struct genl_ops dp_vport_genl_ops[] = {
	{ .cmd = OVS_VPORT_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_new
	},
	{ .cmd = OVS_VPORT_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_del
	},
	{ .cmd = OVS_VPORT_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_get,
	  .dumpit = ovs_vport_cmd_dump
	},
	{ .cmd = OVS_VPORT_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_set,
	},
};

struct genl_family_and_ops {
	struct genl_family *family;
	struct genl_ops *ops;
	int n_ops;
	struct genl_multicast_group *group;
};

static const struct genl_family_and_ops dp_genl_families[] = {
	{ &dp_datapath_genl_family,
	  dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
	  &ovs_dp_datapath_multicast_group },
	{ &dp_vport_genl_family,
	  dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
	  &ovs_dp_vport_multicast_group },
	{ &dp_flow_genl_family,
	  dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
	  &ovs_dp_flow_multicast_group },
	{ &dp_packet_genl_family,
	  dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
	  NULL },
};

static void dp_unregister_genl(int n_families)
{
	int i;

	for (i = 0; i < n_families; i++)
		genl_unregister_family(dp_genl_families[i].family);
}

static int dp_register_genl(void)
{
	int n_registered;
	int err;
	int i;

	n_registered = 0;
	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
		const struct genl_family_and_ops *f = &dp_genl_families[i];

		err = genl_register_family_with_ops(f->family, f->ops,
						    f->n_ops);
		if (err)
			goto error;
		n_registered++;

		if (f->group) {
			err = genl_register_mc_group(f->family, f->group);
			if (err)
				goto error;
		}
	}

	return 0;

error:
	dp_unregister_genl(n_registered);
	return err;
}

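/*
 * Periodic rehash of every datapath's flow table (every
 * REHASH_FLOW_INTERVAL) so hash chains do not degrade over time.  The
 * work item runs through genl_exec(), this tree's helper for executing
 * a callback with genl_mutex held, which is what flow-table writers
 * require; the displaced table is destroyed only after an RCU grace
 * period because lockless readers may still be traversing it.
 */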
static int __rehash_flow_table(void *dummy)
{
	struct datapath *dp;
	struct net *net;

	rtnl_lock();
	for_each_net(net) {
		struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

		list_for_each_entry(dp, &ovs_net->dps, list_node) {
			struct flow_table *old_table = genl_dereference(dp->table);
			struct flow_table *new_table;

			new_table = ovs_flow_tbl_rehash(old_table);
			if (!IS_ERR(new_table)) {
				rcu_assign_pointer(dp->table, new_table);
				ovs_flow_tbl_deferred_destroy(old_table);
			}
		}
	}
	rtnl_unlock();
	return 0;
}

static void rehash_flow_table(struct work_struct *work)
{
	genl_exec(__rehash_flow_table, NULL);
	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
}

static int dp_destroy_all(void *data)
{
	struct datapath *dp, *dp_next;
	struct ovs_net *ovs_net = data;

	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
		__dp_destroy(dp);

	return 0;
}

static int __net_init ovs_init_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	INIT_LIST_HEAD(&ovs_net->dps);
	return 0;
}

static void __net_exit ovs_exit_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	genl_exec(dp_destroy_all, ovs_net);
}

static struct pernet_operations ovs_net_ops = {
	.init = ovs_init_net,
	.exit = ovs_exit_net,
	.id   = &ovs_net_id,
	.size = sizeof(struct ovs_net),
};

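/*
 * Module init.  Subsystems are brought up in dependency order and the
 * error ladder below unwinds them in exact reverse, one label per
 * successfully completed step, so a failure at any point leaves nothing
 * registered.
 */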
static int __init dp_init(void)
{
	struct sk_buff *dummy_skb;
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

	pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
		VERSION);

	err = genl_exec_init();
	if (err)
		goto error;

	err = ovs_workqueues_init();
	if (err)
		goto error_genl_exec;

	err = ovs_tnl_init();
	if (err)
		goto error_wq;

	err = ovs_flow_init();
	if (err)
		goto error_tnl_exit;

	err = ovs_vport_init();
	if (err)
		goto error_flow_exit;

	err = register_pernet_device(&ovs_net_ops);
	if (err)
		goto error_vport_exit;

	err = register_netdevice_notifier(&ovs_dp_device_notifier);
	if (err)
		goto error_netns_exit;

	err = dp_register_genl();
	if (err < 0)
		goto error_unreg_notifier;

	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
	unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
	ovs_vport_exit();
error_flow_exit:
	ovs_flow_exit();
error_tnl_exit:
	ovs_tnl_exit();
error_wq:
	ovs_workqueues_exit();
error_genl_exec:
	genl_exec_exit();
error:
	return err;
}

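/*
 * Module exit: the reverse of dp_init(), with two extra ordering
 * constraints.  The rehash work is cancelled first so it cannot touch
 * freed tables, and rcu_barrier() waits for outstanding call_rcu()
 * callbacks (e.g. destroy_dp_rcu from __dp_destroy) to finish before
 * the flow and vport subsystems are dismantled.
 */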
static void dp_cleanup(void)
{
	cancel_delayed_work_sync(&rehash_flow_wq);
	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
	unregister_pernet_device(&ovs_net_ops);
	rcu_barrier();
	ovs_vport_exit();
	ovs_flow_exit();
	ovs_tnl_exit();
	ovs_workqueues_exit();
	genl_exec_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");