[sliver-openvswitch.git] / datapath / datapath.c
1 /*
2  * Copyright (c) 2007-2012 Nicira, Inc.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_vlan.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/jhash.h>
28 #include <linux/delay.h>
29 #include <linux/time.h>
30 #include <linux/etherdevice.h>
31 #include <linux/genetlink.h>
32 #include <linux/kernel.h>
33 #include <linux/kthread.h>
34 #include <linux/mutex.h>
35 #include <linux/percpu.h>
36 #include <linux/rcupdate.h>
37 #include <linux/tcp.h>
38 #include <linux/udp.h>
39 #include <linux/version.h>
40 #include <linux/ethtool.h>
41 #include <linux/wait.h>
42 #include <asm/system.h>
43 #include <asm/div64.h>
44 #include <linux/highmem.h>
45 #include <linux/netfilter_bridge.h>
46 #include <linux/netfilter_ipv4.h>
47 #include <linux/inetdevice.h>
48 #include <linux/list.h>
49 #include <linux/openvswitch.h>
50 #include <linux/rculist.h>
51 #include <linux/dmi.h>
52 #include <net/genetlink.h>
53 #include <net/net_namespace.h>
54 #include <net/netns/generic.h>
55
56 #include "checksum.h"
57 #include "datapath.h"
58 #include "flow.h"
59 #include "genl_exec.h"
60 #include "vlan.h"
61 #include "tunnel.h"
62 #include "vport-internal_dev.h"
63
64 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
65     LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
66 #error Kernels before 2.6.18 or after 3.3 are not supported by this version of Open vSwitch.
67 #endif
68
69 #define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
70 static void rehash_flow_table(struct work_struct *work);
71 static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
72
73 int ovs_net_id __read_mostly;
74
75 int (*ovs_dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
76 EXPORT_SYMBOL(ovs_dp_ioctl_hook);
77
78 /**
79  * DOC: Locking:
80  *
81  * Writes to device state (add/remove datapath, port, set operations on vports,
82  * etc.) are protected by RTNL.
83  *
84  * Writes to other state (flow table modifications, set miscellaneous datapath
85  * parameters, etc.) are protected by genl_mutex.  The RTNL lock nests inside
86  * genl_mutex.
87  *
88  * Reads are protected by RCU.
89  *
90  * There are a few special cases (mostly stats) that have their own
91  * synchronization but they nest under all of the above and don't interact with
92  * each other.
93  */
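
/*
 * A sketch of the resulting lock ordering (not a literal call chain from
 * any single function in this file):
 *
 *	genl_mutex		held by Generic Netlink around command handlers
 *	    rtnl_lock()		device state: datapath and vport add/delete/set
 *	        rcu_read_lock()	lockless readers of dp->table and dp->ports
 */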
94
95 static struct vport *new_vport(const struct vport_parms *);
96 static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
97                              const struct dp_upcall_info *);
98 static int queue_userspace_packet(struct net *, int dp_ifindex,
99                                   struct sk_buff *,
100                                   const struct dp_upcall_info *);
101
102 /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
103 static struct datapath *get_dp(struct net *net, int dp_ifindex)
104 {
105         struct datapath *dp = NULL;
106         struct net_device *dev;
107
108         rcu_read_lock();
109         dev = dev_get_by_index_rcu(net, dp_ifindex);
110         if (dev) {
111                 struct vport *vport = ovs_internal_dev_get_vport(dev);
112                 if (vport)
113                         dp = vport->dp;
114         }
115         rcu_read_unlock();
116
117         return dp;
118 }
119
120 /* Must be called with rcu_read_lock or RTNL lock. */
121 const char *ovs_dp_name(const struct datapath *dp)
122 {
123         struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL);
124         return vport->ops->get_name(vport);
125 }
126
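/* Returns the ifindex of the datapath's local (OVSP_LOCAL) port, or 0 if
 * the datapath has no local port. */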
127 static int get_dpifindex(struct datapath *dp)
128 {
129         struct vport *local;
130         int ifindex;
131
132         rcu_read_lock();
133
134         local = ovs_vport_rcu(dp, OVSP_LOCAL);
135         if (local)
136                 ifindex = local->ops->get_ifindex(local);
137         else
138                 ifindex = 0;
139
140         rcu_read_unlock();
141
142         return ifindex;
143 }
144
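/* Upper bound on the size of the RTM_NEWLINK/RTM_DELLINK messages built by
 * dp_fill_ifinfo(). */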
145 static size_t br_nlmsg_size(void)
146 {
147         return NLMSG_ALIGN(sizeof(struct ifinfomsg))
148                + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
149                + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
150                + nla_total_size(4) /* IFLA_MASTER */
151                + nla_total_size(4) /* IFLA_MTU */
152                + nla_total_size(1); /* IFLA_OPERSTATE */
153 }
154
155 /* Caller must hold RTNL lock. */
156 static int dp_fill_ifinfo(struct sk_buff *skb,
157                           const struct vport *port,
158                           int event, unsigned int flags)
159 {
160         struct datapath *dp = port->dp;
161         struct ifinfomsg *hdr;
162         struct nlmsghdr *nlh;
163
164         if (!port->ops->get_ifindex)
165                 return -ENODEV;
166
167         nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
168         if (nlh == NULL)
169                 return -EMSGSIZE;
170
171         hdr = nlmsg_data(nlh);
172         hdr->ifi_family = AF_BRIDGE;
173         hdr->__ifi_pad = 0;
174         hdr->ifi_type = ARPHRD_ETHER;
175         hdr->ifi_index = port->ops->get_ifindex(port);
176         hdr->ifi_flags = port->ops->get_dev_flags(port);
177         hdr->ifi_change = 0;
178
179         if (nla_put_string(skb, IFLA_IFNAME, port->ops->get_name(port)) ||
180             nla_put_u32(skb, IFLA_MASTER, get_dpifindex(dp)) ||
181             nla_put_u32(skb, IFLA_MTU, port->ops->get_mtu(port)) ||
182 #ifdef IFLA_OPERSTATE
183             nla_put_u8(skb, IFLA_OPERSTATE,
184                        port->ops->is_running(port) ?
185                                 port->ops->get_operstate(port) :
186                                 IF_OPER_DOWN) ||
187 #endif
188             nla_put(skb, IFLA_ADDRESS, ETH_ALEN, port->ops->get_addr(port)))
189                 goto nla_put_failure;
190
191         return nlmsg_end(skb, nlh);
192
193 nla_put_failure:
194         nlmsg_cancel(skb, nlh);
195         return -EMSGSIZE;
196 }
197
198 /* Caller must hold RTNL lock. */
199 static void dp_ifinfo_notify(int event, struct vport *port)
200 {
201         struct sk_buff *skb;
202         int err;
203
204         skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
205         if (!skb) {
206                 err = -ENOBUFS;
207                 goto err;
208         }
209
210         err = dp_fill_ifinfo(skb, port, event, 0);
211         if (err < 0) {
212                 if (err == -ENODEV) {
213                         goto out;
214                 } else {
215                         /* -EMSGSIZE implies BUG in br_nlmsg_size() */
216                         WARN_ON(err == -EMSGSIZE);
217                         goto err;
218                 }
219         }
220
221         rtnl_notify(skb, ovs_dp_get_net(port->dp), 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
222
223         return;
224 err:
225         rtnl_set_sk_err(ovs_dp_get_net(port->dp), RTNLGRP_LINK, err);
226 out:
227         kfree_skb(skb);
228 }
229
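/* kobject release callback: frees the datapath once its last sysfs
 * reference has been dropped. */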
230 static void release_dp(struct kobject *kobj)
231 {
232         struct datapath *dp = container_of(kobj, struct datapath, ifobj);
233         kfree(dp);
234 }
235
236 static struct kobj_type dp_ktype = {
237         .release = release_dp
238 };
239
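/* RCU callback: releases the flow table, per-CPU stats, port array, and
 * network namespace reference once all RCU readers have finished. */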
240 static void destroy_dp_rcu(struct rcu_head *rcu)
241 {
242         struct datapath *dp = container_of(rcu, struct datapath, rcu);
243
244         ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
245         free_percpu(dp->stats_percpu);
246         release_net(ovs_dp_get_net(dp));
247         kfree(dp->ports);
248         kobject_put(&dp->ifobj);
249 }
250
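/* Returns the bucket in dp->ports that holds vports with the given port
 * number. */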
251 static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
252                                             u16 port_no)
253 {
254         return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
255 }
256
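/* Looks up a vport by port number.  Must be called with rcu_read_lock or
 * RTNL lock. */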
257 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
258 {
259         struct vport *vport;
260         struct hlist_node *n;
261         struct hlist_head *head;
262
263         head = vport_hash_bucket(dp, port_no);
264         hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) {
265                 if (vport->port_no == port_no)
266                         return vport;
267         }
268         return NULL;
269 }
270
271 /* Called with RTNL lock and genl_lock. */
272 static struct vport *new_vport(const struct vport_parms *parms)
273 {
274         struct vport *vport;
275
276         vport = ovs_vport_add(parms);
277         if (!IS_ERR(vport)) {
278                 struct datapath *dp = parms->dp;
279                 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
280
281                 hlist_add_head_rcu(&vport->dp_hash_node, head);
282                 dp_ifinfo_notify(RTM_NEWLINK, vport);
283         }
284         return vport;
285 }
286
287 /* Called with RTNL lock. */
288 void ovs_dp_detach_port(struct vport *p)
289 {
290         ASSERT_RTNL();
291
292         if (p->port_no != OVSP_LOCAL)
293                 ovs_dp_sysfs_del_if(p);
294
295         dp_ifinfo_notify(RTM_DELLINK, p);
296
297         /* First drop references to device. */
298         hlist_del_rcu(&p->dp_hash_node);
299
300         /* Then destroy it. */
301         ovs_vport_del(p);
302 }
303
304 /* Must be called with rcu_read_lock. */
305 void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
306 {
307         struct datapath *dp = p->dp;
308         struct sw_flow *flow;
309         struct dp_stats_percpu *stats;
310         u64 *stats_counter;
311         int error;
312
313         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
314
315         if (!OVS_CB(skb)->flow) {
316                 struct sw_flow_key key;
317                 int key_len;
318
319                 /* Extract flow from 'skb' into 'key'. */
320                 error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
321                 if (unlikely(error)) {
322                         kfree_skb(skb);
323                         return;
324                 }
325
326                 /* Look up flow. */
327                 flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table),
328                                            &key, key_len);
329                 if (unlikely(!flow)) {
330                         struct dp_upcall_info upcall;
331
332                         upcall.cmd = OVS_PACKET_CMD_MISS;
333                         upcall.key = &key;
334                         upcall.userdata = NULL;
335                         upcall.pid = p->upcall_pid;
336                         ovs_dp_upcall(dp, skb, &upcall);
337                         consume_skb(skb);
338                         stats_counter = &stats->n_missed;
339                         goto out;
340                 }
341
342                 OVS_CB(skb)->flow = flow;
343         }
344
345         stats_counter = &stats->n_hit;
346         ovs_flow_used(OVS_CB(skb)->flow, skb);
347         ovs_execute_actions(dp, skb);
348
349 out:
350         /* Update datapath statistics. */
351         u64_stats_update_begin(&stats->sync);
352         (*stats_counter)++;
353         u64_stats_update_end(&stats->sync);
354 }
355
356 static struct genl_family dp_packet_genl_family = {
357         .id = GENL_ID_GENERATE,
358         .hdrsize = sizeof(struct ovs_header),
359         .name = OVS_PACKET_FAMILY,
360         .version = OVS_PACKET_VERSION,
361         .maxattr = OVS_PACKET_ATTR_MAX,
362          SET_NETNSOK
363 };
364
365 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
366                   const struct dp_upcall_info *upcall_info)
367 {
368         struct dp_stats_percpu *stats;
369         int dp_ifindex;
370         int err;
371
372         if (upcall_info->pid == 0) {
373                 err = -ENOTCONN;
374                 goto err;
375         }
376
377         dp_ifindex = get_dpifindex(dp);
378         if (!dp_ifindex) {
379                 err = -ENODEV;
380                 goto err;
381         }
382
383         forward_ip_summed(skb, true);
384
385         if (!skb_is_gso(skb))
386                 err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
387         else
388                 err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
389         if (err)
390                 goto err;
391
392         return 0;
393
394 err:
395         stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
396
397         u64_stats_update_begin(&stats->sync);
398         stats->n_lost++;
399         u64_stats_update_end(&stats->sync);
400
401         return err;
402 }
403
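/* Segments a GSO packet and queues each resulting segment to userspace via
 * queue_userspace_packet(), then frees the segments. */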
404 static int queue_gso_packets(struct net *net, int dp_ifindex,
405                              struct sk_buff *skb,
406                              const struct dp_upcall_info *upcall_info)
407 {
408         struct dp_upcall_info later_info;
409         struct sw_flow_key later_key;
410         struct sk_buff *segs, *nskb;
411         int err;
412
413         segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
414         if (IS_ERR(segs))
415                 return PTR_ERR(segs);
416
417         /* Queue all of the segments. */
418         skb = segs;
419         do {
420                 err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
421                 if (err)
422                         break;
423
424                 if (skb == segs && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
425                         /* The initial flow key extracted by ovs_flow_extract()
426                          * in this case is for the first fragment, so we need to
427                          * properly mark later fragments.
428                          */
429                         later_key = *upcall_info->key;
430                         later_key.ip.frag = OVS_FRAG_TYPE_LATER;
431
432                         later_info = *upcall_info;
433                         later_info.key = &later_key;
434                         upcall_info = &later_info;
435                 }
436         } while ((skb = skb->next));
437
438         /* Free all of the segments. */
439         skb = segs;
440         do {
441                 nskb = skb->next;
442                 if (err)
443                         kfree_skb(skb);
444                 else
445                         consume_skb(skb);
446         } while ((skb = nskb));
447         return err;
448 }
449
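/* Wraps the packet, its flow key, and any userdata in an OVS_PACKET_CMD_*
 * message and unicasts it to the Netlink socket in upcall_info->pid. */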
450 static int queue_userspace_packet(struct net *net, int dp_ifindex,
451                                   struct sk_buff *skb,
452                                   const struct dp_upcall_info *upcall_info)
453 {
454         struct ovs_header *upcall;
455         struct sk_buff *nskb = NULL;
456         struct sk_buff *user_skb; /* to be queued to userspace */
457         struct nlattr *nla;
458         unsigned int len;
459         int err;
460
461         if (vlan_tx_tag_present(skb)) {
462                 nskb = skb_clone(skb, GFP_ATOMIC);
463                 if (!nskb)
464                         return -ENOMEM;
465
466                 err = vlan_deaccel_tag(nskb);
467                 if (err)
468                         return err;
469
470                 skb = nskb;
471         }
472
473         if (nla_attr_size(skb->len) > USHRT_MAX) {
474                 err = -EFBIG;
475                 goto out;
476         }
477
478         len = sizeof(struct ovs_header);
479         len += nla_total_size(skb->len);
480         len += nla_total_size(FLOW_BUFSIZE);
481         if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
482                 len += nla_total_size(8);
483
484         user_skb = genlmsg_new(len, GFP_ATOMIC);
485         if (!user_skb) {
486                 err = -ENOMEM;
487                 goto out;
488         }
489
490         upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
491                              0, upcall_info->cmd);
492         upcall->dp_ifindex = dp_ifindex;
493
494         nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
495         ovs_flow_to_nlattrs(upcall_info->key, user_skb);
496         nla_nest_end(user_skb, nla);
497
498         if (upcall_info->userdata)
499                 nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
500                             nla_get_u64(upcall_info->userdata));
501
502         nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
503
504         skb_copy_and_csum_dev(skb, nla_data(nla));
505
506         err = genlmsg_unicast(net, user_skb, upcall_info->pid);
507
508 out:
509         kfree_skb(nskb);
510         return err;
511 }
512
513 /* Called with genl_mutex. */
514 static int flush_flows(struct datapath *dp)
515 {
516         struct flow_table *old_table;
517         struct flow_table *new_table;
518
519         old_table = genl_dereference(dp->table);
520         new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
521         if (!new_table)
522                 return -ENOMEM;
523
524         rcu_assign_pointer(dp->table, new_table);
525
526         ovs_flow_tbl_deferred_destroy(old_table);
527         return 0;
528 }
529
530 static int validate_actions(const struct nlattr *attr,
531                                 const struct sw_flow_key *key, int depth);
532
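/* Validates an OVS_ACTION_ATTR_SAMPLE action: it must carry a u32
 * probability and a nested action list, which is validated recursively. */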
533 static int validate_sample(const struct nlattr *attr,
534                                 const struct sw_flow_key *key, int depth)
535 {
536         const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
537         const struct nlattr *probability, *actions;
538         const struct nlattr *a;
539         int rem;
540
541         memset(attrs, 0, sizeof(attrs));
542         nla_for_each_nested(a, attr, rem) {
543                 int type = nla_type(a);
544                 if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
545                         return -EINVAL;
546                 attrs[type] = a;
547         }
548         if (rem)
549                 return -EINVAL;
550
551         probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
552         if (!probability || nla_len(probability) != sizeof(u32))
553                 return -EINVAL;
554
555         actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
556         if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
557                 return -EINVAL;
558         return validate_actions(actions, key, depth + 1);
559 }
560
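/* Validates an OVS_ACTION_ATTR_SET action: exactly one key attribute of the
 * expected length, consistent with the flow key being modified. */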
561 static int validate_set(const struct nlattr *a,
562                         const struct sw_flow_key *flow_key)
563 {
564         const struct nlattr *ovs_key = nla_data(a);
565         int key_type = nla_type(ovs_key);
566
567         /* There can be only one key in an action */
568         if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
569                 return -EINVAL;
570
571         if (key_type > OVS_KEY_ATTR_MAX ||
572             nla_len(ovs_key) != ovs_key_lens[key_type])
573                 return -EINVAL;
574
575         switch (key_type) {
576         const struct ovs_key_ipv4 *ipv4_key;
577
578         case OVS_KEY_ATTR_PRIORITY:
579         case OVS_KEY_ATTR_TUN_ID:
580         case OVS_KEY_ATTR_ETHERNET:
581                 break;
582
583         case OVS_KEY_ATTR_IPV4:
584                 if (flow_key->eth.type != htons(ETH_P_IP))
585                         return -EINVAL;
586
587                 if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst)
588                         return -EINVAL;
589
590                 ipv4_key = nla_data(ovs_key);
591                 if (ipv4_key->ipv4_proto != flow_key->ip.proto)
592                         return -EINVAL;
593
594                 if (ipv4_key->ipv4_frag != flow_key->ip.frag)
595                         return -EINVAL;
596
597                 break;
598
599         case OVS_KEY_ATTR_TCP:
600                 if (flow_key->ip.proto != IPPROTO_TCP)
601                         return -EINVAL;
602
603                 if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
604                         return -EINVAL;
605
606                 break;
607
608         case OVS_KEY_ATTR_UDP:
609                 if (flow_key->ip.proto != IPPROTO_UDP)
610                         return -EINVAL;
611
612                 if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
613                         return -EINVAL;
614                 break;
615
616         default:
617                 return -EINVAL;
618         }
619
620         return 0;
621 }
622
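/* Validates an OVS_ACTION_ATTR_USERSPACE action: it must carry a nonzero
 * OVS_USERSPACE_ATTR_PID. */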
623 static int validate_userspace(const struct nlattr *attr)
624 {
625         static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] =   {
626                 [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
627                 [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
628         };
629         struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
630         int error;
631
632         error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
633                                  attr, userspace_policy);
634         if (error)
635                 return error;
636
637         if (!a[OVS_USERSPACE_ATTR_PID] ||
638             !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
639                 return -EINVAL;
640
641         return 0;
642 }
643
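/* Validates a list of datapath actions against 'key', recursing into sample
 * actions up to SAMPLE_ACTION_DEPTH levels deep. */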
644 static int validate_actions(const struct nlattr *attr,
645                                 const struct sw_flow_key *key,  int depth)
646 {
647         const struct nlattr *a;
648         int rem, err;
649
650         if (depth >= SAMPLE_ACTION_DEPTH)
651                 return -EOVERFLOW;
652
653         nla_for_each_nested(a, attr, rem) {
654                 /* Expected argument lengths, (u32)-1 for variable length. */
655                 static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
656                         [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
657                         [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
658                         [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
659                         [OVS_ACTION_ATTR_POP_VLAN] = 0,
660                         [OVS_ACTION_ATTR_SET] = (u32)-1,
661                         [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
662                 };
663                 const struct ovs_action_push_vlan *vlan;
664                 int type = nla_type(a);
665
666                 if (type > OVS_ACTION_ATTR_MAX ||
667                     (action_lens[type] != nla_len(a) &&
668                      action_lens[type] != (u32)-1))
669                         return -EINVAL;
670
671                 switch (type) {
672                 case OVS_ACTION_ATTR_UNSPEC:
673                         return -EINVAL;
674
675                 case OVS_ACTION_ATTR_USERSPACE:
676                         err = validate_userspace(a);
677                         if (err)
678                                 return err;
679                         break;
680
681                 case OVS_ACTION_ATTR_OUTPUT:
682                         if (nla_get_u32(a) >= DP_MAX_PORTS)
683                                 return -EINVAL;
684                         break;
685
686
687                 case OVS_ACTION_ATTR_POP_VLAN:
688                         break;
689
690                 case OVS_ACTION_ATTR_PUSH_VLAN:
691                         vlan = nla_data(a);
692                         if (vlan->vlan_tpid != htons(ETH_P_8021Q))
693                                 return -EINVAL;
694                         if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
695                                 return -EINVAL;
696                         break;
697
698                 case OVS_ACTION_ATTR_SET:
699                         err = validate_set(a, key);
700                         if (err)
701                                 return err;
702                         break;
703
704                 case OVS_ACTION_ATTR_SAMPLE:
705                         err = validate_sample(a, key, depth);
706                         if (err)
707                                 return err;
708                         break;
709
710                 default:
711                         return -EINVAL;
712                 }
713         }
714
715         if (rem > 0)
716                 return -EINVAL;
717
718         return 0;
719 }
720
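/* Resets a flow's usage statistics (last-used time, TCP flags, packet and
 * byte counts). */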
721 static void clear_stats(struct sw_flow *flow)
722 {
723         flow->used = 0;
724         flow->tcp_flags = 0;
725         flow->packet_count = 0;
726         flow->byte_count = 0;
727 }
728
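/* OVS_PACKET_CMD_EXECUTE handler: reconstructs a packet and flow from the
 * userspace-supplied attributes and executes the given actions on it. */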
729 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
730 {
731         struct ovs_header *ovs_header = info->userhdr;
732         struct nlattr **a = info->attrs;
733         struct sw_flow_actions *acts;
734         struct sk_buff *packet;
735         struct sw_flow *flow;
736         struct datapath *dp;
737         struct ethhdr *eth;
738         int len;
739         int err;
740         int key_len;
741
742         err = -EINVAL;
743         if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
744             !a[OVS_PACKET_ATTR_ACTIONS] ||
745             nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
746                 goto err;
747
748         len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
749         packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
750         err = -ENOMEM;
751         if (!packet)
752                 goto err;
753         skb_reserve(packet, NET_IP_ALIGN);
754
755         memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);
756
757         skb_reset_mac_header(packet);
758         eth = eth_hdr(packet);
759
760         /* Normally, setting the skb 'protocol' field would be handled by a
761          * call to eth_type_trans(), but it assumes there's a sending
762          * device, which we may not have. */
763         if (ntohs(eth->h_proto) >= 1536)
764                 packet->protocol = eth->h_proto;
765         else
766                 packet->protocol = htons(ETH_P_802_2);
767
768         /* Build an sw_flow for sending this packet. */
769         flow = ovs_flow_alloc();
770         err = PTR_ERR(flow);
771         if (IS_ERR(flow))
772                 goto err_kfree_skb;
773
774         err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
775         if (err)
776                 goto err_flow_put;
777
778         err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority,
779                                              &flow->key.phy.in_port,
780                                              &flow->key.phy.tun_id,
781                                              a[OVS_PACKET_ATTR_KEY]);
782         if (err)
783                 goto err_flow_put;
784
785         err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0);
786         if (err)
787                 goto err_flow_put;
788
789         flow->hash = ovs_flow_hash(&flow->key, key_len);
790
791         acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
792         err = PTR_ERR(acts);
793         if (IS_ERR(acts))
794                 goto err_flow_put;
795         rcu_assign_pointer(flow->sf_acts, acts);
796
797         OVS_CB(packet)->flow = flow;
798         packet->priority = flow->key.phy.priority;
799
800         rcu_read_lock();
801         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
802         err = -ENODEV;
803         if (!dp)
804                 goto err_unlock;
805
806         local_bh_disable();
807         err = ovs_execute_actions(dp, packet);
808         local_bh_enable();
809         rcu_read_unlock();
810
811         ovs_flow_put(flow);
812         return err;
813
814 err_unlock:
815         rcu_read_unlock();
816 err_flow_put:
817         ovs_flow_put(flow);
818 err_kfree_skb:
819         kfree_skb(packet);
820 err:
821         return err;
822 }
823
824 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
825         [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
826         [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
827         [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
828 };
829
830 static struct genl_ops dp_packet_genl_ops[] = {
831         { .cmd = OVS_PACKET_CMD_EXECUTE,
832           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
833           .policy = packet_policy,
834           .doit = ovs_packet_cmd_execute
835         }
836 };
837
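/* Sums the per-CPU hit/miss/loss counters and the current flow count into
 * 'stats'. */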
838 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
839 {
840         int i;
841         struct flow_table *table = genl_dereference(dp->table);
842
843         stats->n_flows = ovs_flow_tbl_count(table);
844
845         stats->n_hit = stats->n_missed = stats->n_lost = 0;
846         for_each_possible_cpu(i) {
847                 const struct dp_stats_percpu *percpu_stats;
848                 struct dp_stats_percpu local_stats;
849                 unsigned int start;
850
851                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
852
853                 do {
854                         start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
855                         local_stats = *percpu_stats;
856                 } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
857
858                 stats->n_hit += local_stats.n_hit;
859                 stats->n_missed += local_stats.n_missed;
860                 stats->n_lost += local_stats.n_lost;
861         }
862 }
863
864 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
865         [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
866         [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
867         [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
868 };
869
870 static struct genl_family dp_flow_genl_family = {
871         .id = GENL_ID_GENERATE,
872         .hdrsize = sizeof(struct ovs_header),
873         .name = OVS_FLOW_FAMILY,
874         .version = OVS_FLOW_VERSION,
875         .maxattr = OVS_FLOW_ATTR_MAX,
876          SET_NETNSOK
877 };
878
879 static struct genl_multicast_group ovs_dp_flow_multicast_group = {
880         .name = OVS_FLOW_MCGROUP
881 };
882
883 /* Called with genl_lock. */
884 static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
885                                   struct sk_buff *skb, u32 pid,
886                                   u32 seq, u32 flags, u8 cmd)
887 {
888         const int skb_orig_len = skb->len;
889         const struct sw_flow_actions *sf_acts;
890         struct ovs_flow_stats stats;
891         struct ovs_header *ovs_header;
892         struct nlattr *nla;
893         unsigned long used;
894         u8 tcp_flags;
895         int err;
896
897         sf_acts = rcu_dereference_protected(flow->sf_acts,
898                                             lockdep_genl_is_held());
899
900         ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
901         if (!ovs_header)
902                 return -EMSGSIZE;
903
904         ovs_header->dp_ifindex = get_dpifindex(dp);
905
906         nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
907         if (!nla)
908                 goto nla_put_failure;
909         err = ovs_flow_to_nlattrs(&flow->key, skb);
910         if (err)
911                 goto error;
912         nla_nest_end(skb, nla);
913
914         spin_lock_bh(&flow->lock);
915         used = flow->used;
916         stats.n_packets = flow->packet_count;
917         stats.n_bytes = flow->byte_count;
918         tcp_flags = flow->tcp_flags;
919         spin_unlock_bh(&flow->lock);
920
921         if (used &&
922             nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
923                 goto nla_put_failure;
924
925         if (stats.n_packets &&
926             nla_put(skb, OVS_FLOW_ATTR_STATS,
927                     sizeof(struct ovs_flow_stats), &stats))
928                 goto nla_put_failure;
929
930         if (tcp_flags &&
931             nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
932                 goto nla_put_failure;
933
934         /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
935          * this is the first flow to be dumped into 'skb'.  This is unusual for
936          * Netlink but individual action lists can be longer than
937          * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
938          * The userspace caller can always fetch the actions separately if it
939          * really wants them.  (Most userspace callers in fact don't care.)
940          *
941          * This can only fail for dump operations because the skb is always
942          * properly sized for single flows.
943          */
944         err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
945                       sf_acts->actions);
946         if (err < 0 && skb_orig_len)
947                 goto error;
948
949         return genlmsg_end(skb, ovs_header);
950
951 nla_put_failure:
952         err = -EMSGSIZE;
953 error:
954         genlmsg_cancel(skb, ovs_header);
955         return err;
956 }
957
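/* Allocates a reply skb large enough for ovs_flow_cmd_fill_info() to
 * serialize 'flow'. */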
958 static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
959 {
960         const struct sw_flow_actions *sf_acts;
961         int len;
962
963         sf_acts = rcu_dereference_protected(flow->sf_acts,
964                                             lockdep_genl_is_held());
965
966         /* OVS_FLOW_ATTR_KEY */
967         len = nla_total_size(FLOW_BUFSIZE);
968         /* OVS_FLOW_ATTR_ACTIONS */
969         len += nla_total_size(sf_acts->actions_len);
970         /* OVS_FLOW_ATTR_STATS */
971         len += nla_total_size(sizeof(struct ovs_flow_stats));
972         /* OVS_FLOW_ATTR_TCP_FLAGS */
973         len += nla_total_size(1);
974         /* OVS_FLOW_ATTR_USED */
975         len += nla_total_size(8);
976
977         len += NLMSG_ALIGN(sizeof(struct ovs_header));
978
979         return genlmsg_new(len, GFP_KERNEL);
980 }
981
982 static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
983                                                struct datapath *dp,
984                                                u32 pid, u32 seq, u8 cmd)
985 {
986         struct sk_buff *skb;
987         int retval;
988
989         skb = ovs_flow_cmd_alloc_info(flow);
990         if (!skb)
991                 return ERR_PTR(-ENOMEM);
992
993         retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
994         BUG_ON(retval < 0);
995         return skb;
996 }
997
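/* Handler for OVS_FLOW_CMD_NEW and OVS_FLOW_CMD_SET: creates a flow or
 * updates an existing flow's actions, then notifies the flow multicast
 * group. */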
998 static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
999 {
1000         struct nlattr **a = info->attrs;
1001         struct ovs_header *ovs_header = info->userhdr;
1002         struct sw_flow_key key;
1003         struct sw_flow *flow;
1004         struct sk_buff *reply;
1005         struct datapath *dp;
1006         struct flow_table *table;
1007         int error;
1008         int key_len;
1009
1010         /* Extract key. */
1011         error = -EINVAL;
1012         if (!a[OVS_FLOW_ATTR_KEY])
1013                 goto error;
1014         error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1015         if (error)
1016                 goto error;
1017
1018         /* Validate actions. */
1019         if (a[OVS_FLOW_ATTR_ACTIONS]) {
1020                 error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key,  0);
1021                 if (error)
1022                         goto error;
1023         } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
1024                 error = -EINVAL;
1025                 goto error;
1026         }
1027
1028         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1029         error = -ENODEV;
1030         if (!dp)
1031                 goto error;
1032
1033         table = genl_dereference(dp->table);
1034         flow = ovs_flow_tbl_lookup(table, &key, key_len);
1035         if (!flow) {
1036                 struct sw_flow_actions *acts;
1037
1038                 /* Bail out if we're not allowed to create a new flow. */
1039                 error = -ENOENT;
1040                 if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
1041                         goto error;
1042
1043                 /* Expand table, if necessary, to make room. */
1044                 if (ovs_flow_tbl_need_to_expand(table)) {
1045                         struct flow_table *new_table;
1046
1047                         new_table = ovs_flow_tbl_expand(table);
1048                         if (!IS_ERR(new_table)) {
1049                                 rcu_assign_pointer(dp->table, new_table);
1050                                 ovs_flow_tbl_deferred_destroy(table);
1051                                 table = genl_dereference(dp->table);
1052                         }
1053                 }
1054
1055                 /* Allocate flow. */
1056                 flow = ovs_flow_alloc();
1057                 if (IS_ERR(flow)) {
1058                         error = PTR_ERR(flow);
1059                         goto error;
1060                 }
1061                 flow->key = key;
1062                 clear_stats(flow);
1063
1064                 /* Obtain actions. */
1065                 acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
1066                 error = PTR_ERR(acts);
1067                 if (IS_ERR(acts))
1068                         goto error_free_flow;
1069                 rcu_assign_pointer(flow->sf_acts, acts);
1070
1071                 /* Put flow in bucket. */
1072                 flow->hash = ovs_flow_hash(&key, key_len);
1073                 ovs_flow_tbl_insert(table, flow);
1074
1075                 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
1076                                                 info->snd_seq,
1077                                                 OVS_FLOW_CMD_NEW);
1078         } else {
1079                 /* We found a matching flow. */
1080                 struct sw_flow_actions *old_acts;
1081                 struct nlattr *acts_attrs;
1082
1083                 /* Bail out if we're not allowed to modify an existing flow.
1084                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1085                  * because Generic Netlink treats the latter as a dump
1086                  * request.  We also accept NLM_F_EXCL in case that bug ever
1087                  * gets fixed.
1088                  */
1089                 error = -EEXIST;
1090                 if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
1091                     info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
1092                         goto error;
1093
1094                 /* Update actions. */
1095                 old_acts = rcu_dereference_protected(flow->sf_acts,
1096                                                      lockdep_genl_is_held());
1097                 acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
1098                 if (acts_attrs &&
1099                    (old_acts->actions_len != nla_len(acts_attrs) ||
1100                    memcmp(old_acts->actions, nla_data(acts_attrs),
1101                           old_acts->actions_len))) {
1102                         struct sw_flow_actions *new_acts;
1103
1104                         new_acts = ovs_flow_actions_alloc(acts_attrs);
1105                         error = PTR_ERR(new_acts);
1106                         if (IS_ERR(new_acts))
1107                                 goto error;
1108
1109                         rcu_assign_pointer(flow->sf_acts, new_acts);
1110                         ovs_flow_deferred_free_acts(old_acts);
1111                 }
1112
1113                 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
1114                                                info->snd_seq, OVS_FLOW_CMD_NEW);
1115
1116                 /* Clear stats. */
1117                 if (a[OVS_FLOW_ATTR_CLEAR]) {
1118                         spin_lock_bh(&flow->lock);
1119                         clear_stats(flow);
1120                         spin_unlock_bh(&flow->lock);
1121                 }
1122         }
1123
1124         if (!IS_ERR(reply))
1125                 genl_notify(reply, genl_info_net(info), info->snd_pid,
1126                            ovs_dp_flow_multicast_group.id, info->nlhdr,
1127                            GFP_KERNEL);
1128         else
1129                 netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
1130                                 ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
1131         return 0;
1132
1133 error_free_flow:
1134         ovs_flow_put(flow);
1135 error:
1136         return error;
1137 }
1138
1139 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1140 {
1141         struct nlattr **a = info->attrs;
1142         struct ovs_header *ovs_header = info->userhdr;
1143         struct sw_flow_key key;
1144         struct sk_buff *reply;
1145         struct sw_flow *flow;
1146         struct datapath *dp;
1147         struct flow_table *table;
1148         int err;
1149         int key_len;
1150
1151         if (!a[OVS_FLOW_ATTR_KEY])
1152                 return -EINVAL;
1153         err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1154         if (err)
1155                 return err;
1156
1157         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1158         if (!dp)
1159                 return -ENODEV;
1160
1161         table = genl_dereference(dp->table);
1162         flow = ovs_flow_tbl_lookup(table, &key, key_len);
1163         if (!flow)
1164                 return -ENOENT;
1165
1166         reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
1167                                         info->snd_seq, OVS_FLOW_CMD_NEW);
1168         if (IS_ERR(reply))
1169                 return PTR_ERR(reply);
1170
1171         return genlmsg_reply(reply, info);
1172 }
1173
1174 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1175 {
1176         struct nlattr **a = info->attrs;
1177         struct ovs_header *ovs_header = info->userhdr;
1178         struct sw_flow_key key;
1179         struct sk_buff *reply;
1180         struct sw_flow *flow;
1181         struct datapath *dp;
1182         struct flow_table *table;
1183         int err;
1184         int key_len;
1185
1186         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1187         if (!dp)
1188                 return -ENODEV;
1189
1190         if (!a[OVS_FLOW_ATTR_KEY])
1191                 return flush_flows(dp);
1192
1193         err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1194         if (err)
1195                 return err;
1196
1197         table = genl_dereference(dp->table);
1198         flow = ovs_flow_tbl_lookup(table, &key, key_len);
1199         if (!flow)
1200                 return -ENOENT;
1201
1202         reply = ovs_flow_cmd_alloc_info(flow);
1203         if (!reply)
1204                 return -ENOMEM;
1205
1206         ovs_flow_tbl_remove(table, flow);
1207
1208         err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
1209                                      info->snd_seq, 0, OVS_FLOW_CMD_DEL);
1210         BUG_ON(err < 0);
1211
1212         ovs_flow_deferred_free(flow);
1213
1214         genl_notify(reply, genl_info_net(info), info->snd_pid,
1215                     ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1216         return 0;
1217 }
1218
1219 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1220 {
1221         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1222         struct datapath *dp;
1223         struct flow_table *table;
1224
1225         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1226         if (!dp)
1227                 return -ENODEV;
1228
1229         table = genl_dereference(dp->table);
1230
1231         for (;;) {
1232                 struct sw_flow *flow;
1233                 u32 bucket, obj;
1234
1235                 bucket = cb->args[0];
1236                 obj = cb->args[1];
1237                 flow = ovs_flow_tbl_next(table, &bucket, &obj);
1238                 if (!flow)
1239                         break;
1240
1241                 if (ovs_flow_cmd_fill_info(flow, dp, skb,
1242                                            NETLINK_CB(cb->skb).pid,
1243                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1244                                            OVS_FLOW_CMD_NEW) < 0)
1245                         break;
1246
1247                 cb->args[0] = bucket;
1248                 cb->args[1] = obj;
1249         }
1250         return skb->len;
1251 }
1252
1253 static struct genl_ops dp_flow_genl_ops[] = {
1254         { .cmd = OVS_FLOW_CMD_NEW,
1255           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1256           .policy = flow_policy,
1257           .doit = ovs_flow_cmd_new_or_set
1258         },
1259         { .cmd = OVS_FLOW_CMD_DEL,
1260           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1261           .policy = flow_policy,
1262           .doit = ovs_flow_cmd_del
1263         },
1264         { .cmd = OVS_FLOW_CMD_GET,
1265           .flags = 0,               /* OK for unprivileged users. */
1266           .policy = flow_policy,
1267           .doit = ovs_flow_cmd_get,
1268           .dumpit = ovs_flow_cmd_dump
1269         },
1270         { .cmd = OVS_FLOW_CMD_SET,
1271           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1272           .policy = flow_policy,
1273           .doit = ovs_flow_cmd_new_or_set,
1274         },
1275 };
1276
1277 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1278 #ifdef HAVE_NLA_NUL_STRING
1279         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1280 #endif
1281         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1282 };
1283
1284 static struct genl_family dp_datapath_genl_family = {
1285         .id = GENL_ID_GENERATE,
1286         .hdrsize = sizeof(struct ovs_header),
1287         .name = OVS_DATAPATH_FAMILY,
1288         .version = OVS_DATAPATH_VERSION,
1289         .maxattr = OVS_DP_ATTR_MAX,
1290          SET_NETNSOK
1291 };
1292
1293 static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
1294         .name = OVS_DATAPATH_MCGROUP
1295 };
1296
1297 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1298                                 u32 pid, u32 seq, u32 flags, u8 cmd)
1299 {
1300         struct ovs_header *ovs_header;
1301         struct ovs_dp_stats dp_stats;
1302         int err;
1303
1304         ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
1305                                    flags, cmd);
1306         if (!ovs_header)
1307                 goto error;
1308
1309         ovs_header->dp_ifindex = get_dpifindex(dp);
1310
1311         rcu_read_lock();
1312         err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1313         rcu_read_unlock();
1314         if (err)
1315                 goto nla_put_failure;
1316
1317         get_dp_stats(dp, &dp_stats);
1318         if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
1319                 goto nla_put_failure;
1320
1321         return genlmsg_end(skb, ovs_header);
1322
1323 nla_put_failure:
1324         genlmsg_cancel(skb, ovs_header);
1325 error:
1326         return -EMSGSIZE;
1327 }
1328
1329 static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
1330                                              u32 seq, u8 cmd)
1331 {
1332         struct sk_buff *skb;
1333         int retval;
1334
1335         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1336         if (!skb)
1337                 return ERR_PTR(-ENOMEM);
1338
1339         retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
1340         if (retval < 0) {
1341                 kfree_skb(skb);
1342                 return ERR_PTR(retval);
1343         }
1344         return skb;
1345 }
1346
1347 static int ovs_dp_cmd_validate(struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1348 {
1349         return CHECK_NUL_STRING(a[OVS_DP_ATTR_NAME], IFNAMSIZ - 1);
1350 }
1351
1352 /* Called with genl_mutex and optionally with RTNL lock also. */
1353 static struct datapath *lookup_datapath(struct net *net,
1354                                         struct ovs_header *ovs_header,
1355                                         struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1356 {
1357         struct datapath *dp;
1358
1359         if (!a[OVS_DP_ATTR_NAME])
1360                 dp = get_dp(net, ovs_header->dp_ifindex);
1361         else {
1362                 struct vport *vport;
1363
1364                 rcu_read_lock();
1365                 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1366                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1367                 rcu_read_unlock();
1368         }
1369         return dp ? dp : ERR_PTR(-ENODEV);
1370 }
1371
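/* OVS_DP_CMD_NEW handler: allocates a datapath with its flow table, stats,
 * and local internal port, then announces it. */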
1372 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1373 {
1374         struct nlattr **a = info->attrs;
1375         struct vport_parms parms;
1376         struct sk_buff *reply;
1377         struct datapath *dp;
1378         struct vport *vport;
1379         struct ovs_net *ovs_net;
1380         int err, i;
1381
1382         err = -EINVAL;
1383         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1384                 goto err;
1385
1386         err = ovs_dp_cmd_validate(a);
1387         if (err)
1388                 goto err;
1389
1390         rtnl_lock();
1391
1392         err = -ENOMEM;
1393         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1394         if (dp == NULL)
1395                 goto err_unlock_rtnl;
1396
1397         /* Initialize kobject for bridge.  This will be added as
1398          * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
1399         dp->ifobj.kset = NULL;
1400         kobject_init(&dp->ifobj, &dp_ktype);
1401
1402         /* Allocate table. */
1403         err = -ENOMEM;
1404         rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
1405         if (!dp->table)
1406                 goto err_free_dp;
1407
1408         dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1409         if (!dp->stats_percpu) {
1410                 err = -ENOMEM;
1411                 goto err_destroy_table;
1412         }
1413         ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
1414
1415         dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1416                             GFP_KERNEL);
1417         if (!dp->ports) {
1418                 err = -ENOMEM;
1419                 goto err_destroy_percpu;
1420         }
1421
1422         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1423                 INIT_HLIST_HEAD(&dp->ports[i]);
1424
1425         /* Set up our datapath device. */
1426         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1427         parms.type = OVS_VPORT_TYPE_INTERNAL;
1428         parms.options = NULL;
1429         parms.dp = dp;
1430         parms.port_no = OVSP_LOCAL;
1431         parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
1432
1433         vport = new_vport(&parms);
1434         if (IS_ERR(vport)) {
1435                 err = PTR_ERR(vport);
1436                 if (err == -EBUSY)
1437                         err = -EEXIST;
1438
1439                 goto err_destroy_ports_array;
1440         }
1441
1442         reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
1443                                       info->snd_seq, OVS_DP_CMD_NEW);
1444         err = PTR_ERR(reply);
1445         if (IS_ERR(reply))
1446                 goto err_destroy_local_port;
1447
1448         ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1449         list_add_tail(&dp->list_node, &ovs_net->dps);
1450         ovs_dp_sysfs_add_dp(dp);
1451
1452         rtnl_unlock();
1453
1454         genl_notify(reply, genl_info_net(info), info->snd_pid,
1455                     ovs_dp_datapath_multicast_group.id, info->nlhdr,
1456                     GFP_KERNEL);
1457         return 0;
1458
1459 err_destroy_local_port:
1460         ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
1461 err_destroy_ports_array:
1462         kfree(dp->ports);
1463 err_destroy_percpu:
1464         free_percpu(dp->stats_percpu);
1465 err_destroy_table:
1466         ovs_flow_tbl_destroy(genl_dereference(dp->table));
1467 err_free_dp:
1468         kfree(dp);
1469 err_unlock_rtnl:
1470         rtnl_unlock();
1471 err:
1472         return err;
1473 }
1474
1475 /* Called with genl_mutex. */
1476 static void __dp_destroy(struct datapath *dp)
1477 {
1478         int i;
1479
1480         rtnl_lock();
1481
1482         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1483                 struct vport *vport;
1484                 struct hlist_node *node, *n;
1485
1486                 hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node)
1487                         if (vport->port_no != OVSP_LOCAL)
1488                                 ovs_dp_detach_port(vport);
1489         }
1490
1491         ovs_dp_sysfs_del_dp(dp);
1492         list_del(&dp->list_node);
1493         ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
1494
1495         /* rtnl_unlock() will wait until all the references to devices that
1496          * are pending unregistration have been dropped.  We do it here to
1497          * ensure that any internal devices (which contain DP pointers) are
1498          * fully destroyed before freeing the datapath.
1499          */
1500         rtnl_unlock();
1501
1502         call_rcu(&dp->rcu, destroy_dp_rcu);
1503 }
1504
1505 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1506 {
1507         struct sk_buff *reply;
1508         struct datapath *dp;
1509         int err;
1510
1511         err = ovs_dp_cmd_validate(info->attrs);
1512         if (err)
1513                 return err;
1514
1515         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1516         err = PTR_ERR(dp);
1517         if (IS_ERR(dp))
1518                 return err;
1519
1520         reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
1521                                       info->snd_seq, OVS_DP_CMD_DEL);
1522         err = PTR_ERR(reply);
1523         if (IS_ERR(reply))
1524                 return err;
1525
1526         __dp_destroy(dp);
1527
1528         genl_notify(reply, genl_info_net(info), info->snd_pid,
1529                     ovs_dp_datapath_multicast_group.id, info->nlhdr,
1530                     GFP_KERNEL);
1531
1532         return 0;
1533 }
1534
1535 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1536 {
1537         struct sk_buff *reply;
1538         struct datapath *dp;
1539         int err;
1540
1541         err = ovs_dp_cmd_validate(info->attrs);
1542         if (err)
1543                 return err;
1544
1545         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1546         if (IS_ERR(dp))
1547                 return PTR_ERR(dp);
1548
1549         reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
1550                                       info->snd_seq, OVS_DP_CMD_NEW);
1551         if (IS_ERR(reply)) {
1552                 err = PTR_ERR(reply);
1553                 netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
1554                                 ovs_dp_datapath_multicast_group.id, err);
1555                 return 0;
1556         }
1557
1558         genl_notify(reply, genl_info_net(info), info->snd_pid,
1559                     ovs_dp_datapath_multicast_group.id, info->nlhdr,
1560                     GFP_KERNEL);
1561
1562         return 0;
1563 }
1564
1565 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1566 {
1567         struct sk_buff *reply;
1568         struct datapath *dp;
1569         int err;
1570
1571         err = ovs_dp_cmd_validate(info->attrs);
1572         if (err)
1573                 return err;
1574
1575         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1576         if (IS_ERR(dp))
1577                 return PTR_ERR(dp);
1578
1579         reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
1580                                       info->snd_seq, OVS_DP_CMD_NEW);
1581         if (IS_ERR(reply))
1582                 return PTR_ERR(reply);
1583
1584         return genlmsg_reply(reply, info);
1585 }
1586
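/* Dumps every datapath in the caller's network namespace, resuming from the
 * index saved in cb->args[0] across successive calls. */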
1587 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1588 {
1589         struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1590         struct datapath *dp;
1591         int skip = cb->args[0];
1592         int i = 0;
1593
1594         list_for_each_entry(dp, &ovs_net->dps, list_node) {
1595                 if (i >= skip &&
1596                     ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
1597                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1598                                          OVS_DP_CMD_NEW) < 0)
1599                         break;
1600                 i++;
1601         }
1602
1603         cb->args[0] = i;
1604
1605         return skb->len;
1606 }
1607
1608 static struct genl_ops dp_datapath_genl_ops[] = {
1609         { .cmd = OVS_DP_CMD_NEW,
1610           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1611           .policy = datapath_policy,
1612           .doit = ovs_dp_cmd_new
1613         },
1614         { .cmd = OVS_DP_CMD_DEL,
1615           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1616           .policy = datapath_policy,
1617           .doit = ovs_dp_cmd_del
1618         },
1619         { .cmd = OVS_DP_CMD_GET,
1620           .flags = 0,               /* OK for unprivileged users. */
1621           .policy = datapath_policy,
1622           .doit = ovs_dp_cmd_get,
1623           .dumpit = ovs_dp_cmd_dump
1624         },
1625         { .cmd = OVS_DP_CMD_SET,
1626           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1627           .policy = datapath_policy,
1628           .doit = ovs_dp_cmd_set,
1629         },
1630 };
1631
1632 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
1633 #ifdef HAVE_NLA_NUL_STRING
1634         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1635         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
1636         [OVS_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
1637 #else
1638         [OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct ovs_vport_stats) },
1639         [OVS_VPORT_ATTR_ADDRESS] = { .minlen = ETH_ALEN },
1640 #endif
1641         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1642         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1643         [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1644         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1645 };
1646
1647 static struct genl_family dp_vport_genl_family = {
1648         .id = GENL_ID_GENERATE,
1649         .hdrsize = sizeof(struct ovs_header),
1650         .name = OVS_VPORT_FAMILY,
1651         .version = OVS_VPORT_VERSION,
1652         .maxattr = OVS_VPORT_ATTR_MAX,
1653          SET_NETNSOK
1654 };
1655
1656 struct genl_multicast_group ovs_dp_vport_multicast_group = {
1657         .name = OVS_VPORT_MCGROUP
1658 };
1659
1660 /* Called with RTNL lock or RCU read lock. */
1661 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1662                                    u32 pid, u32 seq, u32 flags, u8 cmd)
1663 {
1664         struct ovs_header *ovs_header;
1665         struct ovs_vport_stats vport_stats;
1666         int err;
1667
1668         ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
1669                                  flags, cmd);
1670         if (!ovs_header)
1671                 return -EMSGSIZE;
1672
1673         ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1674
1675         if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1676             nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1677             nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
1678             nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid))
1679                 goto nla_put_failure;
1680
1681         ovs_vport_get_stats(vport, &vport_stats);
1682         if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1683                     &vport_stats))
1684                 goto nla_put_failure;
1685
1686         if (nla_put(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN,
1687                     vport->ops->get_addr(vport)))
1688                 goto nla_put_failure;
1689
1690         err = ovs_vport_get_options(vport, skb);
1691         if (err == -EMSGSIZE)
1692                 goto error;
1693
1694         return genlmsg_end(skb, ovs_header);
1695
1696 nla_put_failure:
1697         err = -EMSGSIZE;
1698 error:
1699         genlmsg_cancel(skb, ovs_header);
1700         return err;
1701 }
1702
1703 /* Called with RTNL lock or RCU read lock. */
1704 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
1705                                          u32 seq, u8 cmd)
1706 {
1707         struct sk_buff *skb;
1708         int retval;
1709
1710         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1711         if (!skb)
1712                 return ERR_PTR(-ENOMEM);
1713
1714         retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
1715         if (retval < 0) {
1716                 kfree_skb(skb);
1717                 return ERR_PTR(retval);
1718         }
1719         return skb;
1720 }
1721
1722 static int ovs_vport_cmd_validate(struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1723 {
1724         return CHECK_NUL_STRING(a[OVS_VPORT_ATTR_NAME], IFNAMSIZ - 1);
1725 }
1726
1727 /* Called with RTNL lock or RCU read lock. */
1728 static struct vport *lookup_vport(struct net *net,
1729                                   struct ovs_header *ovs_header,
1730                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1731 {
1732         struct datapath *dp;
1733         struct vport *vport;
1734
1735         if (a[OVS_VPORT_ATTR_NAME]) {
1736                 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1737                 if (!vport)
1738                         return ERR_PTR(-ENODEV);
1739                 if (ovs_header->dp_ifindex &&
1740                     ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1741                         return ERR_PTR(-ENODEV);
1742                 return vport;
1743         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1744                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1745
1746                 if (port_no >= DP_MAX_PORTS)
1747                         return ERR_PTR(-EFBIG);
1748
1749                 dp = get_dp(net, ovs_header->dp_ifindex);
1750                 if (!dp)
1751                         return ERR_PTR(-ENODEV);
1752
1753                 vport = ovs_vport_rtnl_rcu(dp, port_no);
1754                 if (!vport)
1755                         return ERR_PTR(-ENOENT);
1756                 return vport;
1757         } else
1758                 return ERR_PTR(-EINVAL);
1759 }
1760
1761 /* Called with RTNL lock. */
1762 static int change_vport(struct vport *vport,
1763                         struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1764 {
1765         int err = 0;
1766
1767         if (a[OVS_VPORT_ATTR_STATS])
1768                 ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
1769
1770         if (a[OVS_VPORT_ATTR_ADDRESS])
1771                 err = ovs_vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS]));
1772
1773         return err;
1774 }
1775
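/* Handles OVS_VPORT_CMD_NEW: creates a vport on the requested datapath,
 * using the given port number or, if none was supplied, the lowest free one. */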
1776 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1777 {
1778         struct nlattr **a = info->attrs;
1779         struct ovs_header *ovs_header = info->userhdr;
1780         struct vport_parms parms;
1781         struct sk_buff *reply;
1782         struct vport *vport;
1783         struct datapath *dp;
1784         u32 port_no;
1785         int err;
1786
1787         err = -EINVAL;
1788         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1789             !a[OVS_VPORT_ATTR_UPCALL_PID])
1790                 goto exit;
1791
1792         err = ovs_vport_cmd_validate(a);
1793         if (err)
1794                 goto exit;
1795
1796         rtnl_lock();
1797         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1798         err = -ENODEV;
1799         if (!dp)
1800                 goto exit_unlock;
1801
1802         if (a[OVS_VPORT_ATTR_PORT_NO]) {
1803                 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1804
1805                 err = -EFBIG;
1806                 if (port_no >= DP_MAX_PORTS)
1807                         goto exit_unlock;
1808
1809                 vport = ovs_vport_rtnl(dp, port_no);
1810                 err = -EBUSY;
1811                 if (vport)
1812                         goto exit_unlock;
1813         } else {
1814                 for (port_no = 1; ; port_no++) {
1815                         if (port_no >= DP_MAX_PORTS) {
1816                                 err = -EFBIG;
1817                                 goto exit_unlock;
1818                         }
1819                         vport = ovs_vport_rtnl(dp, port_no);
1820                         if (!vport)
1821                                 break;
1822                 }
1823         }
1824
1825         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1826         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1827         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1828         parms.dp = dp;
1829         parms.port_no = port_no;
1830         parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
1831
1832         vport = new_vport(&parms);
1833         err = PTR_ERR(vport);
1834         if (IS_ERR(vport))
1835                 goto exit_unlock;
1836
1837         ovs_dp_sysfs_add_if(vport);
1838
1839         err = change_vport(vport, a);
1840         if (!err) {
1841                 reply = ovs_vport_cmd_build_info(vport, info->snd_pid,
1842                                                  info->snd_seq,
1843                                                  OVS_VPORT_CMD_NEW);
1844                 if (IS_ERR(reply))
1845                         err = PTR_ERR(reply);
1846         }
1847         if (err) {
1848                 ovs_dp_detach_port(vport);
1849                 goto exit_unlock;
1850         }
1851         genl_notify(reply, genl_info_net(info), info->snd_pid,
1852                     ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1853
1854 exit_unlock:
1855         rtnl_unlock();
1856 exit:
1857         return err;
1858 }
1859
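/* Handles OVS_VPORT_CMD_SET: updates an existing vport's options, stats,
 * Ethernet address and upcall PID.  The port type cannot be changed. */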
1860 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1861 {
1862         struct nlattr **a = info->attrs;
1863         struct sk_buff *reply;
1864         struct vport *vport;
1865         int err;
1866
1867         err = ovs_vport_cmd_validate(a);
1868         if (err)
1869                 goto exit;
1870
1871         rtnl_lock();
1872         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1873         err = PTR_ERR(vport);
1874         if (IS_ERR(vport))
1875                 goto exit_unlock;
1876
1877         err = 0;
1878         if (a[OVS_VPORT_ATTR_TYPE] &&
1879             nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
1880                 err = -EINVAL;
1881
1882         if (!err && a[OVS_VPORT_ATTR_OPTIONS])
1883                 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
1884         if (!err)
1885                 err = change_vport(vport, a);
1886         else
1887                 goto exit_unlock;
1888         if (!err && a[OVS_VPORT_ATTR_UPCALL_PID])
1889                 vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
1890
1891         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1892                                          OVS_VPORT_CMD_NEW);
1893         if (IS_ERR(reply)) {
1894                 err = PTR_ERR(reply);
1895                 netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
1896                                 ovs_dp_vport_multicast_group.id, err);
1897                 goto exit_unlock;
1898         }
1899
1900         genl_notify(reply, genl_info_net(info), info->snd_pid,
1901                     ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1902
1903 exit_unlock:
1904         rtnl_unlock();
1905 exit:
1906         return err;
1907 }
1908
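/* Handles OVS_VPORT_CMD_DEL: detaches a non-local vport.  The local port
 * (OVSP_LOCAL) can only be removed together with its datapath. */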
1909 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1910 {
1911         struct nlattr **a = info->attrs;
1912         struct sk_buff *reply;
1913         struct vport *vport;
1914         int err;
1915
1916         err = ovs_vport_cmd_validate(a);
1917         if (err)
1918                 goto exit;
1919
1920         rtnl_lock();
1921         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1922         err = PTR_ERR(vport);
1923         if (IS_ERR(vport))
1924                 goto exit_unlock;
1925
1926         if (vport->port_no == OVSP_LOCAL) {
1927                 err = -EINVAL;
1928                 goto exit_unlock;
1929         }
1930
1931         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1932                                          OVS_VPORT_CMD_DEL);
1933         err = PTR_ERR(reply);
1934         if (IS_ERR(reply))
1935                 goto exit_unlock;
1936
1937         ovs_dp_detach_port(vport);
1938
1939         genl_notify(reply, genl_info_net(info), info->snd_pid,
1940                     ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1941
1942 exit_unlock:
1943         rtnl_unlock();
1944 exit:
1945         return err;
1946 }
1947
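/* Handles OVS_VPORT_CMD_GET: looks up the vport under RCU and replies with
 * its current state. */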
1948 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
1949 {
1950         struct nlattr **a = info->attrs;
1951         struct ovs_header *ovs_header = info->userhdr;
1952         struct sk_buff *reply;
1953         struct vport *vport;
1954         int err;
1955
1956         err = ovs_vport_cmd_validate(a);
1957         if (err)
1958                 goto exit;
1959
1960         rcu_read_lock();
1961         vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
1962         err = PTR_ERR(vport);
1963         if (IS_ERR(vport))
1964                 goto exit_unlock;
1965
1966         reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
1967                                          OVS_VPORT_CMD_NEW);
1968         err = PTR_ERR(reply);
1969         if (IS_ERR(reply))
1970                 goto exit_unlock;
1971
1972         rcu_read_unlock();
1973
1974         return genlmsg_reply(reply, info);
1975
1976 exit_unlock:
1977         rcu_read_unlock();
1978 exit:
1979         return err;
1980 }
1981
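/* Dumps all vports of one datapath, resuming from the hash bucket and
 * in-bucket offset saved in cb->args[0] and cb->args[1]. */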
1982 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1983 {
1984         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1985         struct datapath *dp;
1986         int bucket = cb->args[0], skip = cb->args[1];
1987         int i, j = 0;
1988
1989         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1990         if (!dp)
1991                 return -ENODEV;
1992
1993         rcu_read_lock();
1994         for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
1995                 struct vport *vport;
1996                 struct hlist_node *n;
1997
1998                 j = 0;
1999                 hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) {
2000                         if (j >= skip &&
2001                             ovs_vport_cmd_fill_info(vport, skb,
2002                                                     NETLINK_CB(cb->skb).pid,
2003                                                     cb->nlh->nlmsg_seq,
2004                                                     NLM_F_MULTI,
2005                                                     OVS_VPORT_CMD_NEW) < 0)
2006                                 goto out;
2007
2008                         j++;
2009                 }
2010                 skip = 0;
2011         }
2012 out:
2013         rcu_read_unlock();
2014
2015         cb->args[0] = i;
2016         cb->args[1] = j;
2017
2018         return skb->len;
2019 }
2020
2021 static struct genl_ops dp_vport_genl_ops[] = {
2022         { .cmd = OVS_VPORT_CMD_NEW,
2023           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2024           .policy = vport_policy,
2025           .doit = ovs_vport_cmd_new
2026         },
2027         { .cmd = OVS_VPORT_CMD_DEL,
2028           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2029           .policy = vport_policy,
2030           .doit = ovs_vport_cmd_del
2031         },
2032         { .cmd = OVS_VPORT_CMD_GET,
2033           .flags = 0,               /* OK for unprivileged users. */
2034           .policy = vport_policy,
2035           .doit = ovs_vport_cmd_get,
2036           .dumpit = ovs_vport_cmd_dump
2037         },
2038         { .cmd = OVS_VPORT_CMD_SET,
2039           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2040           .policy = vport_policy,
2041           .doit = ovs_vport_cmd_set,
2042         },
2043 };
2044
2045 struct genl_family_and_ops {
2046         struct genl_family *family;
2047         struct genl_ops *ops;
2048         int n_ops;
2049         struct genl_multicast_group *group;
2050 };
2051
2052 static const struct genl_family_and_ops dp_genl_families[] = {
2053         { &dp_datapath_genl_family,
2054           dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
2055           &ovs_dp_datapath_multicast_group },
2056         { &dp_vport_genl_family,
2057           dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
2058           &ovs_dp_vport_multicast_group },
2059         { &dp_flow_genl_family,
2060           dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
2061           &ovs_dp_flow_multicast_group },
2062         { &dp_packet_genl_family,
2063           dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
2064           NULL },
2065 };
2066
2067 static void dp_unregister_genl(int n_families)
2068 {
2069         int i;
2070
2071         for (i = 0; i < n_families; i++)
2072                 genl_unregister_family(dp_genl_families[i].family);
2073 }
2074
2075 static int dp_register_genl(void)
2076 {
2077         int n_registered;
2078         int err;
2079         int i;
2080
2081         n_registered = 0;
2082         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2083                 const struct genl_family_and_ops *f = &dp_genl_families[i];
2084
2085                 err = genl_register_family_with_ops(f->family, f->ops,
2086                                                     f->n_ops);
2087                 if (err)
2088                         goto error;
2089                 n_registered++;
2090
2091                 if (f->group) {
2092                         err = genl_register_mc_group(f->family, f->group);
2093                         if (err)
2094                                 goto error;
2095                 }
2096         }
2097
2098         return 0;
2099
2100 error:
2101         dp_unregister_genl(n_registered);
2102         return err;
2103 }
2104
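/* Replaces every datapath's flow table with a freshly rehashed copy; the old
 * table is destroyed after an RCU grace period. */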
2105 static int __rehash_flow_table(void *dummy)
2106 {
2107         struct datapath *dp;
2108         struct net *net;
2109
2110         rtnl_lock();
2111         for_each_net(net) {
2112                 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2113
2114                 list_for_each_entry(dp, &ovs_net->dps, list_node) {
2115                         struct flow_table *old_table = genl_dereference(dp->table);
2116                         struct flow_table *new_table;
2117
2118                         new_table = ovs_flow_tbl_rehash(old_table);
2119                         if (!IS_ERR(new_table)) {
2120                                 rcu_assign_pointer(dp->table, new_table);
2121                                 ovs_flow_tbl_deferred_destroy(old_table);
2122                         }
2123                 }
2124         }
2125         rtnl_unlock();
2126         return 0;
2127 }
2128
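/* Delayed-work handler: runs __rehash_flow_table() through genl_exec() and
 * then rearms itself to run again after REHASH_FLOW_INTERVAL. */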
2129 static void rehash_flow_table(struct work_struct *work)
2130 {
2131         genl_exec(__rehash_flow_table, NULL);
2132         schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
2133 }
2134
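/* Destroys every datapath in one network namespace; called through
 * genl_exec() when the namespace is torn down. */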
2135 static int dp_destroy_all(void *data)
2136 {
2137         struct datapath *dp, *dp_next;
2138         struct ovs_net *ovs_net = data;
2139
2140         list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2141                 __dp_destroy(dp);
2142
2143         return 0;
2144 }
2145
2146 static int __net_init ovs_init_net(struct net *net)
2147 {
2148         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2149
2150         INIT_LIST_HEAD(&ovs_net->dps);
2151         return 0;
2152 }
2153
2154 static void __net_exit ovs_exit_net(struct net *net)
2155 {
2156         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2157
2158         genl_exec(dp_destroy_all, ovs_net);
2159 }
2160
2161 static struct pernet_operations ovs_net_ops = {
2162         .init = ovs_init_net,
2163         .exit = ovs_exit_net,
2164         .id   = &ovs_net_id,
2165         .size = sizeof(struct ovs_net),
2166 };
2167
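/* Module entry point: brings up the subsystems in dependency order and
 * registers the generic netlink families.  On failure, everything set up so
 * far is unwound in reverse. */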
2168 static int __init dp_init(void)
2169 {
2170         struct sk_buff *dummy_skb;
2171         int err;
2172
2173         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
2174
2175         pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
2176                 VERSION);
2177
2178         err = genl_exec_init();
2179         if (err)
2180                 goto error;
2181
2182         err = ovs_workqueues_init();
2183         if (err)
2184                 goto error_genl_exec;
2185
2186         err = ovs_tnl_init();
2187         if (err)
2188                 goto error_wq;
2189
2190         err = ovs_flow_init();
2191         if (err)
2192                 goto error_tnl_exit;
2193
2194         err = ovs_vport_init();
2195         if (err)
2196                 goto error_flow_exit;
2197
2198         err = register_pernet_device(&ovs_net_ops);
2199         if (err)
2200                 goto error_vport_exit;
2201
2202         err = register_netdevice_notifier(&ovs_dp_device_notifier);
2203         if (err)
2204                 goto error_netns_exit;
2205
2206         err = dp_register_genl();
2207         if (err < 0)
2208                 goto error_unreg_notifier;
2209
2210         schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
2211
2212         return 0;
2213
2214 error_unreg_notifier:
2215         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2216 error_netns_exit:
2217         unregister_pernet_device(&ovs_net_ops);
2218 error_vport_exit:
2219         ovs_vport_exit();
2220 error_flow_exit:
2221         ovs_flow_exit();
2222 error_tnl_exit:
2223         ovs_tnl_exit();
2224 error_wq:
2225         ovs_workqueues_exit();
2226 error_genl_exec:
2227         genl_exec_exit();
2228 error:
2229         return err;
2230 }
2231
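/* Module exit: undoes dp_init() in reverse order.  rcu_barrier() waits for
 * outstanding deferred-destruction callbacks before the flow and vport
 * subsystems are shut down. */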
2232 static void dp_cleanup(void)
2233 {
2234         cancel_delayed_work_sync(&rehash_flow_wq);
2235         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2236         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2237         unregister_pernet_device(&ovs_net_ops);
2238         rcu_barrier();
2239         ovs_vport_exit();
2240         ovs_flow_exit();
2241         ovs_tnl_exit();
2242         ovs_workqueues_exit();
2243         genl_exec_exit();
2244 }
2245
2246 module_init(dp_init);
2247 module_exit(dp_cleanup);
2248
2249 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2250 MODULE_LICENSE("GPL");