/*
 * Copyright (c) 2007-2012 Nicira Networks.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "checksum.h"
#include "datapath.h"
#include "flow.h"
#include "genl_exec.h"
#include "vlan.h"
#include "tunnel.h"
#include "vport-internal_dev.h"

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
    LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)
#error Kernels before 2.6.18 or after 3.2 are not supported by this version of Open vSwitch.
#endif

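/* The flow table is periodically rehashed from a delayed work item
 * (rehash_flow_table(), defined later in this file), presumably so that
 * hash-bucket assignments do not stay predictable indefinitely. */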
#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
static void rehash_flow_table(struct work_struct *work);
static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);

int ovs_net_id __read_mostly;

int (*ovs_dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(ovs_dp_ioctl_hook);

/**
 * DOC: Locking:
 *
 * Writes to device state (add/remove datapath, port, set operations on vports,
 * etc.) are protected by RTNL.
 *
 * Writes to other state (flow table modifications, set miscellaneous datapath
 * parameters, etc.) are protected by genl_mutex.  The RTNL lock nests inside
 * genl_mutex.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of above and don't interact with
 * each other.
 */

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
                             const struct dp_upcall_info *);
static int queue_userspace_packet(struct net *, int dp_ifindex,
                                  struct sk_buff *,
                                  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
static struct datapath *get_dp(struct net *net, int dp_ifindex)
{
        struct datapath *dp = NULL;
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, dp_ifindex);
        if (dev) {
                struct vport *vport = ovs_internal_dev_get_vport(dev);
                if (vport)
                        dp = vport->dp;
        }
        rcu_read_unlock();

        return dp;
}

/* Must be called with rcu_read_lock or RTNL lock. */
const char *ovs_dp_name(const struct datapath *dp)
{
        struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL);
        return vport->ops->get_name(vport);
}

static int get_dpifindex(struct datapath *dp)
{
        struct vport *local;
        int ifindex;

        rcu_read_lock();

        local = ovs_vport_rcu(dp, OVSP_LOCAL);
        if (local)
                ifindex = local->ops->get_ifindex(local);
        else
                ifindex = 0;

        rcu_read_unlock();

        return ifindex;
}

static size_t br_nlmsg_size(void)
{
        return NLMSG_ALIGN(sizeof(struct ifinfomsg))
               + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
               + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
               + nla_total_size(4) /* IFLA_MASTER */
               + nla_total_size(4) /* IFLA_MTU */
               + nla_total_size(1); /* IFLA_OPERSTATE */
}

/* Caller must hold RTNL lock. */
static int dp_fill_ifinfo(struct sk_buff *skb,
                          const struct vport *port,
                          int event, unsigned int flags)
{
        struct datapath *dp = port->dp;
        struct ifinfomsg *hdr;
        struct nlmsghdr *nlh;

        if (!port->ops->get_ifindex)
                return -ENODEV;

        nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        hdr = nlmsg_data(nlh);
        hdr->ifi_family = AF_BRIDGE;
        hdr->__ifi_pad = 0;
        hdr->ifi_type = ARPHRD_ETHER;
        hdr->ifi_index = port->ops->get_ifindex(port);
        hdr->ifi_flags = port->ops->get_dev_flags(port);
        hdr->ifi_change = 0;

        NLA_PUT_STRING(skb, IFLA_IFNAME, port->ops->get_name(port));
        NLA_PUT_U32(skb, IFLA_MASTER, get_dpifindex(dp));
        NLA_PUT_U32(skb, IFLA_MTU, port->ops->get_mtu(port));
#ifdef IFLA_OPERSTATE
        NLA_PUT_U8(skb, IFLA_OPERSTATE,
                   port->ops->is_running(port)
                        ? port->ops->get_operstate(port)
                        : IF_OPER_DOWN);
#endif

        NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, port->ops->get_addr(port));

        return nlmsg_end(skb, nlh);

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

/* Caller must hold RTNL lock. */
static void dp_ifinfo_notify(int event, struct vport *port)
{
        struct sk_buff *skb;
        int err;

        skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
        if (!skb) {
                err = -ENOBUFS;
                goto err;
        }

        err = dp_fill_ifinfo(skb, port, event, 0);
        if (err < 0) {
                if (err == -ENODEV) {
                        goto out;
                } else {
                        /* -EMSGSIZE implies BUG in br_nlmsg_size() */
                        WARN_ON(err == -EMSGSIZE);
                        goto err;
                }
        }

        rtnl_notify(skb, ovs_dp_get_net(port->dp), 0, RTNLGRP_LINK, NULL, GFP_KERNEL);

        return;
err:
        rtnl_set_sk_err(ovs_dp_get_net(port->dp), RTNLGRP_LINK, err);
out:
        kfree_skb(skb);
}

static void release_dp(struct kobject *kobj)
{
        struct datapath *dp = container_of(kobj, struct datapath, ifobj);
        kfree(dp);
}

static struct kobj_type dp_ktype = {
        .release = release_dp
};

static void destroy_dp_rcu(struct rcu_head *rcu)
{
        struct datapath *dp = container_of(rcu, struct datapath, rcu);

        ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
        free_percpu(dp->stats_percpu);
        release_net(ovs_dp_get_net(dp));
        kfree(dp->ports);
        kobject_put(&dp->ifobj);
}

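/* Map a port number to its bucket in the dp->ports hash table.  The
 * mask arithmetic assumes DP_VPORT_HASH_BUCKETS is a power of two. */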
static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
                                            u16 port_no)
{
        return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}

struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
        struct vport *vport;
        struct hlist_node *n;
        struct hlist_head *head;

        head = vport_hash_bucket(dp, port_no);
        hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) {
                if (vport->port_no == port_no)
                        return vport;
        }
        return NULL;
}

/* Called with RTNL lock and genl_lock. */
static struct vport *new_vport(const struct vport_parms *parms)
{
        struct vport *vport;

        vport = ovs_vport_add(parms);
        if (!IS_ERR(vport)) {
                struct datapath *dp = parms->dp;
                struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

                hlist_add_head_rcu(&vport->dp_hash_node, head);
                dp_ifinfo_notify(RTM_NEWLINK, vport);
        }
        return vport;
}

/* Called with RTNL lock. */
void ovs_dp_detach_port(struct vport *p)
{
        ASSERT_RTNL();

        if (p->port_no != OVSP_LOCAL)
                ovs_dp_sysfs_del_if(p);

        dp_ifinfo_notify(RTM_DELLINK, p);

        /* First drop references to device. */
        hlist_del_rcu(&p->dp_hash_node);

        /* Then destroy it. */
        ovs_vport_del(p);
}

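/* Fast-path receive handler for packets arriving on a vport: extract a
 * flow key from the packet, look it up in the flow table, and either
 * execute the matching flow's actions or hand the packet to userspace
 * via an OVS_PACKET_CMD_MISS upcall. */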
/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
        struct datapath *dp = p->dp;
        struct sw_flow *flow;
        struct dp_stats_percpu *stats;
        u64 *stats_counter;
        int error;

        stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

        if (!OVS_CB(skb)->flow) {
                struct sw_flow_key key;
                int key_len;

                /* Extract flow from 'skb' into 'key'. */
                error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
                if (unlikely(error)) {
                        kfree_skb(skb);
                        return;
                }

                /* Look up flow. */
                flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table),
                                           &key, key_len);
                if (unlikely(!flow)) {
                        struct dp_upcall_info upcall;

                        upcall.cmd = OVS_PACKET_CMD_MISS;
                        upcall.key = &key;
                        upcall.userdata = NULL;
                        upcall.pid = p->upcall_pid;
                        ovs_dp_upcall(dp, skb, &upcall);
                        consume_skb(skb);
                        stats_counter = &stats->n_missed;
                        goto out;
                }

                OVS_CB(skb)->flow = flow;
        }

        stats_counter = &stats->n_hit;
        ovs_flow_used(OVS_CB(skb)->flow, skb);
        ovs_execute_actions(dp, skb);

out:
        /* Update datapath statistics. */
        u64_stats_update_begin(&stats->sync);
        (*stats_counter)++;
        u64_stats_update_end(&stats->sync);
}

static struct genl_family dp_packet_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_PACKET_FAMILY,
        .version = OVS_PACKET_VERSION,
        .maxattr = OVS_PACKET_ATTR_MAX,
         SET_NETNSOK
};

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
                  const struct dp_upcall_info *upcall_info)
{
        struct dp_stats_percpu *stats;
        int dp_ifindex;
        int err;

        if (upcall_info->pid == 0) {
                err = -ENOTCONN;
                goto err;
        }

        dp_ifindex = get_dpifindex(dp);
        if (!dp_ifindex) {
                err = -ENODEV;
                goto err;
        }

        forward_ip_summed(skb, true);

        if (!skb_is_gso(skb))
                err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
        else
                err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
        if (err)
                goto err;

        return 0;

err:
        stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

        u64_stats_update_begin(&stats->sync);
        stats->n_lost++;
        u64_stats_update_end(&stats->sync);

        return err;
}

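/* Upcall path for GSO packets: segment the packet in software and queue
 * each resulting segment to userspace individually.  For UDP
 * fragmentation, the key extracted from the original packet described
 * only the first fragment, so later segments are re-marked as
 * OVS_FRAG_TYPE_LATER. */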
static int queue_gso_packets(struct net *net, int dp_ifindex,
                             struct sk_buff *skb,
                             const struct dp_upcall_info *upcall_info)
{
        struct dp_upcall_info later_info;
        struct sw_flow_key later_key;
        struct sk_buff *segs, *nskb;
        int err;

        segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
        if (IS_ERR(segs))
                return PTR_ERR(segs);

        /* Queue all of the segments. */
        skb = segs;
        do {
                err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
                if (err)
                        break;

                if (skb == segs && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
                        /* The initial flow key extracted by ovs_flow_extract()
                         * in this case is for a first fragment, so we need to
                         * properly mark later fragments.
                         */
                        later_key = *upcall_info->key;
                        later_key.ip.frag = OVS_FRAG_TYPE_LATER;

                        later_info = *upcall_info;
                        later_info.key = &later_key;
                        upcall_info = &later_info;
                }
        } while ((skb = skb->next));

        /* Free all of the segments. */
        skb = segs;
        do {
                nskb = skb->next;
                if (err)
                        kfree_skb(skb);
                else
                        consume_skb(skb);
        } while ((skb = nskb));
        return err;
}

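/* Build and unicast a single OVS_PACKET upcall message carrying the
 * flow key, the (checksummed) packet contents, and optional userdata,
 * addressed to the Netlink PID recorded in 'upcall_info'. */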
static int queue_userspace_packet(struct net *net, int dp_ifindex,
                                  struct sk_buff *skb,
                                  const struct dp_upcall_info *upcall_info)
{
        struct ovs_header *upcall;
        struct sk_buff *nskb = NULL;
        struct sk_buff *user_skb; /* to be queued to userspace */
        struct nlattr *nla;
        unsigned int len;
        int err;

        if (vlan_tx_tag_present(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return -ENOMEM;

                err = vlan_deaccel_tag(nskb);
                if (err)
                        return err;

                skb = nskb;
        }

        if (nla_attr_size(skb->len) > USHRT_MAX) {
                err = -EFBIG;
                goto out;
        }

        len = sizeof(struct ovs_header);
        len += nla_total_size(skb->len);
        len += nla_total_size(FLOW_BUFSIZE);
        if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
                len += nla_total_size(8);

        user_skb = genlmsg_new(len, GFP_ATOMIC);
        if (!user_skb) {
                err = -ENOMEM;
                goto out;
        }

        upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
                             0, upcall_info->cmd);
        upcall->dp_ifindex = dp_ifindex;

        nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
        ovs_flow_to_nlattrs(upcall_info->key, user_skb);
        nla_nest_end(user_skb, nla);

        if (upcall_info->userdata)
                nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
                            nla_get_u64(upcall_info->userdata));

        nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);

        skb_copy_and_csum_dev(skb, nla_data(nla));

        err = genlmsg_unicast(net, user_skb, upcall_info->pid);

out:
        kfree_skb(nskb);
        return err;
}

/* Called with genl_mutex. */
static int flush_flows(struct datapath *dp)
{
        struct flow_table *old_table;
        struct flow_table *new_table;

        old_table = genl_dereference(dp->table);
        new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
        if (!new_table)
                return -ENOMEM;

        rcu_assign_pointer(dp->table, new_table);

        ovs_flow_tbl_deferred_destroy(old_table);
        return 0;
}

static int validate_actions(const struct nlattr *attr,
                                const struct sw_flow_key *key, int depth);

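/* An OVS_ACTION_ATTR_SAMPLE action must carry a u32 probability and a
 * nested list of sub-actions, which is validated recursively;
 * validate_actions() bounds the recursion with SAMPLE_ACTION_DEPTH. */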
static int validate_sample(const struct nlattr *attr,
                                const struct sw_flow_key *key, int depth)
{
        const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
        const struct nlattr *probability, *actions;
        const struct nlattr *a;
        int rem;

        memset(attrs, 0, sizeof(attrs));
        nla_for_each_nested(a, attr, rem) {
                int type = nla_type(a);
                if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
                        return -EINVAL;
                attrs[type] = a;
        }
        if (rem)
                return -EINVAL;

        probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
        if (!probability || nla_len(probability) != sizeof(u32))
                return -EINVAL;

        actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
        if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
                return -EINVAL;
        return validate_actions(actions, key, depth + 1);
}

static int validate_set(const struct nlattr *a,
                        const struct sw_flow_key *flow_key)
{
        const struct nlattr *ovs_key = nla_data(a);
        int key_type = nla_type(ovs_key);

        /* There can be only one key in an action. */
        if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
                return -EINVAL;

        if (key_type > OVS_KEY_ATTR_MAX ||
            nla_len(ovs_key) != ovs_key_lens[key_type])
                return -EINVAL;

        switch (key_type) {
        const struct ovs_key_ipv4 *ipv4_key;

        case OVS_KEY_ATTR_PRIORITY:
        case OVS_KEY_ATTR_TUN_ID:
        case OVS_KEY_ATTR_ETHERNET:
                break;

        case OVS_KEY_ATTR_IPV4:
                if (flow_key->eth.type != htons(ETH_P_IP))
                        return -EINVAL;

                if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst)
                        return -EINVAL;

                ipv4_key = nla_data(ovs_key);
                if (ipv4_key->ipv4_proto != flow_key->ip.proto)
                        return -EINVAL;

                if (ipv4_key->ipv4_frag != flow_key->ip.frag)
                        return -EINVAL;

                break;

        case OVS_KEY_ATTR_TCP:
                if (flow_key->ip.proto != IPPROTO_TCP)
                        return -EINVAL;

                if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
                        return -EINVAL;

                break;

        case OVS_KEY_ATTR_UDP:
                if (flow_key->ip.proto != IPPROTO_UDP)
                        return -EINVAL;

                if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
                        return -EINVAL;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

static int validate_userspace(const struct nlattr *attr)
{
        static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
                [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
                [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
        };
        struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
        int error;

        error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
                                 attr, userspace_policy);
        if (error)
                return error;

        if (!a[OVS_USERSPACE_ATTR_PID] ||
            !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
                return -EINVAL;

        return 0;
}

static int validate_actions(const struct nlattr *attr,
                                const struct sw_flow_key *key, int depth)
{
        const struct nlattr *a;
        int rem, err;

        if (depth >= SAMPLE_ACTION_DEPTH)
                return -EOVERFLOW;

        nla_for_each_nested(a, attr, rem) {
                /* Expected argument lengths, (u32)-1 for variable length. */
                static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
                        [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
                        [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
                        [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
                        [OVS_ACTION_ATTR_POP_VLAN] = 0,
                        [OVS_ACTION_ATTR_SET] = (u32)-1,
                        [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
                };
                const struct ovs_action_push_vlan *vlan;
                int type = nla_type(a);

                if (type > OVS_ACTION_ATTR_MAX ||
                    (action_lens[type] != nla_len(a) &&
                     action_lens[type] != (u32)-1))
                        return -EINVAL;

                switch (type) {
                case OVS_ACTION_ATTR_UNSPEC:
                        return -EINVAL;

                case OVS_ACTION_ATTR_USERSPACE:
                        err = validate_userspace(a);
                        if (err)
                                return err;
                        break;

                case OVS_ACTION_ATTR_OUTPUT:
                        if (nla_get_u32(a) >= DP_MAX_PORTS)
                                return -EINVAL;
                        break;

                case OVS_ACTION_ATTR_POP_VLAN:
                        break;

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        vlan = nla_data(a);
                        if (vlan->vlan_tpid != htons(ETH_P_8021Q))
                                return -EINVAL;
                        if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
                                return -EINVAL;
                        break;

                case OVS_ACTION_ATTR_SET:
                        err = validate_set(a, key);
                        if (err)
                                return err;
                        break;

                case OVS_ACTION_ATTR_SAMPLE:
                        err = validate_sample(a, key, depth);
                        if (err)
                                return err;
                        break;

                default:
                        return -EINVAL;
                }
        }

        if (rem > 0)
                return -EINVAL;

        return 0;
}

static void clear_stats(struct sw_flow *flow)
{
        flow->used = 0;
        flow->tcp_flags = 0;
        flow->packet_count = 0;
        flow->byte_count = 0;
}

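/* OVS_PACKET_CMD_EXECUTE handler: userspace supplies a raw packet, its
 * flow key metadata, and a list of actions; we wrap the packet in a
 * temporary sw_flow and execute the actions as if it had been received
 * on a port. */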
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
        struct ovs_header *ovs_header = info->userhdr;
        struct nlattr **a = info->attrs;
        struct sw_flow_actions *acts;
        struct sk_buff *packet;
        struct sw_flow *flow;
        struct datapath *dp;
        struct ethhdr *eth;
        int len;
        int err;
        int key_len;

        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
            !a[OVS_PACKET_ATTR_ACTIONS] ||
            nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
                goto err;

        len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
        packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
        err = -ENOMEM;
        if (!packet)
                goto err;
        skb_reserve(packet, NET_IP_ALIGN);

        memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);

        skb_reset_mac_header(packet);
        eth = eth_hdr(packet);

        /* Normally, setting the skb 'protocol' field would be handled by a
         * call to eth_type_trans(), but it assumes there's a sending
         * device, which we may not have. */
        if (ntohs(eth->h_proto) >= 1536)
                packet->protocol = eth->h_proto;
        else
                packet->protocol = htons(ETH_P_802_2);

        /* Build an sw_flow for sending this packet. */
        flow = ovs_flow_alloc();
        err = PTR_ERR(flow);
        if (IS_ERR(flow))
                goto err_kfree_skb;

        err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
        if (err)
                goto err_flow_put;

        err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority,
                                             &flow->key.phy.in_port,
                                             &flow->key.phy.tun_id,
                                             a[OVS_PACKET_ATTR_KEY]);
        if (err)
                goto err_flow_put;

        err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0);
        if (err)
                goto err_flow_put;

        flow->hash = ovs_flow_hash(&flow->key, key_len);

        acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
        err = PTR_ERR(acts);
        if (IS_ERR(acts))
                goto err_flow_put;
        rcu_assign_pointer(flow->sf_acts, acts);

        OVS_CB(packet)->flow = flow;
        packet->priority = flow->key.phy.priority;

        rcu_read_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto err_unlock;

        local_bh_disable();
        err = ovs_execute_actions(dp, packet);
        local_bh_enable();
        rcu_read_unlock();

        ovs_flow_put(flow);
        return err;

err_unlock:
        rcu_read_unlock();
err_flow_put:
        ovs_flow_put(flow);
err_kfree_skb:
        kfree_skb(packet);
err:
        return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
        [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static struct genl_ops dp_packet_genl_ops[] = {
        { .cmd = OVS_PACKET_CMD_EXECUTE,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = packet_policy,
          .doit = ovs_packet_cmd_execute
        }
};

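/* Aggregate the per-CPU datapath counters into 'stats', snapshotting
 * each CPU's counters under its u64_stats seqcount so that 64-bit
 * values read consistently on 32-bit systems. */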
static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
        int i;
        struct flow_table *table = genl_dereference(dp->table);

        stats->n_flows = ovs_flow_tbl_count(table);

        stats->n_hit = stats->n_missed = stats->n_lost = 0;
        for_each_possible_cpu(i) {
                const struct dp_stats_percpu *percpu_stats;
                struct dp_stats_percpu local_stats;
                unsigned int start;

                percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

                do {
                        start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
                        local_stats = *percpu_stats;
                } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

                stats->n_hit += local_stats.n_hit;
                stats->n_missed += local_stats.n_missed;
                stats->n_lost += local_stats.n_lost;
        }
}

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
};

static struct genl_family dp_flow_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_FLOW_FAMILY,
        .version = OVS_FLOW_VERSION,
        .maxattr = OVS_FLOW_ATTR_MAX,
         SET_NETNSOK
};

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
        .name = OVS_FLOW_MCGROUP
};

/* Called with genl_lock. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
                                  struct sk_buff *skb, u32 pid,
                                  u32 seq, u32 flags, u8 cmd)
{
        const int skb_orig_len = skb->len;
        const struct sw_flow_actions *sf_acts;
        struct ovs_flow_stats stats;
        struct ovs_header *ovs_header;
        struct nlattr *nla;
        unsigned long used;
        u8 tcp_flags;
        int err;

        sf_acts = rcu_dereference_protected(flow->sf_acts,
                                            lockdep_genl_is_held());

        ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = get_dpifindex(dp);

        nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
        if (!nla)
                goto nla_put_failure;
        err = ovs_flow_to_nlattrs(&flow->key, skb);
        if (err)
                goto error;
        nla_nest_end(skb, nla);

        spin_lock_bh(&flow->lock);
        used = flow->used;
        stats.n_packets = flow->packet_count;
        stats.n_bytes = flow->byte_count;
        tcp_flags = flow->tcp_flags;
        spin_unlock_bh(&flow->lock);

        if (used)
                NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used));

        if (stats.n_packets)
                NLA_PUT(skb, OVS_FLOW_ATTR_STATS,
                        sizeof(struct ovs_flow_stats), &stats);

        if (tcp_flags)
                NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);

        /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
         * this is the first flow to be dumped into 'skb'.  This is unusual for
         * Netlink but individual action lists can be longer than
         * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
         * The userspace caller can always fetch the actions separately if it
         * really wants them.  (Most userspace callers in fact don't care.)
         *
         * This can only fail for dump operations because the skb is always
         * properly sized for single flows.
         */
        err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
                      sf_acts->actions);
        if (err < 0 && skb_orig_len)
                goto error;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}

static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
{
        const struct sw_flow_actions *sf_acts;
        int len;

        sf_acts = rcu_dereference_protected(flow->sf_acts,
                                            lockdep_genl_is_held());

        /* OVS_FLOW_ATTR_KEY */
        len = nla_total_size(FLOW_BUFSIZE);
        /* OVS_FLOW_ATTR_ACTIONS */
        len += nla_total_size(sf_acts->actions_len);
        /* OVS_FLOW_ATTR_STATS */
        len += nla_total_size(sizeof(struct ovs_flow_stats));
        /* OVS_FLOW_ATTR_TCP_FLAGS */
        len += nla_total_size(1);
        /* OVS_FLOW_ATTR_USED */
        len += nla_total_size(8);

        len += NLMSG_ALIGN(sizeof(struct ovs_header));

        return genlmsg_new(len, GFP_KERNEL);
}

static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
                                               struct datapath *dp,
                                               u32 pid, u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = ovs_flow_cmd_alloc_info(flow);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
        BUG_ON(retval < 0);
        return skb;
}

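/* Shared .doit handler for OVS_FLOW_CMD_NEW and OVS_FLOW_CMD_SET:
 * inserts a new flow (NEW only, expanding the table if needed) or
 * updates the actions, and optionally clears the stats, of an existing
 * one, then multicasts the resulting flow to listeners. */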
static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sw_flow *flow;
        struct sk_buff *reply;
        struct datapath *dp;
        struct flow_table *table;
        int error;
        int key_len;

        /* Extract key. */
        error = -EINVAL;
        if (!a[OVS_FLOW_ATTR_KEY])
                goto error;
        error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (error)
                goto error;

        /* Validate actions. */
        if (a[OVS_FLOW_ATTR_ACTIONS]) {
                error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0);
                if (error)
                        goto error;
        } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
                error = -EINVAL;
                goto error;
        }

        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        error = -ENODEV;
        if (!dp)
                goto error;

        table = genl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow) {
                struct sw_flow_actions *acts;

                /* Bail out if we're not allowed to create a new flow. */
                error = -ENOENT;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
                        goto error;

                /* Expand table, if necessary, to make room. */
                if (ovs_flow_tbl_need_to_expand(table)) {
                        struct flow_table *new_table;

                        new_table = ovs_flow_tbl_expand(table);
                        if (!IS_ERR(new_table)) {
                                rcu_assign_pointer(dp->table, new_table);
                                ovs_flow_tbl_deferred_destroy(table);
                                table = genl_dereference(dp->table);
                        }
                }

                /* Allocate flow. */
                flow = ovs_flow_alloc();
                if (IS_ERR(flow)) {
                        error = PTR_ERR(flow);
                        goto error;
                }
                flow->key = key;
                clear_stats(flow);

                /* Obtain actions. */
                acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
                error = PTR_ERR(acts);
                if (IS_ERR(acts))
                        goto error_free_flow;
                rcu_assign_pointer(flow->sf_acts, acts);

                /* Put flow in bucket. */
                flow->hash = ovs_flow_hash(&key, key_len);
                ovs_flow_tbl_insert(table, flow);

                reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
                                                info->snd_seq,
                                                OVS_FLOW_CMD_NEW);
        } else {
                /* We found a matching flow. */
                struct sw_flow_actions *old_acts;
                struct nlattr *acts_attrs;

                /* Bail out if we're not allowed to modify an existing flow.
                 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
                 * because Generic Netlink treats the latter as a dump
                 * request.  We also accept NLM_F_EXCL in case that bug ever
                 * gets fixed.
                 */
                error = -EEXIST;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
                    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
                        goto error;

                /* Update actions. */
                old_acts = rcu_dereference_protected(flow->sf_acts,
                                                     lockdep_genl_is_held());
                acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
                if (acts_attrs &&
                   (old_acts->actions_len != nla_len(acts_attrs) ||
                   memcmp(old_acts->actions, nla_data(acts_attrs),
                          old_acts->actions_len))) {
                        struct sw_flow_actions *new_acts;

                        new_acts = ovs_flow_actions_alloc(acts_attrs);
                        error = PTR_ERR(new_acts);
                        if (IS_ERR(new_acts))
                                goto error;

                        rcu_assign_pointer(flow->sf_acts, new_acts);
                        ovs_flow_deferred_free_acts(old_acts);
                }

                reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
                                                info->snd_seq, OVS_FLOW_CMD_NEW);

                /* Clear stats. */
                if (a[OVS_FLOW_ATTR_CLEAR]) {
                        spin_lock_bh(&flow->lock);
                        clear_stats(flow);
                        spin_unlock_bh(&flow->lock);
                }
        }

        if (!IS_ERR(reply))
                genl_notify(reply, genl_info_net(info), info->snd_pid,
                            ovs_dp_flow_multicast_group.id, info->nlhdr,
                            GFP_KERNEL);
        else
                netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
                                ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
        return 0;

error_free_flow:
        ovs_flow_put(flow);
error:
        return error;
}

static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct flow_table *table;
        int err;
        int key_len;

        if (!a[OVS_FLOW_ATTR_KEY])
                return -EINVAL;
        err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (err)
                return err;

        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        table = genl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow)
                return -ENOENT;

        reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
                                        info->snd_seq, OVS_FLOW_CMD_NEW);
        if (IS_ERR(reply))
                return PTR_ERR(reply);

        return genlmsg_reply(reply, info);
}

static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct flow_table *table;
        int err;
        int key_len;

        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        if (!a[OVS_FLOW_ATTR_KEY])
                return flush_flows(dp);

        err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (err)
                return err;

        table = genl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow)
                return -ENOENT;

        reply = ovs_flow_cmd_alloc_info(flow);
        if (!reply)
                return -ENOMEM;

        ovs_flow_tbl_remove(table, flow);

        err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
                                     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
        BUG_ON(err < 0);

        ovs_flow_deferred_free(flow);

        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
        return 0;
}

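/* Flow dump callback.  The current table bucket and offset within the
 * bucket are carried across calls in cb->args[0] and cb->args[1] so
 * each invocation resumes where the previous one stopped. */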
static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct datapath *dp;
        struct flow_table *table;

        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        table = genl_dereference(dp->table);

        for (;;) {
                struct sw_flow *flow;
                u32 bucket, obj;

                bucket = cb->args[0];
                obj = cb->args[1];
                flow = ovs_flow_tbl_next(table, &bucket, &obj);
                if (!flow)
                        break;

                if (ovs_flow_cmd_fill_info(flow, dp, skb,
                                           NETLINK_CB(cb->skb).pid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           OVS_FLOW_CMD_NEW) < 0)
                        break;

                cb->args[0] = bucket;
                cb->args[1] = obj;
        }
        return skb->len;
}

static struct genl_ops dp_flow_genl_ops[] = {
        { .cmd = OVS_FLOW_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_new_or_set
        },
        { .cmd = OVS_FLOW_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_del
        },
        { .cmd = OVS_FLOW_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_get,
          .dumpit = ovs_flow_cmd_dump
        },
        { .cmd = OVS_FLOW_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_new_or_set,
        },
};

static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
#ifdef HAVE_NLA_NUL_STRING
        [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
#endif
        [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
};

static struct genl_family dp_datapath_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_DATAPATH_FAMILY,
        .version = OVS_DATAPATH_VERSION,
        .maxattr = OVS_DP_ATTR_MAX,
         SET_NETNSOK
};

static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
        .name = OVS_DATAPATH_MCGROUP
};

static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
                                u32 pid, u32 seq, u32 flags, u8 cmd)
{
        struct ovs_header *ovs_header;
        struct ovs_dp_stats dp_stats;
        int err;

        ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                goto error;

        ovs_header->dp_ifindex = get_dpifindex(dp);

        rcu_read_lock();
        err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
        rcu_read_unlock();
        if (err)
                goto nla_put_failure;

        get_dp_stats(dp, &dp_stats);
        NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats);

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        genlmsg_cancel(skb, ovs_header);
error:
        return -EMSGSIZE;
}

static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
                                             u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
        if (retval < 0) {
                kfree_skb(skb);
                return ERR_PTR(retval);
        }
        return skb;
}

static int ovs_dp_cmd_validate(struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
        return CHECK_NUL_STRING(a[OVS_DP_ATTR_NAME], IFNAMSIZ - 1);
}

/* Called with genl_mutex and optionally with RTNL lock also. */
static struct datapath *lookup_datapath(struct net *net,
                                        struct ovs_header *ovs_header,
                                        struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
        struct datapath *dp;

        if (!a[OVS_DP_ATTR_NAME])
                dp = get_dp(net, ovs_header->dp_ifindex);
        else {
                struct vport *vport;

                rcu_read_lock();
                vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
                dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
                rcu_read_unlock();
        }
        return dp ? dp : ERR_PTR(-ENODEV);
}

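/* OVS_DP_CMD_NEW handler: allocate the datapath with its flow table,
 * per-CPU stats, and vport hash table, then create the OVSP_LOCAL
 * internal vport, which gives the datapath its name and ifindex. */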
1368 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1369 {
1370         struct nlattr **a = info->attrs;
1371         struct vport_parms parms;
1372         struct sk_buff *reply;
1373         struct datapath *dp;
1374         struct vport *vport;
1375         struct ovs_net *ovs_net;
1376         int err, i;
1377
1378         err = -EINVAL;
1379         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1380                 goto err;
1381
1382         err = ovs_dp_cmd_validate(a);
1383         if (err)
1384                 goto err;
1385
1386         rtnl_lock();
1387
1388         err = -ENOMEM;
1389         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1390         if (dp == NULL)
1391                 goto err_unlock_rtnl;
1392
1393         /* Initialize kobject for bridge.  This will be added as
1394          * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
1395         dp->ifobj.kset = NULL;
1396         kobject_init(&dp->ifobj, &dp_ktype);
1397
1398         /* Allocate table. */
1399         err = -ENOMEM;
1400         rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
1401         if (!dp->table)
1402                 goto err_free_dp;
1403
1404         dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1405         if (!dp->stats_percpu) {
1406                 err = -ENOMEM;
1407                 goto err_destroy_table;
1408         }
1409         ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
1410
1411         dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1412                             GFP_KERNEL);
1413         if (!dp->ports) {
1414                 err = -ENOMEM;
1415                 goto err_destroy_percpu;
1416         }
1417
1418         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1419                 INIT_HLIST_HEAD(&dp->ports[i]);
1420
1421         /* Set up our datapath device. */
1422         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1423         parms.type = OVS_VPORT_TYPE_INTERNAL;
1424         parms.options = NULL;
1425         parms.dp = dp;
1426         parms.port_no = OVSP_LOCAL;
1427         parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
1428
1429         vport = new_vport(&parms);
1430         if (IS_ERR(vport)) {
1431                 err = PTR_ERR(vport);
1432                 if (err == -EBUSY)
1433                         err = -EEXIST;
1434
1435                 goto err_destroy_ports_array;
1436         }
1437
1438         reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
1439                                       info->snd_seq, OVS_DP_CMD_NEW);
1440         err = PTR_ERR(reply);
1441         if (IS_ERR(reply))
1442                 goto err_destroy_local_port;
1443
1444         ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1445         list_add_tail(&dp->list_node, &ovs_net->dps);
1446         ovs_dp_sysfs_add_dp(dp);
1447
1448         rtnl_unlock();
1449
1450         genl_notify(reply, genl_info_net(info), info->snd_pid,
1451                     ovs_dp_datapath_multicast_group.id, info->nlhdr,
1452                     GFP_KERNEL);
1453         return 0;
1454
1455 err_destroy_local_port:
1456         ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
1457 err_destroy_ports_array:
1458         kfree(dp->ports);
1459 err_destroy_percpu:
1460         free_percpu(dp->stats_percpu);
1461 err_destroy_table:
1462         ovs_flow_tbl_destroy(genl_dereference(dp->table));
1463 err_free_dp:
1464         kfree(dp);
1465 err_unlock_rtnl:
1466         rtnl_unlock();
1467 err:
1468         return err;
1469 }
1470
1471 /* Called with genl_mutex. */
1472 static void __dp_destroy(struct datapath *dp)
1473 {
1474         int i;
1475
1476         rtnl_lock();
1477
1478         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1479                 struct vport *vport;
1480                 struct hlist_node *node, *n;
1481
1482                 hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node)
1483                         if (vport->port_no != OVSP_LOCAL)
1484                                 ovs_dp_detach_port(vport);
1485         }
1486
1487         ovs_dp_sysfs_del_dp(dp);
1488         list_del(&dp->list_node);
1489         ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
1490
1491         /* rtnl_unlock() will wait until all the references to devices that
1492          * are pending unregistration have been dropped.  We do it here to
1493          * ensure that any internal devices (which contain DP pointers) are
1494          * fully destroyed before freeing the datapath.
1495          */
1496         rtnl_unlock();
1497
1498         call_rcu(&dp->rcu, destroy_dp_rcu);
1499 }
1500
1501 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1502 {
1503         struct sk_buff *reply;
1504         struct datapath *dp;
1505         int err;
1506
1507         err = ovs_dp_cmd_validate(info->attrs);
1508         if (err)
1509                 return err;
1510
1511         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1512         err = PTR_ERR(dp);
1513         if (IS_ERR(dp))
1514                 return err;
1515
1516         reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
1517                                       info->snd_seq, OVS_DP_CMD_DEL);
1518         err = PTR_ERR(reply);
1519         if (IS_ERR(reply))
1520                 return err;
1521
1522         __dp_destroy(dp);
1523
1524         genl_notify(reply, genl_info_net(info), info->snd_pid,
1525                     ovs_dp_datapath_multicast_group.id, info->nlhdr,
1526                     GFP_KERNEL);
1527
1528         return 0;
1529 }
1530
1531 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1532 {
1533         struct sk_buff *reply;
1534         struct datapath *dp;
1535         int err;
1536
1537         err = ovs_dp_cmd_validate(info->attrs);
1538         if (err)
1539                 return err;
1540
1541         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1542         if (IS_ERR(dp))
1543                 return PTR_ERR(dp);
1544
1545         reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
1546                                       info->snd_seq, OVS_DP_CMD_NEW);
1547         if (IS_ERR(reply)) {
1548                 err = PTR_ERR(reply);
1549                 netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
1550                                 ovs_dp_datapath_multicast_group.id, err);
1551                 return 0;
1552         }
1553
1554         genl_notify(reply, genl_info_net(info), info->snd_pid,
1555                     ovs_dp_datapath_multicast_group.id, info->nlhdr,
1556                     GFP_KERNEL);
1557
1558         return 0;
1559 }
1560
static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;
        int err;

        err = ovs_dp_cmd_validate(info->attrs);
        if (err)
                return err;

        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp))
                return PTR_ERR(dp);

        reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
        if (IS_ERR(reply))
                return PTR_ERR(reply);

        return genlmsg_reply(reply, info);
}

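/* Dump callback for OVS_DP_CMD_GET.  Netlink dumps are restartable, so
 * cb->args[0] records how many datapaths have been emitted; the next
 * invocation skips that many entries and continues filling a fresh skb
 * from there.
 */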
static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
        struct datapath *dp;
        int skip = cb->args[0];
        int i = 0;

        list_for_each_entry(dp, &ovs_net->dps, list_node) {
                if (i >= skip &&
                    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                         OVS_DP_CMD_NEW) < 0)
                        break;
                i++;
        }

        cb->args[0] = i;

        return skb->len;
}

static struct genl_ops dp_datapath_genl_ops[] = {
        { .cmd = OVS_DP_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_new
        },
        { .cmd = OVS_DP_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_del
        },
        { .cmd = OVS_DP_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_get,
          .dumpit = ovs_dp_cmd_dump
        },
        { .cmd = OVS_DP_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_set,
        },
};

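/* Attribute policy for vport commands.  On kernels that lack
 * NLA_NUL_STRING (no HAVE_NLA_NUL_STRING), the name cannot be
 * policy-checked here; ovs_vport_cmd_validate() performs the
 * NUL-termination check instead, and the fixed-size attributes fall
 * back to a minimum-length check.
 */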
static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
#ifdef HAVE_NLA_NUL_STRING
        [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
        [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
        [OVS_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
#else
        [OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct ovs_vport_stats) },
        [OVS_VPORT_ATTR_ADDRESS] = { .minlen = ETH_ALEN },
#endif
        [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

static struct genl_family dp_vport_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_VPORT_FAMILY,
        .version = OVS_VPORT_VERSION,
        .maxattr = OVS_VPORT_ATTR_MAX,
        SET_NETNSOK
};

struct genl_multicast_group ovs_dp_vport_multicast_group = {
        .name = OVS_VPORT_MCGROUP
};

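/* Note on the fill function below: the NLA_PUT*() macros expand to
 * "goto nla_put_failure" when the skb runs out of room, which is what
 * makes the otherwise unreferenced nla_put_failure label reachable.
 */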
/* Called with RTNL lock or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                                   u32 pid, u32 seq, u32 flags, u8 cmd)
{
        struct ovs_header *ovs_header;
        struct ovs_vport_stats vport_stats;
        int err;

        ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = get_dpifindex(vport->dp);

        NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
        NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type);
        NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport));
        NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);

        ovs_vport_get_stats(vport, &vport_stats);
        NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
                &vport_stats);

        NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN,
                vport->ops->get_addr(vport));

        err = ovs_vport_get_options(vport, skb);
        if (err == -EMSGSIZE)
                goto error;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}

/* Called with RTNL lock or RCU read lock. */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
                                         u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
        if (retval < 0) {
                kfree_skb(skb);
                return ERR_PTR(retval);
        }
        return skb;
}

static int ovs_vport_cmd_validate(struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
        return CHECK_NUL_STRING(a[OVS_VPORT_ATTR_NAME], IFNAMSIZ - 1);
}

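/* Resolve a vport either by name (OVS_VPORT_ATTR_NAME) or by datapath
 * ifindex plus port number (OVS_VPORT_ATTR_PORT_NO); requests carrying
 * neither attribute are rejected with -EINVAL.  When both a name and a
 * dp_ifindex are given, the ifindex must match the vport's datapath.
 */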
/* Called with RTNL lock or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
                                  struct ovs_header *ovs_header,
                                  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
        struct datapath *dp;
        struct vport *vport;

        if (a[OVS_VPORT_ATTR_NAME]) {
                vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
                if (!vport)
                        return ERR_PTR(-ENODEV);
                if (ovs_header->dp_ifindex &&
                    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
                        return ERR_PTR(-ENODEV);
                return vport;
        } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
                u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

                if (port_no >= DP_MAX_PORTS)
                        return ERR_PTR(-EFBIG);

                dp = get_dp(net, ovs_header->dp_ifindex);
                if (!dp)
                        return ERR_PTR(-ENODEV);

                vport = ovs_vport_rtnl_rcu(dp, port_no);
                if (!vport)
                        return ERR_PTR(-ENOENT);
                return vport;
        } else
                return ERR_PTR(-EINVAL);
}

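/* Apply the optional OVS_VPORT_ATTR_STATS and OVS_VPORT_ATTR_ADDRESS
 * attributes to an existing vport.  Shared by the NEW and SET handlers.
 */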
/* Called with RTNL lock. */
static int change_vport(struct vport *vport,
                        struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
        int err = 0;

        if (a[OVS_VPORT_ATTR_STATS])
                ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));

        if (a[OVS_VPORT_ATTR_ADDRESS])
                err = ovs_vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS]));

        return err;
}

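/* Genl handler for OVS_VPORT_CMD_NEW.  OVS_VPORT_ATTR_NAME,
 * OVS_VPORT_ATTR_TYPE and OVS_VPORT_ATTR_UPCALL_PID are mandatory;
 * OVS_VPORT_ATTR_PORT_NO is optional and, when absent, the lowest free
 * port number above OVSP_LOCAL is assigned by scanning upward.
 *
 * Illustrative only (not part of this file): with libnl, a minimal
 * request for this handler would be built roughly as
 *
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family_id,
 *		    sizeof(struct ovs_header), 0,
 *		    OVS_VPORT_CMD_NEW, OVS_VPORT_VERSION);
 *	nla_put_string(msg, OVS_VPORT_ATTR_NAME, "eth1");
 *	nla_put_u32(msg, OVS_VPORT_ATTR_TYPE, OVS_VPORT_TYPE_NETDEV);
 *	nla_put_u32(msg, OVS_VPORT_ATTR_UPCALL_PID, upcall_pid);
 *
 * where family_id, "eth1" and upcall_pid are placeholders.
 */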
static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct vport_parms parms;
        struct sk_buff *reply;
        struct vport *vport;
        struct datapath *dp;
        u32 port_no;
        int err;

        err = -EINVAL;
        if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
            !a[OVS_VPORT_ATTR_UPCALL_PID])
                goto exit;

        err = ovs_vport_cmd_validate(a);
        if (err)
                goto exit;

        rtnl_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto exit_unlock;

        if (a[OVS_VPORT_ATTR_PORT_NO]) {
                port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

                err = -EFBIG;
                if (port_no >= DP_MAX_PORTS)
                        goto exit_unlock;

                vport = ovs_vport_rtnl(dp, port_no);
                err = -EBUSY;
                if (vport)
                        goto exit_unlock;
        } else {
                for (port_no = 1; ; port_no++) {
                        if (port_no >= DP_MAX_PORTS) {
                                err = -EFBIG;
                                goto exit_unlock;
                        }
                        vport = ovs_vport_rtnl(dp, port_no);
                        if (!vport)
                                break;
                }
        }

        parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
        parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
        parms.options = a[OVS_VPORT_ATTR_OPTIONS];
        parms.dp = dp;
        parms.port_no = port_no;
        parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

        vport = new_vport(&parms);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        ovs_dp_sysfs_add_if(vport);

        err = change_vport(vport, a);
        if (!err) {
                reply = ovs_vport_cmd_build_info(vport, info->snd_pid,
                                                 info->snd_seq,
                                                 OVS_VPORT_CMD_NEW);
                if (IS_ERR(reply))
                        err = PTR_ERR(reply);
        }
        if (err) {
                ovs_dp_detach_port(vport);
                goto exit_unlock;
        }
        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
        rtnl_unlock();
exit:
        return err;
}

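/* Genl handler for OVS_VPORT_CMD_SET.  A vport's type is fixed at
 * creation and may not be changed; options, stats, Ethernet address
 * and upcall PID may.  As with datapath set, a failure to build the
 * notification is reported to listeners via netlink_set_err() while
 * the requester still sees success.
 */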
static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        err = ovs_vport_cmd_validate(a);
        if (err)
                goto exit;

        rtnl_lock();
        vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        err = 0;
        if (a[OVS_VPORT_ATTR_TYPE] &&
            nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
                err = -EINVAL;

        if (!err && a[OVS_VPORT_ATTR_OPTIONS])
                err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
        if (!err)
                err = change_vport(vport, a);
        if (!err && a[OVS_VPORT_ATTR_UPCALL_PID])
                vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

        reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
                                         OVS_VPORT_CMD_NEW);
        if (IS_ERR(reply)) {
                netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
                                ovs_dp_vport_multicast_group.id,
                                PTR_ERR(reply));
                /* The error has been reported to listeners above; report
                 * success to the requester, taking care to release RTNL
                 * on the way out. */
                err = 0;
                goto exit_unlock;
        }

        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
        rtnl_unlock();
exit:
        return err;
}

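/* Genl handler for OVS_VPORT_CMD_DEL.  The local port (OVSP_LOCAL)
 * cannot be detached this way; it only goes away when the datapath
 * itself is destroyed.
 */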
static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        err = ovs_vport_cmd_validate(a);
        if (err)
                goto exit;

        rtnl_lock();
        vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        if (vport->port_no == OVSP_LOCAL) {
                err = -EINVAL;
                goto exit_unlock;
        }

        reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
                                         OVS_VPORT_CMD_DEL);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock;

        ovs_dp_detach_port(vport);

        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
        rtnl_unlock();
exit:
        return err;
}

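/* Genl handler for OVS_VPORT_CMD_GET.  Read-only, so it runs under
 * rcu_read_lock() rather than RTNL; accordingly the reply skb is
 * allocated with GFP_ATOMIC in ovs_vport_cmd_build_info().
 */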
static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        err = ovs_vport_cmd_validate(a);
        if (err)
                goto exit;

        rcu_read_lock();
        vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
                                         OVS_VPORT_CMD_NEW);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock;

        rcu_read_unlock();

        return genlmsg_reply(reply, info);

exit_unlock:
        rcu_read_unlock();
exit:
        return err;
}

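/* Dump callback for OVS_VPORT_CMD_GET.  The resume cursor is two
 * levels deep: cb->args[0] is the hash bucket to continue from and
 * cb->args[1] is the number of vports already dumped within that
 * bucket.
 */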
static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct datapath *dp;
        int bucket = cb->args[0], skip = cb->args[1];
        int i, j = 0;

        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        rcu_read_lock();
        for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
                struct vport *vport;
                struct hlist_node *n;

                j = 0;
                hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) {
                        if (j >= skip &&
                            ovs_vport_cmd_fill_info(vport, skb,
                                                    NETLINK_CB(cb->skb).pid,
                                                    cb->nlh->nlmsg_seq,
                                                    NLM_F_MULTI,
                                                    OVS_VPORT_CMD_NEW) < 0)
                                goto out;

                        j++;
                }
                skip = 0;
        }
out:
        rcu_read_unlock();

        cb->args[0] = i;
        cb->args[1] = j;

        return skb->len;
}

static struct genl_ops dp_vport_genl_ops[] = {
        { .cmd = OVS_VPORT_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_new
        },
        { .cmd = OVS_VPORT_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_del
        },
        { .cmd = OVS_VPORT_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_get,
          .dumpit = ovs_vport_cmd_dump
        },
        { .cmd = OVS_VPORT_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_set,
        },
};

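/* Binds each genl family to its ops table and optional multicast group
 * so that registration and cleanup can be table-driven.
 */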
struct genl_family_and_ops {
        struct genl_family *family;
        struct genl_ops *ops;
        int n_ops;
        struct genl_multicast_group *group;
};

static const struct genl_family_and_ops dp_genl_families[] = {
        { &dp_datapath_genl_family,
          dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
          &ovs_dp_datapath_multicast_group },
        { &dp_vport_genl_family,
          dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
          &ovs_dp_vport_multicast_group },
        { &dp_flow_genl_family,
          dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
          &ovs_dp_flow_multicast_group },
        { &dp_packet_genl_family,
          dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
          NULL },
};

static void dp_unregister_genl(int n_families)
{
        int i;

        for (i = 0; i < n_families; i++)
                genl_unregister_family(dp_genl_families[i].family);
}

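/* Register every family in dp_genl_families along with its multicast
 * group, unwinding the families registered so far on failure.
 * Unregistering a family also drops any multicast groups registered
 * under it, so the rollback need not handle groups separately.
 */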
static int dp_register_genl(void)
{
        int n_registered;
        int err;
        int i;

        n_registered = 0;
        for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
                const struct genl_family_and_ops *f = &dp_genl_families[i];

                err = genl_register_family_with_ops(f->family, f->ops,
                                                    f->n_ops);
                if (err)
                        goto error;
                n_registered++;

                if (f->group) {
                        err = genl_register_mc_group(f->family, f->group);
                        if (err)
                                goto error;
                }
        }

        return 0;

error:
        dp_unregister_genl(n_registered);
        return err;
}

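/* Periodically replace each datapath's flow table with a rehashed
 * copy; the old table is freed via RCU once readers are done.  Runs
 * through genl_exec() so the table pointer is updated under the same
 * lock that writers hold (hence genl_dereference()), and takes RTNL
 * for the namespace and datapath walk.
 */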
static int __rehash_flow_table(void *dummy)
{
        struct datapath *dp;
        struct net *net;

        rtnl_lock();
        for_each_net(net) {
                struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

                list_for_each_entry(dp, &ovs_net->dps, list_node) {
                        struct flow_table *old_table = genl_dereference(dp->table);
                        struct flow_table *new_table;

                        new_table = ovs_flow_tbl_rehash(old_table);
                        if (!IS_ERR(new_table)) {
                                rcu_assign_pointer(dp->table, new_table);
                                ovs_flow_tbl_deferred_destroy(old_table);
                        }
                }
        }
        rtnl_unlock();
        return 0;
}

static void rehash_flow_table(struct work_struct *work)
{
        genl_exec(__rehash_flow_table, NULL);
        schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
}

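/* genl_exec() callback used on net-namespace exit: destroy every
 * datapath remaining in the namespace.
 */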
static int dp_destroy_all(void *data)
{
        struct datapath *dp, *dp_next;
        struct ovs_net *ovs_net = data;

        list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
                __dp_destroy(dp);

        return 0;
}

static int __net_init ovs_init_net(struct net *net)
{
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

        INIT_LIST_HEAD(&ovs_net->dps);
        return 0;
}

static void __net_exit ovs_exit_net(struct net *net)
{
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

        genl_exec(dp_destroy_all, ovs_net);
}

static struct pernet_operations ovs_net_ops = {
        .init = ovs_init_net,
        .exit = ovs_exit_net,
        .id   = &ovs_net_id,
        .size = sizeof(struct ovs_net),
};

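/* Module init.  Subsystems come up in dependency order and the error
 * labels unwind them in reverse; dp_cleanup() mirrors the same order,
 * adding an rcu_barrier() so that deferred RCU callbacks (such as
 * datapath destruction) finish before the module text is unloaded.
 */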
static int __init dp_init(void)
{
        struct sk_buff *dummy_skb;
        int err;

        BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

        pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
                VERSION BUILDNR);

        err = genl_exec_init();
        if (err)
                goto error;

        err = ovs_workqueues_init();
        if (err)
                goto error_genl_exec;

        err = ovs_tnl_init();
        if (err)
                goto error_wq;

        err = ovs_flow_init();
        if (err)
                goto error_tnl_exit;

        err = ovs_vport_init();
        if (err)
                goto error_flow_exit;

        err = register_pernet_device(&ovs_net_ops);
        if (err)
                goto error_vport_exit;

        err = register_netdevice_notifier(&ovs_dp_device_notifier);
        if (err)
                goto error_netns_exit;

        err = dp_register_genl();
        if (err < 0)
                goto error_unreg_notifier;

        schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);

        return 0;

error_unreg_notifier:
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
        unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
        ovs_vport_exit();
error_flow_exit:
        ovs_flow_exit();
error_tnl_exit:
        ovs_tnl_exit();
error_wq:
        ovs_workqueues_exit();
error_genl_exec:
        genl_exec_exit();
error:
        return err;
}

static void dp_cleanup(void)
{
        cancel_delayed_work_sync(&rehash_flow_wq);
        dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
        unregister_pernet_device(&ovs_net_ops);
        rcu_barrier();
        ovs_vport_exit();
        ovs_flow_exit();
        ovs_tnl_exit();
        ovs_workqueues_exit();
        genl_exec_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");