datapath: Remove checks for preinitialized flow.
datapath/datapath.c (sliver-openvswitch.git)
/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "checksum.h"
#include "datapath.h"
#include "flow.h"
#include "genl_exec.h"
#include "vlan.h"
#include "tunnel.h"
#include "vport-internal_dev.h"

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
    LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
#error Kernels before 2.6.18 or after 3.8 are not supported by this version of Open vSwitch.
#endif

#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
static void rehash_flow_table(struct work_struct *work);
static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);

int ovs_net_id __read_mostly;

/**
 * DOC: Locking:
 *
 * Writes to device state (add/remove datapath, port, set operations on vports,
 * etc.) are protected by RTNL.
 *
 * Writes to other state (flow table modifications, set miscellaneous datapath
 * parameters, etc.) are protected by genl_mutex.  The RTNL lock nests inside
 * genl_mutex.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization, but they nest under all of the above and don't interact
 * with each other.
 */
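
/* For example, a read-side caller that needs a vport typically does:
 *
 *      rcu_read_lock();
 *      vport = ovs_vport_rcu(dp, port_no);
 *      if (vport)
 *              ... use vport ...
 *      rcu_read_unlock();
 */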

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
                             const struct dp_upcall_info *);
static int queue_userspace_packet(struct net *, int dp_ifindex,
                                  struct sk_buff *,
                                  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
static struct datapath *get_dp(struct net *net, int dp_ifindex)
{
        struct datapath *dp = NULL;
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, dp_ifindex);
        if (dev) {
                struct vport *vport = ovs_internal_dev_get_vport(dev);
                if (vport)
                        dp = vport->dp;
        }
        rcu_read_unlock();

        return dp;
}

/* Must be called with rcu_read_lock or RTNL lock. */
const char *ovs_dp_name(const struct datapath *dp)
{
        struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL);
        return vport->ops->get_name(vport);
}

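/* Returns the ifindex of the datapath's local port, or zero if the
 * datapath has no local port. */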
static int get_dpifindex(struct datapath *dp)
{
        struct vport *local;
        int ifindex;

        rcu_read_lock();

        local = ovs_vport_rcu(dp, OVSP_LOCAL);
        if (local)
                ifindex = local->ops->get_ifindex(local);
        else
                ifindex = 0;

        rcu_read_unlock();

        return ifindex;
}

static void destroy_dp_rcu(struct rcu_head *rcu)
{
        struct datapath *dp = container_of(rcu, struct datapath, rcu);

        ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
        free_percpu(dp->stats_percpu);
        release_net(ovs_dp_get_net(dp));
        kfree(dp->ports);
        kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
                                            u16 port_no)
{
        return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}

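/* Must be called with rcu_read_lock or RTNL lock. */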
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
        struct vport *vport;
        struct hlist_head *head;

        head = vport_hash_bucket(dp, port_no);
        hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
                if (vport->port_no == port_no)
                        return vport;
        }
        return NULL;
}

/* Called with RTNL lock and genl_lock. */
static struct vport *new_vport(const struct vport_parms *parms)
{
        struct vport *vport;

        vport = ovs_vport_add(parms);
        if (!IS_ERR(vport)) {
                struct datapath *dp = parms->dp;
                struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

                hlist_add_head_rcu(&vport->dp_hash_node, head);
        }
        return vport;
}

/* Called with RTNL lock. */
void ovs_dp_detach_port(struct vport *p)
{
        ASSERT_RTNL();

        /* First drop references to device. */
        hlist_del_rcu(&p->dp_hash_node);

        /* Then destroy it. */
        ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
        struct datapath *dp = p->dp;
        struct sw_flow *flow;
        struct dp_stats_percpu *stats;
        struct sw_flow_key key;
        u64 *stats_counter;
        int error;
        int key_len;

        stats = this_cpu_ptr(dp->stats_percpu);

        /* Extract flow from 'skb' into 'key'. */
        error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
        if (unlikely(error)) {
                kfree_skb(skb);
                return;
        }

        /* Look up flow. */
        flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
        if (unlikely(!flow)) {
                struct dp_upcall_info upcall;

                upcall.cmd = OVS_PACKET_CMD_MISS;
                upcall.key = &key;
                upcall.userdata = NULL;
                upcall.portid = p->upcall_portid;
                ovs_dp_upcall(dp, skb, &upcall);
                consume_skb(skb);
                stats_counter = &stats->n_missed;
                goto out;
        }

        OVS_CB(skb)->flow = flow;

        stats_counter = &stats->n_hit;
        ovs_flow_used(OVS_CB(skb)->flow, skb);
        ovs_execute_actions(dp, skb);

out:
        /* Update datapath statistics. */
        u64_stats_update_begin(&stats->sync);
        (*stats_counter)++;
        u64_stats_update_end(&stats->sync);
}

static struct genl_family dp_packet_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_PACKET_FAMILY,
        .version = OVS_PACKET_VERSION,
        .maxattr = OVS_PACKET_ATTR_MAX,
         SET_NETNSOK
};

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
                  const struct dp_upcall_info *upcall_info)
{
        struct dp_stats_percpu *stats;
        int dp_ifindex;
        int err;

        if (upcall_info->portid == 0) {
                err = -ENOTCONN;
                goto err;
        }

        dp_ifindex = get_dpifindex(dp);
        if (!dp_ifindex) {
                err = -ENODEV;
                goto err;
        }

        forward_ip_summed(skb, true);

        if (!skb_is_gso(skb))
                err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
        else
                err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
        if (err)
                goto err;

        return 0;

err:
        stats = this_cpu_ptr(dp->stats_percpu);

        u64_stats_update_begin(&stats->sync);
        stats->n_lost++;
        u64_stats_update_end(&stats->sync);

        return err;
}

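/* Segments a GSO packet and queues each segment to userspace in turn,
 * fixing up the flow key so that trailing IP fragments are reported
 * with OVS_FRAG_TYPE_LATER. */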
static int queue_gso_packets(struct net *net, int dp_ifindex,
                             struct sk_buff *skb,
                             const struct dp_upcall_info *upcall_info)
{
        unsigned short gso_type = skb_shinfo(skb)->gso_type;
        struct dp_upcall_info later_info;
        struct sw_flow_key later_key;
        struct sk_buff *segs, *nskb;
        int err;

        segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
        if (IS_ERR(segs))
                return PTR_ERR(segs);

        /* Queue all of the segments. */
        skb = segs;
        do {
                err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
                if (err)
                        break;

                if (skb == segs && gso_type & SKB_GSO_UDP) {
                        /* The initial flow key extracted by ovs_flow_extract()
                         * in this case is for a first fragment, so we need to
                         * properly mark later fragments.
                         */
                        later_key = *upcall_info->key;
                        later_key.ip.frag = OVS_FRAG_TYPE_LATER;

                        later_info = *upcall_info;
                        later_info.key = &later_key;
                        upcall_info = &later_info;
                }
        } while ((skb = skb->next));

        /* Free all of the segments. */
        skb = segs;
        do {
                nskb = skb->next;
                if (err)
                        kfree_skb(skb);
                else
                        consume_skb(skb);
        } while ((skb = nskb));
        return err;
}

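/* Wraps 'skb' in a Generic Netlink message, together with its flow key
 * and any user data from 'upcall_info', and unicasts the result to the
 * socket identified by 'upcall_info->portid'. */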
static int queue_userspace_packet(struct net *net, int dp_ifindex,
                                  struct sk_buff *skb,
                                  const struct dp_upcall_info *upcall_info)
{
        struct ovs_header *upcall;
        struct sk_buff *nskb = NULL;
        struct sk_buff *user_skb; /* to be queued to userspace */
        struct nlattr *nla;
        unsigned int len;
        int err;

        if (vlan_tx_tag_present(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return -ENOMEM;

                err = vlan_deaccel_tag(nskb);
                if (err)
                        return err;

                skb = nskb;
        }

        if (nla_attr_size(skb->len) > USHRT_MAX) {
                err = -EFBIG;
                goto out;
        }

        len = sizeof(struct ovs_header);
        len += nla_total_size(skb->len);
        len += nla_total_size(FLOW_BUFSIZE);
        if (upcall_info->userdata)
                len += NLA_ALIGN(upcall_info->userdata->nla_len);

        user_skb = genlmsg_new(len, GFP_ATOMIC);
        if (!user_skb) {
                err = -ENOMEM;
                goto out;
        }

        upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
                             0, upcall_info->cmd);
        upcall->dp_ifindex = dp_ifindex;

        nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
        ovs_flow_to_nlattrs(upcall_info->key, user_skb);
        nla_nest_end(user_skb, nla);

        if (upcall_info->userdata)
                __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
                          nla_len(upcall_info->userdata),
                          nla_data(upcall_info->userdata));

        nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);

        skb_copy_and_csum_dev(skb, nla_data(nla));

        genlmsg_end(user_skb, upcall);
        err = genlmsg_unicast(net, user_skb, upcall_info->portid);

out:
        kfree_skb(nskb);
        return err;
}

/* Called with genl_mutex. */
static int flush_flows(struct datapath *dp)
{
        struct flow_table *old_table;
        struct flow_table *new_table;

        old_table = genl_dereference(dp->table);
        new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
        if (!new_table)
                return -ENOMEM;

        rcu_assign_pointer(dp->table, new_table);

        ovs_flow_tbl_deferred_destroy(old_table);
        return 0;
}

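/* Makes room for 'attr_len' more bytes of actions in '*sfa', doubling
 * the allocation as needed up to MAX_ACTIONS_BUFSIZE, and returns a
 * pointer to the reserved space.  '*sfa' may be reallocated, so callers
 * must not keep pointers into the old buffer. */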
static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, int attr_len)
{
        struct sw_flow_actions *acts;
        int new_acts_size;
        int req_size = NLA_ALIGN(attr_len);
        int next_offset = offsetof(struct sw_flow_actions, actions) +
                                        (*sfa)->actions_len;

        if (req_size <= (ksize(*sfa) - next_offset))
                goto out;

        new_acts_size = ksize(*sfa) * 2;

        if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
                if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
                        return ERR_PTR(-EMSGSIZE);
                new_acts_size = MAX_ACTIONS_BUFSIZE;
        }

        acts = ovs_flow_actions_alloc(new_acts_size);
        if (IS_ERR(acts))
                return (void *)acts;

        memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
        acts->actions_len = (*sfa)->actions_len;
        kfree(*sfa);
        *sfa = acts;

out:
        (*sfa)->actions_len += req_size;
        return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
}

static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len)
{
        struct nlattr *a;

        a = reserve_sfa_size(sfa, nla_attr_size(len));
        if (IS_ERR(a))
                return PTR_ERR(a);

        a->nla_type = attrtype;
        a->nla_len = nla_attr_size(len);

        if (data)
                memcpy(nla_data(a), data, len);
        memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));

        return 0;
}

static inline int add_nested_action_start(struct sw_flow_actions **sfa, int attrtype)
{
        int used = (*sfa)->actions_len;
        int err;

        err = add_action(sfa, attrtype, NULL, 0);
        if (err)
                return err;

        return used;
}

static inline void add_nested_action_end(struct sw_flow_actions *sfa, int st_offset)
{
        struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions + st_offset);

        a->nla_len = sfa->actions_len - st_offset;
}

static int validate_and_copy_actions(const struct nlattr *attr,
                                const struct sw_flow_key *key, int depth,
                                struct sw_flow_actions **sfa);

static int validate_and_copy_sample(const struct nlattr *attr,
                           const struct sw_flow_key *key, int depth,
                           struct sw_flow_actions **sfa)
{
        const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
        const struct nlattr *probability, *actions;
        const struct nlattr *a;
        int rem, start, err, st_acts;

        memset(attrs, 0, sizeof(attrs));
        nla_for_each_nested(a, attr, rem) {
                int type = nla_type(a);
                if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
                        return -EINVAL;
                attrs[type] = a;
        }
        if (rem)
                return -EINVAL;

        probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
        if (!probability || nla_len(probability) != sizeof(u32))
                return -EINVAL;

        actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
        if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
                return -EINVAL;

        /* Validation done, copy sample action. */
        start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
        if (start < 0)
                return start;
        err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, nla_data(probability), sizeof(u32));
        if (err)
                return err;
        st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
        if (st_acts < 0)
                return st_acts;

        err = validate_and_copy_actions(actions, key, depth + 1, sfa);
        if (err)
                return err;

        add_nested_action_end(*sfa, st_acts);
        add_nested_action_end(*sfa, start);

        return 0;
}

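/* Setting TCP or UDP fields is only meaningful if the flow key actually
 * includes transport ports. */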
static int validate_tp_port(const struct sw_flow_key *flow_key)
{
        if (flow_key->eth.type == htons(ETH_P_IP)) {
                if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
                        return 0;
        } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
                if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
                        return 0;
        }

        return -EINVAL;
}

static int validate_and_copy_set_tun(const struct nlattr *attr,
                                     struct sw_flow_actions **sfa)
{
        struct ovs_key_ipv4_tunnel tun_key;
        int err, start;

        err = ipv4_tun_from_nlattr(nla_data(attr), &tun_key);
        if (err)
                return err;

        start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET);
        if (start < 0)
                return start;

        err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &tun_key, sizeof(tun_key));
        add_nested_action_end(*sfa, start);

        return err;
}

static int validate_set(const struct nlattr *a,
                        const struct sw_flow_key *flow_key,
                        struct sw_flow_actions **sfa,
                        bool *set_tun)
{
        const struct nlattr *ovs_key = nla_data(a);
        int key_type = nla_type(ovs_key);

        /* There can be only one key in an action. */
        if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
                return -EINVAL;

        if (key_type > OVS_KEY_ATTR_MAX ||
            (ovs_key_lens[key_type] != nla_len(ovs_key) &&
             ovs_key_lens[key_type] != -1))
                return -EINVAL;

        switch (key_type) {
        const struct ovs_key_ipv4 *ipv4_key;
        const struct ovs_key_ipv6 *ipv6_key;
        int err;

        case OVS_KEY_ATTR_PRIORITY:
        case OVS_KEY_ATTR_ETHERNET:
                break;

        case OVS_KEY_ATTR_SKB_MARK:
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && !defined(CONFIG_NETFILTER)
                if (nla_get_u32(ovs_key) != 0)
                        return -EINVAL;
#endif
                break;

        case OVS_KEY_ATTR_TUNNEL:
                *set_tun = true;
                err = validate_and_copy_set_tun(a, sfa);
                if (err)
                        return err;
                break;

        case OVS_KEY_ATTR_IPV4:
                if (flow_key->eth.type != htons(ETH_P_IP))
                        return -EINVAL;

                if (!flow_key->ip.proto)
                        return -EINVAL;

                ipv4_key = nla_data(ovs_key);
                if (ipv4_key->ipv4_proto != flow_key->ip.proto)
                        return -EINVAL;

                if (ipv4_key->ipv4_frag != flow_key->ip.frag)
                        return -EINVAL;

                break;

        case OVS_KEY_ATTR_IPV6:
                if (flow_key->eth.type != htons(ETH_P_IPV6))
                        return -EINVAL;

                if (!flow_key->ip.proto)
                        return -EINVAL;

                ipv6_key = nla_data(ovs_key);
                if (ipv6_key->ipv6_proto != flow_key->ip.proto)
                        return -EINVAL;

                if (ipv6_key->ipv6_frag != flow_key->ip.frag)
                        return -EINVAL;

                if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
                        return -EINVAL;

                break;

        case OVS_KEY_ATTR_TCP:
                if (flow_key->ip.proto != IPPROTO_TCP)
                        return -EINVAL;

                return validate_tp_port(flow_key);

        case OVS_KEY_ATTR_UDP:
                if (flow_key->ip.proto != IPPROTO_UDP)
                        return -EINVAL;

                return validate_tp_port(flow_key);

        default:
                return -EINVAL;
        }

        return 0;
}

static int validate_userspace(const struct nlattr *attr)
{
        static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
                [OVS_USERSPACE_ATTR_PID] = { .type = NLA_U32 },
                [OVS_USERSPACE_ATTR_USERDATA] = { .type = NLA_UNSPEC },
        };
        struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
        int error;

        error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
                                 attr, userspace_policy);
        if (error)
                return error;

        if (!a[OVS_USERSPACE_ATTR_PID] ||
            !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
                return -EINVAL;

        return 0;
}

static int copy_action(const struct nlattr *from,
                      struct sw_flow_actions **sfa)
{
        int totlen = NLA_ALIGN(from->nla_len);
        struct nlattr *to;

        to = reserve_sfa_size(sfa, from->nla_len);
        if (IS_ERR(to))
                return PTR_ERR(to);

        memcpy(to, from, totlen);
        return 0;
}

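/* Validates each action in 'attr' against 'key' and appends a copy to
 * '*sfa', recursing into nested sample actions up to
 * SAMPLE_ACTION_DEPTH levels. */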
static int validate_and_copy_actions(const struct nlattr *attr,
                                const struct sw_flow_key *key,
                                int depth,
                                struct sw_flow_actions **sfa)
{
        const struct nlattr *a;
        int rem, err;

        if (depth >= SAMPLE_ACTION_DEPTH)
                return -EOVERFLOW;

        nla_for_each_nested(a, attr, rem) {
                /* Expected argument lengths, (u32)-1 for variable length. */
                static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
                        [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
                        [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
                        [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
                        [OVS_ACTION_ATTR_POP_VLAN] = 0,
                        [OVS_ACTION_ATTR_SET] = (u32)-1,
                        [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
                };
                const struct ovs_action_push_vlan *vlan;
                int type = nla_type(a);
                bool skip_copy;

                if (type > OVS_ACTION_ATTR_MAX ||
                    (action_lens[type] != nla_len(a) &&
                     action_lens[type] != (u32)-1))
                        return -EINVAL;

                skip_copy = false;
                switch (type) {
                case OVS_ACTION_ATTR_UNSPEC:
                        return -EINVAL;

                case OVS_ACTION_ATTR_USERSPACE:
                        err = validate_userspace(a);
                        if (err)
                                return err;
                        break;

                case OVS_ACTION_ATTR_OUTPUT:
                        if (nla_get_u32(a) >= DP_MAX_PORTS)
                                return -EINVAL;
                        break;

                case OVS_ACTION_ATTR_POP_VLAN:
                        break;

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        vlan = nla_data(a);
                        if (vlan->vlan_tpid != htons(ETH_P_8021Q))
                                return -EINVAL;
                        if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
                                return -EINVAL;
                        break;

                case OVS_ACTION_ATTR_SET:
                        err = validate_set(a, key, sfa, &skip_copy);
                        if (err)
                                return err;
                        break;

                case OVS_ACTION_ATTR_SAMPLE:
                        err = validate_and_copy_sample(a, key, depth, sfa);
                        if (err)
                                return err;
                        skip_copy = true;
                        break;

                default:
                        return -EINVAL;
                }
                if (!skip_copy) {
                        err = copy_action(a, sfa);
                        if (err)
                                return err;
                }
        }

        if (rem > 0)
                return -EINVAL;

        return 0;
}

static void clear_stats(struct sw_flow *flow)
{
        flow->used = 0;
        flow->tcp_flags = 0;
        flow->packet_count = 0;
        flow->byte_count = 0;
}

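/* Handles OVS_PACKET_CMD_EXECUTE: builds a temporary flow from the
 * OVS_PACKET_ATTR_KEY and OVS_PACKET_ATTR_PACKET attributes, then
 * executes the supplied actions on the packet. */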
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
        struct ovs_header *ovs_header = info->userhdr;
        struct nlattr **a = info->attrs;
        struct sw_flow_actions *acts;
        struct sk_buff *packet;
        struct sw_flow *flow;
        struct datapath *dp;
        struct ethhdr *eth;
        int len;
        int err;
        int key_len;

        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
            !a[OVS_PACKET_ATTR_ACTIONS] ||
            nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
                goto err;

        len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
        packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
        err = -ENOMEM;
        if (!packet)
                goto err;
        skb_reserve(packet, NET_IP_ALIGN);

        memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);

        skb_reset_mac_header(packet);
        eth = eth_hdr(packet);

        /* Normally, setting the skb 'protocol' field would be handled by a
         * call to eth_type_trans(), but it assumes there's a sending
         * device, which we may not have. */
        if (ntohs(eth->h_proto) >= 1536)
                packet->protocol = eth->h_proto;
        else
                packet->protocol = htons(ETH_P_802_2);

        /* Build an sw_flow for sending this packet. */
        flow = ovs_flow_alloc();
        err = PTR_ERR(flow);
        if (IS_ERR(flow))
                goto err_kfree_skb;

        err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
        if (err)
                goto err_flow_free;

        err = ovs_flow_metadata_from_nlattrs(flow, key_len, a[OVS_PACKET_ATTR_KEY]);
        if (err)
                goto err_flow_free;
        acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
        err = PTR_ERR(acts);
        if (IS_ERR(acts))
                goto err_flow_free;

        err = validate_and_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0, &acts);
        rcu_assign_pointer(flow->sf_acts, acts);
        if (err)
                goto err_flow_free;

        OVS_CB(packet)->flow = flow;
        packet->priority = flow->key.phy.priority;
        skb_set_mark(packet, flow->key.phy.skb_mark);

        rcu_read_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto err_unlock;

        local_bh_disable();
        err = ovs_execute_actions(dp, packet);
        local_bh_enable();
        rcu_read_unlock();

        ovs_flow_free(flow);
        return err;

err_unlock:
        rcu_read_unlock();
err_flow_free:
        ovs_flow_free(flow);
err_kfree_skb:
        kfree_skb(packet);
err:
        return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
        [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static struct genl_ops dp_packet_genl_ops[] = {
        { .cmd = OVS_PACKET_CMD_EXECUTE,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = packet_policy,
          .doit = ovs_packet_cmd_execute
        }
};

static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
        int i;
        struct flow_table *table = genl_dereference(dp->table);

        stats->n_flows = ovs_flow_tbl_count(table);

        stats->n_hit = stats->n_missed = stats->n_lost = 0;
        for_each_possible_cpu(i) {
                const struct dp_stats_percpu *percpu_stats;
                struct dp_stats_percpu local_stats;
                unsigned int start;

                percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

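                /* Snapshot the counters, retrying if a writer updated
                 * them concurrently (see u64_stats_fetch_retry_bh()). */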
                do {
                        start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
                        local_stats = *percpu_stats;
                } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

                stats->n_hit += local_stats.n_hit;
                stats->n_missed += local_stats.n_missed;
                stats->n_lost += local_stats.n_lost;
        }
}

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
};

static struct genl_family dp_flow_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_FLOW_FAMILY,
        .version = OVS_FLOW_VERSION,
        .maxattr = OVS_FLOW_ATTR_MAX,
         SET_NETNSOK
};

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
        .name = OVS_FLOW_MCGROUP
};

static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb);

static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
{
        const struct nlattr *a;
        struct nlattr *start;
        int err = 0, rem;

        start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
        if (!start)
                return -EMSGSIZE;

        nla_for_each_nested(a, attr, rem) {
                int type = nla_type(a);
                struct nlattr *st_sample;

                switch (type) {
                case OVS_SAMPLE_ATTR_PROBABILITY:
                        if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY, sizeof(u32), nla_data(a)))
                                return -EMSGSIZE;
                        break;
                case OVS_SAMPLE_ATTR_ACTIONS:
                        st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
                        if (!st_sample)
                                return -EMSGSIZE;
                        err = actions_to_attr(nla_data(a), nla_len(a), skb);
                        if (err)
                                return err;
                        nla_nest_end(skb, st_sample);
                        break;
                }
        }

        nla_nest_end(skb, start);
        return err;
}

static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
{
        const struct nlattr *ovs_key = nla_data(a);
        int key_type = nla_type(ovs_key);
        struct nlattr *start;
        int err;

        switch (key_type) {
        case OVS_KEY_ATTR_IPV4_TUNNEL:
                start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
                if (!start)
                        return -EMSGSIZE;

                err = ipv4_tun_to_nlattr(skb, nla_data(ovs_key));
                if (err)
                        return err;
                nla_nest_end(skb, start);
                break;
        default:
                if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
                        return -EMSGSIZE;
                break;
        }

        return 0;
}

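/* Translates the kernel's internal copy of an action list back into
 * Netlink attribute format for reporting to userspace. */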
static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb)
{
        const struct nlattr *a;
        int rem, err;

        nla_for_each_attr(a, attr, len, rem) {
                int type = nla_type(a);

                switch (type) {
                case OVS_ACTION_ATTR_SET:
                        err = set_action_to_attr(a, skb);
                        if (err)
                                return err;
                        break;

                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample_action_to_attr(a, skb);
                        if (err)
                                return err;
                        break;
                default:
                        if (nla_put(skb, type, nla_len(a), nla_data(a)))
                                return -EMSGSIZE;
                        break;
                }
        }

        return 0;
}

/* Called with genl_lock. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
                                  struct sk_buff *skb, u32 portid,
                                  u32 seq, u32 flags, u8 cmd)
{
        const int skb_orig_len = skb->len;
        const struct sw_flow_actions *sf_acts;
        struct nlattr *start;
        struct ovs_flow_stats stats;
        struct ovs_header *ovs_header;
        struct nlattr *nla;
        unsigned long used;
        u8 tcp_flags;
        int err;

        sf_acts = rcu_dereference_protected(flow->sf_acts,
                                            lockdep_genl_is_held());

        ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = get_dpifindex(dp);

        nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
        if (!nla)
                goto nla_put_failure;
        err = ovs_flow_to_nlattrs(&flow->key, skb);
        if (err)
                goto error;
        nla_nest_end(skb, nla);

        spin_lock_bh(&flow->lock);
        used = flow->used;
        stats.n_packets = flow->packet_count;
        stats.n_bytes = flow->byte_count;
        tcp_flags = flow->tcp_flags;
        spin_unlock_bh(&flow->lock);

        if (used &&
            nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
                goto nla_put_failure;

        if (stats.n_packets &&
            nla_put(skb, OVS_FLOW_ATTR_STATS,
                    sizeof(struct ovs_flow_stats), &stats))
                goto nla_put_failure;

        if (tcp_flags &&
            nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
                goto nla_put_failure;

        /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
         * this is the first flow to be dumped into 'skb'.  This is unusual for
         * Netlink but individual action lists can be longer than
         * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
         * The userspace caller can always fetch the actions separately if it
         * really wants them.  (Most userspace callers in fact don't care.)
         *
         * This can only fail for dump operations because the skb is always
         * properly sized for single flows.
         */
        start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
        if (start) {
                err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb);
                if (!err)
                        nla_nest_end(skb, start);
                else {
                        if (skb_orig_len)
                                goto error;

                        nla_nest_cancel(skb, start);
                }
        } else if (skb_orig_len)
                goto nla_put_failure;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}

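/* Allocates a reply skb sized large enough that ovs_flow_cmd_fill_info()
 * cannot fail for a single flow, accounting for every attribute it may
 * emit. */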
static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
{
        const struct sw_flow_actions *sf_acts;
        int len;

        sf_acts = rcu_dereference_protected(flow->sf_acts,
                                            lockdep_genl_is_held());

        /* OVS_FLOW_ATTR_KEY */
        len = nla_total_size(FLOW_BUFSIZE);
        /* OVS_FLOW_ATTR_ACTIONS */
        len += nla_total_size(sf_acts->actions_len);
        /* OVS_FLOW_ATTR_STATS */
        len += nla_total_size(sizeof(struct ovs_flow_stats));
        /* OVS_FLOW_ATTR_TCP_FLAGS */
        len += nla_total_size(1);
        /* OVS_FLOW_ATTR_USED */
        len += nla_total_size(8);

        len += NLMSG_ALIGN(sizeof(struct ovs_header));

        return genlmsg_new(len, GFP_KERNEL);
}

static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
                                               struct datapath *dp,
                                               u32 portid, u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = ovs_flow_cmd_alloc_info(flow);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
        BUG_ON(retval < 0);
        return skb;
}

static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sw_flow *flow;
        struct sk_buff *reply;
        struct datapath *dp;
        struct flow_table *table;
        struct sw_flow_actions *acts = NULL;
        int error;
        int key_len;

        /* Extract key. */
        error = -EINVAL;
        if (!a[OVS_FLOW_ATTR_KEY])
                goto error;
        error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (error)
                goto error;

        /* Validate actions. */
        if (a[OVS_FLOW_ATTR_ACTIONS]) {
                acts = ovs_flow_actions_alloc(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
                error = PTR_ERR(acts);
                if (IS_ERR(acts))
                        goto error;

                error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0, &acts);
                if (error)
                        goto err_kfree;
        } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
                error = -EINVAL;
                goto error;
        }

        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        error = -ENODEV;
        if (!dp)
                goto err_kfree;

        table = genl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow) {
                /* Bail out if we're not allowed to create a new flow. */
                error = -ENOENT;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
                        goto err_kfree;

                /* Expand table, if necessary, to make room. */
                if (ovs_flow_tbl_need_to_expand(table)) {
                        struct flow_table *new_table;

                        new_table = ovs_flow_tbl_expand(table);
                        if (!IS_ERR(new_table)) {
                                rcu_assign_pointer(dp->table, new_table);
                                ovs_flow_tbl_deferred_destroy(table);
                                table = genl_dereference(dp->table);
                        }
                }

                /* Allocate flow. */
                flow = ovs_flow_alloc();
                if (IS_ERR(flow)) {
                        error = PTR_ERR(flow);
                        goto err_kfree;
                }
                clear_stats(flow);

                rcu_assign_pointer(flow->sf_acts, acts);

                /* Put flow in bucket. */
                ovs_flow_tbl_insert(table, flow, &key, key_len);

                reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                                info->snd_seq,
                                                OVS_FLOW_CMD_NEW);
        } else {
                /* We found a matching flow. */
                struct sw_flow_actions *old_acts;

                /* Bail out if we're not allowed to modify an existing flow.
                 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
                 * because Generic Netlink treats the latter as a dump
                 * request.  We also accept NLM_F_EXCL in case that bug ever
                 * gets fixed.
                 */
                error = -EEXIST;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
                    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
                        goto err_kfree;

                /* Update actions. */
                old_acts = rcu_dereference_protected(flow->sf_acts,
                                                     lockdep_genl_is_held());
                rcu_assign_pointer(flow->sf_acts, acts);
                ovs_flow_deferred_free_acts(old_acts);

                reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                               info->snd_seq, OVS_FLOW_CMD_NEW);

                /* Clear stats. */
                if (a[OVS_FLOW_ATTR_CLEAR]) {
                        spin_lock_bh(&flow->lock);
                        clear_stats(flow);
                        spin_unlock_bh(&flow->lock);
                }
        }

        if (!IS_ERR(reply))
                genl_notify(reply, genl_info_net(info), info->snd_portid,
                           ovs_dp_flow_multicast_group.id, info->nlhdr,
                           GFP_KERNEL);
        else
                netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
                                ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
        return 0;

err_kfree:
        kfree(acts);
error:
        return error;
}

static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct flow_table *table;
        int err;
        int key_len;

        if (!a[OVS_FLOW_ATTR_KEY])
                return -EINVAL;
        err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (err)
                return err;

        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        table = genl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow)
                return -ENOENT;

        reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                        info->snd_seq, OVS_FLOW_CMD_NEW);
        if (IS_ERR(reply))
                return PTR_ERR(reply);

        return genlmsg_reply(reply, info);
}

static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct flow_table *table;
        int err;
        int key_len;

        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        if (!a[OVS_FLOW_ATTR_KEY])
                return flush_flows(dp);

        err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (err)
                return err;

        table = genl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow)
                return -ENOENT;

        reply = ovs_flow_cmd_alloc_info(flow);
        if (!reply)
                return -ENOMEM;

        ovs_flow_tbl_remove(table, flow);

        err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
                                     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
        BUG_ON(err < 0);

        ovs_flow_deferred_free(flow);

        genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
        return 0;
}

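/* Dumps flows one batch at a time, using cb->args[0] and cb->args[1] as
 * a (bucket, object) cursor into the flow table across calls. */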
1374 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1375 {
1376         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1377         struct datapath *dp;
1378         struct flow_table *table;
1379
1380         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1381         if (!dp)
1382                 return -ENODEV;
1383
1384         table = genl_dereference(dp->table);
1385
1386         for (;;) {
1387                 struct sw_flow *flow;
1388                 u32 bucket, obj;
1389
1390                 bucket = cb->args[0];
1391                 obj = cb->args[1];
1392                 flow = ovs_flow_tbl_next(table, &bucket, &obj);
1393                 if (!flow)
1394                         break;
1395
1396                 if (ovs_flow_cmd_fill_info(flow, dp, skb,
1397                                            NETLINK_CB(cb->skb).portid,
1398                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1399                                            OVS_FLOW_CMD_NEW) < 0)
1400                         break;
1401
1402                 cb->args[0] = bucket;
1403                 cb->args[1] = obj;
1404         }
1405         return skb->len;
1406 }
1407
1408 static struct genl_ops dp_flow_genl_ops[] = {
1409         { .cmd = OVS_FLOW_CMD_NEW,
1410           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1411           .policy = flow_policy,
1412           .doit = ovs_flow_cmd_new_or_set
1413         },
1414         { .cmd = OVS_FLOW_CMD_DEL,
1415           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1416           .policy = flow_policy,
1417           .doit = ovs_flow_cmd_del
1418         },
1419         { .cmd = OVS_FLOW_CMD_GET,
1420           .flags = 0,               /* OK for unprivileged users. */
1421           .policy = flow_policy,
1422           .doit = ovs_flow_cmd_get,
1423           .dumpit = ovs_flow_cmd_dump
1424         },
1425         { .cmd = OVS_FLOW_CMD_SET,
1426           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1427           .policy = flow_policy,
1428           .doit = ovs_flow_cmd_new_or_set,
1429         },
1430 };
1431
1432 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1433 #ifdef HAVE_NLA_NUL_STRING
1434         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1435 #endif
1436         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1437 };
1438
1439 static struct genl_family dp_datapath_genl_family = {
1440         .id = GENL_ID_GENERATE,
1441         .hdrsize = sizeof(struct ovs_header),
1442         .name = OVS_DATAPATH_FAMILY,
1443         .version = OVS_DATAPATH_VERSION,
1444         .maxattr = OVS_DP_ATTR_MAX,
1445          SET_NETNSOK
1446 };
1447
1448 static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
1449         .name = OVS_DATAPATH_MCGROUP
1450 };
1451
1452 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1453                                 u32 portid, u32 seq, u32 flags, u8 cmd)
1454 {
1455         struct ovs_header *ovs_header;
1456         struct ovs_dp_stats dp_stats;
1457         int err;
1458
1459         ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1460                                    flags, cmd);
1461         if (!ovs_header)
1462                 goto error;
1463
1464         ovs_header->dp_ifindex = get_dpifindex(dp);
1465
1466         rcu_read_lock();
1467         err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1468         rcu_read_unlock();
1469         if (err)
1470                 goto nla_put_failure;
1471
1472         get_dp_stats(dp, &dp_stats);
1473         if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
1474                 goto nla_put_failure;
1475
1476         return genlmsg_end(skb, ovs_header);
1477
1478 nla_put_failure:
1479         genlmsg_cancel(skb, ovs_header);
1480 error:
1481         return -EMSGSIZE;
1482 }
1483
1484 static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
1485                                              u32 seq, u8 cmd)
1486 {
1487         struct sk_buff *skb;
1488         int retval;
1489
1490         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1491         if (!skb)
1492                 return ERR_PTR(-ENOMEM);
1493
1494         retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
1495         if (retval < 0) {
1496                 kfree_skb(skb);
1497                 return ERR_PTR(retval);
1498         }
1499         return skb;
1500 }
1501
1502 static int ovs_dp_cmd_validate(struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1503 {
1504         return CHECK_NUL_STRING(a[OVS_DP_ATTR_NAME], IFNAMSIZ - 1);
1505 }
1506
1507 /* Called with genl_mutex and optionally with RTNL lock also. */
1508 static struct datapath *lookup_datapath(struct net *net,
1509                                         struct ovs_header *ovs_header,
1510                                         struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1511 {
1512         struct datapath *dp;
1513
1514         if (!a[OVS_DP_ATTR_NAME])
1515                 dp = get_dp(net, ovs_header->dp_ifindex);
1516         else {
1517                 struct vport *vport;
1518
1519                 rcu_read_lock();
1520                 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1521                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1522                 rcu_read_unlock();
1523         }
1524         return dp ? dp : ERR_PTR(-ENODEV);
1525 }
1526
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct vport_parms parms;
        struct sk_buff *reply;
        struct datapath *dp;
        struct vport *vport;
        struct ovs_net *ovs_net;
        int err, i;

        err = -EINVAL;
        if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
                goto err;

        err = ovs_dp_cmd_validate(a);
        if (err)
                goto err;

        rtnl_lock();

        err = -ENOMEM;
        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
        if (dp == NULL)
                goto err_unlock_rtnl;

        ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));

        /* Allocate table. */
        err = -ENOMEM;
        rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
        if (!dp->table)
                goto err_free_dp;

        dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
        if (!dp->stats_percpu) {
                err = -ENOMEM;
                goto err_destroy_table;
        }

        dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
                            GFP_KERNEL);
        if (!dp->ports) {
                err = -ENOMEM;
                goto err_destroy_percpu;
        }

        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
                INIT_HLIST_HEAD(&dp->ports[i]);

        /* Set up our datapath device. */
        parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
        parms.type = OVS_VPORT_TYPE_INTERNAL;
        parms.options = NULL;
        parms.dp = dp;
        parms.port_no = OVSP_LOCAL;
        parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);

        vport = new_vport(&parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                if (err == -EBUSY)
                        err = -EEXIST;

                goto err_destroy_ports_array;
        }

        reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto err_destroy_local_port;

        ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
        list_add_tail(&dp->list_node, &ovs_net->dps);

        rtnl_unlock();

        genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_datapath_multicast_group.id, info->nlhdr,
                    GFP_KERNEL);
        return 0;

err_destroy_local_port:
        ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
err_destroy_ports_array:
        kfree(dp->ports);
err_destroy_percpu:
        free_percpu(dp->stats_percpu);
err_destroy_table:
        ovs_flow_tbl_destroy(genl_dereference(dp->table));
err_free_dp:
        release_net(ovs_dp_get_net(dp));
        kfree(dp);
err_unlock_rtnl:
        rtnl_unlock();
err:
        return err;
}

/* Called with genl_mutex. */
static void __dp_destroy(struct datapath *dp)
{
        int i;

        rtnl_lock();

        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
                struct vport *vport;
                struct hlist_node *n;

                hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
                        if (vport->port_no != OVSP_LOCAL)
                                ovs_dp_detach_port(vport);
        }

        list_del(&dp->list_node);
        ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));

        /* rtnl_unlock() will wait until all the references to devices that
         * are pending unregistration have been dropped.  We do it here to
         * ensure that any internal devices (which contain DP pointers) are
         * fully destroyed before freeing the datapath.
         */
        rtnl_unlock();

        call_rcu(&dp->rcu, destroy_dp_rcu);
}

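/* OVS_DP_CMD_DEL handler.  The reply is built before __dp_destroy() so it
 * can still read the datapath's state; the notification goes out only after
 * the teardown has completed. */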
static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;
        int err;

        err = ovs_dp_cmd_validate(info->attrs);
        if (err)
                return err;

        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        err = PTR_ERR(dp);
        if (IS_ERR(dp))
                return err;

        reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
                                      info->snd_seq, OVS_DP_CMD_DEL);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                return err;

        __dp_destroy(dp);

        genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_datapath_multicast_group.id, info->nlhdr,
                    GFP_KERNEL);

        return 0;
}

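/* OVS_DP_CMD_SET handler.  There are no modifiable datapath attributes in
 * this version, so after the lookup the handler simply re-announces the
 * datapath with an OVS_DP_CMD_NEW notification.  If the reply cannot be
 * built, the error is broadcast to the multicast group through
 * netlink_set_err() rather than returned to the sender. */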
static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;
        int err;

        err = ovs_dp_cmd_validate(info->attrs);
        if (err)
                return err;

        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp))
                return PTR_ERR(dp);

        reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
                                ovs_dp_datapath_multicast_group.id, err);
                return 0;
        }

        genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_datapath_multicast_group.id, info->nlhdr,
                    GFP_KERNEL);

        return 0;
}

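/* OVS_DP_CMD_GET handler: a unicast reply to the requester, with no
 * multicast notification. */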
static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;
        int err;

        err = ovs_dp_cmd_validate(info->attrs);
        if (err)
                return err;

        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp))
                return PTR_ERR(dp);

        reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
        if (IS_ERR(reply))
                return PTR_ERR(reply);

        return genlmsg_reply(reply, info);
}

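/* Dump callback for OVS_DP_CMD_GET.  cb->args[0] holds the number of
 * datapaths already dumped, so a resumed dump skips that many list entries
 * and picks up where the previous skb filled up. */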
static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
        struct datapath *dp;
        int skip = cb->args[0];
        int i = 0;

        list_for_each_entry(dp, &ovs_net->dps, list_node) {
                if (i >= skip &&
                    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                         OVS_DP_CMD_NEW) < 0)
                        break;
                i++;
        }

        cb->args[0] = i;

        return skb->len;
}

static struct genl_ops dp_datapath_genl_ops[] = {
        { .cmd = OVS_DP_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_new
        },
        { .cmd = OVS_DP_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_del
        },
        { .cmd = OVS_DP_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_get,
          .dumpit = ovs_dp_cmd_dump
        },
        { .cmd = OVS_DP_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_set,
        },
};

static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
#ifdef HAVE_NLA_NUL_STRING
        [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
        [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
#else
        [OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct ovs_vport_stats) },
#endif
        [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

static struct genl_family dp_vport_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_VPORT_FAMILY,
        .version = OVS_VPORT_VERSION,
        .maxattr = OVS_VPORT_ATTR_MAX,
         SET_NETNSOK
};

struct genl_multicast_group ovs_dp_vport_multicast_group = {
        .name = OVS_VPORT_MCGROUP
};

/* Called with RTNL lock or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                                   u32 portid, u32 seq, u32 flags, u8 cmd)
{
        struct ovs_header *ovs_header;
        struct ovs_vport_stats vport_stats;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = get_dpifindex(vport->dp);

        if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
            nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
            nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
            nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
                goto nla_put_failure;

        ovs_vport_get_stats(vport, &vport_stats);
        if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
                    &vport_stats))
                goto nla_put_failure;

        err = ovs_vport_get_options(vport, skb);
        if (err == -EMSGSIZE)
                goto error;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}

/* Called with RTNL lock or RCU read lock. */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
                                         u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
        if (retval < 0) {
                kfree_skb(skb);
                return ERR_PTR(retval);
        }
        return skb;
}

static int ovs_vport_cmd_validate(struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
        return CHECK_NUL_STRING(a[OVS_VPORT_ATTR_NAME], IFNAMSIZ - 1);
}

/* Called with RTNL lock or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
                                  struct ovs_header *ovs_header,
                                  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
        struct datapath *dp;
        struct vport *vport;

        if (a[OVS_VPORT_ATTR_NAME]) {
                vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
                if (!vport)
                        return ERR_PTR(-ENODEV);
                if (ovs_header->dp_ifindex &&
                    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
                        return ERR_PTR(-ENODEV);
                return vport;
        } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
                u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

                if (port_no >= DP_MAX_PORTS)
                        return ERR_PTR(-EFBIG);

                dp = get_dp(net, ovs_header->dp_ifindex);
                if (!dp)
                        return ERR_PTR(-ENODEV);

                vport = ovs_vport_rtnl_rcu(dp, port_no);
                if (!vport)
                        return ERR_PTR(-ENODEV);
                return vport;
        } else
                return ERR_PTR(-EINVAL);
}

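/* OVS_VPORT_CMD_NEW handler.  Requires OVS_VPORT_ATTR_NAME,
 * OVS_VPORT_ATTR_TYPE, and OVS_VPORT_ATTR_UPCALL_PID.  If the request names
 * a port number, that exact slot must be free (-EBUSY otherwise); with no
 * port number, the lowest free slot starting from 1 is used, since port 0
 * (OVSP_LOCAL) is reserved for the datapath's internal port. */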
static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct vport_parms parms;
        struct sk_buff *reply;
        struct vport *vport;
        struct datapath *dp;
        u32 port_no;
        int err;

        err = -EINVAL;
        if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
            !a[OVS_VPORT_ATTR_UPCALL_PID])
                goto exit;

        err = ovs_vport_cmd_validate(a);
        if (err)
                goto exit;

        rtnl_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto exit_unlock;

        if (a[OVS_VPORT_ATTR_PORT_NO]) {
                port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

                err = -EFBIG;
                if (port_no >= DP_MAX_PORTS)
                        goto exit_unlock;

                vport = ovs_vport_rtnl(dp, port_no);
                err = -EBUSY;
                if (vport)
                        goto exit_unlock;
        } else {
                for (port_no = 1; ; port_no++) {
                        if (port_no >= DP_MAX_PORTS) {
                                err = -EFBIG;
                                goto exit_unlock;
                        }
                        vport = ovs_vport_rtnl(dp, port_no);
                        if (!vport)
                                break;
                }
        }

        parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
        parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
        parms.options = a[OVS_VPORT_ATTR_OPTIONS];
        parms.dp = dp;
        parms.port_no = port_no;
        parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

        vport = new_vport(&parms);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        err = 0;
        if (a[OVS_VPORT_ATTR_STATS])
                ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));

        reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
                                         OVS_VPORT_CMD_NEW);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                ovs_dp_detach_port(vport);
                goto exit_unlock;
        }
        genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
        rtnl_unlock();
exit:
        return err;
}

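/* OVS_VPORT_CMD_SET handler.  A vport's type cannot be changed (-EINVAL if
 * the request names a different one); options, stats, and the upcall port ID
 * can be updated in place.  As with the datapath set operation, a failure to
 * build the reply is reported to the multicast group via netlink_set_err(). */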
static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        err = ovs_vport_cmd_validate(a);
        if (err)
                goto exit;

        rtnl_lock();
        vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        err = 0;
        if (a[OVS_VPORT_ATTR_TYPE] &&
            nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
                err = -EINVAL;

        if (!err && a[OVS_VPORT_ATTR_OPTIONS])
                err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
        if (err)
                goto exit_unlock;

        if (a[OVS_VPORT_ATTR_STATS])
                ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));

        if (a[OVS_VPORT_ATTR_UPCALL_PID])
                vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

        reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
                                         info->snd_seq, OVS_VPORT_CMD_NEW);
        if (IS_ERR(reply)) {
                netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
                                ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
                goto exit_unlock;
        }

        genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
        rtnl_unlock();
exit:
        return err;
}

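/* OVS_VPORT_CMD_DEL handler.  OVSP_LOCAL is rejected with -EINVAL: the local
 * port only disappears when the whole datapath is destroyed. */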
static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        err = ovs_vport_cmd_validate(a);
        if (err)
                goto exit;

        rtnl_lock();
        vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        if (vport->port_no == OVSP_LOCAL) {
                err = -EINVAL;
                goto exit_unlock;
        }

        reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
                                         info->snd_seq, OVS_VPORT_CMD_DEL);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock;

        err = 0;
        ovs_dp_detach_port(vport);

        genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
        rtnl_unlock();
exit:
        return err;
}

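/* OVS_VPORT_CMD_GET handler.  Runs under the RCU read lock rather than RTNL,
 * which is enough here because the handler only reads vport state. */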
static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        err = ovs_vport_cmd_validate(a);
        if (err)
                goto exit;

        rcu_read_lock();
        vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
                                         info->snd_seq, OVS_VPORT_CMD_NEW);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock;

        rcu_read_unlock();

        return genlmsg_reply(reply, info);

exit_unlock:
        rcu_read_unlock();
exit:
        return err;
}

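/* Dump callback for OVS_VPORT_CMD_GET.  The resume cursor has two levels:
 * cb->args[0] is the hash bucket to continue from and cb->args[1] is the
 * number of vports already dumped within that bucket. */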
static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct datapath *dp;
        int bucket = cb->args[0], skip = cb->args[1];
        int i, j = 0;

        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        rcu_read_lock();
        for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
                struct vport *vport;

                j = 0;
                hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
                        if (j >= skip &&
                            ovs_vport_cmd_fill_info(vport, skb,
                                                    NETLINK_CB(cb->skb).portid,
                                                    cb->nlh->nlmsg_seq,
                                                    NLM_F_MULTI,
                                                    OVS_VPORT_CMD_NEW) < 0)
                                goto out;

                        j++;
                }
                skip = 0;
        }
out:
        rcu_read_unlock();

        cb->args[0] = i;
        cb->args[1] = j;

        return skb->len;
}

static struct genl_ops dp_vport_genl_ops[] = {
        { .cmd = OVS_VPORT_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_new
        },
        { .cmd = OVS_VPORT_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_del
        },
        { .cmd = OVS_VPORT_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_get,
          .dumpit = ovs_vport_cmd_dump
        },
        { .cmd = OVS_VPORT_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_set,
        },
};

struct genl_family_and_ops {
        struct genl_family *family;
        struct genl_ops *ops;
        int n_ops;
        struct genl_multicast_group *group;
};

static const struct genl_family_and_ops dp_genl_families[] = {
        { &dp_datapath_genl_family,
          dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
          &ovs_dp_datapath_multicast_group },
        { &dp_vport_genl_family,
          dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
          &ovs_dp_vport_multicast_group },
        { &dp_flow_genl_family,
          dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
          &ovs_dp_flow_multicast_group },
        { &dp_packet_genl_family,
          dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
          NULL },
};

static void dp_unregister_genl(int n_families)
{
        int i;

        for (i = 0; i < n_families; i++)
                genl_unregister_family(dp_genl_families[i].family);
}

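/* Registers each generic netlink family in dp_genl_families along with its
 * multicast group, if any.  On failure, only the families that were actually
 * registered are unregistered, by passing the running count to
 * dp_unregister_genl(). */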
static int dp_register_genl(void)
{
        int n_registered;
        int err;
        int i;

        n_registered = 0;
        for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
                const struct genl_family_and_ops *f = &dp_genl_families[i];

                err = genl_register_family_with_ops(f->family, f->ops,
                                                    f->n_ops);
                if (err)
                        goto error;
                n_registered++;

                if (f->group) {
                        err = genl_register_mc_group(f->family, f->group);
                        if (err)
                                goto error;
                }
        }

        return 0;

error:
        dp_unregister_genl(n_registered);
        return err;
}

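/* Moves every datapath's flows into a fresh flow table via
 * ovs_flow_tbl_rehash(), presumably to re-randomize the hash and keep
 * long-lived tables from degrading into unbalanced buckets.  It runs under
 * genl_mutex (via genl_exec() from rehash_flow_table() below), which makes
 * genl_dereference() of dp->table safe, and the old table is freed only
 * after an RCU grace period so concurrent readers never see a dangling
 * pointer. */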
static int __rehash_flow_table(void *dummy)
{
        struct datapath *dp;
        struct net *net;

        rtnl_lock();
        for_each_net(net) {
                struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

                list_for_each_entry(dp, &ovs_net->dps, list_node) {
                        struct flow_table *old_table = genl_dereference(dp->table);
                        struct flow_table *new_table;

                        new_table = ovs_flow_tbl_rehash(old_table);
                        if (!IS_ERR(new_table)) {
                                rcu_assign_pointer(dp->table, new_table);
                                ovs_flow_tbl_deferred_destroy(old_table);
                        }
                }
        }
        rtnl_unlock();
        return 0;
}

static void rehash_flow_table(struct work_struct *work)
{
        genl_exec(__rehash_flow_table, NULL);
        schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
}

static int dp_destroy_all(void *data)
{
        struct datapath *dp, *dp_next;
        struct ovs_net *ovs_net = data;

        list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
                __dp_destroy(dp);

        return 0;
}

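/* Per-network-namespace state: each namespace keeps its own list of
 * datapaths.  When a namespace exits, dp_destroy_all() runs under genl_mutex
 * (via genl_exec()) and tears down every datapath in that namespace. */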
static int __net_init ovs_init_net(struct net *net)
{
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

        INIT_LIST_HEAD(&ovs_net->dps);
        return 0;
}

static void __net_exit ovs_exit_net(struct net *net)
{
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

        genl_exec(dp_destroy_all, ovs_net);
}

static struct pernet_operations ovs_net_ops = {
        .init = ovs_init_net,
        .exit = ovs_exit_net,
        .id   = &ovs_net_id,
        .size = sizeof(struct ovs_net),
};

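/* Module init: subsystems come up in dependency order and the error path
 * tears them down in exact reverse order; dp_cleanup() below mirrors the
 * same sequence.  The module takes no parameters, so loading it is just,
 * e.g., "insmod ./openvswitch.ko" (module name illustrative). */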
static int __init dp_init(void)
{
        int err;

        BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));

        pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
                VERSION);

        err = genl_exec_init();
        if (err)
                goto error;

        err = ovs_workqueues_init();
        if (err)
                goto error_genl_exec;

        err = ovs_flow_init();
        if (err)
                goto error_wq;

        err = ovs_vport_init();
        if (err)
                goto error_flow_exit;

        err = register_pernet_device(&ovs_net_ops);
        if (err)
                goto error_vport_exit;

        err = register_netdevice_notifier(&ovs_dp_device_notifier);
        if (err)
                goto error_netns_exit;

        err = dp_register_genl();
        if (err < 0)
                goto error_unreg_notifier;

        schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);

        return 0;

error_unreg_notifier:
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
        unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
        ovs_vport_exit();
error_flow_exit:
        ovs_flow_exit();
error_wq:
        ovs_workqueues_exit();
error_genl_exec:
        genl_exec_exit();
error:
        return err;
}

static void dp_cleanup(void)
{
        cancel_delayed_work_sync(&rehash_flow_wq);
        dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
        unregister_pernet_device(&ovs_net_ops);
        rcu_barrier();
        ovs_vport_exit();
        ovs_flow_exit();
        ovs_workqueues_exit();
        genl_exec_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");
MODULE_VERSION(VERSION);