datapath: Use RCU lock for flow dump operation.
[sliver-openvswitch.git] / datapath / datapath.c
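As the title says, the flow dump path now runs under RCU rather than the datapath mutex: ovs_flow_cmd_dump() below takes rcu_read_lock() and fetches the table with rcu_dereference(dp->table), so dumping flows does not hold ovs_lock().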
1 /*
2  * Copyright (c) 2007-2013 Nicira, Inc.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_vlan.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/jhash.h>
28 #include <linux/delay.h>
29 #include <linux/time.h>
30 #include <linux/etherdevice.h>
31 #include <linux/genetlink.h>
32 #include <linux/kernel.h>
33 #include <linux/kthread.h>
34 #include <linux/mutex.h>
35 #include <linux/percpu.h>
36 #include <linux/rcupdate.h>
37 #include <linux/tcp.h>
38 #include <linux/udp.h>
39 #include <linux/version.h>
40 #include <linux/ethtool.h>
41 #include <linux/wait.h>
42 #include <asm/div64.h>
43 #include <linux/highmem.h>
44 #include <linux/netfilter_bridge.h>
45 #include <linux/netfilter_ipv4.h>
46 #include <linux/inetdevice.h>
47 #include <linux/list.h>
48 #include <linux/openvswitch.h>
49 #include <linux/rculist.h>
50 #include <linux/dmi.h>
51 #include <linux/genetlink.h>
52 #include <net/genetlink.h>
53 #include <net/genetlink.h>
54 #include <net/net_namespace.h>
55 #include <net/netns/generic.h>
56
57 #include "checksum.h"
58 #include "datapath.h"
59 #include "flow.h"
60 #include "vlan.h"
61 #include "tunnel.h"
62 #include "vport-internal_dev.h"
63 #include "vport-netdev.h"
64
65 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
66     LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
67 #error Kernels before 2.6.18 or after 3.8 are not supported by this version of Open vSwitch.
68 #endif
69
70 #define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
71 static void rehash_flow_table(struct work_struct *work);
72 static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
73
74 int ovs_net_id __read_mostly;
75
76 static void ovs_notify(struct sk_buff *skb, struct genl_info *info,
77                        struct genl_multicast_group *grp)
78 {
79         genl_notify(skb, genl_info_net(info), info->snd_portid,
80                     grp->id, info->nlhdr, GFP_KERNEL);
81 }
82
83 /**
84  * DOC: Locking:
85  *
86  * All writes to device state (add/remove datapath, port, set operations
87  * on vports, etc.) and writes to other state (flow table modifications,
88  * setting miscellaneous datapath parameters, etc.) are protected by
89  * ovs_lock.
90  *
91  * Reads are protected by RCU.
92  *
93  * There are a few special cases (mostly stats) that have their own
94  * synchronization but they nest under all of the above and don't interact
95  * with each other.
96  *
97  * The RTNL lock nests inside ovs_mutex.
98  */
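/*
 * Illustrative sketch of the scheme above: a writer wraps its update in
 * ovs_lock()/ovs_unlock(), while readers rely on RCU:
 *
 *	ovs_lock();
 *	... modify datapath, vport or flow table state ...
 *	ovs_unlock();
 *
 *	rcu_read_lock();
 *	table = rcu_dereference(dp->table);
 *	... read-only use of 'table' ...
 *	rcu_read_unlock();
 *
 * This is the pattern the flow dump and packet receive paths below follow.
 */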
99
100 static DEFINE_MUTEX(ovs_mutex);
101
102 void ovs_lock(void)
103 {
104         mutex_lock(&ovs_mutex);
105 }
106
107 void ovs_unlock(void)
108 {
109         mutex_unlock(&ovs_mutex);
110 }
111
112 #ifdef CONFIG_LOCKDEP
113 int lockdep_ovsl_is_held(void)
114 {
115         if (debug_locks)
116                 return lockdep_is_held(&ovs_mutex);
117         else
118                 return 1;
119 }
120 #endif
121
122 static struct vport *new_vport(const struct vport_parms *);
123 static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
124                              const struct dp_upcall_info *);
125 static int queue_userspace_packet(struct net *, int dp_ifindex,
126                                   struct sk_buff *,
127                                   const struct dp_upcall_info *);
128
129 /* Must be called with rcu_read_lock or ovs_mutex. */
130 static struct datapath *get_dp(struct net *net, int dp_ifindex)
131 {
132         struct datapath *dp = NULL;
133         struct net_device *dev;
134
135         rcu_read_lock();
136         dev = dev_get_by_index_rcu(net, dp_ifindex);
137         if (dev) {
138                 struct vport *vport = ovs_internal_dev_get_vport(dev);
139                 if (vport)
140                         dp = vport->dp;
141         }
142         rcu_read_unlock();
143
144         return dp;
145 }
146
147 /* Must be called with rcu_read_lock or ovs_mutex. */
148 const char *ovs_dp_name(const struct datapath *dp)
149 {
150         struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
151         return vport->ops->get_name(vport);
152 }
153
154 static int get_dpifindex(struct datapath *dp)
155 {
156         struct vport *local;
157         int ifindex;
158
159         rcu_read_lock();
160
161         local = ovs_vport_rcu(dp, OVSP_LOCAL);
162         if (local)
163                 ifindex = netdev_vport_priv(local)->dev->ifindex;
164         else
165                 ifindex = 0;
166
167         rcu_read_unlock();
168
169         return ifindex;
170 }
171
172 static void destroy_dp_rcu(struct rcu_head *rcu)
173 {
174         struct datapath *dp = container_of(rcu, struct datapath, rcu);
175
176         ovs_flow_tbl_destroy((__force struct flow_table *)dp->table, false);
177         free_percpu(dp->stats_percpu);
178         release_net(ovs_dp_get_net(dp));
179         kfree(dp->ports);
180         kfree(dp);
181 }
182
183 static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
184                                             u16 port_no)
185 {
186         return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
187 }
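/* The mask above relies on DP_VPORT_HASH_BUCKETS (defined in datapath.h)
 * being a power of two, so that 'port_no & (DP_VPORT_HASH_BUCKETS - 1)'
 * is equivalent to 'port_no % DP_VPORT_HASH_BUCKETS'.
 */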
188
189 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
190 {
191         struct vport *vport;
192         struct hlist_head *head;
193
194         head = vport_hash_bucket(dp, port_no);
195         hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
196                 if (vport->port_no == port_no)
197                         return vport;
198         }
199         return NULL;
200 }
201
202 /* Called with ovs_mutex. */
203 static struct vport *new_vport(const struct vport_parms *parms)
204 {
205         struct vport *vport;
206
207         vport = ovs_vport_add(parms);
208         if (!IS_ERR(vport)) {
209                 struct datapath *dp = parms->dp;
210                 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
211
212                 hlist_add_head_rcu(&vport->dp_hash_node, head);
213         }
214         return vport;
215 }
216
217 void ovs_dp_detach_port(struct vport *p)
218 {
219         ASSERT_OVSL();
220
221         /* First drop references to device. */
222         hlist_del_rcu(&p->dp_hash_node);
223
224         /* Then destroy it. */
225         ovs_vport_del(p);
226 }
227
228 /* Must be called with rcu_read_lock. */
229 void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
230 {
231         struct datapath *dp = p->dp;
232         struct sw_flow *flow;
233         struct dp_stats_percpu *stats;
234         struct sw_flow_key key;
235         u64 *stats_counter;
236         int error;
237
238         stats = this_cpu_ptr(dp->stats_percpu);
239
240         /* Extract flow from 'skb' into 'key'. */
241         error = ovs_flow_extract(skb, p->port_no, &key);
242         if (unlikely(error)) {
243                 kfree_skb(skb);
244                 return;
245         }
246
247         /* Look up flow. */
248         flow = ovs_flow_lookup(rcu_dereference(dp->table), &key);
249         if (unlikely(!flow)) {
250                 struct dp_upcall_info upcall;
251
252                 upcall.cmd = OVS_PACKET_CMD_MISS;
253                 upcall.key = &key;
254                 upcall.userdata = NULL;
255                 upcall.portid = p->upcall_portid;
256                 ovs_dp_upcall(dp, skb, &upcall);
257                 consume_skb(skb);
258                 stats_counter = &stats->n_missed;
259                 goto out;
260         }
261
262         OVS_CB(skb)->flow = flow;
263         OVS_CB(skb)->pkt_key = &key;
264
265         stats_counter = &stats->n_hit;
266         ovs_flow_used(OVS_CB(skb)->flow, skb);
267         ovs_execute_actions(dp, skb);
268
269 out:
270         /* Update datapath statistics. */
271         u64_stats_update_begin(&stats->sync);
272         (*stats_counter)++;
273         u64_stats_update_end(&stats->sync);
274 }
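/* Summary of the receive fast path above: the flow key is extracted from
 * the packet, then looked up in the RCU-protected flow table.  A miss
 * results in an OVS_PACKET_CMD_MISS upcall to userspace and bumps
 * n_missed; a hit executes the flow's actions and bumps n_hit.  The
 * counters are updated inside u64_stats_update_begin()/end() so that
 * 32-bit readers can obtain consistent 64-bit values.
 */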
275
276 static struct genl_family dp_packet_genl_family = {
277         .id = GENL_ID_GENERATE,
278         .hdrsize = sizeof(struct ovs_header),
279         .name = OVS_PACKET_FAMILY,
280         .version = OVS_PACKET_VERSION,
281         .maxattr = OVS_PACKET_ATTR_MAX,
282          SET_NETNSOK
283 };
284
285 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
286                   const struct dp_upcall_info *upcall_info)
287 {
288         struct dp_stats_percpu *stats;
289         int dp_ifindex;
290         int err;
291
292         if (upcall_info->portid == 0) {
293                 err = -ENOTCONN;
294                 goto err;
295         }
296
297         dp_ifindex = get_dpifindex(dp);
298         if (!dp_ifindex) {
299                 err = -ENODEV;
300                 goto err;
301         }
302
303         forward_ip_summed(skb, true);
304
305         if (!skb_is_gso(skb))
306                 err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
307         else
308                 err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
309         if (err)
310                 goto err;
311
312         return 0;
313
314 err:
315         stats = this_cpu_ptr(dp->stats_percpu);
316
317         u64_stats_update_begin(&stats->sync);
318         stats->n_lost++;
319         u64_stats_update_end(&stats->sync);
320
321         return err;
322 }
323
324 static int queue_gso_packets(struct net *net, int dp_ifindex,
325                              struct sk_buff *skb,
326                              const struct dp_upcall_info *upcall_info)
327 {
328         unsigned short gso_type = skb_shinfo(skb)->gso_type;
329         struct dp_upcall_info later_info;
330         struct sw_flow_key later_key;
331         struct sk_buff *segs, *nskb;
332         int err;
333
334         segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
335         if (IS_ERR(segs))
336                 return PTR_ERR(segs);
337
338         /* Queue all of the segments. */
339         skb = segs;
340         do {
341                 err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
342                 if (err)
343                         break;
344
345                 if (skb == segs && gso_type & SKB_GSO_UDP) {
346                         /* The initial flow key extracted by ovs_flow_extract()
347                          * in this case is for the first fragment, so we need to
348                          * properly mark later fragments.
349                          */
350                         later_key = *upcall_info->key;
351                         later_key.ip.frag = OVS_FRAG_TYPE_LATER;
352
353                         later_info = *upcall_info;
354                         later_info.key = &later_key;
355                         upcall_info = &later_info;
356                 }
357         } while ((skb = skb->next));
358
359         /* Free all of the segments. */
360         skb = segs;
361         do {
362                 nskb = skb->next;
363                 if (err)
364                         kfree_skb(skb);
365                 else
366                         consume_skb(skb);
367         } while ((skb = nskb));
368         return err;
369 }
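/* queue_gso_packets() runs two passes over the segment list: the first
 * queues each segment to userspace (switching to a key marked
 * OVS_FRAG_TYPE_LATER after the first UDP fragment), the second frees
 * every segment, using kfree_skb() if an error occurred and consume_skb()
 * otherwise.
 */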
370
371 static size_t key_attr_size(void)
372 {
373         return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
374                 + nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
375                   + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
376                   + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
377                   + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
378                   + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
379                   + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
380                   + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
381                   + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
382                 + nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
383                 + nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
384                 + nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
385                 + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
386                 + nla_total_size(4)   /* OVS_KEY_ATTR_8021Q */
387                 + nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
388                 + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
389                 + nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
390                 + nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
391                 + nla_total_size(28); /* OVS_KEY_ATTR_ND */
392 }
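/* key_attr_size() is an upper bound on the netlink encoding of a flow key:
 * the doubly indented entries are the attributes nested inside
 * OVS_KEY_ATTR_TUNNEL, and each nla_total_size() already accounts for the
 * attribute header and padding.
 */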
393
394 static size_t upcall_msg_size(const struct sk_buff *skb,
395                               const struct nlattr *userdata)
396 {
397         size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
398                 + nla_total_size(skb->len) /* OVS_PACKET_ATTR_PACKET */
399                 + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */
400
401         /* OVS_PACKET_ATTR_USERDATA */
402         if (userdata)
403                 size += NLA_ALIGN(userdata->nla_len);
404
405         return size;
406 }
407
408 static int queue_userspace_packet(struct net *net, int dp_ifindex,
409                                   struct sk_buff *skb,
410                                   const struct dp_upcall_info *upcall_info)
411 {
412         struct ovs_header *upcall;
413         struct sk_buff *nskb = NULL;
414         struct sk_buff *user_skb; /* to be queued to userspace */
415         struct nlattr *nla;
416         int err;
417
418         if (vlan_tx_tag_present(skb)) {
419                 nskb = skb_clone(skb, GFP_ATOMIC);
420                 if (!nskb)
421                         return -ENOMEM;
422
423                 err = vlan_deaccel_tag(nskb);
424                 if (err)
425                         return err;
426
427                 skb = nskb;
428         }
429
430         if (nla_attr_size(skb->len) > USHRT_MAX) {
431                 err = -EFBIG;
432                 goto out;
433         }
434
435         user_skb = genlmsg_new(upcall_msg_size(skb, upcall_info->userdata), GFP_ATOMIC);
436         if (!user_skb) {
437                 err = -ENOMEM;
438                 goto out;
439         }
440
441         upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
442                              0, upcall_info->cmd);
443         upcall->dp_ifindex = dp_ifindex;
444
445         nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
446         ovs_flow_to_nlattrs(upcall_info->key, upcall_info->key, user_skb);
447         nla_nest_end(user_skb, nla);
448
449         if (upcall_info->userdata)
450                 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
451                           nla_len(upcall_info->userdata),
452                           nla_data(upcall_info->userdata));
453
454         nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
455
456         skb_copy_and_csum_dev(skb, nla_data(nla));
457
458         genlmsg_end(user_skb, upcall);
459         err = genlmsg_unicast(net, user_skb, upcall_info->portid);
460
461 out:
462         kfree_skb(nskb);
463         return err;
464 }
465
466 /* Called with ovs_mutex. */
467 static int flush_flows(struct datapath *dp)
468 {
469         struct flow_table *old_table;
470         struct flow_table *new_table;
471
472         old_table = ovsl_dereference(dp->table);
473         new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
474         if (!new_table)
475                 return -ENOMEM;
476
477         rcu_assign_pointer(dp->table, new_table);
478
479         ovs_flow_tbl_destroy(old_table, true);
480         return 0;
481 }
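/* flush_flows() swaps in an empty table with rcu_assign_pointer() and then
 * destroys the old one; the 'true' passed to ovs_flow_tbl_destroy() appears
 * to request deferred (RCU) destruction, matching destroy_dp_rcu() above,
 * which passes 'false' because it already runs as an RCU callback.
 */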
482
483 static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, int attr_len)
484 {
485
486         struct sw_flow_actions *acts;
487         int new_acts_size;
488         int req_size = NLA_ALIGN(attr_len);
489         int next_offset = offsetof(struct sw_flow_actions, actions) +
490                                         (*sfa)->actions_len;
491
492         if (req_size <= (ksize(*sfa) - next_offset))
493                 goto out;
494
495         new_acts_size = ksize(*sfa) * 2;
496
497         if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
498                 if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
499                         return ERR_PTR(-EMSGSIZE);
500                 new_acts_size = MAX_ACTIONS_BUFSIZE;
501         }
502
503         acts = ovs_flow_actions_alloc(new_acts_size);
504         if (IS_ERR(acts))
505                 return (void *)acts;
506
507         memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
508         acts->actions_len = (*sfa)->actions_len;
509         kfree(*sfa);
510         *sfa = acts;
511
512 out:
513         (*sfa)->actions_len += req_size;
514         return  (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
515 }
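/* reserve_sfa_size() grows the actions buffer by doubling its ksize()
 * (capped at MAX_ACTIONS_BUFSIZE) only when the currently allocated slack
 * cannot hold the NLA_ALIGN()ed request; otherwise it simply hands back a
 * pointer just past the existing actions and bumps actions_len.
 */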
516
517 static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len)
518 {
519         struct nlattr *a;
520
521         a = reserve_sfa_size(sfa, nla_attr_size(len));
522         if (IS_ERR(a))
523                 return PTR_ERR(a);
524
525         a->nla_type = attrtype;
526         a->nla_len = nla_attr_size(len);
527
528         if (data)
529                 memcpy(nla_data(a), data, len);
530         memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
531
532         return 0;
533 }
534
535 static inline int add_nested_action_start(struct sw_flow_actions **sfa, int attrtype)
536 {
537         int used = (*sfa)->actions_len;
538         int err;
539
540         err = add_action(sfa, attrtype, NULL, 0);
541         if (err)
542                 return err;
543
544         return used;
545 }
546
547 static inline void add_nested_action_end(struct sw_flow_actions *sfa, int st_offset)
548 {
549         struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions + st_offset);
550
551         a->nla_len = sfa->actions_len - st_offset;
552 }
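/* add_nested_action_start()/add_nested_action_end() bracket a nested
 * attribute: the start helper emits an empty attribute header and returns
 * its offset, and the end helper rewrites that header's nla_len to cover
 * everything appended to the buffer in between.
 */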
553
554 static int validate_and_copy_actions(const struct nlattr *attr,
555                                 const struct sw_flow_key *key, int depth,
556                                 struct sw_flow_actions **sfa);
557
558 static int validate_and_copy_sample(const struct nlattr *attr,
559                            const struct sw_flow_key *key, int depth,
560                            struct sw_flow_actions **sfa)
561 {
562         const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
563         const struct nlattr *probability, *actions;
564         const struct nlattr *a;
565         int rem, start, err, st_acts;
566
567         memset(attrs, 0, sizeof(attrs));
568         nla_for_each_nested(a, attr, rem) {
569                 int type = nla_type(a);
570                 if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
571                         return -EINVAL;
572                 attrs[type] = a;
573         }
574         if (rem)
575                 return -EINVAL;
576
577         probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
578         if (!probability || nla_len(probability) != sizeof(u32))
579                 return -EINVAL;
580
581         actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
582         if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
583                 return -EINVAL;
584
585         /* validation done, copy sample action. */
586         start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
587         if (start < 0)
588                 return start;
589         err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, nla_data(probability), sizeof(u32));
590         if (err)
591                 return err;
592         st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
593         if (st_acts < 0)
594                 return st_acts;
595
596         err = validate_and_copy_actions(actions, key, depth + 1, sfa);
597         if (err)
598                 return err;
599
600         add_nested_action_end(*sfa, st_acts);
601         add_nested_action_end(*sfa, start);
602
603         return 0;
604 }
605
606 static int validate_tp_port(const struct sw_flow_key *flow_key)
607 {
608         if (flow_key->eth.type == htons(ETH_P_IP)) {
609                 if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
610                         return 0;
611         } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
612                 if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
613                         return 0;
614         }
615
616         return -EINVAL;
617 }
618
619 static int validate_and_copy_set_tun(const struct nlattr *attr,
620                                      struct sw_flow_actions **sfa)
621 {
622         struct sw_flow_match match;
623         struct sw_flow_key key;
624         int err, start;
625
626         ovs_match_init(&match, &key, NULL);
627         err = ipv4_tun_from_nlattr(nla_data(attr), &match, false);
628         if (err)
629                 return err;
630
631         start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET);
632         if (start < 0)
633                 return start;
634
635         err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key,
636                         sizeof(match.key->tun_key));
637         add_nested_action_end(*sfa, start);
638
639         return err;
640 }
641
642 static int validate_set(const struct nlattr *a,
643                         const struct sw_flow_key *flow_key,
644                         struct sw_flow_actions **sfa,
645                         bool *set_tun)
646 {
647         const struct nlattr *ovs_key = nla_data(a);
648         int key_type = nla_type(ovs_key);
649
650         /* There can be only one key in an action. */
651         if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
652                 return -EINVAL;
653
654         if (key_type > OVS_KEY_ATTR_MAX ||
655             (ovs_key_lens[key_type] != nla_len(ovs_key) &&
656              ovs_key_lens[key_type] != -1))
657                 return -EINVAL;
658
659         switch (key_type) {
660         const struct ovs_key_ipv4 *ipv4_key;
661         const struct ovs_key_ipv6 *ipv6_key;
662         int err;
663
664         case OVS_KEY_ATTR_PRIORITY:
665         case OVS_KEY_ATTR_ETHERNET:
666                 break;
667
668         case OVS_KEY_ATTR_SKB_MARK:
669 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && !defined(CONFIG_NETFILTER)
670                 if (nla_get_u32(ovs_key) != 0)
671                         return -EINVAL;
672 #endif
673                 break;
674
675         case OVS_KEY_ATTR_TUNNEL:
676                 *set_tun = true;
677                 err = validate_and_copy_set_tun(a, sfa);
678                 if (err)
679                         return err;
680                 break;
681
682         case OVS_KEY_ATTR_IPV4:
683                 if (flow_key->eth.type != htons(ETH_P_IP))
684                         return -EINVAL;
685
686                 if (!flow_key->ip.proto)
687                         return -EINVAL;
688
689                 ipv4_key = nla_data(ovs_key);
690                 if (ipv4_key->ipv4_proto != flow_key->ip.proto)
691                         return -EINVAL;
692
693                 if (ipv4_key->ipv4_frag != flow_key->ip.frag)
694                         return -EINVAL;
695
696                 break;
697
698         case OVS_KEY_ATTR_IPV6:
699                 if (flow_key->eth.type != htons(ETH_P_IPV6))
700                         return -EINVAL;
701
702                 if (!flow_key->ip.proto)
703                         return -EINVAL;
704
705                 ipv6_key = nla_data(ovs_key);
706                 if (ipv6_key->ipv6_proto != flow_key->ip.proto)
707                         return -EINVAL;
708
709                 if (ipv6_key->ipv6_frag != flow_key->ip.frag)
710                         return -EINVAL;
711
712                 if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
713                         return -EINVAL;
714
715                 break;
716
717         case OVS_KEY_ATTR_TCP:
718                 if (flow_key->ip.proto != IPPROTO_TCP)
719                         return -EINVAL;
720
721                 return validate_tp_port(flow_key);
722
723         case OVS_KEY_ATTR_UDP:
724                 if (flow_key->ip.proto != IPPROTO_UDP)
725                         return -EINVAL;
726
727                 return validate_tp_port(flow_key);
728
729         default:
730                 return -EINVAL;
731         }
732
733         return 0;
734 }
735
736 static int validate_userspace(const struct nlattr *attr)
737 {
738         static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
739                 [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
740                 [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
741         };
742         struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
743         int error;
744
745         error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
746                                  attr, userspace_policy);
747         if (error)
748                 return error;
749
750         if (!a[OVS_USERSPACE_ATTR_PID] ||
751             !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
752                 return -EINVAL;
753
754         return 0;
755 }
756
757 static int copy_action(const struct nlattr *from,
758                       struct sw_flow_actions **sfa)
759 {
760         int totlen = NLA_ALIGN(from->nla_len);
761         struct nlattr *to;
762
763         to = reserve_sfa_size(sfa, from->nla_len);
764         if (IS_ERR(to))
765                 return PTR_ERR(to);
766
767         memcpy(to, from, totlen);
768         return 0;
769 }
770
771 static int validate_and_copy_actions(const struct nlattr *attr,
772                                 const struct sw_flow_key *key,
773                                 int depth,
774                                 struct sw_flow_actions **sfa)
775 {
776         const struct nlattr *a;
777         int rem, err;
778
779         if (depth >= SAMPLE_ACTION_DEPTH)
780                 return -EOVERFLOW;
781
782         nla_for_each_nested(a, attr, rem) {
783                 /* Expected argument lengths, (u32)-1 for variable length. */
784                 static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
785                         [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
786                         [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
787                         [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
788                         [OVS_ACTION_ATTR_POP_VLAN] = 0,
789                         [OVS_ACTION_ATTR_SET] = (u32)-1,
790                         [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
791                 };
792                 const struct ovs_action_push_vlan *vlan;
793                 int type = nla_type(a);
794                 bool skip_copy;
795
796                 if (type > OVS_ACTION_ATTR_MAX ||
797                     (action_lens[type] != nla_len(a) &&
798                      action_lens[type] != (u32)-1))
799                         return -EINVAL;
800
801                 skip_copy = false;
802                 switch (type) {
803                 case OVS_ACTION_ATTR_UNSPEC:
804                         return -EINVAL;
805
806                 case OVS_ACTION_ATTR_USERSPACE:
807                         err = validate_userspace(a);
808                         if (err)
809                                 return err;
810                         break;
811
812                 case OVS_ACTION_ATTR_OUTPUT:
813                         if (nla_get_u32(a) >= DP_MAX_PORTS)
814                                 return -EINVAL;
815                         break;
816
817
818                 case OVS_ACTION_ATTR_POP_VLAN:
819                         break;
820
821                 case OVS_ACTION_ATTR_PUSH_VLAN:
822                         vlan = nla_data(a);
823                         if (vlan->vlan_tpid != htons(ETH_P_8021Q))
824                                 return -EINVAL;
825                         if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
826                                 return -EINVAL;
827                         break;
828
829                 case OVS_ACTION_ATTR_SET:
830                         err = validate_set(a, key, sfa, &skip_copy);
831                         if (err)
832                                 return err;
833                         break;
834
835                 case OVS_ACTION_ATTR_SAMPLE:
836                         err = validate_and_copy_sample(a, key, depth, sfa);
837                         if (err)
838                                 return err;
839                         skip_copy = true;
840                         break;
841
842                 default:
843                         return -EINVAL;
844                 }
845                 if (!skip_copy) {
846                         err = copy_action(a, sfa);
847                         if (err)
848                                 return err;
849                 }
850         }
851
852         if (rem > 0)
853                 return -EINVAL;
854
855         return 0;
856 }
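/* In the loop above, 'skip_copy' is set when a validator has already
 * emitted its own copy of the action: sample actions are rebuilt by
 * validate_and_copy_sample(), and tunnel-set actions by
 * validate_and_copy_set_tun(); every other action is copied verbatim with
 * copy_action().
 */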
857
858 static void clear_stats(struct sw_flow *flow)
859 {
860         flow->used = 0;
861         flow->tcp_flags = 0;
862         flow->packet_count = 0;
863         flow->byte_count = 0;
864 }
865
866 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
867 {
868         struct ovs_header *ovs_header = info->userhdr;
869         struct nlattr **a = info->attrs;
870         struct sw_flow_actions *acts;
871         struct sk_buff *packet;
872         struct sw_flow *flow;
873         struct datapath *dp;
874         struct ethhdr *eth;
875         int len;
876         int err;
877
878         err = -EINVAL;
879         if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
880             !a[OVS_PACKET_ATTR_ACTIONS])
881                 goto err;
882
883         len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
884         packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
885         err = -ENOMEM;
886         if (!packet)
887                 goto err;
888         skb_reserve(packet, NET_IP_ALIGN);
889
890         nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
891
892         skb_reset_mac_header(packet);
893         eth = eth_hdr(packet);
894
895         /* Normally, setting the skb 'protocol' field would be handled by a
896          * call to eth_type_trans(), but it assumes there's a sending
897          * device, which we may not have. */
898         if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
899                 packet->protocol = eth->h_proto;
900         else
901                 packet->protocol = htons(ETH_P_802_2);
902
903         /* Build an sw_flow for sending this packet. */
904         flow = ovs_flow_alloc();
905         err = PTR_ERR(flow);
906         if (IS_ERR(flow))
907                 goto err_kfree_skb;
908
909         err = ovs_flow_extract(packet, -1, &flow->key);
910         if (err)
911                 goto err_flow_free;
912
913         err = ovs_flow_metadata_from_nlattrs(flow, a[OVS_PACKET_ATTR_KEY]);
914         if (err)
915                 goto err_flow_free;
916         acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
917         err = PTR_ERR(acts);
918         if (IS_ERR(acts))
919                 goto err_flow_free;
920
921         err = validate_and_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0, &acts);
922         rcu_assign_pointer(flow->sf_acts, acts);
923         if (err)
924                 goto err_flow_free;
925
926         OVS_CB(packet)->flow = flow;
927         OVS_CB(packet)->pkt_key = &flow->key;
928         packet->priority = flow->key.phy.priority;
929         skb_set_mark(packet, flow->key.phy.skb_mark);
930
931         rcu_read_lock();
932         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
933         err = -ENODEV;
934         if (!dp)
935                 goto err_unlock;
936
937         local_bh_disable();
938         err = ovs_execute_actions(dp, packet);
939         local_bh_enable();
940         rcu_read_unlock();
941
942         ovs_flow_free(flow, false);
943         return err;
944
945 err_unlock:
946         rcu_read_unlock();
947 err_flow_free:
948         ovs_flow_free(flow, false);
949 err_kfree_skb:
950         kfree_skb(packet);
951 err:
952         return err;
953 }
954
955 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
956 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
957         [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
958 #else
959         [OVS_PACKET_ATTR_PACKET] = { .minlen = ETH_HLEN },
960 #endif
961         [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
962         [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
963 };
964
965 static struct genl_ops dp_packet_genl_ops[] = {
966         { .cmd = OVS_PACKET_CMD_EXECUTE,
967           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
968           .policy = packet_policy,
969           .doit = ovs_packet_cmd_execute
970         }
971 };
972
973 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
974 {
975         int i;
976         struct flow_table *table = ovsl_dereference(dp->table);
977
978         stats->n_flows = ovs_flow_tbl_count(table);
979
980         stats->n_hit = stats->n_missed = stats->n_lost = 0;
981         for_each_possible_cpu(i) {
982                 const struct dp_stats_percpu *percpu_stats;
983                 struct dp_stats_percpu local_stats;
984                 unsigned int start;
985
986                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
987
988                 do {
989                         start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
990                         local_stats = *percpu_stats;
991                 } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
992
993                 stats->n_hit += local_stats.n_hit;
994                 stats->n_missed += local_stats.n_missed;
995                 stats->n_lost += local_stats.n_lost;
996         }
997 }
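/* The u64_stats_fetch_begin_bh()/retry loop above re-reads each CPU's
 * counters until it gets a snapshot that was not concurrently updated,
 * which is what makes the 64-bit totals reliable on 32-bit systems.
 */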
998
999 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1000         [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
1001         [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1002         [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
1003 };
1004
1005 static struct genl_family dp_flow_genl_family = {
1006         .id = GENL_ID_GENERATE,
1007         .hdrsize = sizeof(struct ovs_header),
1008         .name = OVS_FLOW_FAMILY,
1009         .version = OVS_FLOW_VERSION,
1010         .maxattr = OVS_FLOW_ATTR_MAX,
1011          SET_NETNSOK
1012 };
1013
1014 static struct genl_multicast_group ovs_dp_flow_multicast_group = {
1015         .name = OVS_FLOW_MCGROUP
1016 };
1017
1018 static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb);
1019 static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
1020 {
1021         const struct nlattr *a;
1022         struct nlattr *start;
1023         int err = 0, rem;
1024
1025         start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
1026         if (!start)
1027                 return -EMSGSIZE;
1028
1029         nla_for_each_nested(a, attr, rem) {
1030                 int type = nla_type(a);
1031                 struct nlattr *st_sample;
1032
1033                 switch (type) {
1034                 case OVS_SAMPLE_ATTR_PROBABILITY:
1035                         if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY, sizeof(u32), nla_data(a)))
1036                                 return -EMSGSIZE;
1037                         break;
1038                 case OVS_SAMPLE_ATTR_ACTIONS:
1039                         st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
1040                         if (!st_sample)
1041                                 return -EMSGSIZE;
1042                         err = actions_to_attr(nla_data(a), nla_len(a), skb);
1043                         if (err)
1044                                 return err;
1045                         nla_nest_end(skb, st_sample);
1046                         break;
1047                 }
1048         }
1049
1050         nla_nest_end(skb, start);
1051         return err;
1052 }
1053
1054 static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
1055 {
1056         const struct nlattr *ovs_key = nla_data(a);
1057         int key_type = nla_type(ovs_key);
1058         struct nlattr *start;
1059         int err;
1060
1061         switch (key_type) {
1062         case OVS_KEY_ATTR_IPV4_TUNNEL:
1063                 start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
1064                 if (!start)
1065                         return -EMSGSIZE;
1066
1067                 err = ipv4_tun_to_nlattr(skb,
1068                                 nla_data(ovs_key), nla_data(ovs_key));
1069                 if (err)
1070                         return err;
1071                 nla_nest_end(skb, start);
1072                 break;
1073         default:
1074                 if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
1075                         return -EMSGSIZE;
1076                 break;
1077         }
1078
1079         return 0;
1080 }
1081
1082 static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb)
1083 {
1084         const struct nlattr *a;
1085         int rem, err;
1086
1087         nla_for_each_attr(a, attr, len, rem) {
1088                 int type = nla_type(a);
1089
1090                 switch (type) {
1091                 case OVS_ACTION_ATTR_SET:
1092                         err = set_action_to_attr(a, skb);
1093                         if (err)
1094                                 return err;
1095                         break;
1096
1097                 case OVS_ACTION_ATTR_SAMPLE:
1098                         err = sample_action_to_attr(a, skb);
1099                         if (err)
1100                                 return err;
1101                         break;
1102                 default:
1103                         if (nla_put(skb, type, nla_len(a), nla_data(a)))
1104                                 return -EMSGSIZE;
1105                         break;
1106                 }
1107         }
1108
1109         return 0;
1110 }
1111
1112 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
1113 {
1114         return NLMSG_ALIGN(sizeof(struct ovs_header))
1115                 + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
1116                 + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
1117                 + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
1118                 + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
1119                 + nla_total_size(8) /* OVS_FLOW_ATTR_USED */
1120                 + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
1121 }
1122
1123 /* Called with ovs_mutex. */
1124 static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
1125                                   struct sk_buff *skb, u32 portid,
1126                                   u32 seq, u32 flags, u8 cmd)
1127 {
1128         const int skb_orig_len = skb->len;
1129         struct sw_flow_mask *mask;
1130         struct nlattr *start;
1131         struct ovs_flow_stats stats;
1132         struct ovs_header *ovs_header;
1133         struct nlattr *nla;
1134         unsigned long used;
1135         u8 tcp_flags;
1136         int err;
1137
1138         ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
1139         if (!ovs_header)
1140                 return -EMSGSIZE;
1141
1142         ovs_header->dp_ifindex = get_dpifindex(dp);
1143
1144         /* Fill flow key. */
1145         nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
1146         if (!nla)
1147                 goto nla_put_failure;
1148
1149         err = ovs_flow_to_nlattrs(&flow->unmasked_key,
1150                         &flow->unmasked_key, skb);
1151         if (err)
1152                 goto error;
1153         nla_nest_end(skb, nla);
1154
1155         nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
1156         if (!nla)
1157                 goto nla_put_failure;
1158
1159         mask = rcu_dereference_check(flow->mask, lockdep_ovsl_is_held());
1160         err = ovs_flow_to_nlattrs(&flow->key, &mask->key, skb);
1161         if (err)
1162                 goto error;
1163
1164         nla_nest_end(skb, nla);
1165
1166         spin_lock_bh(&flow->lock);
1167         used = flow->used;
1168         stats.n_packets = flow->packet_count;
1169         stats.n_bytes = flow->byte_count;
1170         tcp_flags = flow->tcp_flags;
1171         spin_unlock_bh(&flow->lock);
1172
1173         if (used &&
1174             nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
1175                 goto nla_put_failure;
1176
1177         if (stats.n_packets &&
1178             nla_put(skb, OVS_FLOW_ATTR_STATS,
1179                     sizeof(struct ovs_flow_stats), &stats))
1180                 goto nla_put_failure;
1181
1182         if (tcp_flags &&
1183             nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
1184                 goto nla_put_failure;
1185
1186         /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
1187          * this is the first flow to be dumped into 'skb'.  This is unusual for
1188          * Netlink but individual action lists can be longer than
1189          * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
1190          * The userspace caller can always fetch the actions separately if it
1191          * really wants them.  (Most userspace callers in fact don't care.)
1192          *
1193          * This can only fail for dump operations because the skb is always
1194          * properly sized for single flows.
1195          */
1196         start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
1197         if (start) {
1198                 const struct sw_flow_actions *sf_acts;
1199
1200                 sf_acts = rcu_dereference_check(flow->sf_acts,
1201                                                 lockdep_ovsl_is_held());
1202
1203                 err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb);
1204                 if (!err)
1205                         nla_nest_end(skb, start);
1206                 else {
1207                         if (skb_orig_len)
1208                                 goto error;
1209
1210                         nla_nest_cancel(skb, start);
1211                 }
1212         } else if (skb_orig_len)
1213                 goto nla_put_failure;
1214
1215         return genlmsg_end(skb, ovs_header);
1216
1217 nla_put_failure:
1218         err = -EMSGSIZE;
1219 error:
1220         genlmsg_cancel(skb, ovs_header);
1221         return err;
1222 }
1223
1224 static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
1225 {
1226         const struct sw_flow_actions *sf_acts;
1227
1228         sf_acts = ovsl_dereference(flow->sf_acts);
1229
1230         return genlmsg_new(ovs_flow_cmd_msg_size(sf_acts), GFP_KERNEL);
1231 }
1232
1233 static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
1234                                                struct datapath *dp,
1235                                                u32 portid, u32 seq, u8 cmd)
1236 {
1237         struct sk_buff *skb;
1238         int retval;
1239
1240         skb = ovs_flow_cmd_alloc_info(flow);
1241         if (!skb)
1242                 return ERR_PTR(-ENOMEM);
1243
1244         retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
1245         BUG_ON(retval < 0);
1246         return skb;
1247 }
1248
1249 static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
1250 {
1251         struct nlattr **a = info->attrs;
1252         struct ovs_header *ovs_header = info->userhdr;
1253         struct sw_flow_key key;
1254         struct sw_flow *flow = NULL;
1255         struct sw_flow_mask mask;
1256         struct sk_buff *reply;
1257         struct datapath *dp;
1258         struct flow_table *table;
1259         struct sw_flow_actions *acts = NULL;
1260         struct sw_flow_match match;
1261         int error;
1262
1263         /* Extract key. */
1264         error = -EINVAL;
1265         if (!a[OVS_FLOW_ATTR_KEY])
1266                 goto error;
1267
1268         ovs_match_init(&match, &key, &mask);
1269         error = ovs_match_from_nlattrs(&match,
1270                         a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
1271         if (error)
1272                 goto error;
1273
1274         /* Validate actions. */
1275         if (a[OVS_FLOW_ATTR_ACTIONS]) {
1276                 acts = ovs_flow_actions_alloc(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
1277                 error = PTR_ERR(acts);
1278                 if (IS_ERR(acts))
1279                         goto error;
1280
1281                 error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &key,  0, &acts);
1282                 if (error)
1283                         goto err_kfree;
1284         } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
1285                 error = -EINVAL;
1286                 goto error;
1287         }
1288
1289         ovs_lock();
1290         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1291         error = -ENODEV;
1292         if (!dp)
1293                 goto err_unlock_ovs;
1294
1295         table = ovsl_dereference(dp->table);
1296
1297         /* Check if this is a duplicate flow */
1298         flow = ovs_flow_lookup(table, &key);
1299         if (!flow) {
1300                 struct sw_flow_mask *mask_p;
1301                 /* Bail out if we're not allowed to create a new flow. */
1302                 error = -ENOENT;
1303                 if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
1304                         goto err_unlock_ovs;
1305
1306                 /* Expand table, if necessary, to make room. */
1307                 if (ovs_flow_tbl_need_to_expand(table)) {
1308                         struct flow_table *new_table;
1309
1310                         new_table = ovs_flow_tbl_expand(table);
1311                         if (!IS_ERR(new_table)) {
1312                                 rcu_assign_pointer(dp->table, new_table);
1313                                 ovs_flow_tbl_destroy(table, true);
1314                                 table = ovsl_dereference(dp->table);
1315                         }
1316                 }
1317
1318                 /* Allocate flow. */
1319                 flow = ovs_flow_alloc();
1320                 if (IS_ERR(flow)) {
1321                         error = PTR_ERR(flow);
1322                         goto err_unlock_ovs;
1323                 }
1324                 clear_stats(flow);
1325
1326                 /* Make sure mask is unique in the system */
1327                 mask_p = ovs_sw_flow_mask_find(table, &mask);
1328                 if (!mask_p) {
1329                         /* Allocate a new mask if none exists. */
1330                         mask_p = ovs_sw_flow_mask_alloc();
1331                         if (!mask_p)
1332                                 goto err_flow_free;
1333                         mask_p->key = mask.key;
1334                         mask_p->range = mask.range;
1335                         ovs_sw_flow_mask_insert(table, mask_p);
1336                 }
1337
1338                 ovs_sw_flow_mask_add_ref(mask_p);
1339                 rcu_assign_pointer(flow->mask, mask_p);
1340                 rcu_assign_pointer(flow->sf_acts, acts);
1341
1342                 /* Put flow in bucket. */
1343                 ovs_flow_insert(table, flow, &key, match.range.end);
1344
1345                 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
1346                                                 info->snd_seq, OVS_FLOW_CMD_NEW);
1347         } else {
1348                 /* We found a matching flow. */
1349                 struct sw_flow_actions *old_acts;
1350
1351                 /* Bail out if we're not allowed to modify an existing flow.
1352                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1353                  * because Generic Netlink treats the latter as a dump
1354                  * request.  We also accept NLM_F_EXCL in case that bug ever
1355                  * gets fixed.
1356                  */
1357                 error = -EEXIST;
1358                 if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
1359                     info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
1360                         goto err_unlock_ovs;
1361
1362                 /* The unmasked key has to be the same for flow updates. */
1363                 error = -EINVAL;
1364                 if (!ovs_flow_cmp_unmasked_key(flow, &key, match.range.end))
1365                         goto err_unlock_ovs;
1366
1367                 /* Update actions. */
1368                 old_acts = ovsl_dereference(flow->sf_acts);
1369                 rcu_assign_pointer(flow->sf_acts, acts);
1370                 ovs_flow_deferred_free_acts(old_acts);
1371
1372                 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
1373                                                info->snd_seq, OVS_FLOW_CMD_NEW);
1374
1375                 /* Clear stats. */
1376                 if (a[OVS_FLOW_ATTR_CLEAR]) {
1377                         spin_lock_bh(&flow->lock);
1378                         clear_stats(flow);
1379                         spin_unlock_bh(&flow->lock);
1380                 }
1381         }
1382         ovs_unlock();
1383
1384         if (!IS_ERR(reply))
1385                 ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
1386         else
1387                 netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
1388                                 ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
1389         return 0;
1390
1391 err_flow_free:
1392         ovs_flow_free(flow, false);
1393 err_unlock_ovs:
1394         ovs_unlock();
1395 err_kfree:
1396         kfree(acts);
1397 error:
1398         return error;
1399 }
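/* ovs_flow_cmd_new_or_set() validates the key and actions first, then takes
 * ovs_lock() for the table update: a missing flow is created (expanding the
 * table and reusing or allocating a matching mask as needed), while an
 * existing flow gets its actions swapped with rcu_assign_pointer() and the
 * old ones released via ovs_flow_deferred_free_acts().
 */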
1400
1401 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1402 {
1403         struct nlattr **a = info->attrs;
1404         struct ovs_header *ovs_header = info->userhdr;
1405         struct sw_flow_key key;
1406         struct sk_buff *reply;
1407         struct sw_flow *flow;
1408         struct datapath *dp;
1409         struct flow_table *table;
1410         struct sw_flow_match match;
1411         int err;
1412
1413         if (!a[OVS_FLOW_ATTR_KEY])
1414                 return -EINVAL;
1415
1416         ovs_match_init(&match, &key, NULL);
1417         err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL);
1418         if (err)
1419                 return err;
1420
1421         ovs_lock();
1422         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1423         if (!dp) {
1424                 err = -ENODEV;
1425                 goto unlock;
1426         }
1427
1428         table = ovsl_dereference(dp->table);
1429         flow = ovs_flow_lookup_unmasked_key(table, &match);
1430         if (!flow) {
1431                 err = -ENOENT;
1432                 goto unlock;
1433         }
1434
1435         reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
1436                                         info->snd_seq, OVS_FLOW_CMD_NEW);
1437         if (IS_ERR(reply)) {
1438                 err = PTR_ERR(reply);
1439                 goto unlock;
1440         }
1441
1442         ovs_unlock();
1443         return genlmsg_reply(reply, info);
1444 unlock:
1445         ovs_unlock();
1446         return err;
1447 }
1448
1449 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1450 {
1451         struct nlattr **a = info->attrs;
1452         struct ovs_header *ovs_header = info->userhdr;
1453         struct sw_flow_key key;
1454         struct sk_buff *reply;
1455         struct sw_flow *flow;
1456         struct datapath *dp;
1457         struct flow_table *table;
1458         struct sw_flow_match match;
1459         int err;
1460
1461         ovs_lock();
1462         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1463         if (!dp) {
1464                 err = -ENODEV;
1465                 goto unlock;
1466         }
1467
1468         if (!a[OVS_FLOW_ATTR_KEY]) {
1469                 err = flush_flows(dp);
1470                 goto unlock;
1471         }
1472
1473         ovs_match_init(&match, &key, NULL);
1474         err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL);
1475         if (err)
1476                 goto unlock;
1477
1478         table = ovsl_dereference(dp->table);
1479         flow = ovs_flow_lookup_unmasked_key(table, &match);
1480         if (!flow) {
1481                 err = -ENOENT;
1482                 goto unlock;
1483         }
1484
1485         reply = ovs_flow_cmd_alloc_info(flow);
1486         if (!reply) {
1487                 err = -ENOMEM;
1488                 goto unlock;
1489         }
1490
1491         ovs_flow_remove(table, flow);
1492
1493         err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
1494                                      info->snd_seq, 0, OVS_FLOW_CMD_DEL);
1495         BUG_ON(err < 0);
1496
1497         ovs_flow_free(flow, true);
1498         ovs_unlock();
1499
1500         ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
1501         return 0;
1502 unlock:
1503         ovs_unlock();
1504         return err;
1505 }
1506
1507 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1508 {
1509         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1510         struct datapath *dp;
1511         struct flow_table *table;
1512
1513         rcu_read_lock();
1514         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1515         if (!dp) {
1516                 rcu_read_unlock();
1517                 return -ENODEV;
1518         }
1519
1520         table = rcu_dereference(dp->table);
1521         for (;;) {
1522                 struct sw_flow *flow;
1523                 u32 bucket, obj;
1524
1525                 bucket = cb->args[0];
1526                 obj = cb->args[1];
1527                 flow = ovs_flow_dump_next(table, &bucket, &obj);
1528                 if (!flow)
1529                         break;
1530
1531                 if (ovs_flow_cmd_fill_info(flow, dp, skb,
1532                                            NETLINK_CB(cb->skb).portid,
1533                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1534                                            OVS_FLOW_CMD_NEW) < 0)
1535                         break;
1536
1537                 cb->args[0] = bucket;
1538                 cb->args[1] = obj;
1539         }
1540         rcu_read_unlock();
1541         return skb->len;
1542 }
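/* Per the commit title, the dump above runs under rcu_read_lock() rather
 * than ovs_lock(): the table is fetched with rcu_dereference() and
 * cb->args[0]/cb->args[1] record the bucket and object offset so that the
 * walk can resume where it left off on the next dump call.
 */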
1543
1544 static struct genl_ops dp_flow_genl_ops[] = {
1545         { .cmd = OVS_FLOW_CMD_NEW,
1546           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1547           .policy = flow_policy,
1548           .doit = ovs_flow_cmd_new_or_set
1549         },
1550         { .cmd = OVS_FLOW_CMD_DEL,
1551           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1552           .policy = flow_policy,
1553           .doit = ovs_flow_cmd_del
1554         },
1555         { .cmd = OVS_FLOW_CMD_GET,
1556           .flags = 0,               /* OK for unprivileged users. */
1557           .policy = flow_policy,
1558           .doit = ovs_flow_cmd_get,
1559           .dumpit = ovs_flow_cmd_dump
1560         },
1561         { .cmd = OVS_FLOW_CMD_SET,
1562           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1563           .policy = flow_policy,
1564           .doit = ovs_flow_cmd_new_or_set,
1565         },
1566 };
1567
1568 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1569 #ifdef HAVE_NLA_NUL_STRING
1570         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1571 #endif
1572         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1573 };
1574
1575 static struct genl_family dp_datapath_genl_family = {
1576         .id = GENL_ID_GENERATE,
1577         .hdrsize = sizeof(struct ovs_header),
1578         .name = OVS_DATAPATH_FAMILY,
1579         .version = OVS_DATAPATH_VERSION,
1580         .maxattr = OVS_DP_ATTR_MAX,
1581          SET_NETNSOK
1582 };
1583
1584 static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
1585         .name = OVS_DATAPATH_MCGROUP
1586 };
1587
1588 static size_t ovs_dp_cmd_msg_size(void)
1589 {
1590         size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1591
1592         msgsize += nla_total_size(IFNAMSIZ);
1593         msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
1594
1595         return msgsize;
1596 }
1597
1598 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1599                                 u32 portid, u32 seq, u32 flags, u8 cmd)
1600 {
1601         struct ovs_header *ovs_header;
1602         struct ovs_dp_stats dp_stats;
1603         int err;
1604
1605         ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1606                                    flags, cmd);
1607         if (!ovs_header)
1608                 goto error;
1609
1610         ovs_header->dp_ifindex = get_dpifindex(dp);
1611
1612         rcu_read_lock();
1613         err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1614         rcu_read_unlock();
1615         if (err)
1616                 goto nla_put_failure;
1617
1618         get_dp_stats(dp, &dp_stats);
1619         if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
1620                 goto nla_put_failure;
1621
1622         return genlmsg_end(skb, ovs_header);
1623
1624 nla_put_failure:
1625         genlmsg_cancel(skb, ovs_header);
1626 error:
1627         return -EMSGSIZE;
1628 }
1629
1630 static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
1631                                              u32 seq, u8 cmd)
1632 {
1633         struct sk_buff *skb;
1634         int retval;
1635
1636         skb = genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
1637         if (!skb)
1638                 return ERR_PTR(-ENOMEM);
1639
1640         retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
1641         if (retval < 0) {
1642                 kfree_skb(skb);
1643                 return ERR_PTR(retval);
1644         }
1645         return skb;
1646 }
1647
1648 static int ovs_dp_cmd_validate(struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1649 {
1650         return CHECK_NUL_STRING(a[OVS_DP_ATTR_NAME], IFNAMSIZ - 1);
1651 }
1652
1653 /* Called with ovs_mutex. */
1654 static struct datapath *lookup_datapath(struct net *net,
1655                                         struct ovs_header *ovs_header,
1656                                         struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1657 {
1658         struct datapath *dp;
1659
1660         if (!a[OVS_DP_ATTR_NAME])
1661                 dp = get_dp(net, ovs_header->dp_ifindex);
1662         else {
1663                 struct vport *vport;
1664
1665                 rcu_read_lock();
1666                 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1667                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1668                 rcu_read_unlock();
1669         }
1670         return dp ? dp : ERR_PTR(-ENODEV);
1671 }
1672
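/* Creates a new datapath: allocates the datapath, its flow table, per-CPU
 * stats and port hash table, attaches the OVSP_LOCAL internal vport, then
 * adds the datapath to the per-net list.  Each error path unwinds the steps
 * completed so far.
 */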
1673 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1674 {
1675         struct nlattr **a = info->attrs;
1676         struct vport_parms parms;
1677         struct sk_buff *reply;
1678         struct datapath *dp;
1679         struct vport *vport;
1680         struct ovs_net *ovs_net;
1681         int err, i;
1682
1683         err = -EINVAL;
1684         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1685                 goto err;
1686
1687         err = ovs_dp_cmd_validate(a);
1688         if (err)
1689                 goto err;
1690
1691         ovs_lock();
1692
1693         err = -ENOMEM;
1694         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1695         if (dp == NULL)
1696                 goto err_unlock_ovs;
1697
1698         ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
1699
1700         /* Allocate table. */
1701         err = -ENOMEM;
1702         rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
1703         if (!dp->table)
1704                 goto err_free_dp;
1705
1706         dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
1707         if (!dp->stats_percpu) {
1708                 err = -ENOMEM;
1709                 goto err_destroy_table;
1710         }
1711
1712         dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1713                             GFP_KERNEL);
1714         if (!dp->ports) {
1715                 err = -ENOMEM;
1716                 goto err_destroy_percpu;
1717         }
1718
1719         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1720                 INIT_HLIST_HEAD(&dp->ports[i]);
1721
1722         /* Set up our datapath device. */
1723         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1724         parms.type = OVS_VPORT_TYPE_INTERNAL;
1725         parms.options = NULL;
1726         parms.dp = dp;
1727         parms.port_no = OVSP_LOCAL;
1728         parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
1729
1730         vport = new_vport(&parms);
1731         if (IS_ERR(vport)) {
1732                 err = PTR_ERR(vport);
1733                 if (err == -EBUSY)
1734                         err = -EEXIST;
1735
1736                 goto err_destroy_ports_array;
1737         }
1738
1739         reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1740                                       info->snd_seq, OVS_DP_CMD_NEW);
1741         err = PTR_ERR(reply);
1742         if (IS_ERR(reply))
1743                 goto err_destroy_local_port;
1744
1745         ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1746         list_add_tail(&dp->list_node, &ovs_net->dps);
1747
1748         ovs_unlock();
1749
1750         ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
1751         return 0;
1752
1753 err_destroy_local_port:
1754         ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1755 err_destroy_ports_array:
1756         kfree(dp->ports);
1757 err_destroy_percpu:
1758         free_percpu(dp->stats_percpu);
1759 err_destroy_table:
1760         ovs_flow_tbl_destroy(ovsl_dereference(dp->table), false);
1761 err_free_dp:
1762         release_net(ovs_dp_get_net(dp));
1763         kfree(dp);
1764 err_unlock_ovs:
1765         ovs_unlock();
1766 err:
1767         return err;
1768 }
1769
1770 /* Called with ovs_mutex. */
1771 static void __dp_destroy(struct datapath *dp)
1772 {
1773         int i;
1774
1775         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1776                 struct vport *vport;
1777                 struct hlist_node *n;
1778
1779                 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1780                         if (vport->port_no != OVSP_LOCAL)
1781                                 ovs_dp_detach_port(vport);
1782         }
1783
1784         list_del(&dp->list_node);
1785
1786         /* OVSP_LOCAL is the datapath's internal port.  Make sure that all
1787          * ports in the datapath are destroyed before the datapath is freed.
1788          */
1789         ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1790
1791         call_rcu(&dp->rcu, destroy_dp_rcu);
1792 }
1793
1794 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1795 {
1796         struct sk_buff *reply;
1797         struct datapath *dp;
1798         int err;
1799
1800         err = ovs_dp_cmd_validate(info->attrs);
1801         if (err)
1802                 return err;
1803
1804         ovs_lock();
1805         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1806         err = PTR_ERR(dp);
1807         if (IS_ERR(dp))
1808                 goto unlock;
1809
1810         reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1811                                       info->snd_seq, OVS_DP_CMD_DEL);
1812         err = PTR_ERR(reply);
1813         if (IS_ERR(reply))
1814                 goto unlock;
1815
1816         __dp_destroy(dp);
1817         ovs_unlock();
1818
1819         ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
1820
1821         return 0;
1822 unlock:
1823         ovs_unlock();
1824         return err;
1825 }
1826
1827 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1828 {
1829         struct sk_buff *reply;
1830         struct datapath *dp;
1831         int err;
1832
1833         err = ovs_dp_cmd_validate(info->attrs);
1834         if (err)
1835                 return err;
1836
1837         ovs_lock();
1838         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1839         err = PTR_ERR(dp);
1840         if (IS_ERR(dp))
1841                 goto unlock;
1842
1843         reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1844                                       info->snd_seq, OVS_DP_CMD_NEW);
1845         if (IS_ERR(reply)) {
1846                 err = PTR_ERR(reply);
1847                 netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
1848                                 ovs_dp_datapath_multicast_group.id, err);
1849                 err = 0;
1850                 goto unlock;
1851         }
1852
1853         ovs_unlock();
1854         ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
1855
1856         return 0;
1857 unlock:
1858         ovs_unlock();
1859         return err;
1860 }
1861
1862 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1863 {
1864         struct sk_buff *reply;
1865         struct datapath *dp;
1866         int err;
1867
1868         err = ovs_dp_cmd_validate(info->attrs);
1869         if (err)
1870                 return err;
1871
1872         ovs_lock();
1873         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1874         if (IS_ERR(dp)) {
1875                 err = PTR_ERR(dp);
1876                 goto unlock;
1877         }
1878
1879         reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1880                                       info->snd_seq, OVS_DP_CMD_NEW);
1881         if (IS_ERR(reply)) {
1882                 err = PTR_ERR(reply);
1883                 goto unlock;
1884         }
1885
1886         ovs_unlock();
1887         return genlmsg_reply(reply, info);
1888
1889 unlock:
1890         ovs_unlock();
1891         return err;
1892 }
1893
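/* Datapath dumps walk the per-net list of datapaths under ovs_mutex;
 * cb->args[0] records how many entries have already been emitted so that a
 * resumed dump skips them.
 */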
1894 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1895 {
1896         struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1897         struct datapath *dp;
1898         int skip = cb->args[0];
1899         int i = 0;
1900
1901         ovs_lock();
1902         list_for_each_entry(dp, &ovs_net->dps, list_node) {
1903                 if (i >= skip &&
1904                     ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1905                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1906                                          OVS_DP_CMD_NEW) < 0)
1907                         break;
1908                 i++;
1909         }
1910         ovs_unlock();
1911
1912         cb->args[0] = i;
1913
1914         return skb->len;
1915 }
1916
1917 static struct genl_ops dp_datapath_genl_ops[] = {
1918         { .cmd = OVS_DP_CMD_NEW,
1919           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1920           .policy = datapath_policy,
1921           .doit = ovs_dp_cmd_new
1922         },
1923         { .cmd = OVS_DP_CMD_DEL,
1924           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1925           .policy = datapath_policy,
1926           .doit = ovs_dp_cmd_del
1927         },
1928         { .cmd = OVS_DP_CMD_GET,
1929           .flags = 0,               /* OK for unprivileged users. */
1930           .policy = datapath_policy,
1931           .doit = ovs_dp_cmd_get,
1932           .dumpit = ovs_dp_cmd_dump
1933         },
1934         { .cmd = OVS_DP_CMD_SET,
1935           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1936           .policy = datapath_policy,
1937           .doit = ovs_dp_cmd_set,
1938         },
1939 };
1940
1941 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
1942 #ifdef HAVE_NLA_NUL_STRING
1943         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1944         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
1945 #else
1946         [OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct ovs_vport_stats) },
1947 #endif
1948         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1949         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1950         [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1951         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1952 };
1953
1954 static struct genl_family dp_vport_genl_family = {
1955         .id = GENL_ID_GENERATE,
1956         .hdrsize = sizeof(struct ovs_header),
1957         .name = OVS_VPORT_FAMILY,
1958         .version = OVS_VPORT_VERSION,
1959         .maxattr = OVS_VPORT_ATTR_MAX,
1960          SET_NETNSOK
1961 };
1962
1963 struct genl_multicast_group ovs_dp_vport_multicast_group = {
1964         .name = OVS_VPORT_MCGROUP
1965 };
1966
1967 /* Called with ovs_mutex or RCU read lock. */
1968 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1969                                    u32 portid, u32 seq, u32 flags, u8 cmd)
1970 {
1971         struct ovs_header *ovs_header;
1972         struct ovs_vport_stats vport_stats;
1973         int err;
1974
1975         ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1976                                  flags, cmd);
1977         if (!ovs_header)
1978                 return -EMSGSIZE;
1979
1980         ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1981
1982         if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1983             nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1984             nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
1985             nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
1986                 goto nla_put_failure;
1987
1988         ovs_vport_get_stats(vport, &vport_stats);
1989         if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1990                     &vport_stats))
1991                 goto nla_put_failure;
1992
1993         err = ovs_vport_get_options(vport, skb);
1994         if (err == -EMSGSIZE)
1995                 goto error;
1996
1997         return genlmsg_end(skb, ovs_header);
1998
1999 nla_put_failure:
2000         err = -EMSGSIZE;
2001 error:
2002         genlmsg_cancel(skb, ovs_header);
2003         return err;
2004 }
2005
2006 /* Called with ovs_mutex or RCU read lock. */
2007 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
2008                                          u32 seq, u8 cmd)
2009 {
2010         struct sk_buff *skb;
2011         int retval;
2012
2013         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
2014         if (!skb)
2015                 return ERR_PTR(-ENOMEM);
2016
2017         retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
2018         BUG_ON(retval < 0);
2019
2020         return skb;
2021 }
2022
2023 static int ovs_vport_cmd_validate(struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
2024 {
2025         return CHECK_NUL_STRING(a[OVS_VPORT_ATTR_NAME], IFNAMSIZ - 1);
2026 }
2027
2028 /* Called with ovs_mutex or RCU read lock. */
2029 static struct vport *lookup_vport(struct net *net,
2030                                   struct ovs_header *ovs_header,
2031                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
2032 {
2033         struct datapath *dp;
2034         struct vport *vport;
2035
2036         if (a[OVS_VPORT_ATTR_NAME]) {
2037                 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
2038                 if (!vport)
2039                         return ERR_PTR(-ENODEV);
2040                 if (ovs_header->dp_ifindex &&
2041                     ovs_header->dp_ifindex != get_dpifindex(vport->dp))
2042                         return ERR_PTR(-ENODEV);
2043                 return vport;
2044         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
2045                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
2046
2047                 if (port_no >= DP_MAX_PORTS)
2048                         return ERR_PTR(-EFBIG);
2049
2050                 dp = get_dp(net, ovs_header->dp_ifindex);
2051                 if (!dp)
2052                         return ERR_PTR(-ENODEV);
2053
2054                 vport = ovs_vport_ovsl_rcu(dp, port_no);
2055                 if (!vport)
2056                         return ERR_PTR(-ENODEV);
2057                 return vport;
2058         } else
2059                 return ERR_PTR(-EINVAL);
2060 }
2061
2062 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
2063 {
2064         struct nlattr **a = info->attrs;
2065         struct ovs_header *ovs_header = info->userhdr;
2066         struct vport_parms parms;
2067         struct sk_buff *reply;
2068         struct vport *vport;
2069         struct datapath *dp;
2070         u32 port_no;
2071         int err;
2072
2073         err = -EINVAL;
2074         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
2075             !a[OVS_VPORT_ATTR_UPCALL_PID])
2076                 goto exit;
2077
2078         err = ovs_vport_cmd_validate(a);
2079         if (err)
2080                 goto exit;
2081
2082         ovs_lock();
2083         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
2084         err = -ENODEV;
2085         if (!dp)
2086                 goto exit_unlock;
2087
2088         if (a[OVS_VPORT_ATTR_PORT_NO]) {
2089                 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
2090
2091                 err = -EFBIG;
2092                 if (port_no >= DP_MAX_PORTS)
2093                         goto exit_unlock;
2094
2095                 vport = ovs_vport_ovsl(dp, port_no);
2096                 err = -EBUSY;
2097                 if (vport)
2098                         goto exit_unlock;
2099         } else {
2100                 for (port_no = 1; ; port_no++) {
2101                         if (port_no >= DP_MAX_PORTS) {
2102                                 err = -EFBIG;
2103                                 goto exit_unlock;
2104                         }
2105                         vport = ovs_vport_ovsl(dp, port_no);
2106                         if (!vport)
2107                                 break;
2108                 }
2109         }
2110
2111         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
2112         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
2113         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
2114         parms.dp = dp;
2115         parms.port_no = port_no;
2116         parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
2117
2118         vport = new_vport(&parms);
2119         err = PTR_ERR(vport);
2120         if (IS_ERR(vport))
2121                 goto exit_unlock;
2122
2123         err = 0;
2124         if (a[OVS_VPORT_ATTR_STATS])
2125                 ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
2126
2127         reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
2128                                          OVS_VPORT_CMD_NEW);
2129         if (IS_ERR(reply)) {
2130                 err = PTR_ERR(reply);
2131                 ovs_dp_detach_port(vport);
2132                 goto exit_unlock;
2133         }
2134
2135         ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
2136
2137 exit_unlock:
2138         ovs_unlock();
2139 exit:
2140         return err;
2141 }
2142
2143 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
2144 {
2145         struct nlattr **a = info->attrs;
2146         struct sk_buff *reply;
2147         struct vport *vport;
2148         int err;
2149
2150         err = ovs_vport_cmd_validate(a);
2151         if (err)
2152                 goto exit;
2153
2154         ovs_lock();
2155         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2156         err = PTR_ERR(vport);
2157         if (IS_ERR(vport))
2158                 goto exit_unlock;
2159
2160         if (a[OVS_VPORT_ATTR_TYPE] &&
2161             nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
2162                 err = -EINVAL;
2163                 goto exit_unlock;
2164         }
2165
2166         reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2167         if (!reply) {
2168                 err = -ENOMEM;
2169                 goto exit_unlock;
2170         }
2171
2172         if (a[OVS_VPORT_ATTR_OPTIONS]) {
2173                 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
2174                 if (err)
2175                         goto exit_free;
2176         }
2177
2178         if (a[OVS_VPORT_ATTR_STATS])
2179                 ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
2180
2181         if (a[OVS_VPORT_ATTR_UPCALL_PID])
2182                 vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
2183
2184         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2185                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2186         BUG_ON(err < 0);
2187
2188         ovs_unlock();
2189         ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
2190         return 0;
2191
2192 exit_free:
2193         kfree_skb(reply);
2194 exit_unlock:
2195         ovs_unlock();
2196 exit:
2197         return err;
2198 }
2199
2200 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
2201 {
2202         struct nlattr **a = info->attrs;
2203         struct sk_buff *reply;
2204         struct vport *vport;
2205         int err;
2206
2207         err = ovs_vport_cmd_validate(a);
2208         if (err)
2209                 goto exit;
2210
2211         ovs_lock();
2212         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2213         err = PTR_ERR(vport);
2214         if (IS_ERR(vport))
2215                 goto exit_unlock;
2216
2217         if (vport->port_no == OVSP_LOCAL) {
2218                 err = -EINVAL;
2219                 goto exit_unlock;
2220         }
2221
2222         reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
2223                                          info->snd_seq, OVS_VPORT_CMD_DEL);
2224         err = PTR_ERR(reply);
2225         if (IS_ERR(reply))
2226                 goto exit_unlock;
2227
2228         err = 0;
2229         ovs_dp_detach_port(vport);
2230
2231         ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
2232
2233 exit_unlock:
2234         ovs_unlock();
2235 exit:
2236         return err;
2237 }
2238
2239 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
2240 {
2241         struct nlattr **a = info->attrs;
2242         struct ovs_header *ovs_header = info->userhdr;
2243         struct sk_buff *reply;
2244         struct vport *vport;
2245         int err;
2246
2247         err = ovs_vport_cmd_validate(a);
2248         if (err)
2249                 goto exit;
2250
2251         rcu_read_lock();
2252         vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
2253         err = PTR_ERR(vport);
2254         if (IS_ERR(vport))
2255                 goto exit_unlock;
2256
2257         reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
2258                                          info->snd_seq, OVS_VPORT_CMD_NEW);
2259         err = PTR_ERR(reply);
2260         if (IS_ERR(reply))
2261                 goto exit_unlock;
2262
2263         rcu_read_unlock();
2264
2265         return genlmsg_reply(reply, info);
2266
2267 exit_unlock:
2268         rcu_read_unlock();
2269 exit:
2270         return err;
2271 }
2272
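/* Vport dumps iterate the datapath's port hash table under the RCU read lock;
 * cb->args[0] is the current bucket and cb->args[1] the number of ports
 * already dumped within that bucket.
 */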
2273 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
2274 {
2275         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
2276         struct datapath *dp;
2277         int bucket = cb->args[0], skip = cb->args[1];
2278         int i, j = 0;
2279
2280         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
2281         if (!dp)
2282                 return -ENODEV;
2283
2284         rcu_read_lock();
2285         for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
2286                 struct vport *vport;
2287
2288                 j = 0;
2289                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
2290                         if (j >= skip &&
2291                             ovs_vport_cmd_fill_info(vport, skb,
2292                                                     NETLINK_CB(cb->skb).portid,
2293                                                     cb->nlh->nlmsg_seq,
2294                                                     NLM_F_MULTI,
2295                                                     OVS_VPORT_CMD_NEW) < 0)
2296                                 goto out;
2297
2298                         j++;
2299                 }
2300                 skip = 0;
2301         }
2302 out:
2303         rcu_read_unlock();
2304
2305         cb->args[0] = i;
2306         cb->args[1] = j;
2307
2308         return skb->len;
2309 }
2310
2311 static struct genl_ops dp_vport_genl_ops[] = {
2312         { .cmd = OVS_VPORT_CMD_NEW,
2313           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2314           .policy = vport_policy,
2315           .doit = ovs_vport_cmd_new
2316         },
2317         { .cmd = OVS_VPORT_CMD_DEL,
2318           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2319           .policy = vport_policy,
2320           .doit = ovs_vport_cmd_del
2321         },
2322         { .cmd = OVS_VPORT_CMD_GET,
2323           .flags = 0,               /* OK for unprivileged users. */
2324           .policy = vport_policy,
2325           .doit = ovs_vport_cmd_get,
2326           .dumpit = ovs_vport_cmd_dump
2327         },
2328         { .cmd = OVS_VPORT_CMD_SET,
2329           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2330           .policy = vport_policy,
2331           .doit = ovs_vport_cmd_set,
2332         },
2333 };
2334
2335 struct genl_family_and_ops {
2336         struct genl_family *family;
2337         struct genl_ops *ops;
2338         int n_ops;
2339         struct genl_multicast_group *group;
2340 };
2341
2342 static const struct genl_family_and_ops dp_genl_families[] = {
2343         { &dp_datapath_genl_family,
2344           dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
2345           &ovs_dp_datapath_multicast_group },
2346         { &dp_vport_genl_family,
2347           dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
2348           &ovs_dp_vport_multicast_group },
2349         { &dp_flow_genl_family,
2350           dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
2351           &ovs_dp_flow_multicast_group },
2352         { &dp_packet_genl_family,
2353           dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
2354           NULL },
2355 };
2356
2357 static void dp_unregister_genl(int n_families)
2358 {
2359         int i;
2360
2361         for (i = 0; i < n_families; i++)
2362                 genl_unregister_family(dp_genl_families[i].family);
2363 }
2364
2365 static int dp_register_genl(void)
2366 {
2367         int n_registered;
2368         int err;
2369         int i;
2370
2371         n_registered = 0;
2372         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2373                 const struct genl_family_and_ops *f = &dp_genl_families[i];
2374
2375                 err = genl_register_family_with_ops(f->family, f->ops,
2376                                                     f->n_ops);
2377                 if (err)
2378                         goto error;
2379                 n_registered++;
2380
2381                 if (f->group) {
2382                         err = genl_register_mc_group(f->family, f->group);
2383                         if (err)
2384                                 goto error;
2385                 }
2386         }
2387
2388         return 0;
2389
2390 error:
2391         dp_unregister_genl(n_registered);
2392         return err;
2393 }
2394
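/* Delayed work that replaces each datapath's flow table with a freshly
 * rehashed copy.  The new table is published with rcu_assign_pointer() before
 * the old one is destroyed, and the work reschedules itself every
 * REHASH_FLOW_INTERVAL.
 */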
2395 static void rehash_flow_table(struct work_struct *work)
2396 {
2397         struct datapath *dp;
2398         struct net *net;
2399
2400         ovs_lock();
2401         rtnl_lock();
2402         for_each_net(net) {
2403                 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2404
2405                 list_for_each_entry(dp, &ovs_net->dps, list_node) {
2406                         struct flow_table *old_table = ovsl_dereference(dp->table);
2407                         struct flow_table *new_table;
2408
2409                         new_table = ovs_flow_tbl_rehash(old_table);
2410                         if (!IS_ERR(new_table)) {
2411                                 rcu_assign_pointer(dp->table, new_table);
2412                                 ovs_flow_tbl_destroy(old_table, true);
2413                         }
2414                 }
2415         }
2416         rtnl_unlock();
2417         ovs_unlock();
2418         schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
2419 }
2420
2421 static int __net_init ovs_init_net(struct net *net)
2422 {
2423         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2424
2425         INIT_LIST_HEAD(&ovs_net->dps);
2426         INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2427         return 0;
2428 }
2429
2430 static void __net_exit ovs_exit_net(struct net *net)
2431 {
2432         struct datapath *dp, *dp_next;
2433         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2434
2435         ovs_lock();
2436         list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2437                 __dp_destroy(dp);
2438         ovs_unlock();
2439
2440         cancel_work_sync(&ovs_net->dp_notify_work);
2441 }
2442
2443 static struct pernet_operations ovs_net_ops = {
2444         .init = ovs_init_net,
2445         .exit = ovs_exit_net,
2446         .id   = &ovs_net_id,
2447         .size = sizeof(struct ovs_net),
2448 };
2449
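/* Module init: bring up the workqueues, flow and vport subsystems, per-net
 * state, the netdev notifier and the generic netlink families, then start the
 * periodic flow-table rehash.  Failures unwind in reverse order.
 */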
2450 static int __init dp_init(void)
2451 {
2452         int err;
2453
2454         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
2455
2456         pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
2457                 VERSION);
2458
2459         err = ovs_workqueues_init();
2460         if (err)
2461                 goto error;
2462
2463         err = ovs_flow_init();
2464         if (err)
2465                 goto error_wq;
2466
2467         err = ovs_vport_init();
2468         if (err)
2469                 goto error_flow_exit;
2470
2471         err = register_pernet_device(&ovs_net_ops);
2472         if (err)
2473                 goto error_vport_exit;
2474
2475         err = register_netdevice_notifier(&ovs_dp_device_notifier);
2476         if (err)
2477                 goto error_netns_exit;
2478
2479         err = dp_register_genl();
2480         if (err < 0)
2481                 goto error_unreg_notifier;
2482
2483         schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
2484
2485         return 0;
2486
2487 error_unreg_notifier:
2488         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2489 error_netns_exit:
2490         unregister_pernet_device(&ovs_net_ops);
2491 error_vport_exit:
2492         ovs_vport_exit();
2493 error_flow_exit:
2494         ovs_flow_exit();
2495 error_wq:
2496         ovs_workqueues_exit();
2497 error:
2498         return err;
2499 }
2500
2501 static void dp_cleanup(void)
2502 {
2503         cancel_delayed_work_sync(&rehash_flow_wq);
2504         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2505         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2506         unregister_pernet_device(&ovs_net_ops);
2507         rcu_barrier();
2508         ovs_vport_exit();
2509         ovs_flow_exit();
2510         ovs_workqueues_exit();
2511 }
2512
2513 module_init(dp_init);
2514 module_exit(dp_cleanup);
2515
2516 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2517 MODULE_LICENSE("GPL");
2518 MODULE_VERSION(VERSION);