sliver-openvswitch.git · datapath/flow.c @ 752c8d6b348f472d50a1915a3a14a9db03df3beb
/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#include "vlan.h"

static struct kmem_cache *flow_cache;

static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask,
                struct sw_flow_key_range *range, u8 val);

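/* Grow the key range recorded in @match (or in its mask, when @is_mask)
 * so that it covers the bytes [offset, offset + size) of struct
 * sw_flow_key.  A fresh range is empty (start == end == 0). */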
static void update_range__(struct sw_flow_match *match,
                          size_t offset, size_t size, bool is_mask)
{
        struct sw_flow_key_range *range = NULL;
        size_t start = offset;
        size_t end = offset + size;

        if (!is_mask)
                range = &match->range;
        else if (match->mask)
                range = &match->mask->range;

        if (!range)
                return;

        if (range->start == range->end) {
                range->start = start;
                range->end = end;
                return;
        }

        if (range->start > start)
                range->start = start;

        if (range->end < end)
                range->end = end;
}

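/* Helpers for the netlink parsing code below: store @value (or copy
 * @len bytes from @value_p) into the given sw_flow_key field, directing
 * the write to the mask's key when @is_mask is set, and record the
 * touched byte range via update_range__(). */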
#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
        do { \
                update_range__(match, offsetof(struct sw_flow_key, field),  \
                                     sizeof((match)->key->field), is_mask); \
                if (is_mask && match->mask != NULL) {                       \
                        (match)->mask->key.field = value;                   \
                } else {                                                    \
                        (match)->key->field = value;                        \
                }                                                           \
        } while (0)

#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
        do { \
                update_range__(match, offsetof(struct sw_flow_key, field),  \
                                len, is_mask);                              \
                if (is_mask && match->mask != NULL) {                       \
                        memcpy(&(match)->mask->key.field, value_p, len);    \
                } else {                                                    \
                        memcpy(&(match)->key->field, value_p, len);         \
                }                                                           \
        } while (0)

void ovs_match_init(struct sw_flow_match *match,
                    struct sw_flow_key *key,
                    struct sw_flow_mask *mask)
{
        memset(match, 0, sizeof(*match));
        match->key = key;
        match->mask = mask;

        memset(key, 0, sizeof(*key));

        if (mask) {
                memset(&mask->key, 0, sizeof(mask->key));
                mask->range.start = mask->range.end = 0;
        }
}

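/* Validate a match parsed from netlink: every key attribute implied by
 * the EtherType and IP protocol must be present (key_expected), and the
 * mask may only cover attributes whose prerequisites are exact-matched
 * (mask_allowed). */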
static bool ovs_match_validate(const struct sw_flow_match *match,
                u64 key_attrs, u64 mask_attrs)
{
        u64 key_expected = 1ULL << OVS_KEY_ATTR_ETHERNET;
        u64 mask_allowed = key_attrs;  /* At most allow all key attributes */

        /* The following mask attributes are allowed only if they
         * pass the validation tests. */
        mask_allowed &= ~((1ULL << OVS_KEY_ATTR_IPV4)
                        | (1ULL << OVS_KEY_ATTR_IPV6)
                        | (1ULL << OVS_KEY_ATTR_TCP)
                        | (1ULL << OVS_KEY_ATTR_UDP)
                        | (1ULL << OVS_KEY_ATTR_ICMP)
                        | (1ULL << OVS_KEY_ATTR_ICMPV6)
                        | (1ULL << OVS_KEY_ATTR_ARP)
                        | (1ULL << OVS_KEY_ATTR_ND));

        if (match->key->phy.in_port == DP_MAX_PORTS &&
            match->mask && (match->mask->key.phy.in_port == 0xffff))
                mask_allowed |= (1ULL << OVS_KEY_ATTR_IN_PORT);

        if (match->key->eth.type == htons(ETH_P_802_2) &&
            match->mask && (match->mask->key.eth.type == htons(0xffff)))
                mask_allowed |= (1ULL << OVS_KEY_ATTR_ETHERTYPE);

        /* Check key attributes. */
        if (match->key->eth.type == htons(ETH_P_ARP)
                        || match->key->eth.type == htons(ETH_P_RARP)) {
                key_expected |= 1ULL << OVS_KEY_ATTR_ARP;
                if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
                        mask_allowed |= 1ULL << OVS_KEY_ATTR_ARP;
        }

        if (match->key->eth.type == htons(ETH_P_IP)) {
                key_expected |= 1ULL << OVS_KEY_ATTR_IPV4;
                if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
                        mask_allowed |= 1ULL << OVS_KEY_ATTR_IPV4;

                if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
                        if (match->key->ip.proto == IPPROTO_UDP) {
                                key_expected |= 1ULL << OVS_KEY_ATTR_UDP;
                                if (match->mask && (match->mask->key.ip.proto == 0xff))
                                        mask_allowed |= 1ULL << OVS_KEY_ATTR_UDP;
                        }

                        if (match->key->ip.proto == IPPROTO_TCP) {
                                key_expected |= 1ULL << OVS_KEY_ATTR_TCP;
                                if (match->mask && (match->mask->key.ip.proto == 0xff))
                                        mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP;
                        }

                        if (match->key->ip.proto == IPPROTO_ICMP) {
                                key_expected |= 1ULL << OVS_KEY_ATTR_ICMP;
                                if (match->mask && (match->mask->key.ip.proto == 0xff))
                                        mask_allowed |= 1ULL << OVS_KEY_ATTR_ICMP;
                        }
                }
        }

        if (match->key->eth.type == htons(ETH_P_IPV6)) {
                key_expected |= 1ULL << OVS_KEY_ATTR_IPV6;
                if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
                        mask_allowed |= 1ULL << OVS_KEY_ATTR_IPV6;

                if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
                        if (match->key->ip.proto == IPPROTO_UDP) {
                                key_expected |= 1ULL << OVS_KEY_ATTR_UDP;
                                if (match->mask && (match->mask->key.ip.proto == 0xff))
                                        mask_allowed |= 1ULL << OVS_KEY_ATTR_UDP;
                        }

                        if (match->key->ip.proto == IPPROTO_TCP) {
                                key_expected |= 1ULL << OVS_KEY_ATTR_TCP;
                                if (match->mask && (match->mask->key.ip.proto == 0xff))
                                        mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP;
                        }

                        if (match->key->ip.proto == IPPROTO_ICMPV6) {
                                key_expected |= 1ULL << OVS_KEY_ATTR_ICMPV6;
                                if (match->mask && (match->mask->key.ip.proto == 0xff))
                                        mask_allowed |= 1ULL << OVS_KEY_ATTR_ICMPV6;

                                if (match->key->ipv6.tp.src ==
                                                htons(NDISC_NEIGHBOUR_SOLICITATION) ||
                                    match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
                                        key_expected |= 1ULL << OVS_KEY_ATTR_ND;
                                        if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff)))
                                                mask_allowed |= 1ULL << OVS_KEY_ATTR_ND;
                                }
                        }
                }
        }

        if ((key_attrs & key_expected) != key_expected) {
                /* Key attributes check failed. */
                OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n",
                                key_attrs, key_expected);
                return false;
        }

        if ((mask_attrs & mask_allowed) != mask_attrs) {
                /* Mask attributes check failed. */
                OVS_NLERR("Mask contains more fields than allowed (mask_attrs=%llx, mask_allowed=%llx).\n",
                                mask_attrs, mask_allowed);
                return false;
        }

        return true;
}

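/* Make sure the first @len bytes of the packet are present in the
 * linear skb data area, pulling them in if necessary. */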
static int check_header(struct sk_buff *skb, int len)
{
        if (unlikely(skb->len < len))
                return -EINVAL;
        if (unlikely(!pskb_may_pull(skb, len)))
                return -ENOMEM;
        return 0;
}

static bool arphdr_ok(struct sk_buff *skb)
{
        return pskb_may_pull(skb, skb_network_offset(skb) +
                                  sizeof(struct arp_eth_header));
}

static int check_iphdr(struct sk_buff *skb)
{
        unsigned int nh_ofs = skb_network_offset(skb);
        unsigned int ip_len;
        int err;

        err = check_header(skb, nh_ofs + sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        ip_len = ip_hdrlen(skb);
        if (unlikely(ip_len < sizeof(struct iphdr) ||
                     skb->len < nh_ofs + ip_len))
                return -EINVAL;

        skb_set_transport_header(skb, nh_ofs + ip_len);
        return 0;
}

static bool tcphdr_ok(struct sk_buff *skb)
{
        int th_ofs = skb_transport_offset(skb);
        int tcp_len;

        if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
                return false;

        tcp_len = tcp_hdrlen(skb);
        if (unlikely(tcp_len < sizeof(struct tcphdr) ||
                     skb->len < th_ofs + tcp_len))
                return false;

        return true;
}

static bool udphdr_ok(struct sk_buff *skb)
{
        return pskb_may_pull(skb, skb_transport_offset(skb) +
                                  sizeof(struct udphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
        return pskb_may_pull(skb, skb_transport_offset(skb) +
                                  sizeof(struct icmphdr));
}

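/* Convert the jiffies timestamp of a flow's last use into milliseconds
 * on the monotonic clock: take "now" in ms and subtract how long the
 * flow has been idle. */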
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
        struct timespec cur_ts;
        u64 cur_ms, idle_ms;

        ktime_get_ts(&cur_ts);
        idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
        cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
                 cur_ts.tv_nsec / NSEC_PER_MSEC;

        return cur_ms - idle_ms;
}

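/* Parse the IPv6 header and any extension headers, filling in the
 * ip/ipv6 fields of @key and setting the transport header.  Returns the
 * total length of the IPv6 headers (fixed header plus extensions) on
 * success, or a negative errno on a malformed packet. */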
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
        unsigned int nh_ofs = skb_network_offset(skb);
        unsigned int nh_len;
        int payload_ofs;
        struct ipv6hdr *nh;
        uint8_t nexthdr;
        __be16 frag_off;
        int err;

        err = check_header(skb, nh_ofs + sizeof(*nh));
        if (unlikely(err))
                return err;

        nh = ipv6_hdr(skb);
        nexthdr = nh->nexthdr;
        payload_ofs = (u8 *)(nh + 1) - skb->data;

        key->ip.proto = NEXTHDR_NONE;
        key->ip.tos = ipv6_get_dsfield(nh);
        key->ip.ttl = nh->hop_limit;
        key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
        key->ipv6.addr.src = nh->saddr;
        key->ipv6.addr.dst = nh->daddr;

        payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
        if (unlikely(payload_ofs < 0))
                return -EINVAL;

        if (frag_off) {
                if (frag_off & htons(~0x7))
                        key->ip.frag = OVS_FRAG_TYPE_LATER;
                else
                        key->ip.frag = OVS_FRAG_TYPE_FIRST;
        }

        nh_len = payload_ofs - nh_ofs;
        skb_set_transport_header(skb, nh_ofs + nh_len);
        key->ip.proto = nexthdr;
        return nh_len;
}

static bool icmp6hdr_ok(struct sk_buff *skb)
{
        return pskb_may_pull(skb, skb_transport_offset(skb) +
                                  sizeof(struct icmp6hdr));
}

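/* Byte-wise dst = src & mask over the byte range covered by @mask;
 * everything outside the range is zeroed, so two keys that differ only
 * in masked-out bits produce identical masked keys. */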
void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       const struct sw_flow_mask *mask)
{
        u8 *m = (u8 *)&mask->key + mask->range.start;
        u8 *s = (u8 *)src + mask->range.start;
        u8 *d = (u8 *)dst + mask->range.start;
        int i;

        memset(dst, 0, sizeof(*dst));
        for (i = 0; i < ovs_sw_flow_mask_size_roundup(mask); i++) {
                *d = *s & *m;
                d++, s++, m++;
        }
}

#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f

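/* Record that @skb hit @flow: refresh the 'used' timestamp, bump the
 * packet/byte counters, and OR in the TCP flags byte for TCP flows. */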
void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
        u8 tcp_flags = 0;

        if ((flow->key.eth.type == htons(ETH_P_IP) ||
             flow->key.eth.type == htons(ETH_P_IPV6)) &&
            flow->key.ip.proto == IPPROTO_TCP &&
            likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
                u8 *tcp = (u8 *)tcp_hdr(skb);
                tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
        }

        spin_lock(&flow->lock);
        flow->used = jiffies;
        flow->packet_count++;
        flow->byte_count += skb->len;
        flow->tcp_flags |= tcp_flags;
        spin_unlock(&flow->lock);
}

struct sw_flow_actions *ovs_flow_actions_alloc(int size)
{
        struct sw_flow_actions *sfa;

        if (size > MAX_ACTIONS_BUFSIZE)
                return ERR_PTR(-EINVAL);

        sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
        if (!sfa)
                return ERR_PTR(-ENOMEM);

        sfa->actions_len = 0;
        return sfa;
}

struct sw_flow *ovs_flow_alloc(void)
{
        struct sw_flow *flow;

        flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&flow->lock);
        flow->sf_acts = NULL;
        flow->mask = NULL;

        return flow;
}

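/* Mix the flow hash with the per-table seed before indexing, so bucket
 * distribution differs between tables and is hard to predict from
 * outside. */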
static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
        hash = jhash_1word(hash, table->hash_seed);
        return flex_array_get(table->buckets,
                                (hash & (table->n_buckets - 1)));
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
        struct flex_array *buckets;
        int i, err;

        buckets = flex_array_alloc(sizeof(struct hlist_head *),
                                   n_buckets, GFP_KERNEL);
        if (!buckets)
                return NULL;

        err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
        if (err) {
                flex_array_free(buckets);
                return NULL;
        }

        for (i = 0; i < n_buckets; i++)
                INIT_HLIST_HEAD((struct hlist_head *)
                                        flex_array_get(buckets, i));

        return buckets;
}

static void free_buckets(struct flex_array *buckets)
{
        flex_array_free(buckets);
}

static struct flow_table *__flow_tbl_alloc(int new_size)
{
        struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);

        if (!table)
                return NULL;

        table->buckets = alloc_buckets(new_size);

        if (!table->buckets) {
                kfree(table);
                return NULL;
        }
        table->n_buckets = new_size;
        table->count = 0;
        table->node_ver = 0;
        table->keep_flows = false;
        get_random_bytes(&table->hash_seed, sizeof(u32));
        table->mask_list = NULL;

        return table;
}

static void __flow_tbl_destroy(struct flow_table *table)
{
        int i;

        if (table->keep_flows)
                goto skip_flows;

        for (i = 0; i < table->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = flex_array_get(table->buckets, i);
                struct hlist_node *n;
                int ver = table->node_ver;

                hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
                        hlist_del_rcu(&flow->hash_node[ver]);
                        ovs_flow_free(flow, false);
                }
        }

        BUG_ON(!list_empty(table->mask_list));
        kfree(table->mask_list);

skip_flows:
        free_buckets(table->buckets);
        kfree(table);
}

struct flow_table *ovs_flow_tbl_alloc(int new_size)
{
        struct flow_table *table = __flow_tbl_alloc(new_size);

        if (!table)
                return NULL;

        table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
        if (!table->mask_list) {
                table->keep_flows = true;
                __flow_tbl_destroy(table);
                return NULL;
        }
        INIT_LIST_HEAD(table->mask_list);

        return table;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
        struct flow_table *table = container_of(rcu, struct flow_table, rcu);

        __flow_tbl_destroy(table);
}

void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
{
        if (!table)
                return;

        if (deferred)
                call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
        else
                __flow_tbl_destroy(table);
}

struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *last)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int ver;
        int i;

        ver = table->node_ver;
        while (*bucket < table->n_buckets) {
                i = 0;
                head = flex_array_get(table->buckets, *bucket);
                hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
                        if (i < *last) {
                                i++;
                                continue;
                        }
                        *last = i + 1;
                        return flow;
                }
                (*bucket)++;
                *last = 0;
        }

        return NULL;
}

static void __tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(table, flow->hash);
        hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);

        table->count++;
}

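/* Relink every flow from @old into @new.  Each flow has two hash_node
 * slots; the new table uses the opposite node_ver, so RCU readers can
 * keep walking the old table's lists while the copy is in progress. */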
static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
{
        int old_ver;
        int i;

        old_ver = old->node_ver;
        new->node_ver = !old_ver;

        /* Insert in new table. */
        for (i = 0; i < old->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head;

                head = flex_array_get(old->buckets, i);

                hlist_for_each_entry(flow, head, hash_node[old_ver])
                        __tbl_insert(new, flow);
        }

        new->mask_list = old->mask_list;
        old->keep_flows = true;
}

static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
{
        struct flow_table *new_table;

        new_table = __flow_tbl_alloc(n_buckets);
        if (!new_table)
                return ERR_PTR(-ENOMEM);

        flow_table_copy_flows(table, new_table);

        return new_table;
}

struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
{
        return __flow_tbl_rehash(table, table->n_buckets);
}

struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
{
        return __flow_tbl_rehash(table, table->n_buckets * 2);
}

static void __flow_free(struct sw_flow *flow)
{
        kfree((struct sf_flow_acts __force *)flow->sf_acts);
        kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

        __flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
        if (!flow)
                return;

        ovs_sw_flow_mask_del_ref((struct sw_flow_mask __force *)flow->mask,
                                 deferred);

        if (deferred)
                call_rcu(&flow->rcu, rcu_free_flow_callback);
        else
                __flow_free(flow);
}

/* RCU callback used by ovs_flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
        struct sw_flow_actions *sf_acts = container_of(rcu,
                        struct sw_flow_actions, rcu);
        kfree(sf_acts);
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
        call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}

static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
        struct qtag_prefix {
                __be16 eth_type; /* ETH_P_8021Q */
                __be16 tci;
        };
        struct qtag_prefix *qp;

        if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
                return 0;

        if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
                                         sizeof(__be16))))
                return -ENOMEM;

        qp = (struct qtag_prefix *) skb->data;
        key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
        __skb_pull(skb, sizeof(struct qtag_prefix));

        return 0;
}

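/* Determine the effective EtherType.  Values >= ETH_P_802_3_MIN are a
 * real EtherType; smaller values are an 802.3 length, in which case the
 * frame is LLC and, for a well-formed SNAP header, the encapsulated
 * EtherType is used.  Anything else is reported as ETH_P_802_2. */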
static __be16 parse_ethertype(struct sk_buff *skb)
{
        struct llc_snap_hdr {
                u8  dsap;  /* Always 0xAA */
                u8  ssap;  /* Always 0xAA */
                u8  ctrl;
                u8  oui[3];
                __be16 ethertype;
        };
        struct llc_snap_hdr *llc;
        __be16 proto;

        proto = *(__be16 *) skb->data;
        __skb_pull(skb, sizeof(__be16));

        if (ntohs(proto) >= ETH_P_802_3_MIN)
                return proto;

        if (skb->len < sizeof(struct llc_snap_hdr))
                return htons(ETH_P_802_2);

        if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
                return htons(0);

        llc = (struct llc_snap_hdr *) skb->data;
        if (llc->dsap != LLC_SAP_SNAP ||
            llc->ssap != LLC_SAP_SNAP ||
            (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
                return htons(ETH_P_802_2);

        __skb_pull(skb, sizeof(struct llc_snap_hdr));

        if (ntohs(llc->ethertype) >= ETH_P_802_3_MIN)
                return llc->ethertype;

        return htons(ETH_P_802_2);
}

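/* Store the ICMPv6 type and code in the transport-port slots of @key
 * and, for neighbour solicitation/advertisement messages, parse the ND
 * target address plus any source/target link-layer address options. */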
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
                        int nh_len)
{
        struct icmp6hdr *icmp = icmp6_hdr(skb);

        /* The ICMPv6 type and code fields use the 16-bit transport port
         * fields, so we need to store them in 16-bit network byte order.
         */
        key->ipv6.tp.src = htons(icmp->icmp6_type);
        key->ipv6.tp.dst = htons(icmp->icmp6_code);

        if (icmp->icmp6_code == 0 &&
            (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
             icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
                int icmp_len = skb->len - skb_transport_offset(skb);
                struct nd_msg *nd;
                int offset;

                /* In order to process neighbor discovery options, we need the
                 * entire packet.
                 */
                if (unlikely(icmp_len < sizeof(*nd)))
                        return 0;

                if (unlikely(skb_linearize(skb)))
                        return -ENOMEM;

                nd = (struct nd_msg *)skb_transport_header(skb);
                key->ipv6.nd.target = nd->target;

                icmp_len -= sizeof(*nd);
                offset = 0;
                while (icmp_len >= 8) {
                        struct nd_opt_hdr *nd_opt =
                                 (struct nd_opt_hdr *)(nd->opt + offset);
                        int opt_len = nd_opt->nd_opt_len * 8;

                        if (unlikely(!opt_len || opt_len > icmp_len))
                                return 0;

                        /* Store the link layer address if the appropriate
                         * option is provided.  It is considered an error if
                         * the same link layer option is specified twice.
                         */
                        if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
                            && opt_len == 8) {
                                if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
                                        goto invalid;
                                memcpy(key->ipv6.nd.sll,
                                    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
                        } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
                                   && opt_len == 8) {
                                if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
                                        goto invalid;
                                memcpy(key->ipv6.nd.tll,
                                    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
                        }

                        icmp_len -= opt_len;
                        offset += opt_len;
                }
        }

        return 0;

invalid:
        memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
        memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
        memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

        return 0;
}

/**
 * ovs_flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 */
int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
{
        int error;
        struct ethhdr *eth;

        memset(key, 0, sizeof(*key));

        key->phy.priority = skb->priority;
        if (OVS_CB(skb)->tun_key)
                memcpy(&key->tun_key, OVS_CB(skb)->tun_key, sizeof(key->tun_key));
        key->phy.in_port = in_port;
        key->phy.skb_mark = skb_get_mark(skb);

        skb_reset_mac_header(skb);

        /* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
         * header in the linear data area.
         */
        eth = eth_hdr(skb);
        memcpy(key->eth.src, eth->h_source, ETH_ALEN);
        memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);

        __skb_pull(skb, 2 * ETH_ALEN);
        /* We are going to push all headers that we pull, so no need to
         * update skb->csum here. */

        if (vlan_tx_tag_present(skb))
                key->eth.tci = htons(vlan_get_tci(skb));
        else if (eth->h_proto == htons(ETH_P_8021Q))
                if (unlikely(parse_vlan(skb, key)))
                        return -ENOMEM;

        key->eth.type = parse_ethertype(skb);
        if (unlikely(key->eth.type == htons(0)))
                return -ENOMEM;

        skb_reset_network_header(skb);
        __skb_push(skb, skb->data - skb_mac_header(skb));

        /* Network layer. */
        if (key->eth.type == htons(ETH_P_IP)) {
                struct iphdr *nh;
                __be16 offset;

                error = check_iphdr(skb);
                if (unlikely(error)) {
                        if (error == -EINVAL) {
                                skb->transport_header = skb->network_header;
                                error = 0;
                        }
                        return error;
                }

                nh = ip_hdr(skb);
                key->ipv4.addr.src = nh->saddr;
                key->ipv4.addr.dst = nh->daddr;

                key->ip.proto = nh->protocol;
                key->ip.tos = nh->tos;
                key->ip.ttl = nh->ttl;

                offset = nh->frag_off & htons(IP_OFFSET);
                if (offset) {
                        key->ip.frag = OVS_FRAG_TYPE_LATER;
                        return 0;
                }
                if (nh->frag_off & htons(IP_MF) ||
                         skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        key->ip.frag = OVS_FRAG_TYPE_FIRST;

                /* Transport layer. */
                if (key->ip.proto == IPPROTO_TCP) {
                        if (tcphdr_ok(skb)) {
                                struct tcphdr *tcp = tcp_hdr(skb);
                                key->ipv4.tp.src = tcp->source;
                                key->ipv4.tp.dst = tcp->dest;
                        }
                } else if (key->ip.proto == IPPROTO_UDP) {
                        if (udphdr_ok(skb)) {
                                struct udphdr *udp = udp_hdr(skb);
                                key->ipv4.tp.src = udp->source;
                                key->ipv4.tp.dst = udp->dest;
                        }
                } else if (key->ip.proto == IPPROTO_ICMP) {
                        if (icmphdr_ok(skb)) {
                                struct icmphdr *icmp = icmp_hdr(skb);
                                /* The ICMP type and code fields use the 16-bit
                                 * transport port fields, so we need to store
                                 * them in 16-bit network byte order. */
                                key->ipv4.tp.src = htons(icmp->type);
                                key->ipv4.tp.dst = htons(icmp->code);
                        }
                }

        } else if ((key->eth.type == htons(ETH_P_ARP) ||
                   key->eth.type == htons(ETH_P_RARP)) && arphdr_ok(skb)) {
                struct arp_eth_header *arp;

                arp = (struct arp_eth_header *)skb_network_header(skb);

                if (arp->ar_hrd == htons(ARPHRD_ETHER)
                                && arp->ar_pro == htons(ETH_P_IP)
                                && arp->ar_hln == ETH_ALEN
                                && arp->ar_pln == 4) {

                        /* We only match on the lower 8 bits of the opcode. */
                        if (ntohs(arp->ar_op) <= 0xff)
                                key->ip.proto = ntohs(arp->ar_op);
                        memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
                        memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
                        memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
                        memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
                }
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                int nh_len;             /* IPv6 Header + Extensions */

                nh_len = parse_ipv6hdr(skb, key);
                if (unlikely(nh_len < 0)) {
                        if (nh_len == -EINVAL) {
                                skb->transport_header = skb->network_header;
                                error = 0;
                        } else {
                                error = nh_len;
                        }
                        return error;
                }

                if (key->ip.frag == OVS_FRAG_TYPE_LATER)
                        return 0;
                if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        key->ip.frag = OVS_FRAG_TYPE_FIRST;

                /* Transport layer. */
                if (key->ip.proto == NEXTHDR_TCP) {
                        if (tcphdr_ok(skb)) {
                                struct tcphdr *tcp = tcp_hdr(skb);
                                key->ipv6.tp.src = tcp->source;
                                key->ipv6.tp.dst = tcp->dest;
                        }
                } else if (key->ip.proto == NEXTHDR_UDP) {
                        if (udphdr_ok(skb)) {
                                struct udphdr *udp = udp_hdr(skb);
                                key->ipv6.tp.src = udp->source;
                                key->ipv6.tp.dst = udp->dest;
                        }
                } else if (key->ip.proto == NEXTHDR_ICMP) {
                        if (icmp6hdr_ok(skb)) {
                                error = parse_icmpv6(skb, key, nh_len);
                                if (error)
                                        return error;
                        }
                }
        }

        return 0;
}

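/* Hash the bytes of @key from @key_start up to @key_len, treated as an
 * array of 32-bit words. */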
static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, int key_len)
{
        return jhash2((u32 *)((u8 *)key + key_start),
                      DIV_ROUND_UP(key_len - key_start, sizeof(u32)), 0);
}

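/* Keys for tunneled packets hash and compare from offset 0 so that the
 * tunnel metadata participates; for everything else the metadata before
 * 'phy' is skipped. */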
static int flow_key_start(const struct sw_flow_key *key)
{
        if (key->tun_key.ipv4_dst)
                return 0;
        else
                return offsetof(struct sw_flow_key, phy);
}

static bool __cmp_key(const struct sw_flow_key *key1,
                const struct sw_flow_key *key2, int key_start, int key_len)
{
        return !memcmp((u8 *)key1 + key_start,
                        (u8 *)key2 + key_start, (key_len - key_start));
}

static bool __flow_cmp_key(const struct sw_flow *flow,
                const struct sw_flow_key *key, int key_start, int key_len)
{
        return __cmp_key(&flow->key, key, key_start, key_len);
}

static bool __flow_cmp_unmasked_key(const struct sw_flow *flow,
                  const struct sw_flow_key *key, int key_start, int key_len)
{
        return __cmp_key(&flow->unmasked_key, key, key_start, key_len);
}

bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
                const struct sw_flow_key *key, int key_len)
{
        int key_start;
        key_start = flow_key_start(key);

        return __flow_cmp_unmasked_key(flow, key, key_start, key_len);
}

struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table,
                                       struct sw_flow_match *match)
{
        struct sw_flow_key *unmasked = match->key;
        int key_len = match->range.end;
        struct sw_flow *flow;

        flow = ovs_flow_lookup(table, unmasked);
        if (flow && (!ovs_flow_cmp_unmasked_key(flow, unmasked, key_len)))
                flow = NULL;

        return flow;
}

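/* Apply @mask to @flow_key and look the masked key up in the bucket
 * selected by its hash.  Only the bytes inside the mask's range take
 * part in the hash and in the comparison. */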
static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table,
                                    const struct sw_flow_key *flow_key,
                                    struct sw_flow_mask *mask)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int key_start = mask->range.start;
        int key_len = mask->range.end;
        u32 hash;
        struct sw_flow_key masked_key;

        ovs_flow_key_mask(&masked_key, flow_key, mask);
        hash = ovs_flow_hash(&masked_key, key_start, key_len);
        head = find_bucket(table, hash);
        hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
                if (__flow_cmp_key(flow, &masked_key, key_start, key_len))
                        return flow;
        }
        return NULL;
}

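/* Walk the table's mask list, attempting a masked lookup with each mask
 * until one produces a flow.  A sketch of the intended receive-path
 * usage, assuming the caller in datapath.c:
 *
 *      struct sw_flow_key key;
 *      if (!ovs_flow_extract(skb, p->port_no, &key))
 *              flow = ovs_flow_lookup(rcu_dereference(dp->table), &key);
 */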
struct sw_flow *ovs_flow_lookup(struct flow_table *tbl,
                                const struct sw_flow_key *key)
{
        struct sw_flow *flow = NULL;
        struct sw_flow_mask *mask;

        list_for_each_entry_rcu(mask, tbl->mask_list, list) {
                flow = ovs_masked_flow_lookup(tbl, key, mask);
                if (flow)  /* Found */
                        break;
        }

        return flow;
}

void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow)
{
        flow->hash = ovs_flow_hash(&flow->key,
                        ovsl_dereference(flow->mask)->range.start,
                        ovsl_dereference(flow->mask)->range.end);
        __tbl_insert(table, flow);
}

void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow)
{
        BUG_ON(table->count == 0);
        hlist_del_rcu(&flow->hash_node[table->node_ver]);
        table->count--;
}

/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.  */
const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
        [OVS_KEY_ATTR_ENCAP] = -1,
        [OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
        [OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
        [OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
        [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
        [OVS_KEY_ATTR_VLAN] = sizeof(__be16),
        [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
        [OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
        [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
        [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
        [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
        [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
        [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
        [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
        [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
        [OVS_KEY_ATTR_TUNNEL] = -1,
};

static bool is_all_zero(const u8 *fp, size_t size)
{
        int i;

        if (!fp)
                return false;

        for (i = 0; i < size; i++)
                if (fp[i])
                        return false;

        return true;
}

static int __parse_flow_nlattrs(const struct nlattr *attr,
                              const struct nlattr *a[],
                              u64 *attrsp, bool nz)
{
        const struct nlattr *nla;
        u64 attrs;
        int rem;

        attrs = *attrsp;
        nla_for_each_nested(nla, attr, rem) {
                u16 type = nla_type(nla);
                int expected_len;

                if (type > OVS_KEY_ATTR_MAX) {
                        OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n",
                                  type, OVS_KEY_ATTR_MAX);
                        return -EINVAL;
                }

                if (attrs & (1ULL << type)) {
                        OVS_NLERR("Duplicate key attribute (type %d).\n", type);
                        return -EINVAL;
                }

                expected_len = ovs_key_lens[type];
                if (nla_len(nla) != expected_len && expected_len != -1) {
                        OVS_NLERR("Key attribute has unexpected length (type=%d"
                                  ", length=%d, expected=%d).\n", type,
                                  nla_len(nla), expected_len);
                        return -EINVAL;
                }

                if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
                        attrs |= 1ULL << type;
                        a[type] = nla;
                }
        }
        if (rem) {
                OVS_NLERR("Message has %d unknown bytes.\n", rem);
                return -EINVAL;
        }

        *attrsp = attrs;
        return 0;
}

static int parse_flow_mask_nlattrs(const struct nlattr *attr,
                              const struct nlattr *a[], u64 *attrsp)
{
        return __parse_flow_nlattrs(attr, a, attrsp, true);
}

static int parse_flow_nlattrs(const struct nlattr *attr,
                              const struct nlattr *a[], u64 *attrsp)
{
        return __parse_flow_nlattrs(attr, a, attrsp, false);
}

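/* Parse a nested OVS_KEY_ATTR_TUNNEL attribute into the tun_key part of
 * @match (key or mask, per @is_mask).  A valid tunnel key must specify
 * a nonzero IPv4 destination and a TTL. */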
1179 int ipv4_tun_from_nlattr(const struct nlattr *attr,
1180                          struct sw_flow_match *match, bool is_mask)
1181 {
1182         struct nlattr *a;
1183         int rem;
1184         bool ttl = false;
1185         __be16 tun_flags = 0;
1186
1187         nla_for_each_nested(a, attr, rem) {
1188                 int type = nla_type(a);
1189                 static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
1190                         [OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64),
1191                         [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32),
1192                         [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32),
1193                         [OVS_TUNNEL_KEY_ATTR_TOS] = 1,
1194                         [OVS_TUNNEL_KEY_ATTR_TTL] = 1,
1195                         [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0,
1196                         [OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
1197                 };
1198
1199                 if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
1200                         OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n",
1201                         type, OVS_TUNNEL_KEY_ATTR_MAX);
1202                         return -EINVAL;
1203                 }
1204
1205                 if (ovs_tunnel_key_lens[type] != nla_len(a)) {
1206                         OVS_NLERR("IPv4 tunnel attribute type has unexpected "
1207                                   " legnth (type=%d, length=%d, expected=%d).\n",
1208                                   type, nla_len(a), ovs_tunnel_key_lens[type]);
1209                         return -EINVAL;
1210                 }
1211
1212                 switch (type) {
1213                 case OVS_TUNNEL_KEY_ATTR_ID:
1214                         SW_FLOW_KEY_PUT(match, tun_key.tun_id,
1215                                         nla_get_be64(a), is_mask);
1216                         tun_flags |= TUNNEL_KEY;
1217                         break;
1218                 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
1219                         SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
1220                                         nla_get_be32(a), is_mask);
1221                         break;
1222                 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
1223                         SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
1224                                         nla_get_be32(a), is_mask);
1225                         break;
1226                 case OVS_TUNNEL_KEY_ATTR_TOS:
1227                         SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
1228                                         nla_get_u8(a), is_mask);
1229                         break;
1230                 case OVS_TUNNEL_KEY_ATTR_TTL:
1231                         SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
1232                                         nla_get_u8(a), is_mask);
1233                         ttl = true;
1234                         break;
1235                 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
1236                         tun_flags |= TUNNEL_DONT_FRAGMENT;
1237                         break;
1238                 case OVS_TUNNEL_KEY_ATTR_CSUM:
1239                         tun_flags |= TUNNEL_CSUM;
1240                         break;
1241                 default:
1242                         return -EINVAL;
1243                 }
1244         }
1245
1246         SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
1247
1248         if (rem > 0) {
1249                 OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem);
1250                 return -EINVAL;
1251         }
1252
1253         if (!match->key->tun_key.ipv4_dst) {
1254                 OVS_NLERR("IPv4 tunnel destination address is zero.\n");
1255                 return -EINVAL;
1256         }
1257
1258         if (!ttl) {
1259                 OVS_NLERR("IPv4 tunnel TTL not specified.\n");
1260                 return -EINVAL;
1261         }
1262
1263         return 0;
1264 }
1265
1266 int ipv4_tun_to_nlattr(struct sk_buff *skb,
1267                         const struct ovs_key_ipv4_tunnel *tun_key,
1268                         const struct ovs_key_ipv4_tunnel *output)
1269 {
1270         struct nlattr *nla;
1271
1272         nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
1273         if (!nla)
1274                 return -EMSGSIZE;
1275
1276         if (output->tun_flags & TUNNEL_KEY &&
1277             nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
1278                 return -EMSGSIZE;
1279         if (output->ipv4_src &&
1280                 nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
1281                 return -EMSGSIZE;
1282         if (output->ipv4_dst &&
1283                 nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
1284                 return -EMSGSIZE;
1285         if (output->ipv4_tos &&
1286                 nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
1287                 return -EMSGSIZE;
1288         if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
1289                 return -EMSGSIZE;
1290         if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
1291                 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
1292                 return -EMSGSIZE;
1293         if ((output->tun_flags & TUNNEL_CSUM) &&
1294                 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
1295                 return -EMSGSIZE;
1296
1297         nla_nest_end(skb, nla);
1298         return 0;
1299 }
1300
1301
1302 static int metadata_from_nlattrs(struct sw_flow_match *match,  u64 *attrs,
1303                 const struct nlattr **a, bool is_mask)
1304 {
1305         if (*attrs & (1ULL << OVS_KEY_ATTR_PRIORITY)) {
1306                 SW_FLOW_KEY_PUT(match, phy.priority,
1307                           nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
1308                 *attrs &= ~(1ULL << OVS_KEY_ATTR_PRIORITY);
1309         }
1310
1311         if (*attrs & (1ULL << OVS_KEY_ATTR_IN_PORT)) {
1312                 u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
1313
1314                 if (!is_mask && in_port >= DP_MAX_PORTS)
1315                         return -EINVAL;
1316                 SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
1317                 *attrs &= ~(1ULL << OVS_KEY_ATTR_IN_PORT);
1318         } else if (!is_mask) {
1319                 SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
1320         }
1321
1322         if (*attrs & (1ULL << OVS_KEY_ATTR_SKB_MARK)) {
1323                 uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
1324 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && !defined(CONFIG_NETFILTER)
1325                 if (!is_mask && mark != 0) {
1326                         OVS_NLERR("skb->mark must be zero on this kernel (mark=%d).\n", mark);
1327                         return -EINVAL;
1328                 }
1329 #endif
1330                 SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
1331                 *attrs &= ~(1ULL << OVS_KEY_ATTR_SKB_MARK);
1332         }
1333         if (*attrs & (1ULL << OVS_KEY_ATTR_TUNNEL)) {
1334                 if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
1335                                         is_mask))
1336                         return -EINVAL;
1337                 *attrs &= ~(1ULL << OVS_KEY_ATTR_TUNNEL);
1338         }
1339         return 0;
1340 }
1341
1342 static int ovs_key_from_nlattrs(struct sw_flow_match *match,  u64 attrs,
1343                 const struct nlattr **a, bool is_mask)
1344 {
1345         int err;
1346         u64 orig_attrs = attrs;
1347
1348         err = metadata_from_nlattrs(match, &attrs, a, is_mask);
1349         if (err)
1350                 return err;
1351
1352         if (attrs & (1ULL << OVS_KEY_ATTR_ETHERNET)) {
1353                 const struct ovs_key_ethernet *eth_key;
1354
1355                 eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
1356                 SW_FLOW_KEY_MEMCPY(match, eth.src,
1357                                 eth_key->eth_src, ETH_ALEN, is_mask);
1358                 SW_FLOW_KEY_MEMCPY(match, eth.dst,
1359                                 eth_key->eth_dst, ETH_ALEN, is_mask);
1360                 attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERNET);
1361         }
1362
1363         if (attrs & (1ULL << OVS_KEY_ATTR_VLAN)) {
1364                 __be16 tci;
1365
1366                 tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
1367                 if (!is_mask)
1368                         if (!(tci & htons(VLAN_TAG_PRESENT))) {
1369                                 OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n");
1370                                 return -EINVAL;
1371                         }
1372
1373                 SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
1374                 attrs &= ~(1ULL << OVS_KEY_ATTR_VLAN);
1375         }
1376
1377         if (attrs & (1ULL << OVS_KEY_ATTR_ETHERTYPE)) {
1378                 __be16 eth_type;
1379
1380                 eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
1381                 if (!is_mask && ntohs(eth_type) < ETH_P_802_3_MIN) {
1382                         OVS_NLERR("EtherType is less than mimimum (type=%x, min=%x).\n",
1383                                         ntohs(eth_type), ETH_P_802_3_MIN);
1384                         return -EINVAL;
1385                 }
1386
1387                 SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
1388                 attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERTYPE);
1389         } else if (!is_mask) {
1390                 SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
1391         }
1392
1393         if (attrs & (1ULL << OVS_KEY_ATTR_IPV4)) {
1394                 const struct ovs_key_ipv4 *ipv4_key;
1395
1396                 ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
1397                 if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
1398                         OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n",
1399                                 ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
1400                         return -EINVAL;
1401                 }
1402                 SW_FLOW_KEY_PUT(match, ip.proto,
1403                                 ipv4_key->ipv4_proto, is_mask);
1404                 SW_FLOW_KEY_PUT(match, ip.tos,
1405                                 ipv4_key->ipv4_tos, is_mask);
1406                 SW_FLOW_KEY_PUT(match, ip.ttl,
1407                                 ipv4_key->ipv4_ttl, is_mask);
1408                 SW_FLOW_KEY_PUT(match, ip.frag,
1409                                 ipv4_key->ipv4_frag, is_mask);
1410                 SW_FLOW_KEY_PUT(match, ipv4.addr.src,
1411                                 ipv4_key->ipv4_src, is_mask);
1412                 SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
1413                                 ipv4_key->ipv4_dst, is_mask);
1414                 attrs &= ~(1ULL << OVS_KEY_ATTR_IPV4);
1415         }

        if (attrs & (1ULL << OVS_KEY_ATTR_IPV6)) {
                const struct ovs_key_ipv6 *ipv6_key;

                ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
                if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
                        OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n",
                                ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
                        return -EINVAL;
                }
                SW_FLOW_KEY_PUT(match, ipv6.label,
                                ipv6_key->ipv6_label, is_mask);
                SW_FLOW_KEY_PUT(match, ip.proto,
                                ipv6_key->ipv6_proto, is_mask);
                SW_FLOW_KEY_PUT(match, ip.tos,
                                ipv6_key->ipv6_tclass, is_mask);
                SW_FLOW_KEY_PUT(match, ip.ttl,
                                ipv6_key->ipv6_hlimit, is_mask);
                SW_FLOW_KEY_PUT(match, ip.frag,
                                ipv6_key->ipv6_frag, is_mask);
                SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
                                ipv6_key->ipv6_src,
                                sizeof(match->key->ipv6.addr.src),
                                is_mask);
                SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
                                ipv6_key->ipv6_dst,
                                sizeof(match->key->ipv6.addr.dst),
                                is_mask);

                attrs &= ~(1ULL << OVS_KEY_ATTR_IPV6);
        }

        if (attrs & (1ULL << OVS_KEY_ATTR_ARP)) {
                const struct ovs_key_arp *arp_key;

                arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
                if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
                        OVS_NLERR("Unknown ARP opcode (opcode=%d).\n",
                                  ntohs(arp_key->arp_op));
                        return -EINVAL;
                }

                SW_FLOW_KEY_PUT(match, ipv4.addr.src,
                                arp_key->arp_sip, is_mask);
                SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
                                arp_key->arp_tip, is_mask);
                SW_FLOW_KEY_PUT(match, ip.proto,
                                ntohs(arp_key->arp_op), is_mask);
                SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
                                arp_key->arp_sha, ETH_ALEN, is_mask);
                SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
                                arp_key->arp_tha, ETH_ALEN, is_mask);

                attrs &= ~(1ULL << OVS_KEY_ATTR_ARP);
        }

        if (attrs & (1ULL << OVS_KEY_ATTR_TCP)) {
                const struct ovs_key_tcp *tcp_key;

                tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
                if (orig_attrs & (1ULL << OVS_KEY_ATTR_IPV4)) {
                        SW_FLOW_KEY_PUT(match, ipv4.tp.src,
                                        tcp_key->tcp_src, is_mask);
                        SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
                                        tcp_key->tcp_dst, is_mask);
                } else {
                        SW_FLOW_KEY_PUT(match, ipv6.tp.src,
                                        tcp_key->tcp_src, is_mask);
                        SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
                                        tcp_key->tcp_dst, is_mask);
                }
                attrs &= ~(1ULL << OVS_KEY_ATTR_TCP);
        }

        if (attrs & (1ULL << OVS_KEY_ATTR_UDP)) {
                const struct ovs_key_udp *udp_key;

                udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
                if (orig_attrs & (1ULL << OVS_KEY_ATTR_IPV4)) {
                        SW_FLOW_KEY_PUT(match, ipv4.tp.src,
                                        udp_key->udp_src, is_mask);
                        SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
                                        udp_key->udp_dst, is_mask);
                } else {
                        SW_FLOW_KEY_PUT(match, ipv6.tp.src,
                                        udp_key->udp_src, is_mask);
                        SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
                                        udp_key->udp_dst, is_mask);
                }
                attrs &= ~(1ULL << OVS_KEY_ATTR_UDP);
        }

        if (attrs & (1ULL << OVS_KEY_ATTR_ICMP)) {
                const struct ovs_key_icmp *icmp_key;

                icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
                SW_FLOW_KEY_PUT(match, ipv4.tp.src,
                                htons(icmp_key->icmp_type), is_mask);
                SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
                                htons(icmp_key->icmp_code), is_mask);
                attrs &= ~(1ULL << OVS_KEY_ATTR_ICMP);
        }

        if (attrs & (1ULL << OVS_KEY_ATTR_ICMPV6)) {
                const struct ovs_key_icmpv6 *icmpv6_key;

                icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
                SW_FLOW_KEY_PUT(match, ipv6.tp.src,
                                htons(icmpv6_key->icmpv6_type), is_mask);
                SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
                                htons(icmpv6_key->icmpv6_code), is_mask);
                attrs &= ~(1ULL << OVS_KEY_ATTR_ICMPV6);
        }

        if (attrs & (1ULL << OVS_KEY_ATTR_ND)) {
                const struct ovs_key_nd *nd_key;

                nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
                SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
                                nd_key->nd_target,
                                sizeof(match->key->ipv6.nd.target),
                                is_mask);
                SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
                                nd_key->nd_sll, ETH_ALEN, is_mask);
                SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
                                nd_key->nd_tll, ETH_ALEN, is_mask);
                attrs &= ~(1ULL << OVS_KEY_ATTR_ND);
        }

        if (attrs != 0)
                return -EINVAL;

        return 0;
}
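
/*
 * Illustrative sketch only (not used by the code): the parser above tracks
 * attribute presence in a u64 bitmap, clears one bit per attribute it
 * consumes, and rejects anything left over.  The same pattern in isolation,
 * with hypothetical input:
 *
 *	u64 attrs = (1ULL << OVS_KEY_ATTR_ETHERTYPE) |
 *		    (1ULL << OVS_KEY_ATTR_IPV4);
 *
 *	if (attrs & (1ULL << OVS_KEY_ATTR_ETHERTYPE)) {
 *		... consume the attribute ...
 *		attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERTYPE);
 *	}
 *	if (attrs)	unknown attribute bits remain
 *		return -EINVAL;
 */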

/**
 * ovs_match_from_nlattrs - parses Netlink attributes into a flow key and
 * mask. If 'mask' is NULL, the flow is treated as an exact-match flow.
 * Otherwise, it is treated as a wildcarded flow, unless the mask includes
 * no don't-care bits.
 * @match: receives the extracted flow match information.
 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence. The fields should be those of the packet that triggered the
 * creation of this flow.
 * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink
 * attributes that specify the mask field of the wildcarded flow.
 */
int ovs_match_from_nlattrs(struct sw_flow_match *match,
                           const struct nlattr *key,
                           const struct nlattr *mask)
{
        const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
        const struct nlattr *encap;
        u64 key_attrs = 0;
        u64 mask_attrs = 0;
        bool encap_valid = false;
        int err;

        err = parse_flow_nlattrs(key, a, &key_attrs);
        if (err)
                return err;

        if (key_attrs & (1ULL << OVS_KEY_ATTR_ENCAP)) {
                encap = a[OVS_KEY_ATTR_ENCAP];
                key_attrs &= ~(1ULL << OVS_KEY_ATTR_ENCAP);
                if (nla_len(encap)) {
                        __be16 eth_type = 0; /* ETH_P_8021Q */

                        if (a[OVS_KEY_ATTR_ETHERTYPE])
                                eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);

                        if ((eth_type == htons(ETH_P_8021Q)) && (a[OVS_KEY_ATTR_VLAN])) {
                                encap_valid = true;
                                key_attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERTYPE);
                                err = parse_flow_nlattrs(encap, a, &key_attrs);
                        } else {
                                OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n");
                                err = -EINVAL;
                        }

                        if (err)
                                return err;
                }
        }

        err = ovs_key_from_nlattrs(match, key_attrs, a, false);
        if (err)
                return err;

        if (mask) {
                err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
                if (err)
                        return err;

                if ((mask_attrs & (1ULL << OVS_KEY_ATTR_ENCAP)) && encap_valid) {
                        __be16 eth_type = 0;

                        mask_attrs &= ~(1ULL << OVS_KEY_ATTR_ENCAP);
                        if (a[OVS_KEY_ATTR_ETHERTYPE])
                                eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
                        if (eth_type == htons(0xffff)) {
                                mask_attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERTYPE);
                                encap = a[OVS_KEY_ATTR_ENCAP];
                                err = parse_flow_mask_nlattrs(encap, a, &mask_attrs);
                        } else {
                                OVS_NLERR("VLAN frames must have an exact match"
                                          " on the TPID (mask=%x).\n",
                                          ntohs(eth_type));
                                err = -EINVAL;
                        }

                        if (err)
                                return err;
                }

                err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
                if (err)
                        return err;
        } else {
                /* Populate exact match flow's key mask. */
                if (match->mask)
                        ovs_sw_flow_mask_set(match->mask, &match->range, 0xff);
        }

        if (!ovs_match_validate(match, key_attrs, mask_attrs))
                return -EINVAL;

        return 0;
}
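
/*
 * Illustrative usage sketch only: a typical caller (e.g. the flow setup
 * path in datapath.c) is assumed to pair this with the ovs_match_init()
 * helper defined earlier in this file and a datapath.c-style attribute
 * table 'a':
 *
 *	struct sw_flow_match match;
 *	struct sw_flow_key key;
 *	struct sw_flow_mask mask;
 *
 *	ovs_match_init(&match, &key, &mask);
 *	error = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY],
 *				       a[OVS_FLOW_ATTR_MASK]);
 *	if (error)
 *		goto error;
 */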

/**
 * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
 * @flow: Receives extracted in_port, priority, tun_key and skb_mark.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by ovs_match_from_nlattrs(), but only enough of
 * it to get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */
int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow,
                const struct nlattr *attr)
{
        struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key;
        const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
        u64 attrs = 0;
        int err;
        struct sw_flow_match match;

        flow->key.phy.in_port = DP_MAX_PORTS;
        flow->key.phy.priority = 0;
        flow->key.phy.skb_mark = 0;
        memset(tun_key, 0, sizeof(flow->key.tun_key));

        err = parse_flow_nlattrs(attr, a, &attrs);
        if (err)
                return -EINVAL;

        memset(&match, 0, sizeof(match));
        match.key = &flow->key;

        err = metadata_from_nlattrs(&match, &attrs, a, false);
        if (err)
                return err;

        return 0;
}
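
/*
 * Illustrative sketch only: an OVS_PACKET_CMD_EXECUTE handler is assumed
 * to call this with the request's OVS_PACKET_ATTR_KEY attribute, so that
 * only the metadata fields come from userspace while everything else is
 * later extracted from the packet itself:
 *
 *	err = ovs_flow_metadata_from_nlattrs(flow, a[OVS_PACKET_ATTR_KEY]);
 *	if (err)
 *		goto err_flow_free;
 */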

int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey,
                const struct sw_flow_key *output, struct sk_buff *skb)
{
        struct ovs_key_ethernet *eth_key;
        struct nlattr *nla, *encap;

        if (output->phy.priority &&
                nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
                goto nla_put_failure;

        if (swkey->tun_key.ipv4_dst &&
            ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key))
                goto nla_put_failure;

        if (swkey->phy.in_port == DP_MAX_PORTS) {
                if ((swkey != output) && (output->phy.in_port == 0xffff))
                        if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
                                goto nla_put_failure;
        } else {
                u16 upper_u16;
                upper_u16 = (swkey == output) ? 0 : 0xffff;

                if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
                                (upper_u16 << 16) | output->phy.in_port))
                        goto nla_put_failure;
        }

        if (output->phy.skb_mark &&
                nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
                goto nla_put_failure;

        nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
        if (!nla)
                goto nla_put_failure;

        eth_key = nla_data(nla);
        memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN);
        memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN);

        if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
                __be16 eth_type;

                eth_type = (swkey == output) ? htons(ETH_P_8021Q) : htons(0xffff);
                if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
                    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
                        goto nla_put_failure;
                encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
                if (!swkey->eth.tci)
                        goto unencap;
        } else {
                encap = NULL;
        }

        if (swkey->eth.type == htons(ETH_P_802_2)) {
                /*
                 * An 802.2 frame is represented in Netlink by omitting
                 * OVS_KEY_ATTR_ETHERTYPE from the flow key attribute and
                 * using 0xffff in the mask attribute.  The EtherType can
                 * also be wildcarded.
                 */
                if (swkey != output && output->eth.type)
                        if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
                                                output->eth.type))
                                goto nla_put_failure;
                goto unencap;
        }

        if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
                goto nla_put_failure;

        if (swkey->eth.type == htons(ETH_P_IP)) {
                struct ovs_key_ipv4 *ipv4_key;

                nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
                if (!nla)
                        goto nla_put_failure;
                ipv4_key = nla_data(nla);
                ipv4_key->ipv4_src = output->ipv4.addr.src;
                ipv4_key->ipv4_dst = output->ipv4.addr.dst;
                ipv4_key->ipv4_proto = output->ip.proto;
                ipv4_key->ipv4_tos = output->ip.tos;
                ipv4_key->ipv4_ttl = output->ip.ttl;
                ipv4_key->ipv4_frag = output->ip.frag;
        } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
                struct ovs_key_ipv6 *ipv6_key;

                nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
                if (!nla)
                        goto nla_put_failure;
                ipv6_key = nla_data(nla);
                memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
                                sizeof(ipv6_key->ipv6_src));
                memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
                                sizeof(ipv6_key->ipv6_dst));
                ipv6_key->ipv6_label = output->ipv6.label;
                ipv6_key->ipv6_proto = output->ip.proto;
                ipv6_key->ipv6_tclass = output->ip.tos;
                ipv6_key->ipv6_hlimit = output->ip.ttl;
                ipv6_key->ipv6_frag = output->ip.frag;
        } else if (swkey->eth.type == htons(ETH_P_ARP) ||
                   swkey->eth.type == htons(ETH_P_RARP)) {
                struct ovs_key_arp *arp_key;

                nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
                if (!nla)
                        goto nla_put_failure;
                arp_key = nla_data(nla);
                memset(arp_key, 0, sizeof(struct ovs_key_arp));
                arp_key->arp_sip = output->ipv4.addr.src;
                arp_key->arp_tip = output->ipv4.addr.dst;
                arp_key->arp_op = htons(output->ip.proto);
                memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN);
                memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN);
        }

        if ((swkey->eth.type == htons(ETH_P_IP) ||
             swkey->eth.type == htons(ETH_P_IPV6)) &&
             swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

                if (swkey->ip.proto == IPPROTO_TCP) {
                        struct ovs_key_tcp *tcp_key;

                        nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
                        if (!nla)
                                goto nla_put_failure;
                        tcp_key = nla_data(nla);
                        if (swkey->eth.type == htons(ETH_P_IP)) {
                                tcp_key->tcp_src = output->ipv4.tp.src;
                                tcp_key->tcp_dst = output->ipv4.tp.dst;
                        } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
                                tcp_key->tcp_src = output->ipv6.tp.src;
                                tcp_key->tcp_dst = output->ipv6.tp.dst;
                        }
                } else if (swkey->ip.proto == IPPROTO_UDP) {
                        struct ovs_key_udp *udp_key;

                        nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
                        if (!nla)
                                goto nla_put_failure;
                        udp_key = nla_data(nla);
                        if (swkey->eth.type == htons(ETH_P_IP)) {
                                udp_key->udp_src = output->ipv4.tp.src;
                                udp_key->udp_dst = output->ipv4.tp.dst;
                        } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
                                udp_key->udp_src = output->ipv6.tp.src;
                                udp_key->udp_dst = output->ipv6.tp.dst;
                        }
                } else if (swkey->eth.type == htons(ETH_P_IP) &&
                           swkey->ip.proto == IPPROTO_ICMP) {
                        struct ovs_key_icmp *icmp_key;

                        nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
                        if (!nla)
                                goto nla_put_failure;
                        icmp_key = nla_data(nla);
                        icmp_key->icmp_type = ntohs(output->ipv4.tp.src);
                        icmp_key->icmp_code = ntohs(output->ipv4.tp.dst);
                } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
                           swkey->ip.proto == IPPROTO_ICMPV6) {
                        struct ovs_key_icmpv6 *icmpv6_key;

                        nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
                                                sizeof(*icmpv6_key));
                        if (!nla)
                                goto nla_put_failure;
                        icmpv6_key = nla_data(nla);
                        icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src);
                        icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst);

                        if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
                            icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
                                struct ovs_key_nd *nd_key;

                                nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
                                if (!nla)
                                        goto nla_put_failure;
                                nd_key = nla_data(nla);
                                memcpy(nd_key->nd_target, &output->ipv6.nd.target,
                                                        sizeof(nd_key->nd_target));
                                memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN);
                                memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN);
                        }
                }
        }

unencap:
        if (encap)
                nla_nest_end(skb, encap);

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
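
/*
 * For reference, the attribute layout produced above for a VLAN-tagged
 * IPv4/TCP flow nests as follows (sketch):
 *
 *	OVS_KEY_ATTR_ETHERNET
 *	OVS_KEY_ATTR_ETHERTYPE = htons(ETH_P_8021Q)   (0xffff when the
 *						       mask is serialized)
 *	OVS_KEY_ATTR_VLAN      = TCI
 *	OVS_KEY_ATTR_ENCAP:
 *		OVS_KEY_ATTR_ETHERTYPE = inner EtherType
 *		OVS_KEY_ATTR_IPV4
 *		OVS_KEY_ATTR_TCP
 */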

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
                                        0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
        kmem_cache_destroy(flow_cache);
}

struct sw_flow_mask *ovs_sw_flow_mask_alloc(void)
{
        struct sw_flow_mask *mask;

        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
        if (mask)
                mask->ref_count = 0;

        return mask;
}

void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask)
{
        mask->ref_count++;
}

static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
{
        struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);

        kfree(mask);
}

void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
{
        if (!mask)
                return;

        BUG_ON(!mask->ref_count);
        mask->ref_count--;

        if (!mask->ref_count) {
                list_del_rcu(&mask->list);
                if (deferred)
                        call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
                else
                        kfree(mask);
        }
}
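
/*
 * Typical mask lifecycle, as a sketch of how the helpers above and the
 * lookup/insert helpers below are assumed to be used together by the
 * flow table code:
 *
 *	mask = ovs_sw_flow_mask_find(tbl, &new_mask);
 *	if (!mask) {
 *		mask = ovs_sw_flow_mask_alloc();
 *		if (!mask)
 *			return -ENOMEM;
 *		...
 *		ovs_sw_flow_mask_insert(tbl, mask);
 *	}
 *	ovs_sw_flow_mask_add_ref(mask);
 *	...
 *	ovs_sw_flow_mask_del_ref(mask, true);	deferred (RCU) free
 */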

static bool ovs_sw_flow_mask_equal(const struct sw_flow_mask *a,
                const struct sw_flow_mask *b)
{
        const u8 *a_ = (const u8 *)&a->key + a->range.start;
        const u8 *b_ = (const u8 *)&b->key + b->range.start;

        return (a->range.end == b->range.end) &&
               (a->range.start == b->range.start) &&
               (memcmp(a_, b_, ovs_sw_flow_mask_actual_size(a)) == 0);
}

struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
                                           const struct sw_flow_mask *mask)
{
        struct list_head *ml;

        list_for_each(ml, tbl->mask_list) {
                struct sw_flow_mask *m;
                m = container_of(ml, struct sw_flow_mask, list);
                if (ovs_sw_flow_mask_equal(mask, m))
                        return m;
        }

        return NULL;
}

/*
 * Add a new mask to the mask list.
 * The caller must ensure that 'mask' is not the same as any mask
 * that is already on the list.
 */
void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
{
        list_add_rcu(&mask->list, tbl->mask_list);
}

/*
 * Set the bytes of 'mask' covered by 'range' to 'val'.
 */
static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask,
                struct sw_flow_key_range *range, u8 val)
{
        u8 *m = (u8 *)&mask->key + range->start;

        mask->range = *range;
        memset(m, val, ovs_sw_flow_mask_size_roundup(mask));
}