/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
static struct kmem_cache *flow_cache;

static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask,
				 struct sw_flow_key_range *range, u8 val);
static void update_range__(struct sw_flow_match *match,
			   size_t offset, size_t size, bool is_mask)
{
	struct sw_flow_key_range *range = NULL;
	size_t start = offset;
	size_t end = offset + size;

	if (!is_mask)
		range = &match->range;
	else if (match->mask)
		range = &match->mask->range;

	if (range == NULL)
		return;

	if (range->start == range->end) {
		range->start = start;
		range->end = end;
		return;
	}

	if (range->start > start)
		range->start = start;

	if (range->end < end)
		range->end = end;
}
#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
	do { \
		update_range__(match, offsetof(struct sw_flow_key, field), \
				sizeof((match)->key->field), is_mask); \
		if (is_mask && (match)->mask != NULL) { \
			(match)->mask->key.field = value; \
		} else { \
			(match)->key->field = value; \
		} \
	} while (0)

#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
	do { \
		update_range__(match, offsetof(struct sw_flow_key, field), \
				len, is_mask); \
		if (is_mask && (match)->mask != NULL) { \
			memcpy(&(match)->mask->key.field, value_p, len); \
		} else { \
			memcpy(&(match)->key->field, value_p, len); \
		} \
	} while (0)
void ovs_match_init(struct sw_flow_match *match,
		    struct sw_flow_key *key,
		    struct sw_flow_mask *mask)
{
	memset(match, 0, sizeof(*match));
	match->key = key;
	match->mask = mask;

	memset(key, 0, sizeof(*key));

	if (mask) {
		memset(&mask->key, 0, sizeof(mask->key));
		mask->range.start = mask->range.end = 0;
	}
}
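/*
 * Example (an illustrative sketch, not part of the original file): building
 * a match on the IP protocol with SW_FLOW_KEY_PUT.  The first call populates
 * the key and grows match->range; the second populates the mask and grows
 * match->mask->range.  All locals here are hypothetical.
 *
 *	struct sw_flow_match match;
 *	struct sw_flow_key key;
 *	struct sw_flow_mask mask;
 *
 *	ovs_match_init(&match, &key, &mask);
 *	SW_FLOW_KEY_PUT(&match, ip.proto, IPPROTO_TCP, false);
 *	SW_FLOW_KEY_PUT(&match, ip.proto, 0xff, true);
 */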
static bool ovs_match_validate(const struct sw_flow_match *match,
			       u64 key_attrs, u64 mask_attrs)
{
	u64 key_expected = 1ULL << OVS_KEY_ATTR_ETHERNET;
	u64 mask_allowed = key_attrs;  /* At most allow all key attributes */

	/* The following mask attributes are allowed only if they
	 * pass the validation tests. */
	mask_allowed &= ~((1ULL << OVS_KEY_ATTR_IPV4)
			| (1ULL << OVS_KEY_ATTR_IPV6)
			| (1ULL << OVS_KEY_ATTR_TCP)
			| (1ULL << OVS_KEY_ATTR_UDP)
			| (1ULL << OVS_KEY_ATTR_ICMP)
			| (1ULL << OVS_KEY_ATTR_ICMPV6)
			| (1ULL << OVS_KEY_ATTR_ARP)
			| (1ULL << OVS_KEY_ATTR_ND));

	if (match->key->eth.type == htons(ETH_P_802_2) &&
	    match->mask && (match->mask->key.eth.type == htons(0xffff)))
		mask_allowed |= (1ULL << OVS_KEY_ATTR_ETHERTYPE);

	/* Check key attributes. */
	if (match->key->eth.type == htons(ETH_P_ARP)
			|| match->key->eth.type == htons(ETH_P_RARP)) {
		key_expected |= 1ULL << OVS_KEY_ATTR_ARP;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1ULL << OVS_KEY_ATTR_ARP;
	}

	if (match->key->eth.type == htons(ETH_P_IP)) {
		key_expected |= 1ULL << OVS_KEY_ATTR_IPV4;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1ULL << OVS_KEY_ATTR_IPV4;

		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_TCP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP;
			}

			if (match->key->ip.proto == IPPROTO_ICMP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_ICMP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_ICMP;
			}
		}
	}

	if (match->key->eth.type == htons(ETH_P_IPV6)) {
		key_expected |= 1ULL << OVS_KEY_ATTR_IPV6;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1ULL << OVS_KEY_ATTR_IPV6;

		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_TCP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP;
			}

			if (match->key->ip.proto == IPPROTO_ICMPV6) {
				key_expected |= 1ULL << OVS_KEY_ATTR_ICMPV6;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_ICMPV6;

				if (match->key->ipv6.tp.src ==
						htons(NDISC_NEIGHBOUR_SOLICITATION) ||
				    match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
					key_expected |= 1ULL << OVS_KEY_ATTR_ND;
					if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff)))
						mask_allowed |= 1ULL << OVS_KEY_ATTR_ND;
				}
			}
		}
	}

	if ((key_attrs & key_expected) != key_expected)
		/* Key attributes check failed. */
		return false;

	if ((mask_attrs & mask_allowed) != mask_attrs)
		/* Mask attributes check failed. */
		return false;

	return true;
}
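/*
 * Worked example for the checks above (illustrative, not from the original
 * file): for an IPv4/TCP flow with an exact ip.proto mask, key_expected ends
 * up as ETHERNET | IPV4 | TCP, so userspace must supply exactly those key
 * attributes, and a TCP mask attribute becomes permissible:
 *
 *	key_expected  = (1ULL << OVS_KEY_ATTR_ETHERNET)
 *		      | (1ULL << OVS_KEY_ATTR_IPV4)
 *		      | (1ULL << OVS_KEY_ATTR_TCP);
 *	(key_attrs & key_expected) == key_expected;	// all present
 *	(mask_attrs & mask_allowed) == mask_attrs;	// nothing extra
 */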
static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}

static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}
static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}
static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}

static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}
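/*
 * Numerical example (illustrative): with HZ == 1000, a flow whose 'used'
 * stamp is 5000 jiffies in the past has idle_ms == 5000, so the value
 * returned is the current wall-clock time in milliseconds minus 5000,
 * i.e. the wall-clock time at which the flow was last used.
 */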
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	int payload_ofs;
	struct ipv6hdr *nh;
	uint8_t nexthdr;
	__be16 frag_off;
	int err;

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
	if (unlikely(payload_ofs < 0))
		return -EINVAL;

	if (frag_off) {
		if (frag_off & htons(~0x7))
			key->ip.frag = OVS_FRAG_TYPE_LATER;
		else
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
	}

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}
static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}
static void flow_key_mask(struct sw_flow_key *dst,
			  const struct sw_flow_key *src,
			  const struct sw_flow_mask *mask)
{
	u8 *m = (u8 *)&mask->key + mask->range.start;
	u8 *s = (u8 *)src + mask->range.start;
	u8 *d = (u8 *)dst + mask->range.start;
	int i;

	memset(dst, 0, sizeof(*dst));
	for (i = 0; i < ovs_sw_flow_mask_size_roundup(mask); i++) {
		*d = *s & *m;
		d++, s++, m++;
	}
}
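/*
 * Example of the byte-wise AND above (illustrative): masking an IPv4
 * destination 10.1.2.3 with a /24 mask leaves 10.1.2.0 in 'dst', so every
 * host in 10.1.2.0/24 hashes and compares as the same masked key:
 *
 *	src:  0a 01 02 03
 *	mask: ff ff ff 00
 *	dst:  0a 01 02 00
 */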
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f

void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
	u8 tcp_flags = 0;

	if ((flow->key.eth.type == htons(ETH_P_IP) ||
	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
	    flow->key.ip.proto == IPPROTO_TCP &&
	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
		u8 *tcp = (u8 *)tcp_hdr(skb);
		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
	}

	spin_lock(&flow->lock);
	flow->used = jiffies;
	flow->packet_count++;
	flow->byte_count += skb->len;
	flow->tcp_flags |= tcp_flags;
	spin_unlock(&flow->lock);
}
struct sw_flow_actions *ovs_flow_actions_alloc(int size)
{
	struct sw_flow_actions *sfa;

	if (size > MAX_ACTIONS_BUFSIZE)
		return ERR_PTR(-EINVAL);

	sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
	if (!sfa)
		return ERR_PTR(-ENOMEM);

	sfa->actions_len = 0;
	return sfa;
}
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&flow->lock);
	flow->sf_acts = NULL;
	flow->mask = NULL;

	return flow;
}
static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
	hash = jhash_1word(hash, table->hash_seed);
	return flex_array_get(table->buckets,
			      (hash & (table->n_buckets - 1)));
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head *),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}
struct flow_table *ovs_flow_tbl_alloc(int new_size)
{
	struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->buckets = alloc_buckets(new_size);

	if (!table->buckets) {
		kfree(table);
		return NULL;
	}
	table->n_buckets = new_size;
	table->count = 0;
	table->node_ver = 0;
	table->keep_flows = false;
	get_random_bytes(&table->hash_seed, sizeof(u32));
	INIT_LIST_HEAD(&table->mask_list);

	return table;
}
static void __flow_tbl_destroy(struct flow_table *table)
{
	int i;

	if (table->keep_flows)
		goto skip_flows;

	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(table->buckets, i);
		struct hlist_node *n;
		int ver = table->node_ver;

		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);
			ovs_flow_free(flow, false);
		}
	}

skip_flows:
	free_buckets(table->buckets);
	kfree(table);
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct flow_table *table = container_of(rcu, struct flow_table, rcu);

	__flow_tbl_destroy(table);
}

void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
{
	if (!table)
		return;

	if (deferred)
		call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
	else
		__flow_tbl_destroy(table);
}
struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = table->node_ver;
	while (*bucket < table->n_buckets) {
		i = 0;
		head = flex_array_get(table->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
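/*
 * Usage sketch (illustrative, not from the original file): a dump loop
 * resumes from a (bucket, last) cursor, e.g. one carried in a Netlink dump
 * callback, and visits every flow exactly once:
 *
 *	u32 bucket = 0, obj = 0;
 *	struct sw_flow *flow;
 *
 *	while ((flow = ovs_flow_dump_next(table, &bucket, &obj)))
 *		;	// emit one flow per iteration
 */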
static void __tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(table, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
	table->count++;
}

static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, head, hash_node[old_ver])
			__tbl_insert(new, flow);
	}

	new->mask_list = old->mask_list;
	old->keep_flows = true;
}
static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
{
	struct flow_table *new_table;

	new_table = ovs_flow_tbl_alloc(n_buckets);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	flow_table_copy_flows(table, new_table);

	return new_table;
}

struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets);
}

struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets * 2);
}
static void __flow_free(struct sw_flow *flow)
{
	kfree((struct sf_flow_acts __force *)flow->sf_acts);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	__flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	ovs_sw_flow_mask_del_ref((struct sw_flow_mask __force *)flow->mask,
				 deferred);

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		__flow_free(flow);
}
/* RCU callback used by ovs_flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
	struct sw_flow_actions *sf_acts = container_of(rcu,
			struct sw_flow_actions, rcu);
	kfree(sf_acts);
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct qtag_prefix {
		__be16 eth_type; /* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;

	if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
					 sizeof(__be16))))
		return -ENOMEM;

	qp = (struct qtag_prefix *) skb->data;
	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
	__skb_pull(skb, sizeof(struct qtag_prefix));

	return 0;
}
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (ntohs(proto) >= ETH_P_802_3_MIN)
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));

	if (ntohs(llc->ethertype) >= ETH_P_802_3_MIN)
		return llc->ethertype;

	return htons(ETH_P_802_2);
}
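/*
 * Example outcomes of parse_ethertype() (illustrative): a DIX frame with
 * 0x0800 after the MAC addresses yields htons(ETH_P_IP); an 802.3 frame
 * whose LLC header is AA AA 03 00 00 00 08 00 (SNAP, zero OUI) also yields
 * htons(ETH_P_IP); anything else below ETH_P_802_3_MIN degenerates to
 * htons(ETH_P_802_2).
 */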
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->ipv6.tp.src = htons(icmp->icmp6_type);
	key->ipv6.tp.dst = htons(icmp->icmp6_code);

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			return 0;

		if (unlikely(skb_linearize(skb)))
			return -ENOMEM;

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;

		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				 (struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				return 0;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				memcpy(key->ipv6.nd.sll,
				       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				memcpy(key->ipv6.nd.tll,
				       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	return 0;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

	return 0;
}
/**
 * ovs_flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 */
int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
{
	int error;
	struct ethhdr *eth;

	memset(key, 0, sizeof(*key));

	key->phy.priority = skb->priority;
	if (OVS_CB(skb)->tun_key)
		memcpy(&key->tun_key, OVS_CB(skb)->tun_key, sizeof(key->tun_key));
	key->phy.in_port = in_port;
	key->phy.skb_mark = skb_get_mark(skb);

	skb_reset_mac_header(skb);

	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	eth = eth_hdr(skb);
	memcpy(key->eth.src, eth->h_source, ETH_ALEN);
	memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);

	__skb_pull(skb, 2 * ETH_ALEN);
	/* We are going to push all headers that we pull, so no need to
	 * update skb->csum here. */

	if (vlan_tx_tag_present(skb))
		key->eth.tci = htons(vlan_get_tci(skb));
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))
		return -ENOMEM;

	skb_reset_network_header(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			return 0;
		}
		if (nh->frag_off & htons(IP_MF) ||
			skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv4.tp.src = tcp->source;
				key->ipv4.tp.dst = tcp->dest;
			}
		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv4.tp.src = udp->source;
				key->ipv4.tp.dst = udp->dest;
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order. */
				key->ipv4.tp.src = htons(icmp->type);
				key->ipv4.tp.dst = htons(icmp->code);
			}
		}

	} else if ((key->eth.type == htons(ETH_P_ARP) ||
		    key->eth.type == htons(ETH_P_RARP)) && arphdr_ok(skb)) {
		struct arp_eth_header *arp;

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp->ar_hrd == htons(ARPHRD_ETHER)
				&& arp->ar_pro == htons(ETH_P_IP)
				&& arp->ar_hln == ETH_ALEN
				&& arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
			memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
			memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			if (nh_len == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			} else {
				error = nh_len;
			}
			return error;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			return 0;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv6.tp.src = tcp->source;
				key->ipv6.tp.dst = tcp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv6.tp.src = udp->source;
				key->ipv6.tp.dst = udp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, nh_len);
				if (error)
					return error;
			}
		}
	}

	return 0;
}
static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, int key_len)
{
	return jhash2((u32 *)((u8 *)key + key_start),
		      DIV_ROUND_UP(key_len - key_start, sizeof(u32)), 0);
}
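/*
 * Sketch of how the hash cooperates with the mask ranges (illustrative):
 * only the bytes inside [key_start, key_len) contribute, so two keys that
 * differ solely outside a mask's range hash identically once masked:
 *
 *	u32 hash = ovs_flow_hash(&masked_key, mask->range.start,
 *				 mask->range.end);
 */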
static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.ipv4_dst)
		return 0;
	else
		return offsetof(struct sw_flow_key, phy);
}
static bool __cmp_key(const struct sw_flow_key *key1,
		const struct sw_flow_key *key2, int key_start, int key_len)
{
	return !memcmp((u8 *)key1 + key_start,
			(u8 *)key2 + key_start, (key_len - key_start));
}

static bool __flow_cmp_key(const struct sw_flow *flow,
		const struct sw_flow_key *key, int key_start, int key_len)
{
	return __cmp_key(&flow->key, key, key_start, key_len);
}

static bool __flow_cmp_unmasked_key(const struct sw_flow *flow,
		const struct sw_flow_key *key, int key_start, int key_len)
{
	return __cmp_key(&flow->unmasked_key, key, key_start, key_len);
}

bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
		const struct sw_flow_key *key, int key_len)
{
	int key_start;

	key_start = flow_key_start(key);

	return __flow_cmp_unmasked_key(flow, key, key_start, key_len);
}
struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table,
				       struct sw_flow_match *match)
{
	struct sw_flow_key *unmasked = match->key;
	int key_len = match->range.end;
	struct sw_flow *flow;

	flow = ovs_flow_lookup(table, unmasked);
	if (flow && (!ovs_flow_cmp_unmasked_key(flow, unmasked, key_len)))
		flow = NULL;

	return flow;
}
static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table,
				    const struct sw_flow_key *flow_key,
				    struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int key_start = mask->range.start;
	int key_len = mask->range.end;
	u32 hash;
	struct sw_flow_key masked_key;

	flow_key_mask(&masked_key, flow_key, mask);
	hash = ovs_flow_hash(&masked_key, key_start, key_len);
	head = find_bucket(table, hash);
	hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
		if (__flow_cmp_key(flow, &masked_key, key_start, key_len))
			return flow;
	}
	return NULL;
}
struct sw_flow *ovs_flow_lookup(struct flow_table *tbl,
				const struct sw_flow_key *key)
{
	struct sw_flow *flow = NULL;
	struct sw_flow_mask *mask;

	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		flow = ovs_masked_flow_lookup(tbl, key, mask);
		if (flow)  /* Found */
			break;
	}

	return flow;
}
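/*
 * Note on cost (an illustrative sketch of the algorithm above): lookup is
 * one masked hash-table probe per mask on tbl->mask_list, so the work is
 * O(#masks), not O(#flows).  A hypothetical caller under rcu_read_lock():
 *
 *	struct sw_flow *flow = ovs_flow_lookup(table, &key);
 *	if (flow)
 *		ovs_flow_used(flow, skb);
 */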
void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow,
			 const struct sw_flow_key *key, int key_len)
{
	flow->unmasked_key = *key;
	flow_key_mask(&flow->key, &flow->unmasked_key, ovsl_dereference(flow->mask));
	flow->hash = ovs_flow_hash(&flow->key,
			ovsl_dereference(flow->mask)->range.start,
			ovsl_dereference(flow->mask)->range.end);
	__tbl_insert(table, flow);
}

void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow)
{
	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->hash_node[table->node_ver]);
	table->count--;
}
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_ENCAP] = -1,
	[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
	[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
	[OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
	[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
	[OVS_KEY_ATTR_VLAN] = sizeof(__be16),
	[OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
	[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
	[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
	[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
	[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
	[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
	[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
	[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
	[OVS_KEY_ATTR_TUNNEL] = -1,
};
static bool is_all_zero(const u8 *fp, size_t size)
{
	int i;

	if (!fp)
		return false;

	for (i = 0; i < size; i++)
		if (fp[i])
			return false;

	return true;
}
static int __parse_flow_nlattrs(const struct nlattr *attr,
				const struct nlattr *a[],
				u64 *attrsp, bool nz)
{
	const struct nlattr *nla;
	u64 attrs;
	int rem;

	attrs = *attrsp;
	nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);
		int expected_len;

		if (type > OVS_KEY_ATTR_MAX)
			return -EINVAL;

		expected_len = ovs_key_lens[type];
		if (nla_len(nla) != expected_len && expected_len != -1)
			return -EINVAL;

		if (attrs & (1ULL << type))
			/* Duplicated field. */
			return -EINVAL;

		if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
			attrs |= 1ULL << type;
			a[type] = nla;
		}
	}
	if (rem)
		return -EINVAL;

	*attrsp = attrs;
	return 0;
}
static int parse_flow_mask_nlattrs(const struct nlattr *attr,
				   const struct nlattr *a[], u64 *attrsp)
{
	return __parse_flow_nlattrs(attr, a, attrsp, true);
}

static int parse_flow_nlattrs(const struct nlattr *attr,
			      const struct nlattr *a[], u64 *attrsp)
{
	return __parse_flow_nlattrs(attr, a, attrsp, false);
}
int ipv4_tun_from_nlattr(const struct nlattr *attr,
			 struct sw_flow_match *match, bool is_mask)
{
	struct nlattr *a;
	int rem;
	bool ttl = false;
	__be16 tun_flags = 0;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
			[OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64),
			[OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32),
			[OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32),
			[OVS_TUNNEL_KEY_ATTR_TOS] = 1,
			[OVS_TUNNEL_KEY_ATTR_TTL] = 1,
			[OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0,
			[OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
		};

		if (type > OVS_TUNNEL_KEY_ATTR_MAX ||
		    ovs_tunnel_key_lens[type] != nla_len(a))
			return -EINVAL;

		switch (type) {
		case OVS_TUNNEL_KEY_ATTR_ID:
			SW_FLOW_KEY_PUT(match, tun_key.tun_id,
					nla_get_be64(a), is_mask);
			tun_flags |= TUNNEL_KEY;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
					nla_get_be32(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
					nla_get_be32(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TOS:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
					nla_get_u8(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TTL:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
					nla_get_u8(a), is_mask);
			ttl = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
			tun_flags |= TUNNEL_DONT_FRAGMENT;
			break;
		case OVS_TUNNEL_KEY_ATTR_CSUM:
			tun_flags |= TUNNEL_CSUM;
			break;
		default:
			return -EINVAL;
		}
	}

	SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);

	if (rem > 0)
		return -EINVAL;

	if (!is_mask) {
		if (!match->key->tun_key.ipv4_dst)
			return -EINVAL;

		if (!ttl)
			return -EINVAL;
	}

	return 0;
}
int ipv4_tun_to_nlattr(struct sk_buff *skb,
		       const struct ovs_key_ipv4_tunnel *tun_key,
		       const struct ovs_key_ipv4_tunnel *output)
{
	struct nlattr *nla;

	nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
	if (!nla)
		return -EMSGSIZE;

	if (tun_key->tun_flags & TUNNEL_KEY &&
	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
		return -EMSGSIZE;
	if (tun_key->ipv4_src &&
	    nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
		return -EMSGSIZE;
	if (nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
		return -EMSGSIZE;
	if (tun_key->ipv4_tos &&
	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
		return -EMSGSIZE;
	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
		return -EMSGSIZE;
	if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
		nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
		return -EMSGSIZE;
	if ((tun_key->tun_flags & TUNNEL_CSUM) &&
		nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
		return -EMSGSIZE;

	nla_nest_end(skb, nla);
	return 0;
}
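/*
 * Resulting attribute layout (illustrative): for a keyed tunnel to
 * 192.168.0.1 with TTL 64, the nest built above looks like
 *
 *	OVS_KEY_ATTR_TUNNEL
 *	    OVS_TUNNEL_KEY_ATTR_ID        (u64, network byte order)
 *	    OVS_TUNNEL_KEY_ATTR_IPV4_DST  (192.168.0.1)
 *	    OVS_TUNNEL_KEY_ATTR_TTL       (64)
 */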
static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
				 const struct nlattr **a, bool is_mask)
{
	if (*attrs & (1ULL << OVS_KEY_ATTR_PRIORITY)) {
		SW_FLOW_KEY_PUT(match, phy.priority,
			  nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_PRIORITY);
	}

	if (*attrs & (1ULL << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);

		if (!is_mask && in_port >= DP_MAX_PORTS)
			return -EINVAL;
		SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_IN_PORT);
	}

	if (*attrs & (1ULL << OVS_KEY_ATTR_SKB_MARK)) {
		uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && !defined(CONFIG_NETFILTER)
		if (!is_mask && mark != 0)
			return -EINVAL;
#endif
		SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_SKB_MARK);
	}
	if (*attrs & (1ULL << OVS_KEY_ATTR_TUNNEL)) {
		if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
					 is_mask))
			return -EINVAL;
		*attrs &= ~(1ULL << OVS_KEY_ATTR_TUNNEL);
	}
	return 0;
}
static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
				const struct nlattr **a, bool is_mask)
{
	int err;

	err = metadata_from_nlattrs(match, &attrs, a, is_mask);
	if (err)
		return err;

	if (attrs & (1ULL << OVS_KEY_ATTR_ETHERNET)) {
		const struct ovs_key_ethernet *eth_key;

		eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
		SW_FLOW_KEY_MEMCPY(match, eth.src,
				eth_key->eth_src, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, eth.dst,
				eth_key->eth_dst, ETH_ALEN, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERNET);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_VLAN)) {
		__be16 tci;

		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		if (!is_mask && (tci & htons(VLAN_TAG_PRESENT)))
			return -EINVAL;

		SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_VLAN);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_ETHERTYPE)) {
		__be16 eth_type;

		eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
		if (!is_mask && ntohs(eth_type) < ETH_P_802_3_MIN)
			return -EINVAL;

		SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERTYPE);
	} else if (!is_mask) {
		SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_IPV4)) {
		const struct ovs_key_ipv4 *ipv4_key;

		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
			return -EINVAL;
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv4_key->ipv4_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv4_key->ipv4_tos, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv4_key->ipv4_ttl, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv4_key->ipv4_frag, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				ipv4_key->ipv4_src, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				ipv4_key->ipv4_dst, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_IPV4);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
			return -EINVAL;
		SW_FLOW_KEY_PUT(match, ipv6.label,
				ipv6_key->ipv6_label, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv6_key->ipv6_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv6_key->ipv6_tclass, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv6_key->ipv6_hlimit, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv6_key->ipv6_frag, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
				ipv6_key->ipv6_src,
				sizeof(match->key->ipv6.addr.src),
				is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
				ipv6_key->ipv6_dst,
				sizeof(match->key->ipv6.addr.dst),
				is_mask);

		attrs &= ~(1ULL << OVS_KEY_ATTR_IPV6);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_ARP)) {
		const struct ovs_key_arp *arp_key;

		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		if (!is_mask && (arp_key->arp_op & htons(0xff00)))
			return -EINVAL;

		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				arp_key->arp_sip, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				arp_key->arp_tip, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ntohs(arp_key->arp_op), is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
				arp_key->arp_sha, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
				arp_key->arp_tha, ETH_ALEN, is_mask);

		attrs &= ~(1ULL << OVS_KEY_ATTR_ARP);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_TCP)) {
		const struct ovs_key_tcp *tcp_key;

		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		SW_FLOW_KEY_PUT(match, ipv4.tp.src,
				tcp_key->tcp_src, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
				tcp_key->tcp_dst, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_TCP);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_UDP)) {
		const struct ovs_key_udp *udp_key;

		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		SW_FLOW_KEY_PUT(match, ipv4.tp.src,
				udp_key->udp_src, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
				udp_key->udp_dst, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_UDP);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_ICMP)) {
		const struct ovs_key_icmp *icmp_key;

		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		SW_FLOW_KEY_PUT(match, ipv4.tp.src,
				htons(icmp_key->icmp_type), is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
				htons(icmp_key->icmp_code), is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_ICMP);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_ICMPV6)) {
		const struct ovs_key_icmpv6 *icmpv6_key;

		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		SW_FLOW_KEY_PUT(match, ipv6.tp.src,
				htons(icmpv6_key->icmpv6_type), is_mask);
		SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
				htons(icmpv6_key->icmpv6_code), is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_ICMPV6);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_ND)) {
		const struct ovs_key_nd *nd_key;

		nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
				nd_key->nd_target,
				sizeof(match->key->ipv6.nd.target),
				is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
				nd_key->nd_sll, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
				nd_key->nd_tll, ETH_ALEN, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_ND);
	}

	if (attrs != 0)
		return -EINVAL;

	return 0;
}
/**
 * ovs_match_from_nlattrs - parses Netlink attributes into a flow key and
 * mask.  If 'mask' is NULL, the flow is treated as an exact-match flow.
 * Otherwise, it is treated as a wildcarded flow, except that the mask
 * should not include any don't-care bits.
 * @match: receives the extracted flow match information.
 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.  The fields should be taken from the packet that triggered the
 * creation of this flow.
 * @mask: Optional.  Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink
 * attributes that specify the mask field of the wildcarded flow.
 */
int ovs_match_from_nlattrs(struct sw_flow_match *match,
			   const struct nlattr *key,
			   const struct nlattr *mask)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	const struct nlattr *encap;
	u64 key_attrs = 0;
	u64 mask_attrs = 0;
	bool encap_valid = false;
	int err;

	err = parse_flow_nlattrs(key, a, &key_attrs);
	if (err)
		return err;

	if (key_attrs & 1ULL << OVS_KEY_ATTR_ENCAP) {
		encap = a[OVS_KEY_ATTR_ENCAP];
		key_attrs &= ~(1ULL << OVS_KEY_ATTR_ENCAP);
		if (nla_len(encap)) {
			__be16 eth_type = 0; /* ETH_P_8021Q */

			if (a[OVS_KEY_ATTR_ETHERTYPE])
				eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);

			if ((eth_type == htons(ETH_P_8021Q)) && (a[OVS_KEY_ATTR_VLAN])) {
				encap_valid = true;
				key_attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERTYPE);
				err = parse_flow_nlattrs(encap, a, &key_attrs);
				if (err)
					return err;
			} else {
				return -EINVAL;
			}
		}
	}

	err = ovs_key_from_nlattrs(match, key_attrs, a, false);
	if (err)
		return err;

	if (mask) {
		err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
		if (err)
			return err;

		if ((mask_attrs & 1ULL << OVS_KEY_ATTR_ENCAP) && encap_valid) {
			__be16 eth_type = 0;

			if (a[OVS_KEY_ATTR_ETHERTYPE])
				eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
			if (eth_type == htons(0xffff)) {
				mask_attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERTYPE);
				encap = a[OVS_KEY_ATTR_ENCAP];
				err = parse_flow_mask_nlattrs(encap, a, &mask_attrs);
				if (err)
					return err;
			} else {
				return -EINVAL;
			}
			mask_attrs &= ~(1ULL << OVS_KEY_ATTR_ENCAP);
		}

		err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
		if (err)
			return err;
	} else if (match->mask) {
		/* Populate exact match flow's key mask. */
		ovs_sw_flow_mask_set(match->mask, &match->range, 0xff);
	}

	if (!ovs_match_validate(match, key_attrs, mask_attrs))
		return -EINVAL;

	return 0;
}
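/*
 * Usage sketch (illustrative, not from the original file): a hypothetical
 * caller parsing the key and optional mask attributes of an
 * OVS_FLOW_CMD_NEW request:
 *
 *	struct sw_flow_match match;
 *	struct sw_flow_key key;
 *	struct sw_flow_mask mask;
 *	int err;
 *
 *	ovs_match_init(&match, &key, &mask);
 *	err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY],
 *				     a[OVS_FLOW_ATTR_MASK]);
 */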
/**
 * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
 * @flow: Receives extracted in_port, priority, tun_key and skb_mark.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
 * get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */
int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow,
				   const struct nlattr *attr)
{
	struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key;
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	u64 attrs = 0;
	int err;
	struct sw_flow_match match;

	err = parse_flow_nlattrs(attr, a, &attrs);
	if (err)
		return -EINVAL;

	ovs_match_init(&match, &flow->key, NULL);

	/* Set the defaults that metadata_from_nlattrs() may overwrite.  This
	 * must happen after ovs_match_init(), which zeroes the whole key. */
	flow->key.phy.in_port = DP_MAX_PORTS;
	flow->key.phy.priority = 0;
	flow->key.phy.skb_mark = 0;
	memset(tun_key, 0, sizeof(flow->key.tun_key));

	err = metadata_from_nlattrs(&match, &attrs, a, false);
	if (err)
		return err;

	return 0;
}
int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey,
		const struct sw_flow_key *output, struct sk_buff *skb)
{
	struct ovs_key_ethernet *eth_key;
	struct nlattr *nla, *encap;

	if (swkey->phy.priority &&
	    nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
		goto nla_put_failure;

	if (swkey->tun_key.ipv4_dst &&
	    ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key))
		goto nla_put_failure;

	if (swkey->phy.in_port != DP_MAX_PORTS) {
		/* Exact match upper 16 bits. */
		u16 upper_u16;

		upper_u16 = (swkey == output) ? 0 : 0xffff;

		if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
				(upper_u16 << 16) | output->phy.in_port))
			goto nla_put_failure;
	}

	if (swkey->phy.skb_mark &&
	    nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
		goto nla_put_failure;

	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
	if (!nla)
		goto nla_put_failure;

	eth_key = nla_data(nla);
	memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN);
	memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN);

	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
		__be16 eth_type;

		eth_type = (swkey == output) ? htons(ETH_P_8021Q) : htons(0xffff);
		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
		    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
			goto nla_put_failure;
		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
		if (!swkey->eth.tci)
			goto unencap;
	} else
		encap = NULL;

	if ((swkey == output) && (swkey->eth.type == htons(ETH_P_802_2)))
		goto unencap;

	if (output->eth.type != 0)
		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
			goto nla_put_failure;

	if (swkey->eth.type == htons(ETH_P_IP)) {
		struct ovs_key_ipv4 *ipv4_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
		if (!nla)
			goto nla_put_failure;
		ipv4_key = nla_data(nla);
		ipv4_key->ipv4_src = output->ipv4.addr.src;
		ipv4_key->ipv4_dst = output->ipv4.addr.dst;
		ipv4_key->ipv4_proto = output->ip.proto;
		ipv4_key->ipv4_tos = output->ip.tos;
		ipv4_key->ipv4_ttl = output->ip.ttl;
		ipv4_key->ipv4_frag = output->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		struct ovs_key_ipv6 *ipv6_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
		if (!nla)
			goto nla_put_failure;
		ipv6_key = nla_data(nla);
		memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
				sizeof(ipv6_key->ipv6_src));
		memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
				sizeof(ipv6_key->ipv6_dst));
		ipv6_key->ipv6_label = output->ipv6.label;
		ipv6_key->ipv6_proto = output->ip.proto;
		ipv6_key->ipv6_tclass = output->ip.tos;
		ipv6_key->ipv6_hlimit = output->ip.ttl;
		ipv6_key->ipv6_frag = output->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_ARP) ||
		   swkey->eth.type == htons(ETH_P_RARP)) {
		struct ovs_key_arp *arp_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
		if (!nla)
			goto nla_put_failure;
		arp_key = nla_data(nla);
		memset(arp_key, 0, sizeof(struct ovs_key_arp));
		arp_key->arp_sip = output->ipv4.addr.src;
		arp_key->arp_tip = output->ipv4.addr.dst;
		arp_key->arp_op = htons(output->ip.proto);
		memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN);
		memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN);
	}

	if ((swkey->eth.type == htons(ETH_P_IP) ||
	     swkey->eth.type == htons(ETH_P_IPV6)) &&
	     swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

		if (swkey->ip.proto == IPPROTO_TCP) {
			struct ovs_key_tcp *tcp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
			if (!nla)
				goto nla_put_failure;
			tcp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				tcp_key->tcp_src = output->ipv4.tp.src;
				tcp_key->tcp_dst = output->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				tcp_key->tcp_src = output->ipv6.tp.src;
				tcp_key->tcp_dst = output->ipv6.tp.dst;
			}
		} else if (swkey->ip.proto == IPPROTO_UDP) {
			struct ovs_key_udp *udp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
			if (!nla)
				goto nla_put_failure;
			udp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				udp_key->udp_src = output->ipv4.tp.src;
				udp_key->udp_dst = output->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				udp_key->udp_src = output->ipv6.tp.src;
				udp_key->udp_dst = output->ipv6.tp.dst;
			}
		} else if (swkey->eth.type == htons(ETH_P_IP) &&
			   swkey->ip.proto == IPPROTO_ICMP) {
			struct ovs_key_icmp *icmp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
			if (!nla)
				goto nla_put_failure;
			icmp_key = nla_data(nla);
			icmp_key->icmp_type = ntohs(output->ipv4.tp.src);
			icmp_key->icmp_code = ntohs(output->ipv4.tp.dst);
		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
			   swkey->ip.proto == IPPROTO_ICMPV6) {
			struct ovs_key_icmpv6 *icmpv6_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
						sizeof(*icmpv6_key));
			if (!nla)
				goto nla_put_failure;
			icmpv6_key = nla_data(nla);
			icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src);
			icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst);

			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
				struct ovs_key_nd *nd_key;

				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
				if (!nla)
					goto nla_put_failure;
				nd_key = nla_data(nla);
				memcpy(nd_key->nd_target, &output->ipv6.nd.target,
							sizeof(nd_key->nd_target));
				memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN);
				memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN);
			}
		}
	}

unencap:
	if (encap)
		nla_nest_end(skb, encap);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
					0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}
struct sw_flow_mask *ovs_sw_flow_mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 0;

	return mask;
}

void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask)
{
	mask->ref_count++;
}

static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
{
	struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);

	kfree(mask);
}

void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
{
	if (!mask)
		return;

	BUG_ON(!mask->ref_count);
	mask->ref_count--;

	if (!mask->ref_count) {
		list_del_rcu(&mask->list);
		if (deferred)
			call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
		else
			kfree(mask);
	}
}
static bool ovs_sw_flow_mask_equal(const struct sw_flow_mask *a,
		const struct sw_flow_mask *b)
{
	u8 *a_ = (u8 *)&a->key + a->range.start;
	u8 *b_ = (u8 *)&b->key + b->range.start;

	return  (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, ovs_sw_flow_mask_actual_size(a)) == 0);
}

struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;

		m = container_of(ml, struct sw_flow_mask, list);
		if (ovs_sw_flow_mask_equal(mask, m))
			return m;
	}

	return NULL;
}
/**
 * Adds a new mask to the mask list.
 * The caller needs to make sure that 'mask' is not the same
 * as any masks that are already on the list.
 */
void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	list_add_rcu(&mask->list, &tbl->mask_list);
}

/**
 * Sets the 'range' fields in the mask to the value of 'val'.
 */
static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask,
		struct sw_flow_key_range *range, u8 val)
{
	u8 *m = (u8 *)&mask->key + range->start;

	mask->range = *range;
	memset(m, val, ovs_sw_flow_mask_size_roundup(mask));
}
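/*
 * Example (illustrative): ovs_match_from_nlattrs() uses this helper to
 * synthesize an exact-match mask when userspace supplies no mask at all,
 * writing 0xff over exactly the bytes the key populated:
 *
 *	ovs_sw_flow_mask_set(match->mask, &match->range, 0xff);
 */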