/*
 * Copyright (c) 2007-2011 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ndisc.h>

static struct kmem_cache *flow_cache;
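
/* Checks that 'skb' is at least 'len' bytes long and that at least that much
 * of it sits in the linear data area, pulling it there if necessary.  Callers
 * use this before reading a fixed-size header at a known offset. */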
static int check_header(struct sk_buff *skb, int len)
	if (unlikely(skb->len < len))
	if (unlikely(!pskb_may_pull(skb, len)))

static bool arphdr_ok(struct sk_buff *skb)
	return pskb_may_pull(skb, skb_network_offset(skb) +
			     sizeof(struct arp_eth_header));

static int check_iphdr(struct sk_buff *skb)
	unsigned int nh_ofs = skb_network_offset(skb);

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))

	skb_set_transport_header(skb, nh_ofs + ip_len);

static bool tcphdr_ok(struct sk_buff *skb)
	int th_ofs = skb_transport_offset(skb);

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))

static bool udphdr_ok(struct sk_buff *skb)
	return pskb_may_pull(skb, skb_transport_offset(skb) +
			     sizeof(struct udphdr));

static bool icmphdr_ok(struct sk_buff *skb)
	return pskb_may_pull(skb, skb_transport_offset(skb) +
			     sizeof(struct icmphdr));
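
/* Returns the time, in milliseconds on the monotonic clock, at which a flow
 * was last used, given the jiffies value recorded at that time: the flow's
 * idle time is subtracted from the current time. */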
u64 ovs_flow_used_time(unsigned long flow_jiffies)
	struct timespec cur_ts;

	ktime_get_ts(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;

#define SW_FLOW_KEY_OFFSET(field) \
	(offsetof(struct sw_flow_key, field) + \
	 FIELD_SIZEOF(struct sw_flow_key, field))
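
/* Fills in the IPv6 portion of 'key' from the IPv6 header of 'skb': flow
 * label, traffic class, hop limit, addresses, and the protocol of the payload
 * after any extension headers.  Marks later and first fragments in
 * key->ip.frag and sets the skb transport header just past the IPv6 header
 * chain. */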
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
			 int *key_lenp)
	unsigned int nh_ofs = skb_network_offset(skb);

	*key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);

	err = check_header(skb, nh_ofs + sizeof(*nh));

	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
	if (unlikely(payload_ofs < 0))

	if (frag_off & htons(~0x7))
		key->ip.frag = OVS_FRAG_TYPE_LATER;
		key->ip.frag = OVS_FRAG_TYPE_FIRST;

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;

static bool icmp6hdr_ok(struct sk_buff *skb)
	return pskb_may_pull(skb, skb_transport_offset(skb) +
			     sizeof(struct icmp6hdr));

#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f
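
/* Updates a flow's statistics from 'skb': refreshes the last-used time, bumps
 * the packet and byte counters, and ORs in the TCP flags when the packet is
 * TCP over IPv4 or IPv6, all under the flow's spinlock. */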
void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
	if ((flow->key.eth.type == htons(ETH_P_IP) ||
	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
	    flow->key.ip.proto == IPPROTO_TCP &&
	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
		u8 *tcp = (u8 *)tcp_hdr(skb);
		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;

	spin_lock(&flow->lock);
	flow->used = jiffies;
	flow->packet_count++;
	flow->byte_count += skb->len;
	flow->tcp_flags |= tcp_flags;
	spin_unlock(&flow->lock);
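
/* Allocates a struct sw_flow_actions with room for 'size' bytes of actions.
 * Requests larger than MAX_ACTIONS_BUFSIZE are rejected with -EINVAL; buffers
 * up to MAX_ACTIONS_BUFSIZE_KMALLOC come from kmalloc() (the larger-buffer
 * path is not shown here). */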
struct sw_flow_actions *ovs_flow_actions_alloc(int size)
	struct sw_flow_actions *sfa;

	if (size > MAX_ACTIONS_BUFSIZE)
		return ERR_PTR(-EINVAL);

	size += sizeof(*sfa);
	if (size <= MAX_ACTIONS_BUFSIZE_KMALLOC)
		sfa = kmalloc(size, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	sfa->actions_len = 0;
	sfa->buf_size = size;

void ovs_flow_actions_free(struct sw_flow_actions *sfa)
	if (sfa->buf_size <= MAX_ACTIONS_BUFSIZE_KMALLOC)

struct sw_flow *ovs_flow_alloc(void)
	struct sw_flow *flow;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&flow->lock);
	flow->sf_acts = NULL;

static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
	hash = jhash_1word(hash, table->hash_seed);
	return flex_array_get(table->buckets,
			      (hash & (table->n_buckets - 1)));

static struct flex_array *alloc_buckets(unsigned int n_buckets)
	struct flex_array *buckets;

	buckets = flex_array_alloc(sizeof(struct hlist_head *),
				   n_buckets, GFP_KERNEL);

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
		flex_array_free(buckets);

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
				flex_array_get(buckets, i));

static void free_buckets(struct flex_array *buckets)
	flex_array_free(buckets);

struct flow_table *ovs_flow_tbl_alloc(int new_size)
	struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);

	table->buckets = alloc_buckets(new_size);
	if (!table->buckets) {

	table->n_buckets = new_size;
	table->keep_flows = false;
	get_random_bytes(&table->hash_seed, sizeof(u32));

void ovs_flow_tbl_destroy(struct flow_table *table)
	if (table->keep_flows)

	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(table->buckets, i);
		struct hlist_node *node, *n;
		int ver = table->node_ver;

		hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);

	free_buckets(table->buckets);

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
	struct flow_table *table = container_of(rcu, struct flow_table, rcu);

	ovs_flow_tbl_destroy(table);

void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
	call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
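
/* Returns the next flow in 'table' for a dump operation, walking the hash
 * buckets under RCU.  '*bucket' and '*last' appear to act as a resumable
 * cursor, so a caller can continue a dump where the previous call stopped. */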
struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
	struct sw_flow *flow;
	struct hlist_head *head;
	struct hlist_node *n;

	ver = table->node_ver;
	while (*bucket < table->n_buckets) {
		head = flex_array_get(table->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {

static void __flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
	struct hlist_head *head;

	head = find_bucket(table, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
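
/* Re-inserts every flow from 'old' into 'new' during a rehash or expansion.
 * The two tables use opposite hash_node[] slots (node_ver), so a flow can be
 * linked into both tables at once; 'old' is then marked keep_flows so that
 * destroying it does not free flows that 'new' still references. */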
static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;
		struct hlist_node *n;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, n, head, hash_node[old_ver])
			__flow_tbl_insert(new, flow);

	old->keep_flows = true;

static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
	struct flow_table *new_table;

	new_table = ovs_flow_tbl_alloc(n_buckets);
		return ERR_PTR(-ENOMEM);

	flow_table_copy_flows(table, new_table);

struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
	return __flow_tbl_rehash(table, table->n_buckets);

struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
	return __flow_tbl_rehash(table, table->n_buckets * 2);

void ovs_flow_free(struct sw_flow *flow)
	kfree((struct sf_flow_acts __force *)flow->sf_acts);
	kmem_cache_free(flow_cache, flow);

/* RCU callback used by ovs_flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

/* Schedules 'flow' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free(struct sw_flow *flow)
	call_rcu(&flow->rcu, rcu_free_flow_callback);

/* RCU callback used by ovs_flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
	struct sw_flow_actions *sf_acts = container_of(rcu,
			struct sw_flow_actions, rcu);

	ovs_flow_actions_free(sf_acts);

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
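
/* Parses an 802.1Q tag that follows the Ethernet source/destination
 * addresses, recording the TCI (with VLAN_TAG_PRESENT set) in 'key' and
 * pulling the tag so that the ethertype parsing that follows sees the
 * encapsulated protocol. */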
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
	struct qtag_prefix {
		__be16 eth_type;	/* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;

	if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))

	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +

	qp = (struct qtag_prefix *) skb->data;
	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
	__skb_pull(skb, sizeof(struct qtag_prefix));
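
/* Returns the EtherType of the frame at skb->data, advancing skb->data past
 * the length/type field.  Values of 1536 or greater are Ethernet II types and
 * are used directly; otherwise the frame is 802.3, and a well-formed SNAP
 * header with a zero OUI supplies the EtherType, else ETH_P_802_2 is
 * returned. */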
static __be16 parse_ethertype(struct sk_buff *skb)
	struct llc_snap_hdr {
		u8  dsap;	/* Always 0xAA */
		u8  ssap;	/* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (ntohs(proto) >= 1536)

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));

	if (ntohs(llc->ethertype) >= 1536)
		return llc->ethertype;

	return htons(ETH_P_802_2);
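
/* Extracts the ICMPv6 type and code into the transport-port fields of 'key'.
 * For neighbour solicitation/advertisement messages it additionally records
 * the target address and any source/target link-layer address options,
 * linearizing the skb so the whole ND message can be examined. */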
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int *key_lenp, int nh_len)
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order. */
	key->ipv6.tp.src = htons(icmp->icmp6_type);
	key->ipv6.tp.dst = htons(icmp->icmp6_code);
	key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);

		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

		/* In order to process neighbor discovery options, we need the
		 * entire packet in the linear data area, so linearize it
		 * below. */
		if (unlikely(icmp_len < sizeof(*nd)))

		if (unlikely(skb_linearize(skb))) {

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;
		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

		icmp_len -= sizeof(*nd);

		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				(struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))

			/* Store the link layer address if the appropriate
			 * option is provided. It is considered an error if
			 * the same link layer option is specified twice. */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
				memcpy(key->ipv6.nd.sll,
				       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
				memcpy(key->ipv6.nd.tll,
				       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);

	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

/**
 * ovs_flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header.
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 * @key_lenp: length of output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->dl_type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->dl_type values it is left untouched.
 */
int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
		     int *key_lenp)
	int key_len = SW_FLOW_KEY_OFFSET(eth);

	memset(key, 0, sizeof(*key));

	key->phy.priority = skb->priority;
	if (OVS_CB(skb)->tun_key)
		memcpy(&key->tun_key, OVS_CB(skb)->tun_key, sizeof(key->tun_key));
	key->phy.in_port = in_port;
	key->phy.skb_mark = skb_get_mark(skb);

	skb_reset_mac_header(skb);

	/* Link layer. We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area. */
	memcpy(key->eth.src, eth->h_source, ETH_ALEN);
	memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);

	__skb_pull(skb, 2 * ETH_ALEN);

	if (vlan_tx_tag_present(skb))
		key->eth.tci = htons(vlan_get_tci(skb));
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))

	skb_reset_network_header(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));

	if (key->eth.type == htons(ETH_P_IP)) {
		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);

		error = check_iphdr(skb);
		if (unlikely(error)) {
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;

		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
			key->ip.frag = OVS_FRAG_TYPE_LATER;

		if (nh->frag_off & htons(IP_MF) ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv4.tp.src = tcp->source;
				key->ipv4.tp.dst = tcp->dest;
		} else if (key->ip.proto == IPPROTO_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv4.tp.src = udp->source;
				key->ipv4.tp.dst = udp->dest;
		} else if (key->ip.proto == IPPROTO_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order. */
				key->ipv4.tp.src = htons(icmp->type);
				key->ipv4.tp.dst = htons(icmp->code);

	} else if ((key->eth.type == htons(ETH_P_ARP) ||
		    key->eth.type == htons(ETH_P_RARP)) && arphdr_ok(skb)) {
		struct arp_eth_header *arp;

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp->ar_hrd == htons(ARPHRD_ETHER)
		    && arp->ar_pro == htons(ETH_P_IP)
		    && arp->ar_hln == ETH_ALEN
		    && arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
			memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
			memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
			key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);

	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;	/* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key, &key_len);
		if (unlikely(nh_len < 0)) {
			if (nh_len == -EINVAL)
				skb->transport_header = skb->network_header;

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv6.tp.src = tcp->source;
				key->ipv6.tp.dst = tcp->dest;
		} else if (key->ip.proto == NEXTHDR_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv6.tp.src = udp->source;
				key->ipv6.tp.dst = udp->dest;
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, &key_len, nh_len);

static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, int key_len)
	return jhash2((u32 *)((u8 *)key + key_start),
		      DIV_ROUND_UP(key_len - key_start, sizeof(u32)), 0);
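
/* Returns the byte offset within struct sw_flow_key at which hashing and
 * comparison should begin.  Keys that carry a tunnel (tun_key.ipv4_dst set)
 * are compared from the start of the key so the tunnel fields participate;
 * otherwise comparison starts at the 'phy' metadata, skipping tun_key. */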
static int flow_key_start(struct sw_flow_key *key)
	if (key->tun_key.ipv4_dst)
	return offsetof(struct sw_flow_key, phy);
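
/* Looks up the flow whose key matches 'key' exactly.  The hash and the
 * memcmp() both start at flow_key_start(), so tunnel-less keys skip the
 * tun_key portion, and only the first 'key_len' bytes are compared.  The RCU
 * list walk assumes the caller holds rcu_read_lock or otherwise keeps the
 * table alive. */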
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
				    struct sw_flow_key *key, int key_len)
	struct sw_flow *flow;
	struct hlist_node *n;
	struct hlist_head *head;

	key_start = flow_key_start(key);
	hash = ovs_flow_hash(key, key_start, key_len);

	_key = (u8 *) key + key_start;
	head = find_bucket(table, hash);
	hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {
		if (flow->hash == hash &&
		    !memcmp((u8 *)&flow->key + key_start, _key, key_len - key_start)) {

void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			 struct sw_flow_key *key, int key_len)
	flow->hash = ovs_flow_hash(key, flow_key_start(key), key_len);
	memcpy(&flow->key, key, sizeof(flow->key));
	__flow_tbl_insert(table, flow);

void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
	hlist_del_rcu(&flow->hash_node[table->node_ver]);
	BUG_ON(table->count < 0);

/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_ENCAP] = -1,
	[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
	[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
	[OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
	[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
	[OVS_KEY_ATTR_VLAN] = sizeof(__be16),
	[OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
	[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
	[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
	[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
	[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
	[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
	[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
	[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
	[OVS_KEY_ATTR_TUNNEL] = -1,
	[OVS_KEY_ATTR_TUN_ID] = sizeof(__be64),

static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
				  const struct nlattr *a[], u64 *attrs)
	const struct ovs_key_icmp *icmp_key;
	const struct ovs_key_tcp *tcp_key;
	const struct ovs_key_udp *udp_key;

	switch (swkey->ip.proto) {
		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		swkey->ipv4.tp.src = tcp_key->tcp_src;
		swkey->ipv4.tp.dst = tcp_key->tcp_dst;

		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		swkey->ipv4.tp.src = udp_key->udp_src;
		swkey->ipv4.tp.dst = udp_key->udp_dst;

		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
		*attrs &= ~(1 << OVS_KEY_ATTR_ICMP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
		swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);

static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
				  const struct nlattr *a[], u64 *attrs)
	const struct ovs_key_icmpv6 *icmpv6_key;
	const struct ovs_key_tcp *tcp_key;
	const struct ovs_key_udp *udp_key;

	switch (swkey->ip.proto) {
		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		swkey->ipv6.tp.src = tcp_key->tcp_src;
		swkey->ipv6.tp.dst = tcp_key->tcp_dst;

		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		swkey->ipv6.tp.src = udp_key->udp_src;
		swkey->ipv6.tp.dst = udp_key->udp_dst;

		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
		*attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
		swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);

		if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
		    swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
			const struct ovs_key_nd *nd_key;

			if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
			*attrs &= ~(1 << OVS_KEY_ATTR_ND);

			*key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
			nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
			memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
			       sizeof(swkey->ipv6.nd.target));
			memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
			memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
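
/* Walks the nested OVS_KEY_ATTR_* attributes in 'attr', checking each type
 * and length against ovs_key_lens[] and rejecting duplicates, and records a
 * pointer to each attribute in a[].  The set of attributes seen is returned
 * as a bitmap in '*attrsp'. */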
static int parse_flow_nlattrs(const struct nlattr *attr,
			      const struct nlattr *a[], u64 *attrsp)
	const struct nlattr *nla;

	nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);

		if (type > OVS_KEY_ATTR_MAX || attrs & (1ULL << type))

		expected_len = ovs_key_lens[type];
		if (nla_len(nla) != expected_len && expected_len != -1)

		attrs |= 1ULL << type;
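
/* Parses the nested OVS_TUNNEL_KEY_ATTR_* attributes in 'attr' into
 * 'tun_key': tunnel ID, outer IPv4 source/destination, TOS, TTL and the
 * don't-fragment and checksum flags.  Attribute lengths are validated against
 * ovs_tunnel_key_lens[], and a destination address is required. */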
int ipv4_tun_from_nlattr(const struct nlattr *attr,
			 struct ovs_key_ipv4_tunnel *tun_key)
	memset(tun_key, 0, sizeof(*tun_key));

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
			[OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64),
			[OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32),
			[OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32),
			[OVS_TUNNEL_KEY_ATTR_TOS] = 1,
			[OVS_TUNNEL_KEY_ATTR_TTL] = 1,
			[OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0,
			[OVS_TUNNEL_KEY_ATTR_CSUM] = 0,

		if (type > OVS_TUNNEL_KEY_ATTR_MAX ||
		    ovs_tunnel_key_lens[type] != nla_len(a))

		case OVS_TUNNEL_KEY_ATTR_ID:
			tun_key->tun_id = nla_get_be64(a);
			tun_key->tun_flags |= OVS_TNL_F_KEY;
		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
			tun_key->ipv4_src = nla_get_be32(a);
		case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
			tun_key->ipv4_dst = nla_get_be32(a);
		case OVS_TUNNEL_KEY_ATTR_TOS:
			tun_key->ipv4_tos = nla_get_u8(a);
		case OVS_TUNNEL_KEY_ATTR_TTL:
			tun_key->ipv4_ttl = nla_get_u8(a);
		case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
			tun_key->tun_flags |= OVS_TNL_F_DONT_FRAGMENT;
		case OVS_TUNNEL_KEY_ATTR_CSUM:
			tun_key->tun_flags |= OVS_TNL_F_CSUM;

	if (!tun_key->ipv4_dst)
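
/* Emits 'tun_key' as a nested OVS_KEY_ATTR_TUNNEL attribute on 'skb'.  Only
 * the fields that are set are written, except for the destination address and
 * TTL, which are written unconditionally.  Returns 0 on success, or an error
 * if the attributes do not fit in 'skb'. */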
int ipv4_tun_to_nlattr(struct sk_buff *skb,
		       const struct ovs_key_ipv4_tunnel *tun_key)
	nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);

	if (tun_key->tun_flags & OVS_TNL_F_KEY &&
	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id))
	if (tun_key->ipv4_src &&
	    nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ipv4_src))
	if (nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ipv4_dst))
	if (tun_key->ipv4_tos &&
	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ipv4_tos))
	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ipv4_ttl))
	if ((tun_key->tun_flags & OVS_TNL_F_DONT_FRAGMENT) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
	if ((tun_key->tun_flags & OVS_TNL_F_CSUM) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))

	nla_nest_end(skb, nla);

/**
 * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
 * @swkey: receives the extracted flow key.
 * @key_lenp: number of bytes used in @swkey.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 */
int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
			  const struct nlattr *attr)
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	const struct ovs_key_ethernet *eth_key;

	memset(swkey, 0, sizeof(struct sw_flow_key));
	key_len = SW_FLOW_KEY_OFFSET(eth);

	err = parse_flow_nlattrs(attr, a, &attrs);

	/* Metadata attributes. */
	if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
		swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]);
		attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);

	if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
		if (in_port >= DP_MAX_PORTS)
		swkey->phy.in_port = in_port;
		attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
		swkey->phy.in_port = DP_MAX_PORTS;

	if (attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
		uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && !defined(CONFIG_NETFILTER)
		swkey->phy.skb_mark = mark;
		attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);

	if (attrs & (1ULL << OVS_KEY_ATTR_TUN_ID) &&
	    attrs & (1ULL << OVS_KEY_ATTR_TUNNEL)) {
		err = ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], &swkey->tun_key);

		if (!(swkey->tun_key.tun_flags & OVS_TNL_F_KEY))

		tun_id = nla_get_be64(a[OVS_KEY_ATTR_TUN_ID]);
		if (tun_id != swkey->tun_key.tun_id)

		attrs &= ~(1ULL << OVS_KEY_ATTR_TUN_ID);
		attrs &= ~(1ULL << OVS_KEY_ATTR_TUNNEL);
	} else if (attrs & (1ULL << OVS_KEY_ATTR_TUNNEL)) {
		err = ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], &swkey->tun_key);

		attrs &= ~(1ULL << OVS_KEY_ATTR_TUNNEL);

	/* Data attributes. */
	if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
	attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);

	eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
	memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
	memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);

	if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) &&
	    nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) {
		const struct nlattr *encap;

		if (attrs != ((1 << OVS_KEY_ATTR_VLAN) |
			      (1 << OVS_KEY_ATTR_ETHERTYPE) |
			      (1 << OVS_KEY_ATTR_ENCAP)))

		encap = a[OVS_KEY_ATTR_ENCAP];
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		if (tci & htons(VLAN_TAG_PRESENT)) {
			swkey->eth.tci = tci;

			err = parse_flow_nlattrs(encap, a, &attrs);

			/* Corner case for truncated 802.1Q header. */
			swkey->eth.type = htons(ETH_P_8021Q);
			*key_lenp = key_len;

	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
		swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
		if (ntohs(swkey->eth.type) < 1536)
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
		swkey->eth.type = htons(ETH_P_802_2);

	if (swkey->eth.type == htons(ETH_P_IP)) {
		const struct ovs_key_ipv4 *ipv4_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);

		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
		swkey->ip.proto = ipv4_key->ipv4_proto;
		swkey->ip.tos = ipv4_key->ipv4_tos;
		swkey->ip.ttl = ipv4_key->ipv4_ttl;
		swkey->ip.frag = ipv4_key->ipv4_frag;
		swkey->ipv4.addr.src = ipv4_key->ipv4_src;
		swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;

		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
			err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);

	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);

		key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
		swkey->ipv6.label = ipv6_key->ipv6_label;
		swkey->ip.proto = ipv6_key->ipv6_proto;
		swkey->ip.tos = ipv6_key->ipv6_tclass;
		swkey->ip.ttl = ipv6_key->ipv6_hlimit;
		swkey->ip.frag = ipv6_key->ipv6_frag;
		memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
		       sizeof(swkey->ipv6.addr.src));
		memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
		       sizeof(swkey->ipv6.addr.dst));

		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
			err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);

	} else if (swkey->eth.type == htons(ETH_P_ARP) ||
		   swkey->eth.type == htons(ETH_P_RARP)) {
		const struct ovs_key_arp *arp_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
		attrs &= ~(1 << OVS_KEY_ATTR_ARP);

		key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		swkey->ipv4.addr.src = arp_key->arp_sip;
		swkey->ipv4.addr.dst = arp_key->arp_tip;
		if (arp_key->arp_op & htons(0xff00))
		swkey->ip.proto = ntohs(arp_key->arp_op);
		memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
		memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);

	*key_lenp = key_len;

/**
 * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
 * @flow: receives the extracted flow metadata (input port, priority, skb
 * mark, and tunnel key).
 * @key_len: length of the flow key, in bytes.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
 * get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */
int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len, const struct nlattr *attr)
	struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key;
	const struct nlattr *nla;

	flow->key.phy.in_port = DP_MAX_PORTS;
	flow->key.phy.priority = 0;
	flow->key.phy.skb_mark = 0;
	memset(tun_key, 0, sizeof(flow->key.tun_key));

	nla_for_each_nested(nla, attr, rem) {
		int type = nla_type(nla);

		if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
			if (nla_len(nla) != ovs_key_lens[type])

			case OVS_KEY_ATTR_PRIORITY:
				flow->key.phy.priority = nla_get_u32(nla);

			case OVS_KEY_ATTR_TUN_ID:
				tun_id = nla_get_be64(nla);

				if (tun_key->ipv4_dst) {
					if (!(tun_key->tun_flags & OVS_TNL_F_KEY))
					if (tun_key->tun_id != tun_id)

				tun_key->tun_id = tun_id;
				tun_key->tun_flags |= OVS_TNL_F_KEY;

			case OVS_KEY_ATTR_TUNNEL:
				if (tun_key->tun_flags & OVS_TNL_F_KEY) {
					tun_id = tun_key->tun_id;
					err = ipv4_tun_from_nlattr(nla, tun_key);

					if (!(tun_key->tun_flags & OVS_TNL_F_KEY))
					if (tun_key->tun_id != tun_id)

				err = ipv4_tun_from_nlattr(nla, tun_key);

			case OVS_KEY_ATTR_IN_PORT:
				if (nla_get_u32(nla) >= DP_MAX_PORTS)
				flow->key.phy.in_port = nla_get_u32(nla);

			case OVS_KEY_ATTR_SKB_MARK:
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && !defined(CONFIG_NETFILTER)
				if (nla_get_u32(nla) != 0)
				flow->key.phy.skb_mark = nla_get_u32(nla);

	flow->hash = ovs_flow_hash(&flow->key,
				   flow_key_start(&flow->key), key_len);
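
/* The reverse of ovs_flow_from_nlattrs(): serializes 'swkey' as a sequence of
 * OVS_KEY_ATTR_* Netlink attributes on 'skb', emitting the metadata
 * (priority, tunnel, in_port, skb_mark), the Ethernet/VLAN header, the L3
 * header for IPv4, IPv6 or ARP, and the L4 fields for TCP, UDP, ICMP, ICMPv6
 * and neighbour discovery. */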
int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
	struct ovs_key_ethernet *eth_key;
	struct nlattr *nla, *encap;

	if (swkey->phy.priority &&
	    nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
		goto nla_put_failure;

	if (swkey->tun_key.ipv4_dst &&
	    ipv4_tun_to_nlattr(skb, &swkey->tun_key))
		goto nla_put_failure;

	if ((swkey->tun_key.tun_flags & OVS_TNL_F_KEY) &&
	    nla_put_be64(skb, OVS_KEY_ATTR_TUN_ID, swkey->tun_key.tun_id))
		goto nla_put_failure;

	if (swkey->phy.in_port != DP_MAX_PORTS &&
	    nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
		goto nla_put_failure;

	if (swkey->phy.skb_mark &&
	    nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, swkey->phy.skb_mark))
		goto nla_put_failure;

	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
		goto nla_put_failure;
	eth_key = nla_data(nla);
	memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
	memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);

	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) ||
		    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci))
			goto nla_put_failure;
		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
		if (!swkey->eth.tci)

	if (swkey->eth.type == htons(ETH_P_802_2))

	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type))
		goto nla_put_failure;

	if (swkey->eth.type == htons(ETH_P_IP)) {
		struct ovs_key_ipv4 *ipv4_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
			goto nla_put_failure;
		ipv4_key = nla_data(nla);
		ipv4_key->ipv4_src = swkey->ipv4.addr.src;
		ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
		ipv4_key->ipv4_proto = swkey->ip.proto;
		ipv4_key->ipv4_tos = swkey->ip.tos;
		ipv4_key->ipv4_ttl = swkey->ip.ttl;
		ipv4_key->ipv4_frag = swkey->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		struct ovs_key_ipv6 *ipv6_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
			goto nla_put_failure;
		ipv6_key = nla_data(nla);
		memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
		       sizeof(ipv6_key->ipv6_src));
		memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
		       sizeof(ipv6_key->ipv6_dst));
		ipv6_key->ipv6_label = swkey->ipv6.label;
		ipv6_key->ipv6_proto = swkey->ip.proto;
		ipv6_key->ipv6_tclass = swkey->ip.tos;
		ipv6_key->ipv6_hlimit = swkey->ip.ttl;
		ipv6_key->ipv6_frag = swkey->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_ARP) ||
		   swkey->eth.type == htons(ETH_P_RARP)) {
		struct ovs_key_arp *arp_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
			goto nla_put_failure;
		arp_key = nla_data(nla);
		memset(arp_key, 0, sizeof(struct ovs_key_arp));
		arp_key->arp_sip = swkey->ipv4.addr.src;
		arp_key->arp_tip = swkey->ipv4.addr.dst;
		arp_key->arp_op = htons(swkey->ip.proto);
		memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
		memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);

	if ((swkey->eth.type == htons(ETH_P_IP) ||
	     swkey->eth.type == htons(ETH_P_IPV6)) &&
	    swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

		if (swkey->ip.proto == IPPROTO_TCP) {
			struct ovs_key_tcp *tcp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
				goto nla_put_failure;
			tcp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				tcp_key->tcp_src = swkey->ipv4.tp.src;
				tcp_key->tcp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				tcp_key->tcp_src = swkey->ipv6.tp.src;
				tcp_key->tcp_dst = swkey->ipv6.tp.dst;
		} else if (swkey->ip.proto == IPPROTO_UDP) {
			struct ovs_key_udp *udp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
				goto nla_put_failure;
			udp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				udp_key->udp_src = swkey->ipv4.tp.src;
				udp_key->udp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				udp_key->udp_src = swkey->ipv6.tp.src;
				udp_key->udp_dst = swkey->ipv6.tp.dst;
		} else if (swkey->eth.type == htons(ETH_P_IP) &&
			   swkey->ip.proto == IPPROTO_ICMP) {
			struct ovs_key_icmp *icmp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
				goto nla_put_failure;
			icmp_key = nla_data(nla);
			icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
			icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
			   swkey->ip.proto == IPPROTO_ICMPV6) {
			struct ovs_key_icmpv6 *icmpv6_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
					  sizeof(*icmpv6_key));
				goto nla_put_failure;
			icmpv6_key = nla_data(nla);
			icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
			icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);

			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
				struct ovs_key_nd *nd_key;

				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
					goto nla_put_failure;
				nd_key = nla_data(nla);
				memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
				       sizeof(nd_key->nd_target));
				memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
				memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);

	nla_nest_end(skb, encap);

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
	if (flow_cache == NULL)

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
	kmem_cache_destroy(flow_cache);