/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ndisc.h>
static struct kmem_cache *flow_cache;
static unsigned int hash_seed __read_mostly;
static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}
static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}
static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}
static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}
static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}
static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}
u64 flow_used_time(unsigned long flow_jiffies)
{
	struct timespec cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}
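
/*
 * Illustrative example (not part of the original file): flow_used_time()
 * maps a jiffies value onto the monotonic-clock millisecond timeline that
 * userspace sees in flow stats.  For a flow last used five seconds ago:
 *
 *	unsigned long used = jiffies - msecs_to_jiffies(5000);
 *	u64 used_ms = flow_used_time(used);	// ~5000 ms before "now"
 */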
#define SW_FLOW_KEY_OFFSET(field)				\
	(offsetof(struct sw_flow_key, field) +			\
			FIELD_SIZEOF(struct sw_flow_key, field))
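
/*
 * Illustrative note (not in the original): SW_FLOW_KEY_OFFSET(field)
 * evaluates to offsetof() + sizeof() of 'field', i.e. the number of bytes
 * of struct sw_flow_key that are meaningful once 'field' has been filled
 * in.  For example, SW_FLOW_KEY_OFFSET(ipv4.addr) is the key length for a
 * flow parsed up to and including the IPv4 addresses; this is the
 * 'key_len' later fed to flow_hash() and memcmp() in flow_tbl_lookup().
 */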
/**
 * skip_exthdr - skip any IPv6 extension headers
 * @skb: skbuff to parse
 * @start: offset of first extension header
 * @nexthdrp: Initially, points to the type of the extension header at @start.
 * This function updates it to point to the extension header at the final
 * offset.
 * @frag: Points to the @frag member in a &struct sw_flow_key.  This
 * function sets an appropriate %OVS_FRAG_TYPE_* value.
 *
 * This is based on ipv6_skip_exthdr() but adds the updates to *@frag.
 *
 * When there is more than one fragment header, this version reports whether
 * the final fragment header that it examines is a first fragment.
 *
 * Returns the final payload offset, or -1 on error.
 */
static int skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
		       u8 *frag)
{
	u8 nexthdr = *nexthdrp;

	while (ipv6_ext_hdr(nexthdr)) {
		struct ipv6_opt_hdr _hdr, *hp;
		int hdrlen;

		if (nexthdr == NEXTHDR_NONE)
			return -1;
		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
		if (hp == NULL)
			return -1;
		if (nexthdr == NEXTHDR_FRAGMENT) {
			__be16 _frag_off, *fp;

			fp = skb_header_pointer(skb,
						start+offsetof(struct frag_hdr,
							       frag_off),
						sizeof(_frag_off),
						&_frag_off);
			if (fp == NULL)
				return -1;

			if (ntohs(*fp) & ~0x7) {
				*frag = OVS_FRAG_TYPE_LATER;
				break;
			}
			*frag = OVS_FRAG_TYPE_FIRST;
			hdrlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH)
			hdrlen = (hp->hdrlen+2)<<2;
		else
			hdrlen = ipv6_optlen(hp);

		nexthdr = hp->nexthdr;
		start += hdrlen;
	}

	*nexthdrp = nexthdr;
	return start;
}
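
/*
 * Illustrative call sequence (assumed caller, mirroring parse_ipv6hdr()
 * below):
 *
 *	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
 *	int payload_ofs = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
 *
 *	payload_ofs = skip_exthdr(skb, payload_ofs, &nexthdr, &key->ip.frag);
 *	// On success, nexthdr holds the L4 protocol and payload_ofs the
 *	// offset of the L4 header; on error, payload_ofs is negative.
 */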
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
			 int *key_lenp)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	int payload_ofs;
	struct ipv6hdr *nh;
	uint8_t nexthdr;
	int err;

	*key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	ipv6_addr_copy(&key->ipv6.addr.src, &nh->saddr);
	ipv6_addr_copy(&key->ipv6.addr.dst, &nh->daddr);

	payload_ofs = skip_exthdr(skb, payload_ofs, &nexthdr, &key->ip.frag);
	if (unlikely(payload_ofs < 0))
		return -EINVAL;

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}
static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f
void flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
	u8 tcp_flags = 0;

	if (flow->key.eth.type == htons(ETH_P_IP) &&
	    flow->key.ip.proto == IPPROTO_TCP) {
		u8 *tcp = (u8 *)tcp_hdr(skb);
		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
	}

	spin_lock(&flow->lock);
	flow->used = jiffies;
	flow->packet_count++;
	flow->byte_count += skb->len;
	flow->tcp_flags |= tcp_flags;
	spin_unlock(&flow->lock);
}
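
/*
 * Illustrative note (not in the original): TCP_FLAGS_OFFSET/TCP_FLAG_MASK
 * pick out byte 13 of the TCP header, whose low six bits are the
 * URG/ACK/PSH/RST/SYN/FIN flags, e.g.:
 *
 *	u8 flags = ((u8 *)tcp_hdr(skb))[TCP_FLAGS_OFFSET] & TCP_FLAG_MASK;
 *	if (flags & 0x02)	// SYN set
 *		...
 */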
struct sw_flow_actions *flow_actions_alloc(const struct nlattr *actions)
{
	int actions_len = nla_len(actions);
	struct sw_flow_actions *sfa;

	/* At least DP_MAX_PORTS actions are required to be able to flood a
	 * packet to every port.  Factor of 2 allows for setting VLAN tags,
	 * etc. */
	if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
		return ERR_PTR(-EINVAL);

	sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
	if (!sfa)
		return ERR_PTR(-ENOMEM);

	sfa->actions_len = actions_len;
	memcpy(sfa->actions, nla_data(actions), actions_len);
	return sfa;
}
struct sw_flow *flow_alloc(void)
{
	struct sw_flow *flow;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&flow->lock);
	atomic_set(&flow->refcnt, 1);
	flow->sf_acts = NULL;

	return flow;
}
static struct hlist_head __rcu *find_bucket(struct flow_table *table, u32 hash)
{
	return flex_array_get(table->buckets,
			      (hash & (table->n_buckets - 1)));
}
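
/*
 * Illustrative note (not in the original): the bucket count is kept a
 * power of two (flow_tbl_expand() below doubles it), so the reduction
 * above is a mask rather than a modulo:
 *
 *	hash & (n_buckets - 1)  ==  hash % n_buckets	// n_buckets = 2^k
 */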
static struct flex_array __rcu *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array __rcu *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head *),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}
static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}
struct flow_table *flow_tbl_alloc(int new_size)
{
	struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->buckets = alloc_buckets(new_size);

	if (!table->buckets) {
		kfree(table);
		return NULL;
	}
	table->n_buckets = new_size;
	table->count = 0;

	return table;
}
static void flow_free(struct sw_flow *flow)
{
	/* Body elided in the source; releasing the reference held by the
	 * flow table matches the refcounting scheme used in this file. */
	flow_put(flow);
}
void flow_tbl_destroy(struct flow_table *table)
{
	int i;

	if (!table)
		return;

	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(table->buckets, i);
		struct hlist_node *node, *n;

		hlist_for_each_entry_safe(flow, node, n, head, hash_node) {
			hlist_del_init_rcu(&flow->hash_node);
			flow_free(flow);
		}
	}

	free_buckets(table->buckets);
	kfree(table);
}
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct flow_table *table = container_of(rcu, struct flow_table, rcu);

	flow_tbl_destroy(table);
}
void flow_tbl_deferred_destroy(struct flow_table *table)
{
	if (!table)
		return;

	call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
}
struct sw_flow *flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	struct hlist_node *n;
	int i;

	while (*bucket < table->n_buckets) {
		i = 0;
		head = flex_array_get(table->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, n, head, hash_node) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
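
/*
 * Illustrative use (assumed dump loop, not in the original): iterate every
 * flow in the table under RCU, with (*bucket, *last) acting as a resumable
 * cursor across calls:
 *
 *	u32 bucket = 0, obj = 0;
 *	struct sw_flow *flow;
 *
 *	rcu_read_lock();
 *	while ((flow = flow_tbl_next(table, &bucket, &obj)) != NULL)
 *		emit(flow);	// hypothetical per-flow callback
 *	rcu_read_unlock();
 */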
struct flow_table *flow_tbl_expand(struct flow_table *table)
{
	struct flow_table *new_table;
	int n_buckets = table->n_buckets * 2;
	int i;

	new_table = flow_tbl_alloc(n_buckets);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;
		struct hlist_node *n, *pos;

		head = flex_array_get(table->buckets, i);

		hlist_for_each_entry_safe(flow, n, pos, head, hash_node) {
			hlist_del_init_rcu(&flow->hash_node);
			flow_tbl_insert(new_table, flow);
		}
	}

	return new_table;
}
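
/*
 * Illustrative note (not in the original): expansion moves flows into the
 * new table rather than copying them.  The hlist_del_init_rcu() /
 * hlist_add_head_rcu() pair keeps concurrent RCU readers safe during the
 * move, but the old table must still be disposed of with
 * flow_tbl_deferred_destroy() so that readers holding it see out a full
 * grace period.
 */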
/* RCU callback used by flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	/* Body elided in the source; dropping the reference via flow_put()
	 * matches the refcounting scheme used in this file. */
	flow_put(flow);
}

/* Schedules 'flow' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void flow_deferred_free(struct sw_flow *flow)
{
	call_rcu(&flow->rcu, rcu_free_flow_callback);
}
void flow_hold(struct sw_flow *flow)
{
	atomic_inc(&flow->refcnt);
}

void flow_put(struct sw_flow *flow)
{
	if (!flow)
		return;

	if (atomic_dec_and_test(&flow->refcnt)) {
		/* The cast only drops the __rcu annotation; the flow is no
		 * longer reachable at this point. */
		kfree((struct sw_flow_actions __force *)flow->sf_acts);
		kmem_cache_free(flow_cache, flow);
	}
}
/* RCU callback used by flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
	struct sw_flow_actions *sf_acts = container_of(rcu,
			struct sw_flow_actions, rcu);
	kfree(sf_acts);
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct qtag_prefix {
		__be16 eth_type; /* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
					 sizeof(__be16))))
		return -ENOMEM;

	qp = (struct qtag_prefix *) skb->data;
	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
	__skb_pull(skb, sizeof(struct qtag_prefix));

	return 0;
}
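
/*
 * Illustrative note (not in the original): VLAN_TAG_PRESENT is ORed into
 * the stored TCI so that "no tag" is distinguishable from "tagged with
 * VID 0, PCP 0":
 *
 *	key->eth.tci == htons(0)			// untagged frame
 *	key->eth.tci == htons(VLAN_TAG_PRESENT)		// tagged, VID 0
 */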
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (ntohs(proto) >= 1536)
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));
	return llc->ethertype;
}
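
/*
 * Illustrative note (not in the original): a third-field value >= 1536
 * (0x600) is an EtherType; smaller values are 802.3 frame lengths, in
 * which case the payload may start with an 802.2 LLC/SNAP header carrying
 * the real EtherType.  E.g. the payload bytes
 *
 *	AA AA 03 00 00 00 08 06
 *
 * are SNAP-encapsulated ARP, and parse_ethertype() returns htons(0x0806).
 */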
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int *key_lenp, int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);
	int error = 0;
	int key_len;

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->ipv6.tp.src = htons(icmp->icmp6_type);
	key->ipv6.tp.dst = htons(icmp->icmp6_code);
	key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			goto out;
		if (unlikely(skb_linearize(skb))) {
			error = -ENOMEM;
			goto out;
		}

		nd = (struct nd_msg *)skb_transport_header(skb);
		ipv6_addr_copy(&key->ipv6.nd.target, &nd->target);
		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				(struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				goto invalid;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				memcpy(key->ipv6.nd.sll,
				       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				memcpy(key->ipv6.nd.tll,
				       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	goto out;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

out:
	*key_lenp = key_len;
	return error;
}
/**
 * flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 * @key_lenp: length of output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->dl_type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->dl_type values it is left untouched.
 */
int flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
		 int *key_lenp)
{
	int error = 0;
	int key_len = SW_FLOW_KEY_OFFSET(eth);
	struct ethhdr *eth;

	memset(key, 0, sizeof(*key));

	key->phy.priority = skb->priority;
	key->phy.tun_id = OVS_CB(skb)->tun_id;
	key->phy.in_port = in_port;

	skb_reset_mac_header(skb);

	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	eth = eth_hdr(skb);
	memcpy(key->eth.src, eth->h_source, ETH_ALEN);
	memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);

	__skb_pull(skb, 2 * ETH_ALEN);

	if (vlan_tx_tag_present(skb))
		key->eth.tci = htons(vlan_get_tci(skb));
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))
		return -ENOMEM;

	skb_reset_network_header(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));
	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);

		error = check_iphdr(skb);
		if (unlikely(error)) {
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			goto out;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			goto out;
		}
		if (nh->frag_off & htons(IP_MF) ||
			 skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv4.tp.src = tcp->source;
				key->ipv4.tp.dst = tcp->dest;
			}
		} else if (key->ip.proto == IPPROTO_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv4.tp.src = udp->source;
				key->ipv4.tp.dst = udp->dest;
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order. */
				key->ipv4.tp.src = htons(icmp->type);
				key->ipv4.tp.dst = htons(icmp->code);
			}
		}
	} else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
		struct arp_eth_header *arp;

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp->ar_hrd == htons(ARPHRD_ETHER)
				&& arp->ar_pro == htons(ETH_P_IP)
				&& arp->ar_hln == ETH_ALEN
				&& arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);

			if (key->ip.proto == ARPOP_REQUEST
					|| key->ip.proto == ARPOP_REPLY) {
				memcpy(&key->ipv4.addr.src, arp->ar_sip,
				       sizeof(key->ipv4.addr.src));
				memcpy(&key->ipv4.addr.dst, arp->ar_tip,
				       sizeof(key->ipv4.addr.dst));
				memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
				memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
				key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
			}
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key, &key_len);
		if (unlikely(nh_len < 0)) {
			if (nh_len == -EINVAL)
				skb->transport_header = skb->network_header;
			else
				error = nh_len;
			goto out;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			goto out;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv6.tp.src = tcp->source;
				key->ipv6.tp.dst = tcp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv6.tp.src = udp->source;
				key->ipv6.tp.dst = udp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, &key_len, nh_len);
				if (error < 0)
					goto out;
			}
		}
	}

out:
	*key_lenp = key_len;
	return error;
}
u32 flow_hash(const struct sw_flow_key *key, int key_len)
{
	return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), hash_seed);
}
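
/*
 * Illustrative note (not in the original): jhash2() hashes whole u32
 * words, so DIV_ROUND_UP() rounds key_len up and a few bytes past key_len
 * may be included in the hash.  That is only safe because every producer
 * (flow_extract(), flow_from_nlattrs()) zeroes the key first, making the
 * trailing bytes deterministic.
 */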
struct sw_flow *flow_tbl_lookup(struct flow_table *table,
				struct sw_flow_key *key, int key_len)
{
	struct sw_flow *flow;
	struct hlist_node *n;
	struct hlist_head *head;
	u32 hash;

	hash = flow_hash(key, key_len);

	head = find_bucket(table, hash);
	hlist_for_each_entry_rcu(flow, n, head, hash_node) {
		if (flow->hash == hash &&
		    !memcmp(&flow->key, key, key_len)) {
			return flow;
		}
	}
	return NULL;
}
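
/*
 * Illustrative use (assumed fast-path caller, not in the original):
 * lookups walk an RCU-protected hlist, so they must run inside an RCU
 * read-side section:
 *
 *	rcu_read_lock();
 *	flow = flow_tbl_lookup(table, &key, key_len);
 *	if (flow)
 *		flow_used(flow, skb);	// stats update takes flow->lock
 *	rcu_read_unlock();
 */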
void flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(table, flow->hash);
	hlist_add_head_rcu(&flow->hash_node, head);
	table->count++;
}

void flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	if (!hlist_unhashed(&flow->hash_node)) {
		hlist_del_init_rcu(&flow->hash_node);
		table->count--;
		BUG_ON(table->count < 0);
	}
}
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
const u32 ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
	[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
	[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
	[OVS_KEY_ATTR_8021Q] = sizeof(struct ovs_key_8021q),
	[OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
	[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
	[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
	[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
	[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
	[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
	[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
	[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),

	[OVS_KEY_ATTR_TUN_ID] = sizeof(__be64),
};
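
/*
 * Illustrative use (mirroring the validation loop in flow_from_nlattrs()
 * below): an attribute is accepted only if its payload length matches the
 * table exactly:
 *
 *	u16 type = nla_type(nla);
 *	if (type > OVS_KEY_ATTR_MAX || nla_len(nla) != ovs_key_lens[type])
 *		return -EINVAL;
 */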
static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
				  const struct nlattr *a[], u64 *attrs)
{
	const struct ovs_key_icmp *icmp_key;
	const struct ovs_key_tcp *tcp_key;
	const struct ovs_key_udp *udp_key;

	switch (swkey->ip.proto) {
	case IPPROTO_TCP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		swkey->ipv4.tp.src = tcp_key->tcp_src;
		swkey->ipv4.tp.dst = tcp_key->tcp_dst;
		break;

	case IPPROTO_UDP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		swkey->ipv4.tp.src = udp_key->udp_src;
		swkey->ipv4.tp.dst = udp_key->udp_dst;
		break;

	case IPPROTO_ICMP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_ICMP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
		swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);
		break;
	}

	return 0;
}
static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
				  const struct nlattr *a[], u64 *attrs)
{
	const struct ovs_key_icmpv6 *icmpv6_key;
	const struct ovs_key_tcp *tcp_key;
	const struct ovs_key_udp *udp_key;

	switch (swkey->ip.proto) {
	case IPPROTO_TCP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		swkey->ipv6.tp.src = tcp_key->tcp_src;
		swkey->ipv6.tp.dst = tcp_key->tcp_dst;
		break;

	case IPPROTO_UDP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		swkey->ipv6.tp.src = udp_key->udp_src;
		swkey->ipv6.tp.dst = udp_key->udp_dst;
		break;

	case IPPROTO_ICMPV6:
		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
		swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);

		if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
		    swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
			const struct ovs_key_nd *nd_key;

			if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
				return -EINVAL;
			*attrs &= ~(1 << OVS_KEY_ATTR_ND);

			*key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
			nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
			memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
			       sizeof(swkey->ipv6.nd.target));
			memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
			memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
		}
		break;
	}

	return 0;
}
/**
 * flow_from_nlattrs - parses Netlink attributes into a flow key.
 * @swkey: receives the extracted flow key.
 * @key_lenp: number of bytes used in @swkey.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 */
int flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
		      const struct nlattr *attr)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	const struct ovs_key_ethernet *eth_key;
	const struct nlattr *nla;
	int key_len;
	u64 attrs;
	int rem;

	memset(swkey, 0, sizeof(struct sw_flow_key));
	key_len = SW_FLOW_KEY_OFFSET(eth);

	attrs = 0;
	nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);

		if (type > OVS_KEY_ATTR_MAX || attrs & (1ULL << type) ||
		    nla_len(nla) != ovs_key_lens[type])
			return -EINVAL;

		attrs |= 1ULL << type;
		a[type] = nla;
	}
	if (rem)
		return -EINVAL;
	/* Metadata attributes. */
	if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
		swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]);
		attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
	}
	if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
		if (in_port >= DP_MAX_PORTS)
			return -EINVAL;
		swkey->phy.in_port = in_port;
		attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
	} else {
		swkey->phy.in_port = USHRT_MAX;
	}
	if (attrs & (1ULL << OVS_KEY_ATTR_TUN_ID)) {
		swkey->phy.tun_id = nla_get_be64(a[OVS_KEY_ATTR_TUN_ID]);
		attrs &= ~(1ULL << OVS_KEY_ATTR_TUN_ID);
	}
	/* Data attributes. */
	if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
		return -EINVAL;
	attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);

	eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
	memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
	memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);

	if (attrs & (1 << OVS_KEY_ATTR_8021Q)) {
		const struct ovs_key_8021q *q_key;

		q_key = nla_data(a[OVS_KEY_ATTR_8021Q]);
		/* Only standard 0x8100 VLANs currently supported. */
		if (q_key->q_tpid != htons(ETH_P_8021Q))
			return -EINVAL;
		if (q_key->q_tci & htons(VLAN_TAG_PRESENT))
			return -EINVAL;
		swkey->eth.tci = q_key->q_tci | htons(VLAN_TAG_PRESENT);

		attrs &= ~(1 << OVS_KEY_ATTR_8021Q);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
		swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
		if (ntohs(swkey->eth.type) < 1536)
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
	} else {
		swkey->eth.type = htons(ETH_P_802_2);
	}
	if (swkey->eth.type == htons(ETH_P_IP)) {
		const struct ovs_key_ipv4 *ipv4_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);

		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
			return -EINVAL;
		swkey->ip.proto = ipv4_key->ipv4_proto;
		swkey->ip.tos = ipv4_key->ipv4_tos;
		swkey->ip.ttl = ipv4_key->ipv4_ttl;
		swkey->ip.frag = ipv4_key->ipv4_frag;
		swkey->ipv4.addr.src = ipv4_key->ipv4_src;
		swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;

		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
			int err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);
			if (err)
				return err;
		}
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);

		key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
			return -EINVAL;
		swkey->ipv6.label = ipv6_key->ipv6_label;
		swkey->ip.proto = ipv6_key->ipv6_proto;
		swkey->ip.tos = ipv6_key->ipv6_tclass;
		swkey->ip.ttl = ipv6_key->ipv6_hlimit;
		swkey->ip.frag = ipv6_key->ipv6_frag;
		memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
		       sizeof(swkey->ipv6.addr.src));
		memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
		       sizeof(swkey->ipv6.addr.dst));

		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
			int err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);
			if (err)
				return err;
		}
	} else if (swkey->eth.type == htons(ETH_P_ARP)) {
		const struct ovs_key_arp *arp_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_ARP);

		key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		swkey->ipv4.addr.src = arp_key->arp_sip;
		swkey->ipv4.addr.dst = arp_key->arp_tip;
		if (arp_key->arp_op & htons(0xff00))
			return -EINVAL;
		swkey->ip.proto = ntohs(arp_key->arp_op);
		memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
		memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);
	}

	if (attrs)
		return -EINVAL;
	*key_lenp = key_len;

	return 0;
}
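
/*
 * Illustrative note (not in the original): 'attrs' is a bitmap with one
 * bit per OVS_KEY_ATTR_* type.  Every consumer clears the bit it handled,
 * so any bit still set at the end means userspace supplied an attribute
 * that is not valid for this flow (e.g. OVS_KEY_ATTR_TCP on a UDP flow),
 * and the whole key is rejected with -EINVAL.
 */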
/**
 * flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
 * @priority: receives the extracted priority.
 * @in_port: receives the extracted input port.
 * @tun_id: receives the extracted tunnel ID.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
 * get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */
int flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id,
			       const struct nlattr *attr)
{
	const struct nlattr *nla;
	int rem;

	*in_port = USHRT_MAX;
	*tun_id = 0;
	*priority = 0;

	nla_for_each_nested(nla, attr, rem) {
		int type = nla_type(nla);

		if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] != 0) {
			if (nla_len(nla) != ovs_key_lens[type])
				return -EINVAL;

			switch (type) {
			case OVS_KEY_ATTR_PRIORITY:
				*priority = nla_get_u32(nla);
				break;

			case OVS_KEY_ATTR_TUN_ID:
				*tun_id = nla_get_be64(nla);
				break;

			case OVS_KEY_ATTR_IN_PORT:
				if (nla_get_u32(nla) >= DP_MAX_PORTS)
					return -EINVAL;
				*in_port = nla_get_u32(nla);
				break;
			}
		}
	}
	if (rem)
		return -EINVAL;

	return 0;
}
int flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
{
	struct ovs_key_ethernet *eth_key;
	struct nlattr *nla;

	if (swkey->phy.priority)
		NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority);

	if (swkey->phy.tun_id != cpu_to_be64(0))
		NLA_PUT_BE64(skb, OVS_KEY_ATTR_TUN_ID, swkey->phy.tun_id);

	if (swkey->phy.in_port != USHRT_MAX)
		NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port);

	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
	if (!nla)
		goto nla_put_failure;
	eth_key = nla_data(nla);
	memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
	memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);

	if (swkey->eth.tci != htons(0)) {
		struct ovs_key_8021q q_key;

		q_key.q_tpid = htons(ETH_P_8021Q);
		q_key.q_tci = swkey->eth.tci & ~htons(VLAN_TAG_PRESENT);
		NLA_PUT(skb, OVS_KEY_ATTR_8021Q, sizeof(q_key), &q_key);
	}

	if (swkey->eth.type == htons(ETH_P_802_2))
		return 0;

	NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type);

	if (swkey->eth.type == htons(ETH_P_IP)) {
		struct ovs_key_ipv4 *ipv4_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
		if (!nla)
			goto nla_put_failure;
		ipv4_key = nla_data(nla);
		ipv4_key->ipv4_src = swkey->ipv4.addr.src;
		ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
		ipv4_key->ipv4_proto = swkey->ip.proto;
		ipv4_key->ipv4_tos = swkey->ip.tos;
		ipv4_key->ipv4_ttl = swkey->ip.ttl;
		ipv4_key->ipv4_frag = swkey->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		struct ovs_key_ipv6 *ipv6_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
		if (!nla)
			goto nla_put_failure;
		ipv6_key = nla_data(nla);
		memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
		       sizeof(ipv6_key->ipv6_src));
		memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
		       sizeof(ipv6_key->ipv6_dst));
		ipv6_key->ipv6_label = swkey->ipv6.label;
		ipv6_key->ipv6_proto = swkey->ip.proto;
		ipv6_key->ipv6_tclass = swkey->ip.tos;
		ipv6_key->ipv6_hlimit = swkey->ip.ttl;
		ipv6_key->ipv6_frag = swkey->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_ARP)) {
		struct ovs_key_arp *arp_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
		if (!nla)
			goto nla_put_failure;
		arp_key = nla_data(nla);
		memset(arp_key, 0, sizeof(struct ovs_key_arp));
		arp_key->arp_sip = swkey->ipv4.addr.src;
		arp_key->arp_tip = swkey->ipv4.addr.dst;
		arp_key->arp_op = htons(swkey->ip.proto);
		memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
		memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
	}
	if ((swkey->eth.type == htons(ETH_P_IP) ||
	     swkey->eth.type == htons(ETH_P_IPV6)) &&
	     swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

		if (swkey->ip.proto == IPPROTO_TCP) {
			struct ovs_key_tcp *tcp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
			if (!nla)
				goto nla_put_failure;
			tcp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				tcp_key->tcp_src = swkey->ipv4.tp.src;
				tcp_key->tcp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				tcp_key->tcp_src = swkey->ipv6.tp.src;
				tcp_key->tcp_dst = swkey->ipv6.tp.dst;
			}
		} else if (swkey->ip.proto == IPPROTO_UDP) {
			struct ovs_key_udp *udp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
			if (!nla)
				goto nla_put_failure;
			udp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				udp_key->udp_src = swkey->ipv4.tp.src;
				udp_key->udp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				udp_key->udp_src = swkey->ipv6.tp.src;
				udp_key->udp_dst = swkey->ipv6.tp.dst;
			}
		} else if (swkey->eth.type == htons(ETH_P_IP) &&
			   swkey->ip.proto == IPPROTO_ICMP) {
			struct ovs_key_icmp *icmp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
			if (!nla)
				goto nla_put_failure;
			icmp_key = nla_data(nla);
			icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
			icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
			   swkey->ip.proto == IPPROTO_ICMPV6) {
			struct ovs_key_icmpv6 *icmpv6_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
						sizeof(*icmpv6_key));
			if (!nla)
				goto nla_put_failure;
			icmpv6_key = nla_data(nla);
			icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
			icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);

			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
				struct ovs_key_nd *nd_key;

				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
				if (!nla)
					goto nla_put_failure;
				nd_key = nla_data(nla);
				memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
				       sizeof(nd_key->nd_target));
				memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
				memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
			}
		}
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
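
/*
 * Illustrative note (not in the original): the NLA_PUT*() macros used in
 * flow_to_nlattrs() jump to the function-local nla_put_failure label when
 * the skb lacks tailroom, which is why the function ends by returning
 * -EMSGSIZE from that label.  Callers are expected to size the skb up
 * front, e.g.:
 *
 *	struct sk_buff *reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 */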
/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int flow_init(void)
{
	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
					0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	get_random_bytes(&hash_seed, sizeof(hash_seed));

	return 0;
}
/* Uninitializes the flow module. */
void flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}