/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ndisc.h>

static struct kmem_cache *flow_cache;
static unsigned int hash_seed __read_mostly;
static int check_header(struct sk_buff *skb, int len)
{
        if (unlikely(skb->len < len))
                return -EINVAL;
        if (unlikely(!pskb_may_pull(skb, len)))
                return -ENOMEM;
        return 0;
}

static bool arphdr_ok(struct sk_buff *skb)
{
        return pskb_may_pull(skb, skb_network_offset(skb) +
                                  sizeof(struct arp_eth_header));
}
static int check_iphdr(struct sk_buff *skb)
{
        unsigned int nh_ofs = skb_network_offset(skb);
        unsigned int ip_len;
        int err;

        err = check_header(skb, nh_ofs + sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        ip_len = ip_hdrlen(skb);
        if (unlikely(ip_len < sizeof(struct iphdr) ||
                     skb->len < nh_ofs + ip_len))
                return -EINVAL;

        skb_set_transport_header(skb, nh_ofs + ip_len);
        return 0;
}
static bool tcphdr_ok(struct sk_buff *skb)
{
        int th_ofs = skb_transport_offset(skb);
        int tcp_len;

        if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
                return false;

        tcp_len = tcp_hdrlen(skb);
        if (unlikely(tcp_len < sizeof(struct tcphdr) ||
                     skb->len < th_ofs + tcp_len))
                return false;

        return true;
}
static bool udphdr_ok(struct sk_buff *skb)
{
        return pskb_may_pull(skb, skb_transport_offset(skb) +
                                  sizeof(struct udphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
        return pskb_may_pull(skb, skb_transport_offset(skb) +
                                  sizeof(struct icmphdr));
}
u64 flow_used_time(unsigned long flow_jiffies)
{
        struct timespec cur_ts;
        u64 cur_ms, idle_ms;

        ktime_get_ts(&cur_ts);
        idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
        cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
                 cur_ts.tv_nsec / NSEC_PER_MSEC;

        return cur_ms - idle_ms;
}
#define SW_FLOW_KEY_OFFSET(field)                       \
        (offsetof(struct sw_flow_key, field) +          \
         FIELD_SIZEOF(struct sw_flow_key, field))
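/*
 * Illustrative note (added commentary, not part of the original file):
 * SW_FLOW_KEY_OFFSET() yields the offset of the first byte *past* 'field',
 * so it can be used as the number of significant bytes in a partially
 * populated key.  For example, a key for a non-IP Ethernet frame only needs
 * the bytes up to and including the 'eth' member:
 *
 *      int key_len = SW_FLOW_KEY_OFFSET(eth);
 *
 * and only those key_len bytes are hashed and compared during lookup.
 */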
/**
 * skip_exthdr - skip any IPv6 extension headers
 * @skb: skbuff to parse
 * @start: offset of first extension header
 * @nexthdrp: Initially, points to the type of the extension header at @start.
 *            This function updates it to point to the extension header at the
 *            final offset.
 * @frag: Points to the @frag member in a &struct sw_flow_key.  This
 *        function sets an appropriate %OVS_FRAG_TYPE_* value.
 *
 * This is based on ipv6_skip_exthdr() but adds the updates to *@frag.
 *
 * When there is more than one fragment header, this version reports whether
 * the final fragment header that it examines is a first fragment.
 *
 * Returns the final payload offset, or a negative error code on error.
 */
static int skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
                       u8 *frag)
{
        u8 nexthdr = *nexthdrp;

        while (ipv6_ext_hdr(nexthdr)) {
                struct ipv6_opt_hdr _hdr, *hp;
                int hdrlen;

                if (nexthdr == NEXTHDR_NONE)
                        return -EINVAL;
                hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
                if (hp == NULL)
                        return -EINVAL;
                if (nexthdr == NEXTHDR_FRAGMENT) {
                        __be16 _frag_off, *fp;
                        fp = skb_header_pointer(skb,
                                                start+offsetof(struct frag_hdr,
                                                               frag_off),
                                                sizeof(_frag_off),
                                                &_frag_off);
                        if (fp == NULL)
                                return -EINVAL;

                        if (ntohs(*fp) & ~0x7) {
                                *frag = OVS_FRAG_TYPE_LATER;
                                break;
                        }
                        *frag = OVS_FRAG_TYPE_FIRST;
                        hdrlen = 8;
                } else if (nexthdr == NEXTHDR_AUTH)
                        hdrlen = (hp->hdrlen+2)<<2;
                else
                        hdrlen = ipv6_optlen(hp);

                nexthdr = hp->nexthdr;
                start += hdrlen;
        }

        *nexthdrp = nexthdr;
        return start;
}
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
                         int *key_lenp)
{
        unsigned int nh_ofs = skb_network_offset(skb);
        unsigned int nh_len;
        int payload_ofs;
        struct ipv6hdr *nh;
        u8 nexthdr;
        int err;

        *key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);

        err = check_header(skb, nh_ofs + sizeof(*nh));
        if (unlikely(err))
                return err;

        nh = ipv6_hdr(skb);
        nexthdr = nh->nexthdr;
        payload_ofs = (u8 *)(nh + 1) - skb->data;

        key->ip.proto = NEXTHDR_NONE;
        key->ip.tos = ipv6_get_dsfield(nh);
        key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
        ipv6_addr_copy(&key->ipv6.addr.src, &nh->saddr);
        ipv6_addr_copy(&key->ipv6.addr.dst, &nh->daddr);

        payload_ofs = skip_exthdr(skb, payload_ofs, &nexthdr, &key->ip.frag);
        if (unlikely(payload_ofs < 0))
                return -EINVAL;

        nh_len = payload_ofs - nh_ofs;
        skb_set_transport_header(skb, nh_ofs + nh_len);
        key->ip.proto = nexthdr;
        return nh_len;
}
static bool icmp6hdr_ok(struct sk_buff *skb)
{
        return pskb_may_pull(skb, skb_transport_offset(skb) +
                                  sizeof(struct icmp6hdr));
}
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f
void flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
        u8 tcp_flags = 0;

        if (flow->key.eth.type == htons(ETH_P_IP) &&
            flow->key.ip.proto == IPPROTO_TCP) {
                u8 *tcp = (u8 *)tcp_hdr(skb);
                tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
        }

        spin_lock(&flow->lock);
        flow->used = jiffies;
        flow->packet_count++;
        flow->byte_count += skb->len;
        flow->tcp_flags |= tcp_flags;
        spin_unlock(&flow->lock);
}
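/*
 * Usage sketch (an assumption, not taken from this file): the datapath is
 * expected to call flow_used() once per packet after a successful lookup,
 * e.g. in its receive path:
 *
 *      flow = flow_tbl_lookup(table, &key, key_len);
 *      if (flow)
 *              flow_used(flow, skb);
 *
 * so that 'used', the packet/byte counters, and the accumulated TCP flags
 * stay current for later flow dumps to userspace.
 */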
struct sw_flow_actions *flow_actions_alloc(const struct nlattr *actions)
{
        int actions_len = nla_len(actions);
        struct sw_flow_actions *sfa;

        /* At least DP_MAX_PORTS actions are required to be able to flood a
         * packet to every port.  Factor of 2 allows for setting VLAN tags,
         * etc. */
        if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
                return ERR_PTR(-EINVAL);

        sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
        if (!sfa)
                return ERR_PTR(-ENOMEM);

        sfa->actions_len = actions_len;
        memcpy(sfa->actions, nla_data(actions), actions_len);
        return sfa;
}
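/*
 * Usage sketch (illustrative, not from the original file): the function
 * returns an ERR_PTR() rather than NULL on failure, so a caller is expected
 * to check it with IS_ERR()/PTR_ERR():
 *
 *      struct sw_flow_actions *acts = flow_actions_alloc(actions_attr);
 *      if (IS_ERR(acts))
 *              return PTR_ERR(acts);
 */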
struct sw_flow *flow_alloc(void)
{
        struct sw_flow *flow;

        flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&flow->lock);
        atomic_set(&flow->refcnt, 1);
        flow->sf_acts = NULL;

        return flow;
}
static struct hlist_head __rcu *find_bucket(struct flow_table *table, u32 hash)
{
        return flex_array_get(table->buckets,
                              (hash & (table->n_buckets - 1)));
}
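/*
 * Note (added commentary): the bucket index is computed with a bitwise AND,
 * so n_buckets must be a power of two.  For example, with n_buckets == 1024
 * a hash of 0x12345678 selects bucket 0x12345678 & 0x3ff == 0x278.
 * flow_tbl_expand() preserves this invariant by doubling the bucket count.
 */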
static struct flex_array __rcu *alloc_buckets(unsigned int n_buckets)
{
        struct flex_array __rcu *buckets;
        int i, err;

        buckets = flex_array_alloc(sizeof(struct hlist_head *),
                                   n_buckets, GFP_KERNEL);
        if (!buckets)
                return NULL;

        err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
        if (err) {
                flex_array_free(buckets);
                return NULL;
        }

        for (i = 0; i < n_buckets; i++)
                INIT_HLIST_HEAD((struct hlist_head *)
                                        flex_array_get(buckets, i));

        return buckets;
}

static void free_buckets(struct flex_array *buckets)
{
        flex_array_free(buckets);
}
struct flow_table *flow_tbl_alloc(int new_size)
{
        struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);

        if (!table)
                return NULL;

        table->buckets = alloc_buckets(new_size);

        if (!table->buckets) {
                kfree(table);
                return NULL;
        }
        table->n_buckets = new_size;
        table->count = 0;

        return table;
}
static void flow_free(struct sw_flow *flow)
{
        flow_put(flow);
}
void flow_tbl_destroy(struct flow_table *table)
{
        int i;

        if (!table)
                return;

        for (i = 0; i < table->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = flex_array_get(table->buckets, i);
                struct hlist_node *node, *n;

                hlist_for_each_entry_safe(flow, node, n, head, hash_node) {
                        hlist_del_init_rcu(&flow->hash_node);
                        flow_free(flow);
                }
        }

        free_buckets(table->buckets);
        kfree(table);
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
        struct flow_table *table = container_of(rcu, struct flow_table, rcu);

        flow_tbl_destroy(table);
}

void flow_tbl_deferred_destroy(struct flow_table *table)
{
        if (!table)
                return;

        call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
}
struct sw_flow *flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        struct hlist_node *n;
        int i;

        while (*bucket < table->n_buckets) {
                i = 0;
                head = flex_array_get(table->buckets, *bucket);
                hlist_for_each_entry_rcu(flow, n, head, hash_node) {
                        if (i < *last) {
                                i++;
                                continue;
                        }
                        *last = i + 1;
                        return flow;
                }
                (*bucket)++;
                *last = 0;
        }

        return NULL;
}
struct flow_table *flow_tbl_expand(struct flow_table *table)
{
        struct flow_table *new_table;
        int n_buckets = table->n_buckets * 2;
        int i;

        new_table = flow_tbl_alloc(n_buckets);
        if (!new_table)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < table->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head;
                struct hlist_node *n, *pos;

                head = flex_array_get(table->buckets, i);

                hlist_for_each_entry_safe(flow, n, pos, head, hash_node) {
                        hlist_del_init_rcu(&flow->hash_node);
                        flow_tbl_insert(new_table, flow);
                }
        }

        return new_table;
}
/* RCU callback used by flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

        flow_put(flow);
}

/* Schedules 'flow' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void flow_deferred_free(struct sw_flow *flow)
{
        call_rcu(&flow->rcu, rcu_free_flow_callback);
}
void flow_hold(struct sw_flow *flow)
{
        atomic_inc(&flow->refcnt);
}

void flow_put(struct sw_flow *flow)
{
        if (unlikely(!flow))
                return;

        if (atomic_dec_and_test(&flow->refcnt)) {
                kfree((struct sw_flow_actions __force *)flow->sf_acts);
                kmem_cache_free(flow_cache, flow);
        }
}
/* RCU callback used by flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
        struct sw_flow_actions *sf_acts = container_of(rcu,
                        struct sw_flow_actions, rcu);
        kfree(sf_acts);
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
        call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}
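/*
 * Sketch of the intended RCU update pattern (an assumption based on the
 * helpers above, not code from this file): when a flow's actions are
 * replaced, readers may still be executing the old actions, so the writer
 * publishes the new actions first and defers freeing the old ones:
 *
 *      old_acts = flow->sf_acts;
 *      rcu_assign_pointer(flow->sf_acts, new_acts);
 *      flow_deferred_free_acts(old_acts);
 */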
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
        struct qtag_prefix {
                __be16 eth_type; /* ETH_P_8021Q */
                __be16 tci;
        };
        struct qtag_prefix *qp;

        if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
                                         sizeof(__be16))))
                return -ENOMEM;

        qp = (struct qtag_prefix *) skb->data;
        key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
        __skb_pull(skb, sizeof(struct qtag_prefix));

        return 0;
}
static __be16 parse_ethertype(struct sk_buff *skb)
{
        struct llc_snap_hdr {
                u8  dsap;  /* Always 0xAA */
                u8  ssap;  /* Always 0xAA */
                u8  ctrl;
                u8  oui[3];
                __be16 ethertype;
        };
        struct llc_snap_hdr *llc;
        __be16 proto;

        proto = *(__be16 *) skb->data;
        __skb_pull(skb, sizeof(__be16));

        if (ntohs(proto) >= 1536)
                return proto;

        if (skb->len < sizeof(struct llc_snap_hdr))
                return htons(ETH_P_802_2);

        if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
                return htons(0);

        llc = (struct llc_snap_hdr *) skb->data;
        if (llc->dsap != LLC_SAP_SNAP ||
            llc->ssap != LLC_SAP_SNAP ||
            (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
                return htons(ETH_P_802_2);

        __skb_pull(skb, sizeof(struct llc_snap_hdr));
        return llc->ethertype;
}
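/*
 * Worked example (added commentary): for an Ethernet II frame the two bytes
 * after the MAC addresses are e.g. 0x08 0x00 (2048 >= 1536), so 0x0800
 * (IPv4) is returned directly.  For an 802.3 frame those bytes hold a
 * length below 1536; if they are followed by AA AA 03 00 00 00 08 00, an
 * LLC/SNAP header with a zero OUI, the SNAP ethertype 0x0800 is returned,
 * otherwise the frame is classified as ETH_P_802_2.
 */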
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
                        int *key_lenp, int nh_len)
{
        struct icmp6hdr *icmp = icmp6_hdr(skb);
        int error = 0;
        int key_len;

        /* The ICMPv6 type and code fields use the 16-bit transport port
         * fields, so we need to store them in 16-bit network byte order.
         */
        key->ipv6.tp.src = htons(icmp->icmp6_type);
        key->ipv6.tp.dst = htons(icmp->icmp6_code);
        key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);

        if (icmp->icmp6_code == 0 &&
            (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
             icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
                int icmp_len = skb->len - skb_transport_offset(skb);
                struct nd_msg *nd;
                int offset;

                key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

                /* In order to process neighbor discovery options, we need the
                 * entire packet.
                 */
                if (unlikely(icmp_len < sizeof(*nd)))
                        goto out;
                if (unlikely(skb_linearize(skb))) {
                        error = -ENOMEM;
                        goto out;
                }

                nd = (struct nd_msg *)skb_transport_header(skb);
                ipv6_addr_copy(&key->ipv6.nd.target, &nd->target);
                key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

                icmp_len -= sizeof(*nd);
                offset = 0;
                while (icmp_len >= 8) {
                        struct nd_opt_hdr *nd_opt =
                                (struct nd_opt_hdr *)(nd->opt + offset);
                        int opt_len = nd_opt->nd_opt_len * 8;

                        if (unlikely(!opt_len || opt_len > icmp_len))
                                goto invalid;

                        /* Store the link layer address if the appropriate
                         * option is provided.  It is considered an error if
                         * the same link layer option is specified twice.
                         */
                        if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
                            && opt_len == 8) {
                                if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
                                        goto invalid;
                                memcpy(key->ipv6.nd.sll,
                                    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
                        } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
                                   && opt_len == 8) {
                                if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
                                        goto invalid;
                                memcpy(key->ipv6.nd.tll,
                                    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
                        }

                        icmp_len -= opt_len;
                        offset += opt_len;
                }
        }

        goto out;

invalid:
        memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
        memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
        memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

out:
        *key_lenp = key_len;
        return error;
}
/**
 * flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 *       Ethernet header
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 * @key_lenp: length of output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 */
int flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
                 int *key_lenp)
{
        int error = 0;
        int key_len = SW_FLOW_KEY_OFFSET(eth);
        struct ethhdr *eth;

        memset(key, 0, sizeof(*key));

        key->phy.priority = skb->priority;
        key->phy.tun_id = OVS_CB(skb)->tun_id;
        key->phy.in_port = in_port;

        skb_reset_mac_header(skb);

        /* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
         * header in the linear data area.
         */
        eth = eth_hdr(skb);
        memcpy(key->eth.src, eth->h_source, ETH_ALEN);
        memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);

        __skb_pull(skb, 2 * ETH_ALEN);

        if (vlan_tx_tag_present(skb))
                key->eth.tci = htons(vlan_get_tci(skb));
        else if (eth->h_proto == htons(ETH_P_8021Q))
                if (unlikely(parse_vlan(skb, key)))
                        return -ENOMEM;

        key->eth.type = parse_ethertype(skb);
        if (unlikely(key->eth.type == htons(0)))
                return -ENOMEM;

        skb_reset_network_header(skb);
        __skb_push(skb, skb->data - skb_mac_header(skb));

        /* Network layer. */
        if (key->eth.type == htons(ETH_P_IP)) {
                struct iphdr *nh;
                __be16 offset;

                key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);

                error = check_iphdr(skb);
                if (unlikely(error)) {
                        if (error == -EINVAL) {
                                skb->transport_header = skb->network_header;
                                error = 0;
                        }
                        goto out;
                }

                nh = ip_hdr(skb);
                key->ipv4.addr.src = nh->saddr;
                key->ipv4.addr.dst = nh->daddr;

                key->ip.proto = nh->protocol;
                key->ip.tos = nh->tos;

                offset = nh->frag_off & htons(IP_OFFSET);
                if (offset) {
                        key->ip.frag = OVS_FRAG_TYPE_LATER;
                        goto out;
                }
                if (nh->frag_off & htons(IP_MF) ||
                         skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        key->ip.frag = OVS_FRAG_TYPE_FIRST;

                /* Transport layer. */
                if (key->ip.proto == IPPROTO_TCP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                        if (tcphdr_ok(skb)) {
                                struct tcphdr *tcp = tcp_hdr(skb);
                                key->ipv4.tp.src = tcp->source;
                                key->ipv4.tp.dst = tcp->dest;
                        }
                } else if (key->ip.proto == IPPROTO_UDP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                        if (udphdr_ok(skb)) {
                                struct udphdr *udp = udp_hdr(skb);
                                key->ipv4.tp.src = udp->source;
                                key->ipv4.tp.dst = udp->dest;
                        }
                } else if (key->ip.proto == IPPROTO_ICMP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                        if (icmphdr_ok(skb)) {
                                struct icmphdr *icmp = icmp_hdr(skb);
                                /* The ICMP type and code fields use the 16-bit
                                 * transport port fields, so we need to store
                                 * them in 16-bit network byte order. */
                                key->ipv4.tp.src = htons(icmp->type);
                                key->ipv4.tp.dst = htons(icmp->code);
                        }
                }

        } else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
                struct arp_eth_header *arp;

                arp = (struct arp_eth_header *)skb_network_header(skb);

                if (arp->ar_hrd == htons(ARPHRD_ETHER)
                                && arp->ar_pro == htons(ETH_P_IP)
                                && arp->ar_hln == ETH_ALEN
                                && arp->ar_pln == 4) {

                        /* We only match on the lower 8 bits of the opcode. */
                        if (ntohs(arp->ar_op) <= 0xff)
                                key->ip.proto = ntohs(arp->ar_op);

                        if (key->ip.proto == ARPOP_REQUEST
                                        || key->ip.proto == ARPOP_REPLY) {
                                memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
                                memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
                                memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
                                memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
                                key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
                        }
                }
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                int nh_len;             /* IPv6 Header + Extensions */

                nh_len = parse_ipv6hdr(skb, key, &key_len);
                if (unlikely(nh_len < 0)) {
                        if (nh_len == -EINVAL)
                                skb->transport_header = skb->network_header;
                        else
                                error = nh_len;
                        goto out;
                }

                if (key->ip.frag == OVS_FRAG_TYPE_LATER)
                        goto out;
                if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        key->ip.frag = OVS_FRAG_TYPE_FIRST;

                /* Transport layer. */
                if (key->ip.proto == NEXTHDR_TCP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                        if (tcphdr_ok(skb)) {
                                struct tcphdr *tcp = tcp_hdr(skb);
                                key->ipv6.tp.src = tcp->source;
                                key->ipv6.tp.dst = tcp->dest;
                        }
                } else if (key->ip.proto == NEXTHDR_UDP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                        if (udphdr_ok(skb)) {
                                struct udphdr *udp = udp_hdr(skb);
                                key->ipv6.tp.src = udp->source;
                                key->ipv6.tp.dst = udp->dest;
                        }
                } else if (key->ip.proto == NEXTHDR_ICMP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                        if (icmp6hdr_ok(skb)) {
                                error = parse_icmpv6(skb, key, &key_len, nh_len);
                                if (error < 0)
                                        goto out;
                        }
                }
        }

out:
        *key_lenp = key_len;
        return error;
}
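/*
 * Usage sketch (illustrative, not from the original file): a vport receive
 * path would typically extract the key before looking the packet up in the
 * flow table:
 *
 *      struct sw_flow_key key;
 *      int key_len;
 *
 *      if (flow_extract(skb, in_port, &key, &key_len))
 *              goto drop;
 *      flow = flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
 */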
u32 flow_hash(const struct sw_flow_key *key, int key_len)
{
        return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), hash_seed);
}
struct sw_flow *flow_tbl_lookup(struct flow_table *table,
                                struct sw_flow_key *key, int key_len)
{
        struct sw_flow *flow;
        struct hlist_node *n;
        struct hlist_head *head;
        u32 hash;

        hash = flow_hash(key, key_len);

        head = find_bucket(table, hash);
        hlist_for_each_entry_rcu(flow, n, head, hash_node) {

                if (flow->hash == hash &&
                    !memcmp(&flow->key, key, key_len)) {
                        return flow;
                }
        }
        return NULL;
}
void flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(table, flow->hash);
        hlist_add_head_rcu(&flow->hash_node, head);
        table->count++;
}

void flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
        if (!hlist_unhashed(&flow->hash_node)) {
                hlist_del_init_rcu(&flow->hash_node);
                table->count--;
                BUG_ON(table->count < 0);
        }
}
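/*
 * Usage sketch (an assumption, not from this file): insertion computes the
 * hash over the significant prefix of the key and must be serialized by the
 * caller, while lookups may run concurrently under rcu_read_lock():
 *
 *      flow->hash = flow_hash(&flow->key, key_len);
 *      flow_tbl_insert(table, flow);
 *      ...
 *      rcu_read_lock();
 *      flow = flow_tbl_lookup(table, &key, key_len);
 *      rcu_read_unlock();
 */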
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
const u32 ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
        [OVS_KEY_ATTR_PRIORITY] = 4,
        [OVS_KEY_ATTR_TUN_ID] = 8,
        [OVS_KEY_ATTR_IN_PORT] = 4,
        [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
        [OVS_KEY_ATTR_8021Q] = sizeof(struct ovs_key_8021q),
        [OVS_KEY_ATTR_ETHERTYPE] = 2,
        [OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
        [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
        [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
        [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
        [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
        [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
        [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
        [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
};
/**
 * flow_from_nlattrs - parses Netlink attributes into a flow key.
 * @swkey: receives the extracted flow key.
 * @key_lenp: number of bytes used in @swkey.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 *        sequence.
 *
 * This state machine accepts the following forms, with [] for optional
 * elements and | for alternatives:
 *
 * [priority] [tun_id] [in_port] ethernet [8021q] [ethertype \
 *              [IPv4 [TCP|UDP|ICMP] | IPv6 [TCP|UDP|ICMPv6 [ND]] | ARP]]
 *
 * except that IPv4 or IPv6 terminates the sequence if its @ipv4_frag or
 * @ipv6_frag member, respectively, equals %OVS_FRAG_TYPE_LATER.
 */
int flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
                      const struct nlattr *attr)
{
        const struct nlattr *nla;
        u16 prev_type;
        int rem;
        int key_len;

        memset(swkey, 0, sizeof(*swkey));
        swkey->phy.in_port = USHRT_MAX;
        swkey->eth.type = htons(ETH_P_802_2);
        key_len = SW_FLOW_KEY_OFFSET(eth);

        prev_type = OVS_KEY_ATTR_UNSPEC;
        nla_for_each_nested(nla, attr, rem) {
                const struct ovs_key_ethernet *eth_key;
                const struct ovs_key_8021q *q_key;
                const struct ovs_key_ipv4 *ipv4_key;
                const struct ovs_key_ipv6 *ipv6_key;
                const struct ovs_key_tcp *tcp_key;
                const struct ovs_key_udp *udp_key;
                const struct ovs_key_icmp *icmp_key;
                const struct ovs_key_icmpv6 *icmpv6_key;
                const struct ovs_key_arp *arp_key;
                const struct ovs_key_nd *nd_key;

                int type = nla_type(nla);

                if (type > OVS_KEY_ATTR_MAX ||
                    nla_len(nla) != ovs_key_lens[type])
                        goto invalid;

#define TRANSITION(PREV_TYPE, TYPE) (((PREV_TYPE) << 16) | (TYPE))
                switch (TRANSITION(prev_type, type)) {
                case TRANSITION(OVS_KEY_ATTR_UNSPEC, OVS_KEY_ATTR_PRIORITY):
                        swkey->phy.priority = nla_get_u32(nla);
                        break;

                case TRANSITION(OVS_KEY_ATTR_UNSPEC, OVS_KEY_ATTR_TUN_ID):
                case TRANSITION(OVS_KEY_ATTR_PRIORITY, OVS_KEY_ATTR_TUN_ID):
                        swkey->phy.tun_id = nla_get_be64(nla);
                        break;

                case TRANSITION(OVS_KEY_ATTR_UNSPEC, OVS_KEY_ATTR_IN_PORT):
                case TRANSITION(OVS_KEY_ATTR_PRIORITY, OVS_KEY_ATTR_IN_PORT):
                case TRANSITION(OVS_KEY_ATTR_TUN_ID, OVS_KEY_ATTR_IN_PORT):
                        if (nla_get_u32(nla) >= DP_MAX_PORTS)
                                goto invalid;
                        swkey->phy.in_port = nla_get_u32(nla);
                        break;

                case TRANSITION(OVS_KEY_ATTR_UNSPEC, OVS_KEY_ATTR_ETHERNET):
                case TRANSITION(OVS_KEY_ATTR_PRIORITY, OVS_KEY_ATTR_ETHERNET):
                case TRANSITION(OVS_KEY_ATTR_TUN_ID, OVS_KEY_ATTR_ETHERNET):
                case TRANSITION(OVS_KEY_ATTR_IN_PORT, OVS_KEY_ATTR_ETHERNET):
                        eth_key = nla_data(nla);
                        memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
                        memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);
                        break;

                case TRANSITION(OVS_KEY_ATTR_ETHERNET, OVS_KEY_ATTR_8021Q):
                        q_key = nla_data(nla);
                        /* Only standard 0x8100 VLANs currently supported. */
                        if (q_key->q_tpid != htons(ETH_P_8021Q))
                                goto invalid;
                        if (q_key->q_tci & htons(VLAN_TAG_PRESENT))
                                goto invalid;
                        swkey->eth.tci = q_key->q_tci | htons(VLAN_TAG_PRESENT);
                        break;

                case TRANSITION(OVS_KEY_ATTR_8021Q, OVS_KEY_ATTR_ETHERTYPE):
                case TRANSITION(OVS_KEY_ATTR_ETHERNET, OVS_KEY_ATTR_ETHERTYPE):
                        swkey->eth.type = nla_get_be16(nla);
                        if (ntohs(swkey->eth.type) < 1536)
                                goto invalid;
                        break;

                case TRANSITION(OVS_KEY_ATTR_ETHERTYPE, OVS_KEY_ATTR_IPV4):
                        key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
                        if (swkey->eth.type != htons(ETH_P_IP))
                                goto invalid;
                        ipv4_key = nla_data(nla);
                        if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
                                goto invalid;
                        swkey->ip.proto = ipv4_key->ipv4_proto;
                        swkey->ip.tos = ipv4_key->ipv4_tos;
                        swkey->ip.frag = ipv4_key->ipv4_frag;
                        swkey->ipv4.addr.src = ipv4_key->ipv4_src;
                        swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;
                        break;

                case TRANSITION(OVS_KEY_ATTR_ETHERTYPE, OVS_KEY_ATTR_IPV6):
                        key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
                        if (swkey->eth.type != htons(ETH_P_IPV6))
                                goto invalid;
                        ipv6_key = nla_data(nla);
                        if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
                                goto invalid;
                        swkey->ipv6.label = ipv6_key->ipv6_label;
                        swkey->ip.proto = ipv6_key->ipv6_proto;
                        swkey->ip.tos = ipv6_key->ipv6_tos;
                        swkey->ip.frag = ipv6_key->ipv6_frag;
                        memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
                               sizeof(swkey->ipv6.addr.src));
                        memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
                               sizeof(swkey->ipv6.addr.dst));
                        break;

                case TRANSITION(OVS_KEY_ATTR_IPV4, OVS_KEY_ATTR_TCP):
                        key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                        if (swkey->ip.proto != IPPROTO_TCP)
                                goto invalid;
                        tcp_key = nla_data(nla);
                        swkey->ipv4.tp.src = tcp_key->tcp_src;
                        swkey->ipv4.tp.dst = tcp_key->tcp_dst;
                        break;

                case TRANSITION(OVS_KEY_ATTR_IPV6, OVS_KEY_ATTR_TCP):
                        key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                        if (swkey->ip.proto != IPPROTO_TCP)
                                goto invalid;
                        tcp_key = nla_data(nla);
                        swkey->ipv6.tp.src = tcp_key->tcp_src;
                        swkey->ipv6.tp.dst = tcp_key->tcp_dst;
                        break;

                case TRANSITION(OVS_KEY_ATTR_IPV4, OVS_KEY_ATTR_UDP):
                        key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                        if (swkey->ip.proto != IPPROTO_UDP)
                                goto invalid;
                        udp_key = nla_data(nla);
                        swkey->ipv4.tp.src = udp_key->udp_src;
                        swkey->ipv4.tp.dst = udp_key->udp_dst;
                        break;

                case TRANSITION(OVS_KEY_ATTR_IPV6, OVS_KEY_ATTR_UDP):
                        key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                        if (swkey->ip.proto != IPPROTO_UDP)
                                goto invalid;
                        udp_key = nla_data(nla);
                        swkey->ipv6.tp.src = udp_key->udp_src;
                        swkey->ipv6.tp.dst = udp_key->udp_dst;
                        break;

                case TRANSITION(OVS_KEY_ATTR_IPV4, OVS_KEY_ATTR_ICMP):
                        key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                        if (swkey->ip.proto != IPPROTO_ICMP)
                                goto invalid;
                        icmp_key = nla_data(nla);
                        swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
                        swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);
                        break;

                case TRANSITION(OVS_KEY_ATTR_IPV6, OVS_KEY_ATTR_ICMPV6):
                        key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                        if (swkey->ip.proto != IPPROTO_ICMPV6)
                                goto invalid;
                        icmpv6_key = nla_data(nla);
                        swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
                        swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);
                        break;

                case TRANSITION(OVS_KEY_ATTR_ETHERTYPE, OVS_KEY_ATTR_ARP):
                        key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
                        if (swkey->eth.type != htons(ETH_P_ARP))
                                goto invalid;
                        arp_key = nla_data(nla);
                        swkey->ipv4.addr.src = arp_key->arp_sip;
                        swkey->ipv4.addr.dst = arp_key->arp_tip;
                        if (arp_key->arp_op & htons(0xff00))
                                goto invalid;
                        swkey->ip.proto = ntohs(arp_key->arp_op);
                        memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
                        memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);
                        break;

                case TRANSITION(OVS_KEY_ATTR_ICMPV6, OVS_KEY_ATTR_ND):
                        key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
                        if (swkey->ipv6.tp.src != htons(NDISC_NEIGHBOUR_SOLICITATION)
                            && swkey->ipv6.tp.src != htons(NDISC_NEIGHBOUR_ADVERTISEMENT))
                                goto invalid;
                        nd_key = nla_data(nla);
                        memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
                               sizeof(swkey->ipv6.nd.target));
                        memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
                        memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
                        break;

                default:
                        goto invalid;
                }

                prev_type = type;
        }
        if (rem)
                goto invalid;

        switch (prev_type) {
        case OVS_KEY_ATTR_UNSPEC:
                goto invalid;

        case OVS_KEY_ATTR_PRIORITY:
        case OVS_KEY_ATTR_TUN_ID:
        case OVS_KEY_ATTR_IN_PORT:
                goto invalid;

        case OVS_KEY_ATTR_ETHERNET:
        case OVS_KEY_ATTR_8021Q:
                goto ok;

        case OVS_KEY_ATTR_ETHERTYPE:
                if (swkey->eth.type == htons(ETH_P_IP) ||
                    swkey->eth.type == htons(ETH_P_IPV6) ||
                    swkey->eth.type == htons(ETH_P_ARP))
                        goto invalid;
                goto ok;

        case OVS_KEY_ATTR_IPV4:
                if (swkey->ip.frag == OVS_FRAG_TYPE_LATER)
                        goto ok;
                if (swkey->ip.proto == IPPROTO_TCP ||
                    swkey->ip.proto == IPPROTO_UDP ||
                    swkey->ip.proto == IPPROTO_ICMP)
                        goto invalid;
                goto ok;

        case OVS_KEY_ATTR_IPV6:
                if (swkey->ip.frag == OVS_FRAG_TYPE_LATER)
                        goto ok;
                if (swkey->ip.proto == IPPROTO_TCP ||
                    swkey->ip.proto == IPPROTO_UDP ||
                    swkey->ip.proto == IPPROTO_ICMPV6)
                        goto invalid;
                goto ok;

        case OVS_KEY_ATTR_ICMPV6:
                if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
                    swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT) ||
                    swkey->ip.frag == OVS_FRAG_TYPE_LATER)
                        goto invalid;
                goto ok;

        case OVS_KEY_ATTR_TCP:
        case OVS_KEY_ATTR_UDP:
        case OVS_KEY_ATTR_ICMP:
        case OVS_KEY_ATTR_ND:
                if (swkey->ip.frag == OVS_FRAG_TYPE_LATER)
                        goto invalid;
                goto ok;

        case OVS_KEY_ATTR_ARP:
                goto ok;

        default:
                goto invalid;
        }

invalid:
        return -EINVAL;

ok:
        *key_lenp = key_len;
        return 0;
}
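/*
 * Usage sketch (illustrative, not from the original file): a Netlink flow
 * handler would convert the userspace-supplied key attribute into a kernel
 * key with:
 *
 *      struct sw_flow_key key;
 *      int key_len;
 *      int err = flow_from_nlattrs(&key, &key_len, key_attr);
 *      if (err)
 *              return err;
 *
 * The attribute order is significant: out-of-order or repeated attributes
 * take the TRANSITION() state machine to an unhandled case and are rejected.
 */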
/**
 * flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
 * @priority: receives the extracted skb priority.
 * @in_port: receives the extracted input port.
 * @tun_id: receives the extracted tunnel ID.
 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 *       sequence.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
 * get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */
int flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id,
                               const struct nlattr *attr)
{
        const struct nlattr *nla;
        u16 prev_type;
        int rem;

        *in_port = USHRT_MAX;
        *tun_id = 0;
        *priority = 0;

        prev_type = OVS_KEY_ATTR_UNSPEC;
        nla_for_each_nested(nla, attr, rem) {
                int type = nla_type(nla);

                if (type > OVS_KEY_ATTR_MAX || nla_len(nla) != ovs_key_lens[type])
                        return -EINVAL;

                switch (TRANSITION(prev_type, type)) {
                case TRANSITION(OVS_KEY_ATTR_UNSPEC, OVS_KEY_ATTR_PRIORITY):
                        *priority = nla_get_u32(nla);
                        break;

                case TRANSITION(OVS_KEY_ATTR_UNSPEC, OVS_KEY_ATTR_TUN_ID):
                case TRANSITION(OVS_KEY_ATTR_PRIORITY, OVS_KEY_ATTR_TUN_ID):
                        *tun_id = nla_get_be64(nla);
                        break;

                case TRANSITION(OVS_KEY_ATTR_UNSPEC, OVS_KEY_ATTR_IN_PORT):
                case TRANSITION(OVS_KEY_ATTR_PRIORITY, OVS_KEY_ATTR_IN_PORT):
                case TRANSITION(OVS_KEY_ATTR_TUN_ID, OVS_KEY_ATTR_IN_PORT):
                        if (nla_get_u32(nla) >= DP_MAX_PORTS)
                                return -EINVAL;
                        *in_port = nla_get_u32(nla);
                        break;

                default:
                        return 0;
                }

                prev_type = type;
        }
        if (rem)
                return -EINVAL;

        return 0;
}
int flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
{
        struct ovs_key_ethernet *eth_key;
        struct nlattr *nla;

        if (swkey->phy.priority)
                NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority);

        if (swkey->phy.tun_id != cpu_to_be64(0))
                NLA_PUT_BE64(skb, OVS_KEY_ATTR_TUN_ID, swkey->phy.tun_id);

        if (swkey->phy.in_port != USHRT_MAX)
                NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port);

        nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
        if (!nla)
                goto nla_put_failure;
        eth_key = nla_data(nla);
        memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
        memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);

        if (swkey->eth.tci != htons(0)) {
                struct ovs_key_8021q q_key;

                q_key.q_tpid = htons(ETH_P_8021Q);
                q_key.q_tci = swkey->eth.tci & ~htons(VLAN_TAG_PRESENT);
                NLA_PUT(skb, OVS_KEY_ATTR_8021Q, sizeof(q_key), &q_key);
        }

        if (swkey->eth.type == htons(ETH_P_802_2))
                return 0;

        NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type);

        if (swkey->eth.type == htons(ETH_P_IP)) {
                struct ovs_key_ipv4 *ipv4_key;

                nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
                if (!nla)
                        goto nla_put_failure;
                ipv4_key = nla_data(nla);
                memset(ipv4_key, 0, sizeof(struct ovs_key_ipv4));
                ipv4_key->ipv4_src = swkey->ipv4.addr.src;
                ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
                ipv4_key->ipv4_proto = swkey->ip.proto;
                ipv4_key->ipv4_tos = swkey->ip.tos;
                ipv4_key->ipv4_frag = swkey->ip.frag;
        } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
                struct ovs_key_ipv6 *ipv6_key;

                nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
                if (!nla)
                        goto nla_put_failure;
                ipv6_key = nla_data(nla);
                memset(ipv6_key, 0, sizeof(struct ovs_key_ipv6));
                memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
                       sizeof(ipv6_key->ipv6_src));
                memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
                       sizeof(ipv6_key->ipv6_dst));
                ipv6_key->ipv6_label = swkey->ipv6.label;
                ipv6_key->ipv6_proto = swkey->ip.proto;
                ipv6_key->ipv6_tos = swkey->ip.tos;
                ipv6_key->ipv6_frag = swkey->ip.frag;
        } else if (swkey->eth.type == htons(ETH_P_ARP)) {
                struct ovs_key_arp *arp_key;

                nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
                if (!nla)
                        goto nla_put_failure;
                arp_key = nla_data(nla);
                memset(arp_key, 0, sizeof(struct ovs_key_arp));
                arp_key->arp_sip = swkey->ipv4.addr.src;
                arp_key->arp_tip = swkey->ipv4.addr.dst;
                arp_key->arp_op = htons(swkey->ip.proto);
                memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
                memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
        }

        if ((swkey->eth.type == htons(ETH_P_IP) ||
             swkey->eth.type == htons(ETH_P_IPV6)) &&
             swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

                if (swkey->ip.proto == IPPROTO_TCP) {
                        struct ovs_key_tcp *tcp_key;

                        nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
                        if (!nla)
                                goto nla_put_failure;
                        tcp_key = nla_data(nla);
                        if (swkey->eth.type == htons(ETH_P_IP)) {
                                tcp_key->tcp_src = swkey->ipv4.tp.src;
                                tcp_key->tcp_dst = swkey->ipv4.tp.dst;
                        } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
                                tcp_key->tcp_src = swkey->ipv6.tp.src;
                                tcp_key->tcp_dst = swkey->ipv6.tp.dst;
                        }
                } else if (swkey->ip.proto == IPPROTO_UDP) {
                        struct ovs_key_udp *udp_key;

                        nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
                        if (!nla)
                                goto nla_put_failure;
                        udp_key = nla_data(nla);
                        if (swkey->eth.type == htons(ETH_P_IP)) {
                                udp_key->udp_src = swkey->ipv4.tp.src;
                                udp_key->udp_dst = swkey->ipv4.tp.dst;
                        } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
                                udp_key->udp_src = swkey->ipv6.tp.src;
                                udp_key->udp_dst = swkey->ipv6.tp.dst;
                        }
                } else if (swkey->eth.type == htons(ETH_P_IP) &&
                           swkey->ip.proto == IPPROTO_ICMP) {
                        struct ovs_key_icmp *icmp_key;

                        nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
                        if (!nla)
                                goto nla_put_failure;
                        icmp_key = nla_data(nla);
                        icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
                        icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
                } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
                           swkey->ip.proto == IPPROTO_ICMPV6) {
                        struct ovs_key_icmpv6 *icmpv6_key;

                        nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
                                          sizeof(*icmpv6_key));
                        if (!nla)
                                goto nla_put_failure;
                        icmpv6_key = nla_data(nla);
                        icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
                        icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);

                        if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
                            icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
                                struct ovs_key_nd *nd_key;

                                nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
                                if (!nla)
                                        goto nla_put_failure;
                                nd_key = nla_data(nla);
                                memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
                                       sizeof(nd_key->nd_target));
                                memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
                                memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
                        }
                }
        }

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
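/*
 * Usage sketch (an assumption): flow_to_nlattrs() is the inverse of
 * flow_from_nlattrs() and would typically be called inside a nested key
 * attribute when dumping a flow to userspace:
 *
 *      nla = nla_nest_start(skb, key_attr_type);
 *      if (!nla || flow_to_nlattrs(&flow->key, skb))
 *              goto error;
 *      nla_nest_end(skb, nla);
 */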
/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int flow_init(void)
{
        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
                                       0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        get_random_bytes(&hash_seed, sizeof(hash_seed));

        return 0;
}
/* Uninitializes the flow module. */
void flow_exit(void)
{
        kmem_cache_destroy(flow_cache);
}