X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=datapath%2Fflow.c;h=3802926408e009819916745d237d77a0f19bec9d;hb=85c9de194b9f432f7b8a66bda980cbab52a55b72;hp=7a1362489e49854b4074910750d7dd3f2531b591;hpb=8ddc056dd1e2c150c3bf8bb16811815736beb554;p=sliver-openvswitch.git diff --git a/datapath/flow.c b/datapath/flow.c index 7a1362489..380292640 100644 --- a/datapath/flow.c +++ b/datapath/flow.c @@ -1,9 +1,19 @@ /* - * Distributed under the terms of the GNU GPL version 2. - * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks. + * Copyright (c) 2007-2011 Nicira, Inc. * - * Significant portions of this file may be copied from parts of the Linux - * kernel, by Linus Torvalds and others. + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA */ #include "flow.h" @@ -22,7 +32,6 @@ #include #include #include -#include #include #include #include @@ -37,7 +46,6 @@ #include "vlan.h" static struct kmem_cache *flow_cache; -static unsigned int hash_seed __read_mostly; static int check_header(struct sk_buff *skb, int len) { @@ -101,7 +109,7 @@ static bool icmphdr_ok(struct sk_buff *skb) sizeof(struct icmphdr)); } -u64 flow_used_time(unsigned long flow_jiffies) +u64 ovs_flow_used_time(unsigned long flow_jiffies) { struct timespec cur_ts; u64 cur_ms, idle_ms; @@ -118,66 +126,6 @@ u64 flow_used_time(unsigned long flow_jiffies) (offsetof(struct sw_flow_key, field) + \ FIELD_SIZEOF(struct sw_flow_key, field)) -/** - * skip_exthdr - skip any IPv6 extension headers - * @skb: skbuff to parse - * @start: offset of first extension header - * @nexthdrp: Initially, points to the type of the extension header at @start. - * This function updates it to point to the extension header at the final - * offset. - * @frag: Points to the @frag member in a &struct sw_flow_key. This - * function sets an appropriate %OVS_FRAG_TYPE_* value. - * - * This is based on ipv6_skip_exthdr() but adds the updates to *@frag. - * - * When there is more than one fragment header, this version reports whether - * the final fragment header that it examines is a first fragment. - * - * Returns the final payload offset, or -1 on error. 
- */ -static int skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp, - u8 *frag) -{ - u8 nexthdr = *nexthdrp; - - while (ipv6_ext_hdr(nexthdr)) { - struct ipv6_opt_hdr _hdr, *hp; - int hdrlen; - - if (nexthdr == NEXTHDR_NONE) - return -1; - hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); - if (hp == NULL) - return -1; - if (nexthdr == NEXTHDR_FRAGMENT) { - __be16 _frag_off, *fp; - fp = skb_header_pointer(skb, - start+offsetof(struct frag_hdr, - frag_off), - sizeof(_frag_off), - &_frag_off); - if (fp == NULL) - return -1; - - if (ntohs(*fp) & ~0x7) { - *frag = OVS_FRAG_TYPE_LATER; - break; - } - *frag = OVS_FRAG_TYPE_FIRST; - hdrlen = 8; - } else if (nexthdr == NEXTHDR_AUTH) - hdrlen = (hp->hdrlen+2)<<2; - else - hdrlen = ipv6_optlen(hp); - - nexthdr = hp->nexthdr; - start += hdrlen; - } - - *nexthdrp = nexthdr; - return start; -} - static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key, int *key_lenp) { @@ -186,6 +134,7 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key, int payload_ofs; struct ipv6hdr *nh; uint8_t nexthdr; + __be16 frag_off; int err; *key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label); @@ -202,13 +151,20 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key, key->ip.tos = ipv6_get_dsfield(nh); key->ip.ttl = nh->hop_limit; key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL); - ipv6_addr_copy(&key->ipv6.addr.src, &nh->saddr); - ipv6_addr_copy(&key->ipv6.addr.dst, &nh->daddr); + key->ipv6.addr.src = nh->saddr; + key->ipv6.addr.dst = nh->daddr; - payload_ofs = skip_exthdr(skb, payload_ofs, &nexthdr, &key->ip.frag); + payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off); if (unlikely(payload_ofs < 0)) return -EINVAL; + if (frag_off) { + if (frag_off & htons(~0x7)) + key->ip.frag = OVS_FRAG_TYPE_LATER; + else + key->ip.frag = OVS_FRAG_TYPE_FIRST; + } + nh_len = payload_ofs - nh_ofs; skb_set_transport_header(skb, nh_ofs + nh_len); key->ip.proto = nexthdr; @@ -224,12 +180,14 @@ static bool icmp6hdr_ok(struct sk_buff *skb) #define TCP_FLAGS_OFFSET 13 #define TCP_FLAG_MASK 0x3f -void flow_used(struct sw_flow *flow, struct sk_buff *skb) +void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb) { u8 tcp_flags = 0; - if (flow->key.eth.type == htons(ETH_P_IP) && - flow->key.ip.proto == IPPROTO_TCP) { + if ((flow->key.eth.type == htons(ETH_P_IP) || + flow->key.eth.type == htons(ETH_P_IPV6)) && + flow->key.ip.proto == IPPROTO_TCP && + likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) { u8 *tcp = (u8 *)tcp_hdr(skb); tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK; } @@ -242,27 +200,22 @@ void flow_used(struct sw_flow *flow, struct sk_buff *skb) spin_unlock(&flow->lock); } -struct sw_flow_actions *flow_actions_alloc(const struct nlattr *actions) +struct sw_flow_actions *ovs_flow_actions_alloc(int size) { - int actions_len = nla_len(actions); struct sw_flow_actions *sfa; - /* At least DP_MAX_PORTS actions are required to be able to flood a - * packet to every port. Factor of 2 allows for setting VLAN tags, - * etc. 
*/ - if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4)) + if (size > MAX_ACTIONS_BUFSIZE) return ERR_PTR(-EINVAL); - sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL); + sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); if (!sfa) return ERR_PTR(-ENOMEM); - sfa->actions_len = actions_len; - memcpy(sfa->actions, nla_data(actions), actions_len); + sfa->actions_len = 0; return sfa; } -struct sw_flow *flow_alloc(void) +struct sw_flow *ovs_flow_alloc(void) { struct sw_flow *flow; @@ -271,22 +224,21 @@ struct sw_flow *flow_alloc(void) return ERR_PTR(-ENOMEM); spin_lock_init(&flow->lock); - atomic_set(&flow->refcnt, 1); flow->sf_acts = NULL; - flow->dead = false; return flow; } -static struct hlist_head __rcu *find_bucket(struct flow_table * table, u32 hash) +static struct hlist_head *find_bucket(struct flow_table *table, u32 hash) { + hash = jhash_1word(hash, table->hash_seed); return flex_array_get(table->buckets, (hash & (table->n_buckets - 1))); } -static struct flex_array __rcu *alloc_buckets(unsigned int n_buckets) +static struct flex_array *alloc_buckets(unsigned int n_buckets) { - struct flex_array __rcu *buckets; + struct flex_array *buckets; int i, err; buckets = flex_array_alloc(sizeof(struct hlist_head *), @@ -312,7 +264,7 @@ static void free_buckets(struct flex_array *buckets) flex_array_free(buckets); } -struct flow_table *flow_tbl_alloc(int new_size) +struct flow_table *ovs_flow_tbl_alloc(int new_size) { struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL); @@ -327,34 +279,36 @@ struct flow_table *flow_tbl_alloc(int new_size) } table->n_buckets = new_size; table->count = 0; + table->node_ver = 0; + table->keep_flows = false; + get_random_bytes(&table->hash_seed, sizeof(u32)); return table; } -static void flow_free(struct sw_flow *flow) -{ - flow->dead = true; - flow_put(flow); -} - -void flow_tbl_destroy(struct flow_table *table) +void ovs_flow_tbl_destroy(struct flow_table *table) { int i; if (!table) return; + if (table->keep_flows) + goto skip_flows; + for (i = 0; i < table->n_buckets; i++) { struct sw_flow *flow; struct hlist_head *head = flex_array_get(table->buckets, i); struct hlist_node *node, *n; + int ver = table->node_ver; - hlist_for_each_entry_safe(flow, node, n, head, hash_node) { - hlist_del_init_rcu(&flow->hash_node); - flow_free(flow); + hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) { + hlist_del_rcu(&flow->hash_node[ver]); + ovs_flow_free(flow); } } +skip_flows: free_buckets(table->buckets); kfree(table); } @@ -363,10 +317,10 @@ static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) { struct flow_table *table = container_of(rcu, struct flow_table, rcu); - flow_tbl_destroy(table); + ovs_flow_tbl_destroy(table); } -void flow_tbl_deferred_destroy(struct flow_table *table) +void ovs_flow_tbl_deferred_destroy(struct flow_table *table) { if (!table) return; @@ -374,17 +328,19 @@ void flow_tbl_deferred_destroy(struct flow_table *table) call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb); } -struct sw_flow *flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last) +struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last) { struct sw_flow *flow; struct hlist_head *head; struct hlist_node *n; + int ver; int i; + ver = table->node_ver; while (*bucket < table->n_buckets) { i = 0; head = flex_array_get(table->buckets, *bucket); - hlist_for_each_entry_rcu(flow, n, head, hash_node) { + hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) { if (i < *last) { i++; continue; @@ -399,65 +355,84 @@ struct sw_flow 
*flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last) return NULL; } -struct flow_table *flow_tbl_expand(struct flow_table *table) +static void __flow_tbl_insert(struct flow_table *table, struct sw_flow *flow) { - struct flow_table *new_table; - int n_buckets = table->n_buckets * 2; + struct hlist_head *head; + head = find_bucket(table, flow->hash); + hlist_add_head_rcu(&flow->hash_node[table->node_ver], head); + table->count++; +} + +static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new) +{ + int old_ver; int i; - new_table = flow_tbl_alloc(n_buckets); - if (!new_table) - return ERR_PTR(-ENOMEM); + old_ver = old->node_ver; + new->node_ver = !old_ver; - for (i = 0; i < table->n_buckets; i++) { + /* Insert in new table. */ + for (i = 0; i < old->n_buckets; i++) { struct sw_flow *flow; struct hlist_head *head; - struct hlist_node *n, *pos; + struct hlist_node *n; - head = flex_array_get(table->buckets, i); + head = flex_array_get(old->buckets, i); - hlist_for_each_entry_safe(flow, n, pos, head, hash_node) { - hlist_del_init_rcu(&flow->hash_node); - flow_tbl_insert(new_table, flow); - } + hlist_for_each_entry(flow, n, head, hash_node[old_ver]) + __flow_tbl_insert(new, flow); } - - return new_table; + old->keep_flows = true; } -/* RCU callback used by flow_deferred_free. */ -static void rcu_free_flow_callback(struct rcu_head *rcu) +static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets) { - struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu); + struct flow_table *new_table; + + new_table = ovs_flow_tbl_alloc(n_buckets); + if (!new_table) + return ERR_PTR(-ENOMEM); + + flow_table_copy_flows(table, new_table); - flow->dead = true; - flow_put(flow); + return new_table; } -/* Schedules 'flow' to be freed after the next RCU grace period. - * The caller must hold rcu_read_lock for this to be sensible. */ -void flow_deferred_free(struct sw_flow *flow) +struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table) { - call_rcu(&flow->rcu, rcu_free_flow_callback); + return __flow_tbl_rehash(table, table->n_buckets); } -void flow_hold(struct sw_flow *flow) +struct flow_table *ovs_flow_tbl_expand(struct flow_table *table) { - atomic_inc(&flow->refcnt); + return __flow_tbl_rehash(table, table->n_buckets * 2); } -void flow_put(struct sw_flow *flow) +void ovs_flow_free(struct sw_flow *flow) { if (unlikely(!flow)) return; - if (atomic_dec_and_test(&flow->refcnt)) { - kfree((struct sf_flow_acts __force *)flow->sf_acts); - kmem_cache_free(flow_cache, flow); - } + kfree((struct sf_flow_acts __force *)flow->sf_acts); + kmem_cache_free(flow_cache, flow); } -/* RCU callback used by flow_deferred_free_acts. */ +/* RCU callback used by ovs_flow_deferred_free. */ +static void rcu_free_flow_callback(struct rcu_head *rcu) +{ + struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu); + + ovs_flow_free(flow); +} + +/* Schedules 'flow' to be freed after the next RCU grace period. + * The caller must hold rcu_read_lock for this to be sensible. */ +void ovs_flow_deferred_free(struct sw_flow *flow) +{ + call_rcu(&flow->rcu, rcu_free_flow_callback); +} + +/* RCU callback used by ovs_flow_deferred_free_acts. */ static void rcu_free_acts_callback(struct rcu_head *rcu) { struct sw_flow_actions *sf_acts = container_of(rcu, @@ -467,7 +442,7 @@ static void rcu_free_acts_callback(struct rcu_head *rcu) /* Schedules 'sf_acts' to be freed after the next RCU grace period. * The caller must hold rcu_read_lock for this to be sensible. 
*/ -void flow_deferred_free_acts(struct sw_flow_actions *sf_acts) +void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts) { call_rcu(&sf_acts->rcu, rcu_free_acts_callback); } @@ -525,7 +500,11 @@ static __be16 parse_ethertype(struct sk_buff *skb) return htons(ETH_P_802_2); __skb_pull(skb, sizeof(struct llc_snap_hdr)); - return llc->ethertype; + + if (ntohs(llc->ethertype) >= 1536) + return llc->ethertype; + + return htons(ETH_P_802_2); } static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, @@ -562,7 +541,7 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, } nd = (struct nd_msg *)skb_transport_header(skb); - ipv6_addr_copy(&key->ipv6.nd.target, &nd->target); + key->ipv6.nd.target = nd->target; key_len = SW_FLOW_KEY_OFFSET(ipv6.nd); icmp_len -= sizeof(*nd); @@ -611,7 +590,7 @@ out: } /** - * flow_extract - extracts a flow key from an Ethernet frame. + * ovs_flow_extract - extracts a flow key from an Ethernet frame. * @skb: sk_buff that contains the frame, with skb->data pointing to the * Ethernet header * @in_port: port number on which @skb was received. @@ -634,7 +613,7 @@ out: * of a correct length, otherwise the same as skb->network_header. * For other key->dl_type values it is left untouched. */ -int flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, +int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, int *key_lenp) { int error = 0; @@ -644,8 +623,10 @@ int flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, memset(key, 0, sizeof(*key)); key->phy.priority = skb->priority; - key->phy.tun_id = OVS_CB(skb)->tun_id; + if (OVS_CB(skb)->tun_key) + memcpy(&key->tun_key, OVS_CB(skb)->tun_key, sizeof(key->tun_key)); key->phy.in_port = in_port; + key->phy.skb_mark = skb_get_mark(skb); skb_reset_mac_header(skb); @@ -731,7 +712,8 @@ int flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, } } - } else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) { + } else if ((key->eth.type == htons(ETH_P_ARP) || + key->eth.type == htons(ETH_P_RARP)) && arphdr_ok(skb)) { struct arp_eth_header *arp; arp = (struct arp_eth_header *)skb_network_header(skb); @@ -744,15 +726,11 @@ int flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, /* We only match on the lower 8 bits of the opcode. 
*/ if (ntohs(arp->ar_op) <= 0xff) key->ip.proto = ntohs(arp->ar_op); - - if (key->ip.proto == ARPOP_REQUEST - || key->ip.proto == ARPOP_REPLY) { - memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src)); - memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst)); - memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN); - memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN); - key_len = SW_FLOW_KEY_OFFSET(ipv4.arp); - } + memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src)); + memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst)); + memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN); + memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN); + key_len = SW_FLOW_KEY_OFFSET(ipv4.arp); } } else if (key->eth.type == htons(ETH_P_IPV6)) { int nh_len; /* IPv6 Header + Extensions */ @@ -801,55 +779,66 @@ out: return error; } -u32 flow_hash(const struct sw_flow_key *key, int key_len) +static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, int key_len) { - return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), hash_seed); + return jhash2((u32 *)((u8 *)key + key_start), + DIV_ROUND_UP(key_len - key_start, sizeof(u32)), 0); } -struct sw_flow *flow_tbl_lookup(struct flow_table *table, +static int flow_key_start(struct sw_flow_key *key) +{ + if (key->tun_key.ipv4_dst) + return 0; + else + return offsetof(struct sw_flow_key, phy); +} + +struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table, struct sw_flow_key *key, int key_len) { struct sw_flow *flow; struct hlist_node *n; struct hlist_head *head; + u8 *_key; + int key_start; u32 hash; - hash = flow_hash(key, key_len); + key_start = flow_key_start(key); + hash = ovs_flow_hash(key, key_start, key_len); + _key = (u8 *) key + key_start; head = find_bucket(table, hash); - hlist_for_each_entry_rcu(flow, n, head, hash_node) { + hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) { if (flow->hash == hash && - !memcmp(&flow->key, key, key_len)) { + !memcmp((u8 *)&flow->key + key_start, _key, key_len - key_start)) { return flow; } } return NULL; } -void flow_tbl_insert(struct flow_table *table, struct sw_flow *flow) +void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, + struct sw_flow_key *key, int key_len) { - struct hlist_head *head; - - head = find_bucket(table, flow->hash); - hlist_add_head_rcu(&flow->hash_node, head); - table->count++; + flow->hash = ovs_flow_hash(key, flow_key_start(key), key_len); + memcpy(&flow->key, key, sizeof(flow->key)); + __flow_tbl_insert(table, flow); } -void flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) +void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) { - if (!hlist_unhashed(&flow->hash_node)) { - hlist_del_init_rcu(&flow->hash_node); - table->count--; - BUG_ON(table->count < 0); - } + hlist_del_rcu(&flow->hash_node[table->node_ver]); + table->count--; + BUG_ON(table->count < 0); } /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. 
*/ -const u32 ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { - [OVS_KEY_ATTR_ENCAP] = 0, +const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { + [OVS_KEY_ATTR_ENCAP] = -1, [OVS_KEY_ATTR_PRIORITY] = sizeof(u32), [OVS_KEY_ATTR_IN_PORT] = sizeof(u32), + [OVS_KEY_ATTR_SKB_MARK] = sizeof(u32), [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet), [OVS_KEY_ATTR_VLAN] = sizeof(__be16), [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16), @@ -861,9 +850,7 @@ const u32 ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6), [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp), [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd), - - /* Not upstream. */ - [OVS_KEY_ATTR_TUN_ID] = sizeof(__be64), + [OVS_KEY_ATTR_TUNNEL] = -1, }; static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len, @@ -982,10 +969,15 @@ static int parse_flow_nlattrs(const struct nlattr *attr, attrs = 0; nla_for_each_nested(nla, attr, rem) { u16 type = nla_type(nla); + int expected_len; + + if (type > OVS_KEY_ATTR_MAX || attrs & (1ULL << type)) + return -EINVAL; - if (type > OVS_KEY_ATTR_MAX || attrs & (1ULL << type) || - nla_len(nla) != ovs_key_lens[type]) + expected_len = ovs_key_lens[type]; + if (nla_len(nla) != expected_len && expected_len != -1) return -EINVAL; + attrs |= 1ULL << type; a[type] = nla; } @@ -996,14 +988,113 @@ static int parse_flow_nlattrs(const struct nlattr *attr, return 0; } +int ipv4_tun_from_nlattr(const struct nlattr *attr, + struct ovs_key_ipv4_tunnel *tun_key) +{ + struct nlattr *a; + int rem; + bool ttl = false; + + memset(tun_key, 0, sizeof(*tun_key)); + + nla_for_each_nested(a, attr, rem) { + int type = nla_type(a); + static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = { + [OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64), + [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32), + [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32), + [OVS_TUNNEL_KEY_ATTR_TOS] = 1, + [OVS_TUNNEL_KEY_ATTR_TTL] = 1, + [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0, + [OVS_TUNNEL_KEY_ATTR_CSUM] = 0, + }; + + if (type > OVS_TUNNEL_KEY_ATTR_MAX || + ovs_tunnel_key_lens[type] != nla_len(a)) + return -EINVAL; + + switch (type) { + case OVS_TUNNEL_KEY_ATTR_ID: + tun_key->tun_id = nla_get_be64(a); + tun_key->tun_flags |= OVS_TNL_F_KEY; + break; + case OVS_TUNNEL_KEY_ATTR_IPV4_SRC: + tun_key->ipv4_src = nla_get_be32(a); + break; + case OVS_TUNNEL_KEY_ATTR_IPV4_DST: + tun_key->ipv4_dst = nla_get_be32(a); + break; + case OVS_TUNNEL_KEY_ATTR_TOS: + tun_key->ipv4_tos = nla_get_u8(a); + break; + case OVS_TUNNEL_KEY_ATTR_TTL: + tun_key->ipv4_ttl = nla_get_u8(a); + ttl = true; + break; + case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT: + tun_key->tun_flags |= OVS_TNL_F_DONT_FRAGMENT; + break; + case OVS_TUNNEL_KEY_ATTR_CSUM: + tun_key->tun_flags |= OVS_TNL_F_CSUM; + break; + default: + return -EINVAL; + + } + } + if (rem > 0) + return -EINVAL; + + if (!tun_key->ipv4_dst) + return -EINVAL; + + if (!ttl) + return -EINVAL; + + return 0; +} + +int ipv4_tun_to_nlattr(struct sk_buff *skb, + const struct ovs_key_ipv4_tunnel *tun_key) +{ + struct nlattr *nla; + + nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL); + if (!nla) + return -EMSGSIZE; + + if (tun_key->tun_flags & OVS_TNL_F_KEY && + nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id)) + return -EMSGSIZE; + if (tun_key->ipv4_src && + nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ipv4_src)) + return -EMSGSIZE; + if (nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ipv4_dst)) + return -EMSGSIZE; + if (tun_key->ipv4_tos && + nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, 
tun_key->ipv4_tos)) + return -EMSGSIZE; + if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ipv4_ttl)) + return -EMSGSIZE; + if ((tun_key->tun_flags & OVS_TNL_F_DONT_FRAGMENT) && + nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT)) + return -EMSGSIZE; + if ((tun_key->tun_flags & OVS_TNL_F_CSUM) && + nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM)) + return -EMSGSIZE; + + nla_nest_end(skb, nla); + return 0; +} + /** - * flow_from_nlattrs - parses Netlink attributes into a flow key. + * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key. * @swkey: receives the extracted flow key. * @key_lenp: number of bytes used in @swkey. * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute * sequence. */ -int flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, +int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, const struct nlattr *attr) { const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; @@ -1031,12 +1122,24 @@ int flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, swkey->phy.in_port = in_port; attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT); } else { - swkey->phy.in_port = USHRT_MAX; + swkey->phy.in_port = DP_MAX_PORTS; } + if (attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) { + uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && !defined(CONFIG_NETFILTER) + if (mark != 0) + return -EINVAL; +#endif + swkey->phy.skb_mark = mark; + attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK); + } + + if (attrs & (1ULL << OVS_KEY_ATTR_TUNNEL)) { + err = ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], &swkey->tun_key); + if (err) + return err; - if (attrs & (1ULL << OVS_KEY_ATTR_TUN_ID)) { - swkey->phy.tun_id = nla_get_be64(a[OVS_KEY_ATTR_TUN_ID]); - attrs &= ~(1ULL << OVS_KEY_ATTR_TUN_ID); + attrs &= ~(1ULL << OVS_KEY_ATTR_TUNNEL); } /* Data attributes. */ @@ -1137,7 +1240,8 @@ int flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, if (err) return err; } - } else if (swkey->eth.type == htons(ETH_P_ARP)) { + } else if (swkey->eth.type == htons(ETH_P_ARP) || + swkey->eth.type == htons(ETH_P_RARP)) { const struct ovs_key_arp *arp_key; if (!(attrs & (1 << OVS_KEY_ATTR_ARP))) @@ -1163,7 +1267,7 @@ int flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, } /** - * flow_metadata_from_nlattrs - parses Netlink attributes into a flow key. + * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key. * @in_port: receives the extracted input port. * @tun_id: receives the extracted tunnel ID. * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute @@ -1174,58 +1278,83 @@ int flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, * get the metadata, that is, the parts of the flow key that cannot be * extracted from the packet itself. 
*/ -int flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id, - const struct nlattr *attr) + +int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len, const struct nlattr *attr) { + struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key; const struct nlattr *nla; int rem; - *in_port = USHRT_MAX; - *tun_id = 0; - *priority = 0; + flow->key.phy.in_port = DP_MAX_PORTS; + flow->key.phy.priority = 0; + flow->key.phy.skb_mark = 0; + memset(tun_key, 0, sizeof(flow->key.tun_key)); nla_for_each_nested(nla, attr, rem) { int type = nla_type(nla); - if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] != 0) { + if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) { + int err; + if (nla_len(nla) != ovs_key_lens[type]) return -EINVAL; switch (type) { case OVS_KEY_ATTR_PRIORITY: - *priority = nla_get_u32(nla); + flow->key.phy.priority = nla_get_u32(nla); break; - case OVS_KEY_ATTR_TUN_ID: - *tun_id = nla_get_be64(nla); + case OVS_KEY_ATTR_TUNNEL: + err = ipv4_tun_from_nlattr(nla, tun_key); + if (err) + return err; break; case OVS_KEY_ATTR_IN_PORT: if (nla_get_u32(nla) >= DP_MAX_PORTS) return -EINVAL; - *in_port = nla_get_u32(nla); + flow->key.phy.in_port = nla_get_u32(nla); + break; + + case OVS_KEY_ATTR_SKB_MARK: +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && !defined(CONFIG_NETFILTER) + if (nla_get_u32(nla) != 0) + return -EINVAL; +#endif + flow->key.phy.skb_mark = nla_get_u32(nla); break; } } } if (rem) return -EINVAL; + + flow->hash = ovs_flow_hash(&flow->key, + flow_key_start(&flow->key), key_len); + return 0; } -int flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) +int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) { struct ovs_key_ethernet *eth_key; struct nlattr *nla, *encap; - if (swkey->phy.priority) - NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority); + if (swkey->phy.priority && + nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority)) + goto nla_put_failure; - if (swkey->phy.tun_id != cpu_to_be64(0)) - NLA_PUT_BE64(skb, OVS_KEY_ATTR_TUN_ID, swkey->phy.tun_id); + if (swkey->tun_key.ipv4_dst && + ipv4_tun_to_nlattr(skb, &swkey->tun_key)) + goto nla_put_failure; + + if (swkey->phy.in_port != DP_MAX_PORTS && + nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port)) + goto nla_put_failure; - if (swkey->phy.in_port != USHRT_MAX) - NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port); + if (swkey->phy.skb_mark && + nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, swkey->phy.skb_mark)) + goto nla_put_failure; nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key)); if (!nla) @@ -1235,8 +1364,9 @@ int flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN); if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) { - NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)); - NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci); + if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) || + nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci)) + goto nla_put_failure; encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP); if (!swkey->eth.tci) goto unencap; @@ -1247,7 +1377,8 @@ int flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) if (swkey->eth.type == htons(ETH_P_802_2)) goto unencap; - NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type); + if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type)) + goto nla_put_failure; if (swkey->eth.type == htons(ETH_P_IP)) { struct 
ovs_key_ipv4 *ipv4_key; @@ -1278,7 +1409,8 @@ int flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) ipv6_key->ipv6_tclass = swkey->ip.tos; ipv6_key->ipv6_hlimit = swkey->ip.ttl; ipv6_key->ipv6_frag = swkey->ip.frag; - } else if (swkey->eth.type == htons(ETH_P_ARP)) { + } else if (swkey->eth.type == htons(ETH_P_ARP) || + swkey->eth.type == htons(ETH_P_RARP)) { struct ovs_key_arp *arp_key; nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key)); @@ -1375,20 +1507,18 @@ nla_put_failure: /* Initializes the flow module. * Returns zero if successful or a negative error code. */ -int flow_init(void) +int ovs_flow_init(void) { flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0, 0, NULL); if (flow_cache == NULL) return -ENOMEM; - get_random_bytes(&hash_seed, sizeof(hash_seed)); - return 0; } /* Uninitializes the flow module. */ -void flow_exit(void) +void ovs_flow_exit(void) { kmem_cache_destroy(flow_cache); }
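
Note on the rehash change above: the patch drops the per-flow refcount/dead flag and gives each flow two hash-list links (hash_node[0] and hash_node[1]) plus a per-table node_ver, so __flow_tbl_rehash() can link every flow into the new table under the other index while readers still walk the old table's lists, and keep_flows stops ovs_flow_tbl_destroy() from freeing flows the new table now owns. The sketch below is only a simplified userspace illustration of that dual-version idea -- no RCU, made-up names (flow, table, tbl_rehash), fixed bucket count -- it is not the kernel code in this diff.

#include <stdio.h>
#include <stdlib.h>

#define N_BUCKETS 4            /* illustration only; the real table grows */

struct flow {
    unsigned int hash;
    struct flow *next[2];      /* one link per table "version", like hash_node[2] */
    char name[16];
};

struct table {
    struct flow *buckets[N_BUCKETS];
    int node_ver;              /* which next[] index this table's lists use */
    int keep_flows;            /* destructor must not free shared flows */
};

static void tbl_insert(struct table *t, struct flow *f)
{
    unsigned int b = f->hash % N_BUCKETS;

    f->next[t->node_ver] = t->buckets[b];
    t->buckets[b] = f;
}

/* Copy every flow into 'new' using the *other* link index, so the lists of
 * 'old' stay intact for concurrent readers (RCU protects them in the patch). */
static void tbl_rehash(struct table *old, struct table *new)
{
    int i;

    new->node_ver = !old->node_ver;
    for (i = 0; i < N_BUCKETS; i++) {
        struct flow *f;

        for (f = old->buckets[i]; f; f = f->next[old->node_ver])
            tbl_insert(new, f);
    }
    old->keep_flows = 1;       /* flows are now owned by 'new' */
}

static void tbl_destroy(struct table *t)
{
    if (!t->keep_flows) {
        int i;

        for (i = 0; i < N_BUCKETS; i++) {
            struct flow *f = t->buckets[i];

            while (f) {
                struct flow *next = f->next[t->node_ver];
                free(f);
                f = next;
            }
        }
    }
    free(t);
}

int main(void)
{
    struct table *old = calloc(1, sizeof(*old));
    struct table *new = calloc(1, sizeof(*new));
    struct flow *f = calloc(1, sizeof(*f));

    f->hash = 42;
    snprintf(f->name, sizeof(f->name), "flow-42");
    tbl_insert(old, f);

    tbl_rehash(old, new);
    tbl_destroy(old);          /* does not free 'f': keep_flows was set */

    printf("%s still reachable: %s\n", f->name,
           new->buckets[42 % N_BUCKETS] == f ? "yes" : "no");
    tbl_destroy(new);
    return 0;
}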
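
Note on the lookup change above: ovs_flow_tbl_lookup() now hashes and memcmp()s only the bytes from flow_key_start() to key_len, so a key with no tunnel (tun_key.ipv4_dst == 0) skips the tunnel prefix of struct sw_flow_key; the global hash_seed is gone because find_bucket() now mixes in the per-table hash_seed with jhash_1word(). Below is a rough analogue of the offset-based hash/compare only, with a made-up key layout and FNV-1a standing in for jhash2 (both are assumptions, not definitions from this patch).

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* Toy stand-in for struct sw_flow_key: tunnel metadata first, then the rest. */
struct toy_key {
    uint32_t tun_dst;          /* 0 means "no tunnel", like tun_key.ipv4_dst */
    uint32_t in_port;
    uint8_t  eth_dst[6];
    uint8_t  eth_src[6];
};

/* Hash only the bytes in [start, len); FNV-1a replaces the kernel's jhash2. */
static uint32_t key_hash(const struct toy_key *key, size_t start, size_t len)
{
    const uint8_t *p = (const uint8_t *)key + start;
    uint32_t h = 2166136261u;
    size_t i;

    for (i = 0; i < len - start; i++) {
        h ^= p[i];
        h *= 16777619u;
    }
    return h;
}

/* Like flow_key_start(): keys without a tunnel skip the tunnel field. */
static size_t key_start(const struct toy_key *key)
{
    return key->tun_dst ? 0 : offsetof(struct toy_key, in_port);
}

static int key_equal(const struct toy_key *a, const struct toy_key *b, size_t len)
{
    size_t start = key_start(a);

    return !memcmp((const uint8_t *)a + start,
                   (const uint8_t *)b + start, len - start);
}

int main(void)
{
    struct toy_key a = { .tun_dst = 0, .in_port = 3,
                         .eth_dst = {1, 2, 3, 4, 5, 6} };
    struct toy_key b = a;

    printf("hash=%08x equal=%d\n",
           key_hash(&a, key_start(&a), sizeof(a)), key_equal(&a, &b, sizeof(a)));
    return 0;
}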