X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=datapath%2Fflow.c;h=0c3d75fd2f8d158f15746220bc5a71819b7f956e;hb=72e8bf28bb38e8816435c64859fb350215b6a9e6;hp=8b736c8836f8a28387fcbad76a2ed49879fc8fe2;hpb=9d2094938de6e6d6503a3d0777f64cff945321c3;p=sliver-openvswitch.git

diff --git a/datapath/flow.c b/datapath/flow.c
index 8b736c883..0c3d75fd2 100644
--- a/datapath/flow.c
+++ b/datapath/flow.c
@@ -1,13 +1,24 @@
 /*
- * Distributed under the terms of the GNU GPL version 2.
- * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
+ * Copyright (c) 2007-2011 Nicira, Inc.
  *
- * Significant portions of this file may be copied from parts of the Linux
- * kernel, by Linus Torvalds and others.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
  */
 
 #include "flow.h"
 #include "datapath.h"
+#include 
 #include 
 #include 
 #include 
@@ -21,278 +32,686 @@
 #include 
 #include 
 #include 
-#include 
 #include 
+#include 
 #include 
 #include 
 #include 
-#include 
+#include 
+#include 
 #include 
+#include 
+#include 
 
-#include "compat.h"
+#include "vlan.h"
 
-struct kmem_cache *flow_cache;
-static unsigned int hash_seed;
+static struct kmem_cache *flow_cache;
 
-struct arp_eth_header
+static int check_header(struct sk_buff *skb, int len)
 {
-	__be16 ar_hrd;		/* format of hardware address */
-	__be16 ar_pro;		/* format of protocol address */
-	unsigned char ar_hln;	/* length of hardware address */
-	unsigned char ar_pln;	/* length of protocol address */
-	__be16 ar_op;		/* ARP opcode (command) */
-
-	/* Ethernet+IPv4 specific members. */
-	unsigned char ar_sha[ETH_ALEN];	/* sender hardware address */
-	unsigned char ar_sip[4];	/* sender IP address */
-	unsigned char ar_tha[ETH_ALEN];	/* target hardware address */
-	unsigned char ar_tip[4];	/* target IP address */
-} __attribute__((packed));
+	if (unlikely(skb->len < len))
+		return -EINVAL;
+	if (unlikely(!pskb_may_pull(skb, len)))
+		return -ENOMEM;
+	return 0;
+}
 
-static inline int arphdr_ok(struct sk_buff *skb)
+static bool arphdr_ok(struct sk_buff *skb)
 {
-	int nh_ofs = skb_network_offset(skb);
-	return pskb_may_pull(skb, nh_ofs + sizeof(struct arp_eth_header));
+	return pskb_may_pull(skb, skb_network_offset(skb) +
+				  sizeof(struct arp_eth_header));
 }
 
-static inline int iphdr_ok(struct sk_buff *skb)
+static int check_iphdr(struct sk_buff *skb)
 {
-	int nh_ofs = skb_network_offset(skb);
-	if (skb->len >= nh_ofs + sizeof(struct iphdr)) {
-		int ip_len = ip_hdrlen(skb);
-		return (ip_len >= sizeof(struct iphdr)
-			&& pskb_may_pull(skb, nh_ofs + ip_len));
-	}
+	unsigned int nh_ofs = skb_network_offset(skb);
+	unsigned int ip_len;
+	int err;
+
+	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
+	if (unlikely(err))
+		return err;
+
+	ip_len = ip_hdrlen(skb);
+	if (unlikely(ip_len < sizeof(struct iphdr) ||
+		     skb->len < nh_ofs + ip_len))
+		return -EINVAL;
+
+	skb_set_transport_header(skb, nh_ofs + ip_len);
 	return 0;
 }
 
-static inline int tcphdr_ok(struct sk_buff *skb)
+static bool tcphdr_ok(struct sk_buff *skb)
 {
 	int th_ofs = skb_transport_offset(skb);
-	if (pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))) {
-		int tcp_len = tcp_hdrlen(skb);
-		return (tcp_len >= sizeof(struct tcphdr)
-			&& skb->len >= th_ofs + tcp_len);
-	}
-	return 0;
+	int tcp_len;
+
+	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
+		return false;
+
+	tcp_len = tcp_hdrlen(skb);
+	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
+		     skb->len < th_ofs + tcp_len))
+		return false;
+
+	return true;
 }
 
-static inline int udphdr_ok(struct sk_buff *skb)
+static bool udphdr_ok(struct sk_buff *skb)
 {
-	int th_ofs = skb_transport_offset(skb);
-	return pskb_may_pull(skb, th_ofs + sizeof(struct udphdr));
+	return pskb_may_pull(skb, skb_transport_offset(skb) +
+				  sizeof(struct udphdr));
 }
 
-static inline int icmphdr_ok(struct sk_buff *skb)
+static bool icmphdr_ok(struct sk_buff *skb)
 {
-	int th_ofs = skb_transport_offset(skb);
-	return pskb_may_pull(skb, th_ofs + sizeof(struct icmphdr));
+	return pskb_may_pull(skb, skb_transport_offset(skb) +
+			     sizeof(struct icmphdr));
+}
+
+u64 ovs_flow_used_time(unsigned long flow_jiffies)
+{
+	struct timespec cur_ts;
+	u64 cur_ms, idle_ms;
+
+	ktime_get_ts(&cur_ts);
+	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
+	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
+		 cur_ts.tv_nsec / NSEC_PER_MSEC;
+
+	return cur_ms - idle_ms;
+}
+
+#define SW_FLOW_KEY_OFFSET(field) \
+	(offsetof(struct sw_flow_key, field) + \
+	 FIELD_SIZEOF(struct sw_flow_key, field))
+
+static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
+			 int *key_lenp)
+{
+	unsigned int nh_ofs = skb_network_offset(skb);
+	unsigned int nh_len;
+	int payload_ofs;
+	struct ipv6hdr *nh;
+	uint8_t nexthdr;
+	__be16 frag_off;
+	int err;
+
+	*key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);
+
+	err = check_header(skb, nh_ofs + sizeof(*nh));
+	if (unlikely(err))
+		return err;
+
+	nh = ipv6_hdr(skb);
+	nexthdr = nh->nexthdr;
+	payload_ofs = (u8 *)(nh + 1) - skb->data;
+
+	key->ip.proto = NEXTHDR_NONE;
+	key->ip.tos = ipv6_get_dsfield(nh);
+	key->ip.ttl = nh->hop_limit;
+	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
+	key->ipv6.addr.src = nh->saddr;
+	key->ipv6.addr.dst = nh->daddr;
+
+	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
+	if (unlikely(payload_ofs < 0))
+		return -EINVAL;
+
+	if (frag_off) {
+		if (frag_off & htons(~0x7))
+			key->ip.frag = OVS_FRAG_TYPE_LATER;
+		else
+			key->ip.frag = OVS_FRAG_TYPE_FIRST;
+	}
+
+	nh_len = payload_ofs - nh_ofs;
+	skb_set_transport_header(skb, nh_ofs + nh_len);
+	key->ip.proto = nexthdr;
+	return nh_len;
+}
+
+static bool icmp6hdr_ok(struct sk_buff *skb)
+{
+	return pskb_may_pull(skb, skb_transport_offset(skb) +
+			     sizeof(struct icmp6hdr));
 }
 
 #define TCP_FLAGS_OFFSET 13
 #define TCP_FLAG_MASK 0x3f
 
-void flow_used(struct sw_flow *flow, struct sk_buff *skb)
+void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
 {
 	u8 tcp_flags = 0;
 
-	if (flow->key.dl_type == htons(ETH_P_IP) &&
-	    flow->key.nw_proto == IPPROTO_TCP) {
+	if ((flow->key.eth.type == htons(ETH_P_IP) ||
+	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
+	    flow->key.ip.proto == IPPROTO_TCP &&
+	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
 		u8 *tcp = (u8 *)tcp_hdr(skb);
 		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
 	}
 
-	spin_lock_bh(&flow->lock);
+	spin_lock(&flow->lock);
 	flow->used = jiffies;
 	flow->packet_count++;
 	flow->byte_count += skb->len;
 	flow->tcp_flags |= tcp_flags;
-	spin_unlock_bh(&flow->lock);
+	spin_unlock(&flow->lock);
 }
 
-struct sw_flow_actions *flow_actions_alloc(size_t n_actions)
+struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
 {
+	int actions_len = nla_len(actions);
 	struct sw_flow_actions *sfa;
 
-	if (n_actions > (PAGE_SIZE - sizeof *sfa) / sizeof(union odp_action))
+	if (actions_len > MAX_ACTIONS_BUFSIZE)
 		return ERR_PTR(-EINVAL);
 
-	sfa = kmalloc(sizeof *sfa + n_actions * sizeof(union odp_action),
-		      GFP_KERNEL);
+	sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
 	if (!sfa)
 		return ERR_PTR(-ENOMEM);
 
-	sfa->n_actions = n_actions;
+	sfa->actions_len = actions_len;
+	memcpy(sfa->actions, nla_data(actions), actions_len);
 	return sfa;
 }
 
+struct sw_flow *ovs_flow_alloc(void)
+{
+	struct sw_flow *flow;
+
+	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
+	if (!flow)
+		return ERR_PTR(-ENOMEM);
 
-/* Frees 'flow' immediately. */
-static void flow_free(struct sw_flow *flow)
+	spin_lock_init(&flow->lock);
+	flow->sf_acts = NULL;
+
+	return flow;
+}
+
+static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
 {
-	if (unlikely(!flow))
+	hash = jhash_1word(hash, table->hash_seed);
+	return flex_array_get(table->buckets,
+				(hash & (table->n_buckets - 1)));
+}
+
+static struct flex_array *alloc_buckets(unsigned int n_buckets)
+{
+	struct flex_array *buckets;
+	int i, err;
+
+	buckets = flex_array_alloc(sizeof(struct hlist_head *),
+				   n_buckets, GFP_KERNEL);
+	if (!buckets)
+		return NULL;
+
+	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
+	if (err) {
+		flex_array_free(buckets);
+		return NULL;
+	}
+
+	for (i = 0; i < n_buckets; i++)
+		INIT_HLIST_HEAD((struct hlist_head *)
+					flex_array_get(buckets, i));
+
+	return buckets;
+}
+
+static void free_buckets(struct flex_array *buckets)
+{
+	flex_array_free(buckets);
+}
+
+struct flow_table *ovs_flow_tbl_alloc(int new_size)
+{
+	struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
+
+	if (!table)
+		return NULL;
+
+	table->buckets = alloc_buckets(new_size);
+
+	if (!table->buckets) {
+		kfree(table);
+		return NULL;
+	}
+	table->n_buckets = new_size;
+	table->count = 0;
+	table->node_ver = 0;
+	table->keep_flows = false;
+	get_random_bytes(&table->hash_seed, sizeof(u32));
+
+	return table;
+}
+
+void ovs_flow_tbl_destroy(struct flow_table *table)
+{
+	int i;
+
+	if (!table)
 		return;
-	kfree(flow->sf_acts);
-	kmem_cache_free(flow_cache, flow);
+
+	if (table->keep_flows)
+		goto skip_flows;
+
+	for (i = 0; i < table->n_buckets; i++) {
+		struct sw_flow *flow;
+		struct hlist_head *head = flex_array_get(table->buckets, i);
+		struct hlist_node *node, *n;
+		int ver = table->node_ver;
+
+		hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
+			hlist_del_rcu(&flow->hash_node[ver]);
+			ovs_flow_free(flow);
+		}
+	}
+
+skip_flows:
+	free_buckets(table->buckets);
+	kfree(table);
+}
+
+static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
+{
+	struct flow_table *table = container_of(rcu, struct flow_table, rcu);
+
+	ovs_flow_tbl_destroy(table);
+}
+
+void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
+{
+	if (!table)
+		return;
+
+	call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
 }
 
-void flow_free_tbl(struct tbl_node *node)
+struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
 {
-	struct sw_flow *flow = flow_cast(node);
-	flow_free(flow);
+	struct sw_flow *flow;
+	struct hlist_head *head;
+	struct hlist_node *n;
+	int ver;
+	int i;
+
+	ver = table->node_ver;
+	while (*bucket < table->n_buckets) {
+		i = 0;
+		head = flex_array_get(table->buckets, *bucket);
+		hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
+			if (i < *last) {
+				i++;
+				continue;
+			}
+			*last = i + 1;
+			return flow;
+		}
+		(*bucket)++;
+		*last = 0;
+	}
+
+	return NULL;
 }
 
-/* RCU callback used by flow_deferred_free. */
+static void __flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
+{
+	struct hlist_head *head;
+	head = find_bucket(table, flow->hash);
+	hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
+	table->count++;
+}
+
+static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
+{
+	int old_ver;
+	int i;
+
+	old_ver = old->node_ver;
+	new->node_ver = !old_ver;
+
+	/* Insert in new table. */
+	for (i = 0; i < old->n_buckets; i++) {
+		struct sw_flow *flow;
+		struct hlist_head *head;
+		struct hlist_node *n;
+
+		head = flex_array_get(old->buckets, i);
+
+		hlist_for_each_entry(flow, n, head, hash_node[old_ver])
+			__flow_tbl_insert(new, flow);
+	}
+	old->keep_flows = true;
+}
+
+static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
+{
+	struct flow_table *new_table;
+
+	new_table = ovs_flow_tbl_alloc(n_buckets);
+	if (!new_table)
+		return ERR_PTR(-ENOMEM);
+
+	flow_table_copy_flows(table, new_table);
+
+	return new_table;
+}
+
+struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
+{
+	return __flow_tbl_rehash(table, table->n_buckets);
+}
+
+struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
+{
+	return __flow_tbl_rehash(table, table->n_buckets * 2);
+}
+
+void ovs_flow_free(struct sw_flow *flow)
+{
+	if (unlikely(!flow))
+		return;
+
+	kfree((struct sf_flow_acts __force *)flow->sf_acts);
+	kmem_cache_free(flow_cache, flow);
+}
+
+/* RCU callback used by ovs_flow_deferred_free. */
 static void rcu_free_flow_callback(struct rcu_head *rcu)
 {
 	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
-	flow_free(flow);
+
+	ovs_flow_free(flow);
 }
 
 /* Schedules 'flow' to be freed after the next RCU grace period.
  * The caller must hold rcu_read_lock for this to be sensible. */
-void flow_deferred_free(struct sw_flow *flow)
+void ovs_flow_deferred_free(struct sw_flow *flow)
 {
 	call_rcu(&flow->rcu, rcu_free_flow_callback);
 }
 
-/* RCU callback used by flow_deferred_free_acts. */
+/* RCU callback used by ovs_flow_deferred_free_acts. */
 static void rcu_free_acts_callback(struct rcu_head *rcu)
 {
-	struct sw_flow_actions *sf_acts = container_of(rcu, 
+	struct sw_flow_actions *sf_acts = container_of(rcu,
 			struct sw_flow_actions, rcu);
 	kfree(sf_acts);
 }
 
 /* Schedules 'sf_acts' to be freed after the next RCU grace period.
  * The caller must hold rcu_read_lock for this to be sensible. */
-void flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
+void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
 {
 	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
 }
 
-#define SNAP_OUI_LEN 3
+static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
+{
+	struct qtag_prefix {
+		__be16 eth_type;	/* ETH_P_8021Q */
+		__be16 tci;
+	};
+	struct qtag_prefix *qp;
+
+	if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
+		return 0;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
+					 sizeof(__be16))))
+		return -ENOMEM;
+
+	qp = (struct qtag_prefix *) skb->data;
+	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
+	__skb_pull(skb, sizeof(struct qtag_prefix));
+
+	return 0;
+}
 
-struct eth_snap_hdr
+static __be16 parse_ethertype(struct sk_buff *skb)
 {
-	struct ethhdr eth;
-	u8 dsap;	/* Always 0xAA */
-	u8 ssap;	/* Always 0xAA */
-	u8 ctrl;
-	u8 oui[SNAP_OUI_LEN];
-	u16 ethertype;
-} __attribute__ ((packed));
+	struct llc_snap_hdr {
+		u8 dsap;	/* Always 0xAA */
+		u8 ssap;	/* Always 0xAA */
+		u8 ctrl;
+		u8 oui[3];
+		__be16 ethertype;
+	};
+	struct llc_snap_hdr *llc;
+	__be16 proto;
+
+	proto = *(__be16 *) skb->data;
+	__skb_pull(skb, sizeof(__be16));
+
+	if (ntohs(proto) >= 1536)
+		return proto;
+
+	if (skb->len < sizeof(struct llc_snap_hdr))
+		return htons(ETH_P_802_2);
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
+		return htons(0);
+
+	llc = (struct llc_snap_hdr *) skb->data;
+	if (llc->dsap != LLC_SAP_SNAP ||
+	    llc->ssap != LLC_SAP_SNAP ||
+	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
+		return htons(ETH_P_802_2);
+
+	__skb_pull(skb, sizeof(struct llc_snap_hdr));
+	return llc->ethertype;
+}
 
-static int is_snap(const struct eth_snap_hdr *esh)
+static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
+			int *key_lenp, int nh_len)
 {
-	return (esh->dsap == LLC_SAP_SNAP
-		&& esh->ssap == LLC_SAP_SNAP
-		&& !memcmp(esh->oui, "\0\0\0", 3));
+	struct icmp6hdr *icmp = icmp6_hdr(skb);
+	int error = 0;
+	int key_len;
+
+	/* The ICMPv6 type and code fields use the 16-bit transport port
+	 * fields, so we need to store them in 16-bit network byte order.
+	 */
+	key->ipv6.tp.src = htons(icmp->icmp6_type);
+	key->ipv6.tp.dst = htons(icmp->icmp6_code);
+	key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+
+	if (icmp->icmp6_code == 0 &&
+	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
+		int icmp_len = skb->len - skb_transport_offset(skb);
+		struct nd_msg *nd;
+		int offset;
+
+		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
+
+		/* In order to process neighbor discovery options, we need the
+		 * entire packet.
+		 */
+		if (unlikely(icmp_len < sizeof(*nd)))
+			goto out;
+		if (unlikely(skb_linearize(skb))) {
+			error = -ENOMEM;
+			goto out;
+		}
+
+		nd = (struct nd_msg *)skb_transport_header(skb);
+		key->ipv6.nd.target = nd->target;
+		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
+
+		icmp_len -= sizeof(*nd);
+		offset = 0;
+		while (icmp_len >= 8) {
+			struct nd_opt_hdr *nd_opt =
+				(struct nd_opt_hdr *)(nd->opt + offset);
+			int opt_len = nd_opt->nd_opt_len * 8;
+
+			if (unlikely(!opt_len || opt_len > icmp_len))
+				goto invalid;
+
+			/* Store the link layer address if the appropriate
+			 * option is provided. It is considered an error if
+			 * the same link layer option is specified twice.
+			 */
+			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
+			    && opt_len == 8) {
+				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
+					goto invalid;
+				memcpy(key->ipv6.nd.sll,
+				       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
+			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
+				   && opt_len == 8) {
+				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
+					goto invalid;
+				memcpy(key->ipv6.nd.tll,
+				       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
+			}
+
+			icmp_len -= opt_len;
+			offset += opt_len;
+		}
+	}
+
+	goto out;
+
+invalid:
+	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
+	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
+	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));
+
+out:
+	*key_lenp = key_len;
+	return error;
 }
 
-/* Parses the Ethernet frame in 'skb', which was received on 'in_port',
- * and initializes 'key' to match.  Returns 1 if 'skb' contains an IP
- * fragment, 0 otherwise. */
-int flow_extract(struct sk_buff *skb, u16 in_port, struct odp_flow_key *key)
+/**
+ * ovs_flow_extract - extracts a flow key from an Ethernet frame.
+ * @skb: sk_buff that contains the frame, with skb->data pointing to the
+ * Ethernet header
+ * @in_port: port number on which @skb was received.
+ * @key: output flow key
+ * @key_lenp: length of output flow key
+ *
+ * The caller must ensure that skb->len >= ETH_HLEN.
+ *
+ * Returns 0 if successful, otherwise a negative errno value.
+ *
+ * Initializes @skb header pointers as follows:
+ *
+ *    - skb->mac_header: the Ethernet header.
+ *
+ *    - skb->network_header: just past the Ethernet header, or just past the
+ *      VLAN header, to the first byte of the Ethernet payload.
+ *
+ *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
+ *      on output, then just past the IP header, if one is present and
+ *      of a correct length, otherwise the same as skb->network_header.
+ *      For other key->eth.type values it is left untouched.
+ */
+int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
+		 int *key_lenp)
 {
+	int error = 0;
+	int key_len = SW_FLOW_KEY_OFFSET(eth);
 	struct ethhdr *eth;
-	struct eth_snap_hdr *esh;
-	int retval = 0;
-	int nh_ofs;
 
-	memset(key, 0, sizeof *key);
-	key->tun_id = OVS_CB(skb)->tun_id;
-	key->in_port = in_port;
-	key->dl_vlan = htons(ODP_VLAN_NONE);
+	memset(key, 0, sizeof(*key));
 
-	if (skb->len < sizeof *eth)
-		return 0;
-	if (!pskb_may_pull(skb, skb->len >= 64 ? 64 : skb->len)) {
-		return 0;
-	}
+	key->phy.priority = skb->priority;
+	if (OVS_CB(skb)->tun_key)
+		memcpy(&key->tun_key, OVS_CB(skb)->tun_key, sizeof(key->tun_key));
+	key->phy.in_port = in_port;
+	key->phy.skb_mark = skb_get_mark(skb);
 
 	skb_reset_mac_header(skb);
+
+	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
+	 * header in the linear data area.
+	 */
 	eth = eth_hdr(skb);
-	esh = (struct eth_snap_hdr *) eth;
-	nh_ofs = sizeof *eth;
-	if (likely(ntohs(eth->h_proto) >= ODP_DL_TYPE_ETH2_CUTOFF))
-		key->dl_type = eth->h_proto;
-	else if (skb->len >= sizeof *esh && is_snap(esh)) {
-		key->dl_type = esh->ethertype;
-		nh_ofs = sizeof *esh;
-	} else {
-		key->dl_type = htons(ODP_DL_TYPE_NOT_ETH_TYPE);
-		if (skb->len >= nh_ofs + sizeof(struct llc_pdu_un)) {
-			nh_ofs += sizeof(struct llc_pdu_un);
-		}
-	}
+	memcpy(key->eth.src, eth->h_source, ETH_ALEN);
+	memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);
 
-	/* Check for a VLAN tag */
-	if (key->dl_type == htons(ETH_P_8021Q) &&
-	    skb->len >= nh_ofs + sizeof(struct vlan_hdr)) {
-		struct vlan_hdr *vh = (struct vlan_hdr*)(skb->data + nh_ofs);
-		key->dl_type = vh->h_vlan_encapsulated_proto;
-		key->dl_vlan = vh->h_vlan_TCI & htons(VLAN_VID_MASK);
-		key->dl_vlan_pcp = (ntohs(vh->h_vlan_TCI) & VLAN_PCP_MASK) >> VLAN_PCP_SHIFT;
-		nh_ofs += sizeof(struct vlan_hdr);
-	}
-	memcpy(key->dl_src, eth->h_source, ETH_ALEN);
-	memcpy(key->dl_dst, eth->h_dest, ETH_ALEN);
-	skb_set_network_header(skb, nh_ofs);
+	__skb_pull(skb, 2 * ETH_ALEN);
+
+	if (vlan_tx_tag_present(skb))
+		key->eth.tci = htons(vlan_get_tci(skb));
+	else if (eth->h_proto == htons(ETH_P_8021Q))
+		if (unlikely(parse_vlan(skb, key)))
+			return -ENOMEM;
+
+	key->eth.type = parse_ethertype(skb);
+	if (unlikely(key->eth.type == htons(0)))
+		return -ENOMEM;
+
+	skb_reset_network_header(skb);
+	__skb_push(skb, skb->data - skb_mac_header(skb));
 
 	/* Network layer. */
-	if (key->dl_type == htons(ETH_P_IP) && iphdr_ok(skb)) {
-		struct iphdr *nh = ip_hdr(skb);
-		int th_ofs = nh_ofs + nh->ihl * 4;
-		key->nw_src = nh->saddr;
-		key->nw_dst = nh->daddr;
-		key->nw_tos = nh->tos & ~INET_ECN_MASK;
-		key->nw_proto = nh->protocol;
-		skb_set_transport_header(skb, th_ofs);
+	if (key->eth.type == htons(ETH_P_IP)) {
+		struct iphdr *nh;
+		__be16 offset;
+
+		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
+
+		error = check_iphdr(skb);
+		if (unlikely(error)) {
+			if (error == -EINVAL) {
+				skb->transport_header = skb->network_header;
+				error = 0;
+			}
+			goto out;
+		}
+
+		nh = ip_hdr(skb);
+		key->ipv4.addr.src = nh->saddr;
+		key->ipv4.addr.dst = nh->daddr;
+
+		key->ip.proto = nh->protocol;
+		key->ip.tos = nh->tos;
+		key->ip.ttl = nh->ttl;
+
+		offset = nh->frag_off & htons(IP_OFFSET);
+		if (offset) {
+			key->ip.frag = OVS_FRAG_TYPE_LATER;
+			goto out;
+		}
+		if (nh->frag_off & htons(IP_MF) ||
+			 skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+			key->ip.frag = OVS_FRAG_TYPE_FIRST;
 
 		/* Transport layer. */
-		if (!(nh->frag_off & htons(IP_MF | IP_OFFSET))) {
-			if (key->nw_proto == IPPROTO_TCP) {
-				if (tcphdr_ok(skb)) {
-					struct tcphdr *tcp = tcp_hdr(skb);
-					key->tp_src = tcp->source;
-					key->tp_dst = tcp->dest;
-				} else {
-					/* Avoid tricking other code into
-					 * thinking that this packet has an L4
-					 * header. */
-					key->nw_proto = 0;
-				}
-			} else if (key->nw_proto == IPPROTO_UDP) {
-				if (udphdr_ok(skb)) {
-					struct udphdr *udp = udp_hdr(skb);
-					key->tp_src = udp->source;
-					key->tp_dst = udp->dest;
-				} else {
-					/* Avoid tricking other code into
-					 * thinking that this packet has an L4
-					 * header. */
-					key->nw_proto = 0;
-				}
-			} else if (key->nw_proto == IPPROTO_ICMP) {
-				if (icmphdr_ok(skb)) {
-					struct icmphdr *icmp = icmp_hdr(skb);
-					/* The ICMP type and code fields use the 16-bit
-					 * transport port fields, so we need to store them
-					 * in 16-bit network byte order. */
-					key->tp_src = htons(icmp->type);
-					key->tp_dst = htons(icmp->code);
-				} else {
-					/* Avoid tricking other code into
-					 * thinking that this packet has an L4
-					 * header. */
-					key->nw_proto = 0;
-				}
+		if (key->ip.proto == IPPROTO_TCP) {
+			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+			if (tcphdr_ok(skb)) {
+				struct tcphdr *tcp = tcp_hdr(skb);
+				key->ipv4.tp.src = tcp->source;
+				key->ipv4.tp.dst = tcp->dest;
+			}
+		} else if (key->ip.proto == IPPROTO_UDP) {
+			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+			if (udphdr_ok(skb)) {
+				struct udphdr *udp = udp_hdr(skb);
+				key->ipv4.tp.src = udp->source;
+				key->ipv4.tp.dst = udp->dest;
+			}
+		} else if (key->ip.proto == IPPROTO_ICMP) {
+			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+			if (icmphdr_ok(skb)) {
+				struct icmphdr *icmp = icmp_hdr(skb);
+				/* The ICMP type and code fields use the 16-bit
+				 * transport port fields, so we need to store
+				 * them in 16-bit network byte order. */
+				key->ipv4.tp.src = htons(icmp->type);
+				key->ipv4.tp.dst = htons(icmp->code);
 			}
-		} else {
-			retval = 1;
 		}
-	} else if (key->dl_type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
+
+	} else if ((key->eth.type == htons(ETH_P_ARP) ||
+		   key->eth.type == htons(ETH_P_RARP)) && arphdr_ok(skb)) {
 		struct arp_eth_header *arp;
 
 		arp = (struct arp_eth_header *)skb_network_header(skb);
@@ -303,51 +722,762 @@ int flow_extract(struct sk_buff *skb, u16 in_port, struct odp_flow_key *key)
 		   && arp->ar_pln == 4) {
 
 		/* We only match on the lower 8 bits of the opcode. */
-		if (ntohs(arp->ar_op) <= 0xff) {
-			key->nw_proto = ntohs(arp->ar_op);
-		}
+		if (ntohs(arp->ar_op) <= 0xff)
+			key->ip.proto = ntohs(arp->ar_op);
+
+		memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
+		memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
+		memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
+		memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
+		key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
+		}
+	} else if (key->eth.type == htons(ETH_P_IPV6)) {
+		int nh_len;	/* IPv6 Header + Extensions */
+
+		nh_len = parse_ipv6hdr(skb, key, &key_len);
+		if (unlikely(nh_len < 0)) {
+			if (nh_len == -EINVAL)
+				skb->transport_header = skb->network_header;
+			else
+				error = nh_len;
+			goto out;
+		}
+
+		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
+			goto out;
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+			key->ip.frag = OVS_FRAG_TYPE_FIRST;
 
-		if (key->nw_proto == ARPOP_REQUEST
-		    || key->nw_proto == ARPOP_REPLY) {
-			memcpy(&key->nw_src, arp->ar_sip, sizeof(key->nw_src));
-			memcpy(&key->nw_dst, arp->ar_tip, sizeof(key->nw_dst));
+		/* Transport layer. */
+		if (key->ip.proto == NEXTHDR_TCP) {
+			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+			if (tcphdr_ok(skb)) {
+				struct tcphdr *tcp = tcp_hdr(skb);
+				key->ipv6.tp.src = tcp->source;
+				key->ipv6.tp.dst = tcp->dest;
 			}
+		} else if (key->ip.proto == NEXTHDR_UDP) {
+			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+			if (udphdr_ok(skb)) {
+				struct udphdr *udp = udp_hdr(skb);
+				key->ipv6.tp.src = udp->source;
+				key->ipv6.tp.dst = udp->dest;
+			}
+		} else if (key->ip.proto == NEXTHDR_ICMP) {
+			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+			if (icmp6hdr_ok(skb)) {
+				error = parse_icmpv6(skb, key, &key_len, nh_len);
+				if (error < 0)
+					goto out;
+			}
+		}
+	}
+
+out:
+	*key_lenp = key_len;
+	return error;
+}
+
+static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, int key_len)
+{
+	return jhash2((u32 *)((u8 *)key + key_start),
+		      DIV_ROUND_UP(key_len - key_start, sizeof(u32)), 0);
+}
+
+static int flow_key_start(struct sw_flow_key *key)
+{
+	if (key->tun_key.ipv4_dst)
+		return 0;
+	else
+		return offsetof(struct sw_flow_key, phy);
+}
+
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
+				struct sw_flow_key *key, int key_len)
+{
+	struct sw_flow *flow;
+	struct hlist_node *n;
+	struct hlist_head *head;
+	u8 *_key;
+	int key_start;
+	u32 hash;
+
+	key_start = flow_key_start(key);
+	hash = ovs_flow_hash(key, key_start, key_len);
+
+	_key = (u8 *) key + key_start;
+	head = find_bucket(table, hash);
+	hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {
+
+		if (flow->hash == hash &&
+		    !memcmp((u8 *)&flow->key + key_start, _key, key_len - key_start)) {
+			return flow;
+		}
+	}
+	return NULL;
+}
+
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
+			 struct sw_flow_key *key, int key_len)
+{
+	flow->hash = ovs_flow_hash(key, flow_key_start(key), key_len);
+	memcpy(&flow->key, key, sizeof(flow->key));
+	__flow_tbl_insert(table, flow);
+}
+
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
+{
+	hlist_del_rcu(&flow->hash_node[table->node_ver]);
+	table->count--;
+	BUG_ON(table->count < 0);
+}
+
+/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.  */
+const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
+	[OVS_KEY_ATTR_ENCAP] = -1,
+	[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
+	[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
+	[OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
+	[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
+	[OVS_KEY_ATTR_VLAN] = sizeof(__be16),
+	[OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
+	[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
+	[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
+	[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
+	[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
+	[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
+	[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
+	[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
+	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
+	[OVS_KEY_ATTR_IPV4_TUNNEL] = sizeof(struct ovs_key_ipv4_tunnel),
+
+	/* Not upstream. */
+	[OVS_KEY_ATTR_TUN_ID] = sizeof(__be64),
+};
+
+static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
+				  const struct nlattr *a[], u64 *attrs)
+{
+	const struct ovs_key_icmp *icmp_key;
+	const struct ovs_key_tcp *tcp_key;
+	const struct ovs_key_udp *udp_key;
+
+	switch (swkey->ip.proto) {
+	case IPPROTO_TCP:
+		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
+			return -EINVAL;
+		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+
+		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
+		swkey->ipv4.tp.src = tcp_key->tcp_src;
+		swkey->ipv4.tp.dst = tcp_key->tcp_dst;
+		break;
+
+	case IPPROTO_UDP:
+		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
+			return -EINVAL;
+		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+
+		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
+		swkey->ipv4.tp.src = udp_key->udp_src;
+		swkey->ipv4.tp.dst = udp_key->udp_dst;
+		break;
+
+	case IPPROTO_ICMP:
+		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
+			return -EINVAL;
+		*attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
+
+		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
+		swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
+		swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);
+		break;
+	}
+
+	return 0;
+}
+
+static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
+				  const struct nlattr *a[], u64 *attrs)
+{
+	const struct ovs_key_icmpv6 *icmpv6_key;
+	const struct ovs_key_tcp *tcp_key;
+	const struct ovs_key_udp *udp_key;
+
+	switch (swkey->ip.proto) {
+	case IPPROTO_TCP:
+		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
+			return -EINVAL;
+		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+
+		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
+		swkey->ipv6.tp.src = tcp_key->tcp_src;
+		swkey->ipv6.tp.dst = tcp_key->tcp_dst;
+		break;
+
+	case IPPROTO_UDP:
+		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
+			return -EINVAL;
+		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+
+		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
+		swkey->ipv6.tp.src = udp_key->udp_src;
+		swkey->ipv6.tp.dst = udp_key->udp_dst;
+		break;
+
+	case IPPROTO_ICMPV6:
+		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
+			return -EINVAL;
+		*attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
+
+		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
+		swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
+		swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);
+
+		if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
+		    swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
+			const struct ovs_key_nd *nd_key;
+
+			if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
+				return -EINVAL;
+			*attrs &= ~(1 << OVS_KEY_ATTR_ND);
+
+			*key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
+			nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
+			memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
+			       sizeof(swkey->ipv6.nd.target));
+			memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
+			memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int parse_flow_nlattrs(const struct nlattr *attr,
+			      const struct nlattr *a[], u64 *attrsp)
+{
+	const struct nlattr *nla;
+	u64 attrs;
+	int rem;
+
+	attrs = 0;
+	nla_for_each_nested(nla, attr, rem) {
+		u16 type = nla_type(nla);
+		int expected_len;
+
+		if (type > OVS_KEY_ATTR_MAX || attrs & (1ULL << type))
+			return -EINVAL;
+
+		expected_len = ovs_key_lens[type];
+		if (nla_len(nla) != expected_len && expected_len != -1)
+			return -EINVAL;
+
+		attrs |= 1ULL << type;
+		a[type] = nla;
+	}
+	if (rem)
+		return -EINVAL;
+
+	*attrsp = attrs;
+	return 0;
+}
+
+/**
+ * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
+ * @swkey: receives the extracted flow key.
+ * @key_lenp: number of bytes used in @swkey.
+ * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * sequence.
+ */
+int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
+			  const struct nlattr *attr)
+{
+	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
+	const struct ovs_key_ethernet *eth_key;
+	int key_len;
+	u64 attrs;
+	int err;
+
+	memset(swkey, 0, sizeof(struct sw_flow_key));
+	key_len = SW_FLOW_KEY_OFFSET(eth);
+
+	err = parse_flow_nlattrs(attr, a, &attrs);
+	if (err)
+		return err;
+
+	/* Metadata attributes. */
+	if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
+		swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]);
+		attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
+	}
+	if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
+		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
+		if (in_port >= DP_MAX_PORTS)
+			return -EINVAL;
+		swkey->phy.in_port = in_port;
+		attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
+	} else {
+		swkey->phy.in_port = DP_MAX_PORTS;
+	}
+	if (attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
+		uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && !defined(CONFIG_NETFILTER)
+		if (mark != 0)
+			return -EINVAL;
+#endif
+		swkey->phy.skb_mark = mark;
+		attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
+	}
+
+	if (attrs & (1ULL << OVS_KEY_ATTR_TUN_ID) &&
+	    attrs & (1ULL << OVS_KEY_ATTR_IPV4_TUNNEL)) {
+		struct ovs_key_ipv4_tunnel *tun_key;
+		__be64 tun_id;
+
+		tun_key = nla_data(a[OVS_KEY_ATTR_IPV4_TUNNEL]);
+
+		if (!tun_key->ipv4_dst)
+			return -EINVAL;
+		if (!(tun_key->tun_flags & OVS_TNL_F_KEY))
+			return -EINVAL;
+
+		tun_id = nla_get_be64(a[OVS_KEY_ATTR_TUN_ID]);
+		if (tun_id != tun_key->tun_id)
+			return -EINVAL;
+
+		memcpy(&swkey->tun_key, tun_key, sizeof(swkey->tun_key));
+
+		attrs &= ~(1ULL << OVS_KEY_ATTR_TUN_ID);
+		attrs &= ~(1ULL << OVS_KEY_ATTR_IPV4_TUNNEL);
+	} else if (attrs & (1ULL << OVS_KEY_ATTR_IPV4_TUNNEL)) {
+		struct ovs_key_ipv4_tunnel *tun_key;
+		tun_key = nla_data(a[OVS_KEY_ATTR_IPV4_TUNNEL]);
+
+		if (!tun_key->ipv4_dst)
+			return -EINVAL;
+
+		memcpy(&swkey->tun_key, tun_key, sizeof(swkey->tun_key));
+
+		attrs &= ~(1ULL << OVS_KEY_ATTR_IPV4_TUNNEL);
+	}
+
+	/* Data attributes. */
+	if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
+		return -EINVAL;
+	attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
+
+	eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
+	memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
+	memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);
+
+	if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) &&
+	    nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) {
+		const struct nlattr *encap;
+		__be16 tci;
+
+		if (attrs != ((1 << OVS_KEY_ATTR_VLAN) |
+			      (1 << OVS_KEY_ATTR_ETHERTYPE) |
+			      (1 << OVS_KEY_ATTR_ENCAP)))
+			return -EINVAL;
+
+		encap = a[OVS_KEY_ATTR_ENCAP];
+		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+		if (tci & htons(VLAN_TAG_PRESENT)) {
+			swkey->eth.tci = tci;
+
+			err = parse_flow_nlattrs(encap, a, &attrs);
+			if (err)
+				return err;
+		} else if (!tci) {
+			/* Corner case for truncated 802.1Q header. */
+			if (nla_len(encap))
+				return -EINVAL;
+
+			swkey->eth.type = htons(ETH_P_8021Q);
+			*key_lenp = key_len;
+			return 0;
+		} else {
+			return -EINVAL;
 		}
+	}
+
+	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
+		swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
+		if (ntohs(swkey->eth.type) < 1536)
+			return -EINVAL;
+		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
 	} else {
-		skb_reset_transport_header(skb);
+		swkey->eth.type = htons(ETH_P_802_2);
 	}
-	return retval;
+
+	if (swkey->eth.type == htons(ETH_P_IP)) {
+		const struct ovs_key_ipv4 *ipv4_key;
+
+		if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
+			return -EINVAL;
+		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
+
+		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
+		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
+		if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
+			return -EINVAL;
+		swkey->ip.proto = ipv4_key->ipv4_proto;
+		swkey->ip.tos = ipv4_key->ipv4_tos;
+		swkey->ip.ttl = ipv4_key->ipv4_ttl;
+		swkey->ip.frag = ipv4_key->ipv4_frag;
+		swkey->ipv4.addr.src = ipv4_key->ipv4_src;
+		swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;
+
+		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
+			err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);
+			if (err)
+				return err;
+		}
+	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+		const struct ovs_key_ipv6 *ipv6_key;
+
+		if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
+			return -EINVAL;
+		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
+
+		key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
+		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
+		if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
+			return -EINVAL;
+		swkey->ipv6.label = ipv6_key->ipv6_label;
+		swkey->ip.proto = ipv6_key->ipv6_proto;
+		swkey->ip.tos = ipv6_key->ipv6_tclass;
+		swkey->ip.ttl = ipv6_key->ipv6_hlimit;
+		swkey->ip.frag = ipv6_key->ipv6_frag;
+		memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
+		       sizeof(swkey->ipv6.addr.src));
+		memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
+		       sizeof(swkey->ipv6.addr.dst));
+
+		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
+			err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);
+			if (err)
+				return err;
+		}
+	} else if (swkey->eth.type == htons(ETH_P_ARP) ||
+		   swkey->eth.type == htons(ETH_P_RARP)) {
+		const struct ovs_key_arp *arp_key;
+
+		if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
+			return -EINVAL;
+		attrs &= ~(1 << OVS_KEY_ATTR_ARP);
+
+		key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
+		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
+		swkey->ipv4.addr.src = arp_key->arp_sip;
+		swkey->ipv4.addr.dst = arp_key->arp_tip;
+		if (arp_key->arp_op & htons(0xff00))
+			return -EINVAL;
+		swkey->ip.proto = ntohs(arp_key->arp_op);
+		memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
+		memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);
+	}
+
+	if (attrs)
+		return -EINVAL;
+	*key_lenp = key_len;
+
+	return 0;
 }
 
-u32 flow_hash(const struct odp_flow_key *key)
+/**
+ * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
+ * @flow: receives the extracted metadata: the in_port, priority, skb_mark and
+ * tunnel-key parts of its flow key are (re)initialized, and its flow hash is
+ * recomputed.
+ * @key_len: number of bytes in @flow's key, used to recompute the flow hash.
+ * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * sequence.
+ *
+ * This parses a series of Netlink attributes that form a flow key, which must
+ * take the same form accepted by ovs_flow_from_nlattrs(), but only enough of
+ * it to get the metadata, that is, the parts of the flow key that cannot be
+ * extracted from the packet itself.
+ */
+
+int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len, const struct nlattr *attr)
 {
-	return jhash2((u32*)key, sizeof *key / sizeof(u32), hash_seed);
+	struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key;
+	const struct nlattr *nla;
+	int rem;
+	__be64 tun_id = 0;
+
+	flow->key.phy.in_port = DP_MAX_PORTS;
+	flow->key.phy.priority = 0;
+	flow->key.phy.skb_mark = 0;
+	memset(tun_key, 0, sizeof(flow->key.tun_key));
+
+	nla_for_each_nested(nla, attr, rem) {
+		int type = nla_type(nla);
+
+		if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
+			if (nla_len(nla) != ovs_key_lens[type])
+				return -EINVAL;
+
+			switch (type) {
+			case OVS_KEY_ATTR_PRIORITY:
+				flow->key.phy.priority = nla_get_u32(nla);
+				break;
+
+			case OVS_KEY_ATTR_TUN_ID:
+				tun_id = nla_get_be64(nla);
+
+				if (tun_key->ipv4_dst) {
+					if (!(tun_key->tun_flags & OVS_TNL_F_KEY))
+						return -EINVAL;
+					if (tun_key->tun_id != tun_id)
+						return -EINVAL;
+					break;
+				}
+				tun_key->tun_id = tun_id;
+				tun_key->tun_flags |= OVS_TNL_F_KEY;
+
+				break;
+
+			case OVS_KEY_ATTR_IPV4_TUNNEL:
+				if (tun_key->tun_flags & OVS_TNL_F_KEY) {
+					tun_id = tun_key->tun_id;
+
+					memcpy(tun_key, nla_data(nla), sizeof(*tun_key));
+					if (!(tun_key->tun_flags & OVS_TNL_F_KEY))
+						return -EINVAL;
+
+					if (tun_key->tun_id != tun_id)
+						return -EINVAL;
+				} else
+					memcpy(tun_key, nla_data(nla), sizeof(*tun_key));
+
+				if (!tun_key->ipv4_dst)
+					return -EINVAL;
+				break;
+
+			case OVS_KEY_ATTR_IN_PORT:
+				if (nla_get_u32(nla) >= DP_MAX_PORTS)
+					return -EINVAL;
+				flow->key.phy.in_port = nla_get_u32(nla);
+				break;
+
+			case OVS_KEY_ATTR_SKB_MARK:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && !defined(CONFIG_NETFILTER)
+				if (nla_get_u32(nla) != 0)
+					return -EINVAL;
+#endif
+				flow->key.phy.skb_mark = nla_get_u32(nla);
+				break;
+			}
+		}
+	}
+	if (rem)
+		return -EINVAL;
+
+	flow->hash = ovs_flow_hash(&flow->key,
+				   flow_key_start(&flow->key), key_len);
+
+	return 0;
 }
 
-int flow_cmp(const struct tbl_node *node, void *key2_)
+int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
 {
-	const struct odp_flow_key *key1 = &flow_cast(node)->key;
-	const struct odp_flow_key *key2 = key2_;
+	struct ovs_key_ethernet *eth_key;
+	struct nlattr *nla, *encap;
+
+	if (swkey->phy.priority &&
+	    nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
+		goto nla_put_failure;
+
+	if (swkey->tun_key.ipv4_dst) {
+		struct ovs_key_ipv4_tunnel *tun_key;
+		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4_TUNNEL, sizeof(*tun_key));
+		if (!nla)
+			goto nla_put_failure;
+		tun_key = nla_data(nla);
+		memcpy(tun_key, &swkey->tun_key, sizeof(*tun_key));
+	}
+	if ((swkey->tun_key.tun_flags & OVS_TNL_F_KEY) &&
+	    nla_put_be64(skb, OVS_KEY_ATTR_TUN_ID, swkey->tun_key.tun_id))
+		goto nla_put_failure;
+
+	if (swkey->phy.in_port != DP_MAX_PORTS &&
+	    nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
+		goto nla_put_failure;
+
+	if (swkey->phy.skb_mark &&
+	    nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, swkey->phy.skb_mark))
+		goto nla_put_failure;
+
+	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
+	if (!nla)
+		goto nla_put_failure;
+	eth_key = nla_data(nla);
+	memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
+	memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);
+
+	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
+		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) ||
+		    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci))
+			goto nla_put_failure;
+		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
+		if (!swkey->eth.tci)
+			goto unencap;
+	} else {
+		encap = NULL;
+	}
+
+	if (swkey->eth.type == htons(ETH_P_802_2))
+		goto unencap;
+
+	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type))
+		goto nla_put_failure;
 
-	return !memcmp(key1, key2, sizeof(struct odp_flow_key));
+	if (swkey->eth.type == htons(ETH_P_IP)) {
+		struct ovs_key_ipv4 *ipv4_key;
+
+		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
+		if (!nla)
+			goto nla_put_failure;
+		ipv4_key = nla_data(nla);
+		ipv4_key->ipv4_src = swkey->ipv4.addr.src;
+		ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
+		ipv4_key->ipv4_proto = swkey->ip.proto;
+		ipv4_key->ipv4_tos = swkey->ip.tos;
+		ipv4_key->ipv4_ttl = swkey->ip.ttl;
+		ipv4_key->ipv4_frag = swkey->ip.frag;
+	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+		struct ovs_key_ipv6 *ipv6_key;
+
+		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
+		if (!nla)
+			goto nla_put_failure;
+		ipv6_key = nla_data(nla);
+		memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
+		       sizeof(ipv6_key->ipv6_src));
+		memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
+		       sizeof(ipv6_key->ipv6_dst));
+		ipv6_key->ipv6_label = swkey->ipv6.label;
+		ipv6_key->ipv6_proto = swkey->ip.proto;
+		ipv6_key->ipv6_tclass = swkey->ip.tos;
+		ipv6_key->ipv6_hlimit = swkey->ip.ttl;
+		ipv6_key->ipv6_frag = swkey->ip.frag;
+	} else if (swkey->eth.type == htons(ETH_P_ARP) ||
+		   swkey->eth.type == htons(ETH_P_RARP)) {
+		struct ovs_key_arp *arp_key;
+
+		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
+		if (!nla)
+			goto nla_put_failure;
+		arp_key = nla_data(nla);
+		memset(arp_key, 0, sizeof(struct ovs_key_arp));
+		arp_key->arp_sip = swkey->ipv4.addr.src;
+		arp_key->arp_tip = swkey->ipv4.addr.dst;
+		arp_key->arp_op = htons(swkey->ip.proto);
+		memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
+		memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
+	}
+
+	if ((swkey->eth.type == htons(ETH_P_IP) ||
+	     swkey->eth.type == htons(ETH_P_IPV6)) &&
+	     swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
+
+		if (swkey->ip.proto == IPPROTO_TCP) {
+			struct ovs_key_tcp *tcp_key;
+
+			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
+			if (!nla)
+				goto nla_put_failure;
+			tcp_key = nla_data(nla);
+			if (swkey->eth.type == htons(ETH_P_IP)) {
+				tcp_key->tcp_src = swkey->ipv4.tp.src;
+				tcp_key->tcp_dst = swkey->ipv4.tp.dst;
+			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+				tcp_key->tcp_src = swkey->ipv6.tp.src;
+				tcp_key->tcp_dst = swkey->ipv6.tp.dst;
+			}
+		} else if (swkey->ip.proto == IPPROTO_UDP) {
+			struct ovs_key_udp *udp_key;
+
+			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
+			if (!nla)
+				goto nla_put_failure;
+			udp_key = nla_data(nla);
+			if (swkey->eth.type == htons(ETH_P_IP)) {
+				udp_key->udp_src = swkey->ipv4.tp.src;
+				udp_key->udp_dst = swkey->ipv4.tp.dst;
+			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+				udp_key->udp_src = swkey->ipv6.tp.src;
+				udp_key->udp_dst = swkey->ipv6.tp.dst;
+			}
+		} else if (swkey->eth.type == htons(ETH_P_IP) &&
+			   swkey->ip.proto == IPPROTO_ICMP) {
+			struct ovs_key_icmp *icmp_key;
+
+			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
+			if (!nla)
+				goto nla_put_failure;
+			icmp_key = nla_data(nla);
+			icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
+			icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
+		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
+			   swkey->ip.proto == IPPROTO_ICMPV6) {
+			struct ovs_key_icmpv6 *icmpv6_key;
+
+			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
+						sizeof(*icmpv6_key));
+			if (!nla)
+				goto nla_put_failure;
+			icmpv6_key = nla_data(nla);
+			icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
+			icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);
+
+			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
+				struct ovs_key_nd *nd_key;
+
+				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
+				if (!nla)
+					goto nla_put_failure;
+				nd_key = nla_data(nla);
+				memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
+					sizeof(nd_key->nd_target));
+				memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
+				memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
+			}
+		}
+	}
+
+unencap:
+	if (encap)
+		nla_nest_end(skb, encap);
+
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
 }
 
 /* Initializes the flow module.
  * Returns zero if successful or a negative error code. */
-int flow_init(void)
+int ovs_flow_init(void)
 {
 	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
 					0, NULL);
 	if (flow_cache == NULL)
 		return -ENOMEM;
 
-	get_random_bytes(&hash_seed, sizeof hash_seed);
-
 	return 0;
 }
 
 /* Uninitializes the flow module. */
-void flow_exit(void)
+void ovs_flow_exit(void)
 {
 	kmem_cache_destroy(flow_cache);
 }
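
For readers who want to experiment with the EtherType logic outside the kernel: parse_ethertype() in the new code above applies the classic 802.3 rule, where a value of 1536 (0x600) or more in the two bytes after the MAC addresses is an EtherType, and anything smaller is an 802.3 length field whose payload may carry the real EtherType in an LLC/SNAP header (dsap = ssap = 0xAA, zero OUI). The standalone C sketch below mirrors that rule under those assumptions; mock_parse_ethertype, get_be16 and MOCK_ETH_P_802_2 are illustrative names of this sketch, not part of the datapath.

/* Userspace sketch of the parse_ethertype() rule above.  The buffer/length
 * pair stands in for the sk_buff; MOCK_ETH_P_802_2 mirrors the kernel's
 * ETH_P_802_2 "no usable EtherType" marker. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MOCK_ETH_P_802_2 0x0004
#define SNAP_SAP 0xAA

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

/* 'p' points at the type/length field that follows the MAC addresses.
 * Returns a host-order EtherType, or MOCK_ETH_P_802_2 if there is none. */
static uint16_t mock_parse_ethertype(const uint8_t *p, size_t len)
{
	uint16_t proto;

	if (len < 2)
		return MOCK_ETH_P_802_2;
	proto = get_be16(p);
	if (proto >= 1536)	/* 0x600: a genuine EtherType */
		return proto;

	/* 802.3 length: look for LLC/SNAP, AA AA 03 <00 00 00> <ethertype>. */
	if (len < 2 + 8 || p[2] != SNAP_SAP || p[3] != SNAP_SAP ||
	    (p[5] | p[6] | p[7]) != 0)
		return MOCK_ETH_P_802_2;
	return get_be16(p + 8);
}

int main(void)
{
	/* IPv4 (0x0800) carried directly (Ethernet II) and via LLC/SNAP. */
	uint8_t eth2[] = { 0x08, 0x00 };
	uint8_t snap[] = { 0x00, 0x2E, 0xAA, 0xAA, 0x03,
			   0x00, 0x00, 0x00, 0x08, 0x00 };

	printf("%#x\n", (unsigned)mock_parse_ethertype(eth2, sizeof(eth2)));
	printf("%#x\n", (unsigned)mock_parse_ethertype(snap, sizeof(snap)));
	return 0;
}

Both calls print 0x800. Note that in the kernel version the htons(0) return signals a pskb_may_pull() failure rather than a parsed EtherType, which ovs_flow_extract() then turns into -ENOMEM.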
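The flow-table indexing introduced by ovs_flow_hash() and find_bucket() above follows the same pattern: hash only the populated span of the key as 32-bit words, fold in a per-table random seed, and mask the result into a power-of-two bucket array. The sketch below is a minimal userspace rendering of that scheme under stated assumptions: mock_flow_key is a stub of struct sw_flow_key, mock_hash_words is a simple FNV-1a stand-in for the kernel's jhash2()/jhash_1word() (not the real functions), and N_BUCKETS and the seed value are arbitrary.

/* Userspace sketch of the flow-table indexing above: hash the populated
 * span of the key, seed it, and mask into a power-of-two bucket array. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define N_BUCKETS 1024		/* must stay a power of two */

struct mock_flow_key {		/* stub of struct sw_flow_key */
	uint32_t in_port;
	uint32_t src_ip, dst_ip;
	uint16_t src_port, dst_port;
};

/* FNV-1a over 32-bit words: a stand-in for jhash2(), not the real thing. */
static uint32_t mock_hash_words(const uint32_t *w, size_t n, uint32_t seed)
{
	uint32_t h = 2166136261u ^ seed;

	while (n-- > 0) {
		h ^= *w++;
		h *= 16777619u;
	}
	return h;
}

/* Mirrors ovs_flow_hash(): only bytes from key_start up to key_len take
 * part, rounded up to whole 32-bit words (DIV_ROUND_UP in the code above). */
static uint32_t mock_flow_hash(const struct mock_flow_key *key,
			       size_t key_start, size_t key_len, uint32_t seed)
{
	uint32_t words[(sizeof(struct mock_flow_key) + 3) / 4];
	size_t n = (key_len - key_start + 3) / 4;

	memset(words, 0, sizeof(words));
	memcpy(words, (const uint8_t *)key + key_start, key_len - key_start);
	return mock_hash_words(words, n, seed);
}

int main(void)
{
	struct mock_flow_key key;
	uint32_t hash;

	/* Zero the whole key first: the real lookup compares flows with
	 * memcmp(), so uninitialized padding would defeat it -- the same
	 * reason ovs_flow_extract() starts with memset(key, 0, ...). */
	memset(&key, 0, sizeof(key));
	key.in_port = 3;
	key.src_ip = 0x0a000001;	/* 10.0.0.1 */
	key.dst_ip = 0x0a000002;	/* 10.0.0.2 */
	key.src_port = 12345;
	key.dst_port = 80;

	hash = mock_flow_hash(&key, 0, sizeof(key), 0x9747b28cu);
	printf("hash %08x -> bucket %u\n", (unsigned)hash,
	       (unsigned)(hash & (N_BUCKETS - 1)));
	return 0;
}

The span is what keeps lookups cheap and correct in the code above: flow_key_start() skips the tunnel-key prefix unless the packet arrived over a tunnel, and key_len stops at the last protocol layer actually parsed, so two flows that differ only beyond key_len hash to the same bucket and compare equal under memcmp().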