X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=datapath%2Fflow.c;h=841e8be0eb9e1529519cbaaf2275104c23b1d4de;hb=d978fa4832bbc5176e05edd05bcdf2452a8dded2;hp=c70daeed7fa6c3cbe7b528f5fd8d5a74248e681a;hpb=7c690f462fec1af60d76c67fac226fc648ea9149;p=sliver-openvswitch.git diff --git a/datapath/flow.c b/datapath/flow.c index c70daeed7..841e8be0e 100644 --- a/datapath/flow.c +++ b/datapath/flow.c @@ -200,20 +200,18 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb) spin_unlock(&flow->lock); } -struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions) +struct sw_flow_actions *ovs_flow_actions_alloc(int size) { - int actions_len = nla_len(actions); struct sw_flow_actions *sfa; - if (actions_len > MAX_ACTIONS_BUFSIZE) + if (size > MAX_ACTIONS_BUFSIZE) return ERR_PTR(-EINVAL); - sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL); + sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); if (!sfa) return ERR_PTR(-ENOMEM); - sfa->actions_len = actions_len; - memcpy(sfa->actions, nla_data(actions), actions_len); + sfa->actions_len = 0; return sfa; } @@ -226,9 +224,7 @@ struct sw_flow *ovs_flow_alloc(void) return ERR_PTR(-ENOMEM); spin_lock_init(&flow->lock); - atomic_set(&flow->refcnt, 1); flow->sf_acts = NULL; - flow->dead = false; return flow; } @@ -290,12 +286,6 @@ struct flow_table *ovs_flow_tbl_alloc(int new_size) return table; } -static void flow_free(struct sw_flow *flow) -{ - flow->dead = true; - ovs_flow_put(flow); -} - void ovs_flow_tbl_destroy(struct flow_table *table) { int i; @@ -309,12 +299,12 @@ void ovs_flow_tbl_destroy(struct flow_table *table) for (i = 0; i < table->n_buckets; i++) { struct sw_flow *flow; struct hlist_head *head = flex_array_get(table->buckets, i); - struct hlist_node *node, *n; + struct hlist_node *n; int ver = table->node_ver; - hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) { + hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { hlist_del_rcu(&flow->hash_node[ver]); - flow_free(flow); + ovs_flow_free(flow); } } @@ -342,7 +332,6 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la { struct sw_flow *flow; struct hlist_head *head; - struct hlist_node *n; int ver; int i; @@ -350,7 +339,7 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la while (*bucket < table->n_buckets) { i = 0; head = flex_array_get(table->buckets, *bucket); - hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) { + hlist_for_each_entry_rcu(flow, head, hash_node[ver]) { if (i < *last) { i++; continue; @@ -385,11 +374,10 @@ static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new for (i = 0; i < old->n_buckets; i++) { struct sw_flow *flow; struct hlist_head *head; - struct hlist_node *n; head = flex_array_get(old->buckets, i); - hlist_for_each_entry(flow, n, head, hash_node[old_ver]) + hlist_for_each_entry(flow, head, hash_node[old_ver]) __flow_tbl_insert(new, flow); } old->keep_flows = true; @@ -418,13 +406,21 @@ struct flow_table *ovs_flow_tbl_expand(struct flow_table *table) return __flow_tbl_rehash(table, table->n_buckets * 2); } +void ovs_flow_free(struct sw_flow *flow) +{ + if (unlikely(!flow)) + return; + + kfree((struct sf_flow_acts __force *)flow->sf_acts); + kmem_cache_free(flow_cache, flow); +} + /* RCU callback used by ovs_flow_deferred_free. 
*/ static void rcu_free_flow_callback(struct rcu_head *rcu) { struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu); - flow->dead = true; - ovs_flow_put(flow); + ovs_flow_free(flow); } /* Schedules 'flow' to be freed after the next RCU grace period. @@ -434,22 +430,6 @@ void ovs_flow_deferred_free(struct sw_flow *flow) call_rcu(&flow->rcu, rcu_free_flow_callback); } -void ovs_flow_hold(struct sw_flow *flow) -{ - atomic_inc(&flow->refcnt); -} - -void ovs_flow_put(struct sw_flow *flow) -{ - if (unlikely(!flow)) - return; - - if (atomic_dec_and_test(&flow->refcnt)) { - kfree((struct sf_flow_acts __force *)flow->sf_acts); - kmem_cache_free(flow_cache, flow); - } -} - /* RCU callback used by ovs_flow_deferred_free_acts. */ static void rcu_free_acts_callback(struct rcu_head *rcu) { @@ -502,7 +482,7 @@ static __be16 parse_ethertype(struct sk_buff *skb) proto = *(__be16 *) skb->data; __skb_pull(skb, sizeof(__be16)); - if (ntohs(proto) >= 1536) + if (ntohs(proto) >= ETH_P_802_3_MIN) return proto; if (skb->len < sizeof(struct llc_snap_hdr)) @@ -518,7 +498,11 @@ static __be16 parse_ethertype(struct sk_buff *skb) return htons(ETH_P_802_2); __skb_pull(skb, sizeof(struct llc_snap_hdr)); - return llc->ethertype; + + if (ntohs(llc->ethertype) >= ETH_P_802_3_MIN) + return llc->ethertype; + + return htons(ETH_P_802_2); } static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, @@ -638,8 +622,9 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, key->phy.priority = skb->priority; if (OVS_CB(skb)->tun_key) - memcpy(&key->phy.tun.tun_key, OVS_CB(skb)->tun_key, sizeof(key->phy.tun.tun_key)); + memcpy(&key->tun_key, OVS_CB(skb)->tun_key, sizeof(key->tun_key)); key->phy.in_port = in_port; + key->phy.skb_mark = skb_get_mark(skb); skb_reset_mac_header(skb); @@ -725,7 +710,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, } } - } else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) { + } else if ((key->eth.type == htons(ETH_P_ARP) || + key->eth.type == htons(ETH_P_RARP)) && arphdr_ok(skb)) { struct arp_eth_header *arp; arp = (struct arp_eth_header *)skb_network_header(skb); @@ -799,17 +785,16 @@ static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, int key_l static int flow_key_start(struct sw_flow_key *key) { - if (key->phy.tun.tun_key.ipv4_dst) + if (key->tun_key.ipv4_dst) return 0; else - return offsetof(struct sw_flow_key, phy.priority); + return offsetof(struct sw_flow_key, phy); } struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table, struct sw_flow_key *key, int key_len) { struct sw_flow *flow; - struct hlist_node *n; struct hlist_head *head; u8 *_key; int key_start; @@ -820,7 +805,7 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table, _key = (u8 *) key + key_start; head = find_bucket(table, hash); - hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) { + hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) { if (flow->hash == hash && !memcmp((u8 *)&flow->key + key_start, _key, key_len - key_start)) { @@ -840,9 +825,9 @@ void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) { + BUG_ON(table->count == 0); hlist_del_rcu(&flow->hash_node[table->node_ver]); table->count--; - BUG_ON(table->count < 0); } /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. 
*/ @@ -850,6 +835,7 @@ const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_ENCAP] = -1, [OVS_KEY_ATTR_PRIORITY] = sizeof(u32), [OVS_KEY_ATTR_IN_PORT] = sizeof(u32), + [OVS_KEY_ATTR_SKB_MARK] = sizeof(u32), [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet), [OVS_KEY_ATTR_VLAN] = sizeof(__be16), [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16), @@ -861,10 +847,7 @@ const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6), [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp), [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd), - - /* Not upstream. */ - [OVS_KEY_ATTR_TUN_ID] = sizeof(__be64), - [OVS_KEY_ATTR_IPV4_TUNNEL] = sizeof(struct ovs_key_ipv4_tunnel), + [OVS_KEY_ATTR_TUNNEL] = -1, }; static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len, @@ -1002,6 +985,105 @@ static int parse_flow_nlattrs(const struct nlattr *attr, return 0; } +int ipv4_tun_from_nlattr(const struct nlattr *attr, + struct ovs_key_ipv4_tunnel *tun_key) +{ + struct nlattr *a; + int rem; + bool ttl = false; + + memset(tun_key, 0, sizeof(*tun_key)); + + nla_for_each_nested(a, attr, rem) { + int type = nla_type(a); + static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = { + [OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64), + [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32), + [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32), + [OVS_TUNNEL_KEY_ATTR_TOS] = 1, + [OVS_TUNNEL_KEY_ATTR_TTL] = 1, + [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0, + [OVS_TUNNEL_KEY_ATTR_CSUM] = 0, + }; + + if (type > OVS_TUNNEL_KEY_ATTR_MAX || + ovs_tunnel_key_lens[type] != nla_len(a)) + return -EINVAL; + + switch (type) { + case OVS_TUNNEL_KEY_ATTR_ID: + tun_key->tun_id = nla_get_be64(a); + tun_key->tun_flags |= OVS_TNL_F_KEY; + break; + case OVS_TUNNEL_KEY_ATTR_IPV4_SRC: + tun_key->ipv4_src = nla_get_be32(a); + break; + case OVS_TUNNEL_KEY_ATTR_IPV4_DST: + tun_key->ipv4_dst = nla_get_be32(a); + break; + case OVS_TUNNEL_KEY_ATTR_TOS: + tun_key->ipv4_tos = nla_get_u8(a); + break; + case OVS_TUNNEL_KEY_ATTR_TTL: + tun_key->ipv4_ttl = nla_get_u8(a); + ttl = true; + break; + case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT: + tun_key->tun_flags |= OVS_TNL_F_DONT_FRAGMENT; + break; + case OVS_TUNNEL_KEY_ATTR_CSUM: + tun_key->tun_flags |= OVS_TNL_F_CSUM; + break; + default: + return -EINVAL; + + } + } + if (rem > 0) + return -EINVAL; + + if (!tun_key->ipv4_dst) + return -EINVAL; + + if (!ttl) + return -EINVAL; + + return 0; +} + +int ipv4_tun_to_nlattr(struct sk_buff *skb, + const struct ovs_key_ipv4_tunnel *tun_key) +{ + struct nlattr *nla; + + nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL); + if (!nla) + return -EMSGSIZE; + + if (tun_key->tun_flags & OVS_TNL_F_KEY && + nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id)) + return -EMSGSIZE; + if (tun_key->ipv4_src && + nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ipv4_src)) + return -EMSGSIZE; + if (nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ipv4_dst)) + return -EMSGSIZE; + if (tun_key->ipv4_tos && + nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ipv4_tos)) + return -EMSGSIZE; + if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ipv4_ttl)) + return -EMSGSIZE; + if ((tun_key->tun_flags & OVS_TNL_F_DONT_FRAGMENT) && + nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT)) + return -EMSGSIZE; + if ((tun_key->tun_flags & OVS_TNL_F_CSUM) && + nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM)) + return -EMSGSIZE; + + nla_nest_end(skb, nla); + return 0; +} + /** * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key. 
* @swkey: receives the extracted flow key. @@ -1039,40 +1121,22 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, } else { swkey->phy.in_port = DP_MAX_PORTS; } - - if (attrs & (1ULL << OVS_KEY_ATTR_TUN_ID) && - attrs & (1ULL << OVS_KEY_ATTR_IPV4_TUNNEL)) { - struct ovs_key_ipv4_tunnel *tun_key; - __be64 tun_id; - - tun_key = nla_data(a[OVS_KEY_ATTR_IPV4_TUNNEL]); - - if (!tun_key->ipv4_dst) + if (attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) { + uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && !defined(CONFIG_NETFILTER) + if (mark != 0) return -EINVAL; - if (!(tun_key->tun_flags & OVS_FLOW_TNL_F_KEY)) - return -EINVAL; - - tun_id = nla_get_be64(a[OVS_KEY_ATTR_TUN_ID]); - if (tun_id != tun_key->tun_id) - return -EINVAL; - - memcpy(&swkey->phy.tun.tun_key, tun_key, sizeof(swkey->phy.tun.tun_key)); - attrs &= ~(1ULL << OVS_KEY_ATTR_TUN_ID); - attrs &= ~(1ULL << OVS_KEY_ATTR_IPV4_TUNNEL); - } else if (attrs & (1ULL << OVS_KEY_ATTR_TUN_ID)) { - swkey->phy.tun.tun_key.tun_id = nla_get_be64(a[OVS_KEY_ATTR_TUN_ID]); - swkey->phy.tun.tun_key.tun_flags |= OVS_FLOW_TNL_F_KEY; +#endif + swkey->phy.skb_mark = mark; + attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK); + } - attrs &= ~(1ULL << OVS_KEY_ATTR_TUN_ID); - } else if (attrs & (1ULL << OVS_KEY_ATTR_IPV4_TUNNEL)) { - struct ovs_key_ipv4_tunnel *tun_key; - tun_key = nla_data(a[OVS_KEY_ATTR_IPV4_TUNNEL]); + if (attrs & (1ULL << OVS_KEY_ATTR_TUNNEL)) { + err = ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], &swkey->tun_key); + if (err) + return err; - if (!tun_key->ipv4_dst) - return -EINVAL; - - memcpy(&swkey->phy.tun.tun_key, tun_key, sizeof(swkey->phy.tun.tun_key)); - attrs &= ~(1ULL << OVS_KEY_ATTR_IPV4_TUNNEL); + attrs &= ~(1ULL << OVS_KEY_ATTR_TUNNEL); } /* Data attributes. 
*/ @@ -1117,7 +1181,7 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) { swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]); - if (ntohs(swkey->eth.type) < 1536) + if (ntohs(swkey->eth.type) < ETH_P_802_3_MIN) return -EINVAL; attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); } else { @@ -1173,7 +1237,8 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, if (err) return err; } - } else if (swkey->eth.type == htons(ETH_P_ARP)) { + } else if (swkey->eth.type == htons(ETH_P_ARP) || + swkey->eth.type == htons(ETH_P_RARP)) { const struct ovs_key_arp *arp_key; if (!(attrs & (1 << OVS_KEY_ATTR_ARP))) @@ -1213,19 +1278,21 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len, const struct nlattr *attr) { - struct ovs_key_ipv4_tunnel *tun_key = &flow->key.phy.tun.tun_key; + struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key; const struct nlattr *nla; int rem; - __be64 tun_id = 0; flow->key.phy.in_port = DP_MAX_PORTS; flow->key.phy.priority = 0; - memset(tun_key, 0, sizeof(flow->key.phy.tun.tun_key)); + flow->key.phy.skb_mark = 0; + memset(tun_key, 0, sizeof(flow->key.tun_key)); nla_for_each_nested(nla, attr, rem) { int type = nla_type(nla); if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) { + int err; + if (nla_len(nla) != ovs_key_lens[type]) return -EINVAL; @@ -1234,36 +1301,10 @@ int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len, const stru flow->key.phy.priority = nla_get_u32(nla); break; - case OVS_KEY_ATTR_TUN_ID: - tun_id = nla_get_be64(nla); - - if (tun_key->ipv4_dst) { - if (!(tun_key->tun_flags & OVS_FLOW_TNL_F_KEY)) - return -EINVAL; - if (tun_key->tun_id != tun_id) - return -EINVAL; - break; - } - tun_key->tun_id = tun_id; - tun_key->tun_flags |= OVS_FLOW_TNL_F_KEY; - - break; - - case OVS_KEY_ATTR_IPV4_TUNNEL: - if (tun_key->tun_flags & OVS_FLOW_TNL_F_KEY) { - tun_id = tun_key->tun_id; - - memcpy(tun_key, nla_data(nla), sizeof(*tun_key)); - if (!(tun_key->tun_flags & OVS_FLOW_TNL_F_KEY)) - return -EINVAL; - - if (tun_key->tun_id != tun_id) - return -EINVAL; - } else - memcpy(tun_key, nla_data(nla), sizeof(*tun_key)); - - if (!tun_key->ipv4_dst) - return -EINVAL; + case OVS_KEY_ATTR_TUNNEL: + err = ipv4_tun_from_nlattr(nla, tun_key); + if (err) + return err; break; case OVS_KEY_ATTR_IN_PORT: @@ -1271,6 +1312,14 @@ int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len, const stru return -EINVAL; flow->key.phy.in_port = nla_get_u32(nla); break; + + case OVS_KEY_ATTR_SKB_MARK: +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && !defined(CONFIG_NETFILTER) + if (nla_get_u32(nla) != 0) + return -EINVAL; +#endif + flow->key.phy.skb_mark = nla_get_u32(nla); + break; } } } @@ -1292,22 +1341,18 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority)) goto nla_put_failure; - if (swkey->phy.tun.tun_key.ipv4_dst) { - struct ovs_key_ipv4_tunnel *tun_key; - nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4_TUNNEL, sizeof(*tun_key)); - if (!nla) - goto nla_put_failure; - tun_key = nla_data(nla); - memcpy(tun_key, &swkey->phy.tun.tun_key, sizeof(*tun_key)); - } - if ((swkey->phy.tun.tun_key.tun_flags & OVS_FLOW_TNL_F_KEY) && - nla_put_be64(skb, OVS_KEY_ATTR_TUN_ID, swkey->phy.tun.tun_key.tun_id)) + if (swkey->tun_key.ipv4_dst && + ipv4_tun_to_nlattr(skb, &swkey->tun_key)) goto nla_put_failure; if (swkey->phy.in_port != 
DP_MAX_PORTS && nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port)) goto nla_put_failure; + if (swkey->phy.skb_mark && + nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, swkey->phy.skb_mark)) + goto nla_put_failure; + nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key)); if (!nla) goto nla_put_failure; @@ -1361,7 +1406,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) ipv6_key->ipv6_tclass = swkey->ip.tos; ipv6_key->ipv6_hlimit = swkey->ip.ttl; ipv6_key->ipv6_frag = swkey->ip.frag; - } else if (swkey->eth.type == htons(ETH_P_ARP)) { + } else if (swkey->eth.type == htons(ETH_P_ARP) || + swkey->eth.type == htons(ETH_P_RARP)) { struct ovs_key_arp *arp_key; nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
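
Editor's note, not part of the patch: the hunks above drop the per-flow refcount (ovs_flow_hold()/ovs_flow_put() and the 'dead' flag) in favour of a plain ovs_flow_free(), called directly when a whole table is torn down and otherwise deferred through call_rcu() via rcu_free_flow_callback(). A minimal caller sketch of that lifecycle follows, assuming the declarations in datapath/flow.h and the usual rcu_read_lock() rules for flow-table readers; example_remove_flow() is a hypothetical name used only for illustration.

static void example_remove_flow(struct flow_table *table, struct sw_flow *flow)
{
	/* Unlink the flow from its hash bucket (hash_node[table->node_ver]).
	 * Readers may still be walking the bucket under rcu_read_lock(). */
	ovs_flow_tbl_remove(table, flow);

	/* Free the flow and its sf_acts only after the current RCU grace
	 * period, via rcu_free_flow_callback() -> ovs_flow_free(). */
	ovs_flow_deferred_free(flow);
}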