X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=datapath%2Fflow.c;h=407535063c717d6b0a48e2278375a10ed12b86c1;hb=591cb419cf3694e0ae66a95973e73c61bad9e03d;hp=3a7bc9b47c57fe5ba330121d19dcc394dc8bab6e;hpb=1a02b7689b01db7fff2cda87ac311389ff524283;p=sliver-openvswitch.git

diff --git a/datapath/flow.c b/datapath/flow.c
index 3a7bc9b47..407535063 100644
--- a/datapath/flow.c
+++ b/datapath/flow.c
@@ -82,8 +82,9 @@ static void update_range__(struct sw_flow_match *match,
 	do { \
 		update_range__(match, offsetof(struct sw_flow_key, field), \
 				     sizeof((match)->key->field), is_mask); \
-		if (is_mask && match->mask != NULL) { \
-			(match)->mask->key.field = value; \
+		if (is_mask) { \
+			if ((match)->mask) \
+				(match)->mask->key.field = value; \
 		} else { \
 			(match)->key->field = value; \
 		} \
@@ -93,8 +94,9 @@ static void update_range__(struct sw_flow_match *match,
 	do { \
 		update_range__(match, offsetof(struct sw_flow_key, field), \
 				len, is_mask); \
-		if (is_mask && match->mask != NULL) { \
-			memcpy(&(match)->mask->key.field, value_p, len); \
+		if (is_mask) { \
+			if ((match)->mask) \
+				memcpy(&(match)->mask->key.field, value_p, len);\
 		} else { \
 			memcpy(&(match)->key->field, value_p, len); \
 		} \
@@ -133,9 +135,10 @@ static bool ovs_match_validate(const struct sw_flow_match *match,
 			| (1ULL << OVS_KEY_ATTR_ARP)
 			| (1ULL << OVS_KEY_ATTR_ND));
 
-	if (match->key->eth.type == htons(ETH_P_802_2) &&
-	    match->mask && (match->mask->key.eth.type == htons(0xffff)))
-		mask_allowed |= (1ULL << OVS_KEY_ATTR_ETHERTYPE);
+	/* Always allowed mask fields. */
+	mask_allowed |= ((1ULL << OVS_KEY_ATTR_TUNNEL)
+		       | (1ULL << OVS_KEY_ATTR_IN_PORT)
+		       | (1ULL << OVS_KEY_ATTR_ETHERTYPE));
 
 	/* Check key attributes. */
 	if (match->key->eth.type == htons(ETH_P_ARP)
@@ -205,13 +208,19 @@ static bool ovs_match_validate(const struct sw_flow_match *match,
 		}
 	}
 
-	if ((key_attrs & key_expected) != key_expected)
+	if ((key_attrs & key_expected) != key_expected) {
 		/* Key attributes check failed. */
+		OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n",
+				key_attrs, key_expected);
 		return false;
+	}
 
-	if ((mask_attrs & mask_allowed) != mask_attrs)
+	if ((mask_attrs & mask_allowed) != mask_attrs) {
 		/* Mask attributes check failed. */
+		OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n",
+				mask_attrs, mask_allowed);
 		return false;
+	}
 
 	return true;
 }
@@ -339,9 +348,8 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
 				  sizeof(struct icmp6hdr));
 }
 
-static void flow_key_mask(struct sw_flow_key *dst,
-			  const struct sw_flow_key *src,
-			  const struct sw_flow_mask *mask)
+void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
+		       const struct sw_flow_mask *mask)
 {
 	u8 *m = (u8 *)&mask->key + mask->range.start;
 	u8 *s = (u8 *)src + mask->range.start;
@@ -420,7 +428,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
 	struct flex_array *buckets;
 	int i, err;
 
-	buckets = flex_array_alloc(sizeof(struct hlist_head *),
+	buckets = flex_array_alloc(sizeof(struct hlist_head),
 				   n_buckets, GFP_KERNEL);
 	if (!buckets)
 		return NULL;
@@ -443,7 +451,7 @@ static void free_buckets(struct flex_array *buckets)
 	flex_array_free(buckets);
 }
 
-struct flow_table *ovs_flow_tbl_alloc(int new_size)
+static struct flow_table *__flow_tbl_alloc(int new_size)
 {
 	struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
 
@@ -461,7 +469,7 @@ struct flow_table *ovs_flow_tbl_alloc(int new_size)
 	table->node_ver = 0;
 	table->keep_flows = false;
 	get_random_bytes(&table->hash_seed, sizeof(u32));
-	INIT_LIST_HEAD(&table->mask_list);
+	table->mask_list = NULL;
 
 	return table;
 }
@@ -480,16 +488,37 @@ static void __flow_tbl_destroy(struct flow_table *table)
 		int ver = table->node_ver;
 
 		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
-			hlist_del_rcu(&flow->hash_node[ver]);
+			hlist_del(&flow->hash_node[ver]);
 			ovs_flow_free(flow, false);
 		}
 	}
 
+	BUG_ON(!list_empty(table->mask_list));
+	kfree(table->mask_list);
+
 skip_flows:
 	free_buckets(table->buckets);
 	kfree(table);
 }
 
+struct flow_table *ovs_flow_tbl_alloc(int new_size)
+{
+	struct flow_table *table = __flow_tbl_alloc(new_size);
+
+	if (!table)
+		return NULL;
+
+	table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+	if (!table->mask_list) {
+		table->keep_flows = true;
+		__flow_tbl_destroy(table);
+		return NULL;
+	}
+	INIT_LIST_HEAD(table->mask_list);
+
+	return table;
+}
+
 static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
 {
 	struct flow_table *table = container_of(rcu, struct flow_table, rcu);
@@ -571,7 +600,7 @@ static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buck
 {
 	struct flow_table *new_table;
 
-	new_table = ovs_flow_tbl_alloc(n_buckets);
+	new_table = __flow_tbl_alloc(n_buckets);
 	if (!new_table)
 		return ERR_PTR(-ENOMEM);
 
@@ -608,8 +637,7 @@ void ovs_flow_free(struct sw_flow *flow, bool deferred)
 	if (!flow)
 		return;
 
-	ovs_sw_flow_mask_del_ref((struct sw_flow_mask __force *)flow->mask,
-				 deferred);
+	ovs_sw_flow_mask_del_ref(flow->mask, deferred);
 
 	if (deferred)
 		call_rcu(&flow->rcu, rcu_free_flow_callback);
@@ -1014,11 +1042,12 @@ static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table,
 	u32 hash;
 	struct sw_flow_key masked_key;
 
-	flow_key_mask(&masked_key, flow_key, mask);
+	ovs_flow_key_mask(&masked_key, flow_key, mask);
 	hash = ovs_flow_hash(&masked_key, key_start, key_len);
 	head = find_bucket(table, hash);
 	hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
-		if (__flow_cmp_key(flow, &masked_key, key_start, key_len))
+		if (flow->mask == mask &&
+		    __flow_cmp_key(flow, &masked_key, key_start, key_len))
 			return flow;
 	}
 	return NULL;
@@ -1030,7 +1059,7 @@ struct sw_flow *ovs_flow_lookup(struct flow_table *tbl,
 	struct sw_flow *flow = NULL;
 	struct sw_flow_mask *mask;
 
-	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
+	list_for_each_entry_rcu(mask, tbl->mask_list, list) {
 		flow = ovs_masked_flow_lookup(tbl, key, mask);
 		if (flow)  /* Found */
 			break;
@@ -1040,14 +1069,10 @@ struct sw_flow *ovs_flow_lookup(struct flow_table *tbl,
 }
 
-void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow,
-		     const struct sw_flow_key *key, int key_len)
+void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow)
 {
-	flow->unmasked_key = *key;
-	flow_key_mask(&flow->key, &flow->unmasked_key, ovsl_dereference(flow->mask));
-	flow->hash = ovs_flow_hash(&flow->key,
-			ovsl_dereference(flow->mask)->range.start,
-			ovsl_dereference(flow->mask)->range.end);
+	flow->hash = ovs_flow_hash(&flow->key, flow->mask->range.start,
+				   flow->mask->range.end);
 	__tbl_insert(table, flow);
 }
@@ -1105,24 +1130,33 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
 		u16 type = nla_type(nla);
 		int expected_len;
 
-		if (type > OVS_KEY_ATTR_MAX || attrs & (1ULL << type))
-			return -EINVAL;
+		if (type > OVS_KEY_ATTR_MAX) {
+			OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n",
+				  type, OVS_KEY_ATTR_MAX);
+		}
 
-		expected_len = ovs_key_lens[type];
-		if (nla_len(nla) != expected_len && expected_len != -1)
+		if (attrs & (1ULL << type)) {
+			OVS_NLERR("Duplicate key attribute (type %d).\n", type);
 			return -EINVAL;
+		}
 
-		if (attrs & (1ULL << type))
-			/* Duplicated field. */
+		expected_len = ovs_key_lens[type];
+		if (nla_len(nla) != expected_len && expected_len != -1) {
+			OVS_NLERR("Key attribute has unexpected length (type=%d"
+				  ", length=%d, expected=%d).\n", type,
+				  nla_len(nla), expected_len);
 			return -EINVAL;
+		}
 
 		if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
 			attrs |= 1ULL << type;
 			a[type] = nla;
 		}
 	}
-	if (rem)
+	if (rem) {
+		OVS_NLERR("Message has %d unknown bytes.\n", rem);
 		return -EINVAL;
+	}
 
 	*attrsp = attrs;
 	return 0;
@@ -1140,13 +1174,13 @@ static int parse_flow_nlattrs(const struct nlattr *attr,
 	return __parse_flow_nlattrs(attr, a, attrsp, false);
 }
 
-int ipv4_tun_from_nlattr(const struct nlattr *attr,
-			 struct sw_flow_match *match, bool is_mask)
+int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr,
+			     struct sw_flow_match *match, bool is_mask)
 {
 	struct nlattr *a;
 	int rem;
 	bool ttl = false;
-	u16 tun_flags = 0;
+	__be16 tun_flags = 0;
 
 	nla_for_each_nested(a, attr, rem) {
 		int type = nla_type(a);
@@ -1160,15 +1194,24 @@ int ipv4_tun_from_nlattr(const struct nlattr *attr,
 			[OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
 		};
 
-		if (type > OVS_TUNNEL_KEY_ATTR_MAX ||
-		    ovs_tunnel_key_lens[type] != nla_len(a))
+		if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
+			OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n",
+				  type, OVS_TUNNEL_KEY_ATTR_MAX);
 			return -EINVAL;
+		}
+
+		if (ovs_tunnel_key_lens[type] != nla_len(a)) {
+			OVS_NLERR("IPv4 tunnel attribute type has unexpected "
+				  " length (type=%d, length=%d, expected=%d).\n",
+				  type, nla_len(a), ovs_tunnel_key_lens[type]);
+			return -EINVAL;
+		}
 
 		switch (type) {
 		case OVS_TUNNEL_KEY_ATTR_ID:
 			SW_FLOW_KEY_PUT(match, tun_key.tun_id,
 					nla_get_be64(a), is_mask);
-			tun_flags |= OVS_TNL_F_KEY;
+			tun_flags |= TUNNEL_KEY;
 			break;
 		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
 			SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
@@ -1188,10 +1231,10 @@ int ipv4_tun_from_nlattr(const struct nlattr *attr,
 			ttl = true;
 			break;
 		case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
-			tun_flags |= OVS_TNL_F_DONT_FRAGMENT;
+			tun_flags |= TUNNEL_DONT_FRAGMENT;
 			break;
 		case OVS_TUNNEL_KEY_ATTR_CSUM:
-			tun_flags |= OVS_TNL_F_CSUM;
+			tun_flags |= TUNNEL_CSUM;
 			break;
 		default:
 			return -EINVAL;
@@ -1200,21 +1243,29 @@ int ipv4_tun_from_nlattr(const struct nlattr *attr,
 	SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
 
-	if (rem > 0)
+	if (rem > 0) {
+		OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem);
 		return -EINVAL;
+	}
 
-	if (!match->key->tun_key.ipv4_dst)
-		return -EINVAL;
+	if (!is_mask) {
+		if (!match->key->tun_key.ipv4_dst) {
+			OVS_NLERR("IPv4 tunnel destination address is zero.\n");
+			return -EINVAL;
+		}
 
-	if (!ttl)
-		return -EINVAL;
+		if (!ttl) {
+			OVS_NLERR("IPv4 tunnel TTL not specified.\n");
+			return -EINVAL;
+		}
+	}
 
 	return 0;
 }
 
-int ipv4_tun_to_nlattr(struct sk_buff *skb,
-		       const struct ovs_key_ipv4_tunnel *tun_key,
-		       const struct ovs_key_ipv4_tunnel *output)
+int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
+			   const struct ovs_key_ipv4_tunnel *tun_key,
+			   const struct ovs_key_ipv4_tunnel *output)
 {
 	struct nlattr *nla;
 
@@ -1222,23 +1273,24 @@ int ipv4_tun_to_nlattr(struct sk_buff *skb,
 	if (!nla)
 		return -EMSGSIZE;
 
-	if (tun_key->tun_flags & OVS_TNL_F_KEY &&
+	if (output->tun_flags & TUNNEL_KEY &&
 	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
 		return -EMSGSIZE;
-	if (tun_key->ipv4_src &&
-	    nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
+	if (output->ipv4_src &&
+	    nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
 		return -EMSGSIZE;
-	if (nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
+	if (output->ipv4_dst &&
+	    nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
 		return -EMSGSIZE;
-	if (tun_key->ipv4_tos &&
-	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
+	if (output->ipv4_tos &&
+	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
 		return -EMSGSIZE;
 	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
 		return -EMSGSIZE;
-	if ((tun_key->tun_flags & OVS_TNL_F_DONT_FRAGMENT) &&
+	if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
 	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
 		return -EMSGSIZE;
-	if ((tun_key->tun_flags & OVS_TNL_F_CSUM) &&
+	if ((output->tun_flags & TUNNEL_CSUM) &&
 	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
 		return -EMSGSIZE;
 
@@ -1259,23 +1311,30 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
 	if (*attrs & (1ULL << OVS_KEY_ATTR_IN_PORT)) {
 		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
 
-		if (!is_mask && in_port >= DP_MAX_PORTS)
+		if (is_mask)
+			in_port = 0xffffffff; /* Always exact match in_port. */
+		else if (in_port >= DP_MAX_PORTS)
 			return -EINVAL;
+
 		SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
 		*attrs &= ~(1ULL << OVS_KEY_ATTR_IN_PORT);
+	} else if (!is_mask) {
+		SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
 	}
 
 	if (*attrs & (1ULL << OVS_KEY_ATTR_SKB_MARK)) {
 		uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && !defined(CONFIG_NETFILTER)
-		if (!is_mask && mark != 0)
+		if (!is_mask && mark != 0) {
+			OVS_NLERR("skb->mark must be zero on this kernel (mark=%d).\n", mark);
 			return -EINVAL;
+		}
 #endif
 		SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
 		*attrs &= ~(1ULL << OVS_KEY_ATTR_SKB_MARK);
 	}
 
 	if (*attrs & (1ULL << OVS_KEY_ATTR_TUNNEL)) {
-		if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
+		if (ovs_ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
 					 is_mask))
 			return -EINVAL;
 		*attrs &= ~(1ULL << OVS_KEY_ATTR_TUNNEL);
@@ -1287,6 +1346,7 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 		const struct nlattr **a, bool is_mask)
 {
 	int err;
+	u64 orig_attrs = attrs;
 
 	err = metadata_from_nlattrs(match, &attrs, a, is_mask);
 	if (err)
@@ -1307,19 +1367,32 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 		__be16 tci;
 
 		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
-		if (!is_mask && (tci & htons(VLAN_TAG_PRESENT)))
+		if (!(tci & htons(VLAN_TAG_PRESENT))) {
+			if (is_mask)
+				OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n");
+			else
+				OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n");
+
 			return -EINVAL;
+		}
 
 		SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
 		attrs &= ~(1ULL << OVS_KEY_ATTR_VLAN);
-	}
+	} else if (!is_mask)
+		SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);
 
 	if (attrs & (1ULL << OVS_KEY_ATTR_ETHERTYPE)) {
 		__be16 eth_type;
 
 		eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
-		if (!is_mask && ntohs(eth_type) < ETH_P_802_3_MIN)
+		if (is_mask) {
+			/* Always exact match EtherType. */
+			eth_type = htons(0xffff);
+		} else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
+			OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n",
+					ntohs(eth_type), ETH_P_802_3_MIN);
 			return -EINVAL;
+		}
 
 		SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
 		attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERTYPE);
@@ -1331,8 +1404,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 		const struct ovs_key_ipv4 *ipv4_key;
 
 		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
-		if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
+		if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
+			OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n",
+				  ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
 			return -EINVAL;
+		}
 		SW_FLOW_KEY_PUT(match, ip.proto,
 				ipv4_key->ipv4_proto, is_mask);
 		SW_FLOW_KEY_PUT(match, ip.tos,
@@ -1352,8 +1428,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 		const struct ovs_key_ipv6 *ipv6_key;
 
 		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
-		if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
+		if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
+			OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n",
+				  ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
 			return -EINVAL;
+		}
 		SW_FLOW_KEY_PUT(match, ipv6.label,
 				ipv6_key->ipv6_label, is_mask);
 		SW_FLOW_KEY_PUT(match, ip.proto,
@@ -1380,8 +1459,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 		const struct ovs_key_arp *arp_key;
 
 		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
-		if (!is_mask && (arp_key->arp_op & htons(0xff00)))
+		if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
+			OVS_NLERR("Unknown ARP opcode (opcode=%d).\n",
+				  arp_key->arp_op);
 			return -EINVAL;
+		}
 
 		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
 				arp_key->arp_sip, is_mask);
@@ -1401,10 +1483,17 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 		const struct ovs_key_tcp *tcp_key;
 
 		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
-		SW_FLOW_KEY_PUT(match, ipv4.tp.src,
-				tcp_key->tcp_src, is_mask);
-		SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
-				tcp_key->tcp_dst, is_mask);
+		if (orig_attrs & (1ULL << OVS_KEY_ATTR_IPV4)) {
+			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+					tcp_key->tcp_src, is_mask);
+			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+					tcp_key->tcp_dst, is_mask);
+		} else {
+			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+					tcp_key->tcp_src, is_mask);
+			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+					tcp_key->tcp_dst, is_mask);
+		}
 		attrs &= ~(1ULL << OVS_KEY_ATTR_TCP);
 	}
@@ -1412,10 +1501,17 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 		const struct ovs_key_udp *udp_key;
 
 		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
-		SW_FLOW_KEY_PUT(match, ipv4.tp.src,
-				udp_key->udp_src, is_mask);
-		SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
-				udp_key->udp_dst, is_mask);
+		if (orig_attrs & (1ULL << OVS_KEY_ATTR_IPV4)) {
+			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+					udp_key->udp_src, is_mask);
+			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+					udp_key->udp_dst, is_mask);
+		} else {
+			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+					udp_key->udp_src, is_mask);
+			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+					udp_key->udp_dst, is_mask);
+		}
 		attrs &= ~(1ULL << OVS_KEY_ATTR_UDP);
 	}
@@ -1502,8 +1598,10 @@ int ovs_match_from_nlattrs(struct sw_flow_match *match,
 			encap_valid = true;
 			key_attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERTYPE);
 			err = parse_flow_nlattrs(encap, a, &key_attrs);
-		} else
+		} else {
+			OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n");
 			err = -EINVAL;
+		}
 
 		if (err)
 			return err;
@@ -1522,14 +1620,19 @@ int ovs_match_from_nlattrs(struct sw_flow_match *match,
 		if ((mask_attrs & 1ULL << OVS_KEY_ATTR_ENCAP) && encap_valid) {
 			__be16 eth_type = 0;
 
+			mask_attrs &= ~(1ULL << OVS_KEY_ATTR_ENCAP);
 			if (a[OVS_KEY_ATTR_ETHERTYPE])
 				eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
 			if (eth_type == htons(0xffff)) {
 				mask_attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERTYPE);
 				encap = a[OVS_KEY_ATTR_ENCAP];
 				err = parse_flow_mask_nlattrs(encap, a, &mask_attrs);
-			} else
+			} else {
+				OVS_NLERR("VLAN frames must have an exact match"
+					  " on the TPID (mask=%x).\n",
+					  ntohs(eth_type));
 				err = -EINVAL;
+			}
 
 			if (err)
 				return err;
@@ -1580,7 +1683,8 @@ int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow,
 	if (err)
 		return -EINVAL;
 
-	ovs_match_init(&match, &flow->key, NULL);
+	memset(&match, 0, sizeof(match));
+	match.key = &flow->key;
 
 	err = metadata_from_nlattrs(&match, &attrs, a, false);
 	if (err)
@@ -1594,27 +1698,29 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey,
 {
 	struct ovs_key_ethernet *eth_key;
 	struct nlattr *nla, *encap;
+	bool is_mask = (swkey != output);
 
-	if (swkey->phy.priority &&
-	    nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
+	if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
 		goto nla_put_failure;
 
-	if (swkey->tun_key.ipv4_dst &&
-	    ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key))
+	if ((swkey->tun_key.ipv4_dst || is_mask) &&
+	    ovs_ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key))
 		goto nla_put_failure;
 
-	if (swkey->phy.in_port != DP_MAX_PORTS) {
-		/* Exact match upper 16 bits. */
+	if (swkey->phy.in_port == DP_MAX_PORTS) {
+		if (is_mask && (output->phy.in_port == 0xffff))
+			if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
+				goto nla_put_failure;
+	} else {
 		u16 upper_u16;
-		upper_u16 = (swkey == output) ? 0 : 0xffff;
+		upper_u16 = !is_mask ? 0 : 0xffff;
 
 		if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
-				(upper_u16 << 16) | output->phy.in_port))
+				(upper_u16 << 16) | output->phy.in_port))
 			goto nla_put_failure;
 	}
 
-	if (swkey->phy.skb_mark &&
-	    nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
+	if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
 		goto nla_put_failure;
 
 	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
@@ -1627,7 +1733,7 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey,
 	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
 		__be16 eth_type;
 
-		eth_type = (swkey == output) ? htons(ETH_P_8021Q) : htons(0xffff) ;
+		eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff);
 		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
 		    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
 			goto nla_put_failure;
@@ -1637,12 +1743,22 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey,
 	} else
 		encap = NULL;
 
-	if ((swkey == output) && (swkey->eth.type == htons(ETH_P_802_2)))
+	if (swkey->eth.type == htons(ETH_P_802_2)) {
+		/*
+		 * Ethertype 802.2 is represented in the netlink with omitted
+		 * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and
+		 * 0xffff in the mask attribute.  Ethertype can also
+		 * be wildcarded.
+		 */
+		if (is_mask && output->eth.type)
+			if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
+						output->eth.type))
+				goto nla_put_failure;
 		goto unencap;
+	}
 
-	if (output->eth.type != 0)
-		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
-			goto nla_put_failure;
+	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
+		goto nla_put_failure;
 
 	if (swkey->eth.type == htons(ETH_P_IP)) {
 		struct ovs_key_ipv4 *ipv4_key;
@@ -1843,7 +1959,7 @@ struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
 {
 	struct list_head *ml;
 
-	list_for_each(ml, &tbl->mask_list) {
+	list_for_each(ml, tbl->mask_list) {
 		struct sw_flow_mask *m;
 		m = container_of(ml, struct sw_flow_mask, list);
 		if (ovs_sw_flow_mask_equal(mask, m))
@@ -1860,7 +1976,7 @@ struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
  */
 void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
 {
-	list_add_rcu(&mask->list, &tbl->mask_list);
+	list_add_rcu(&mask->list, tbl->mask_list);
 }
 
 /**