do { \
update_range__(match, offsetof(struct sw_flow_key, field), \
sizeof((match)->key->field), is_mask); \
- if (is_mask && match->mask != NULL) { \
- (match)->mask->key.field = value; \
+ if (is_mask) { \
+ if ((match)->mask) \
+ (match)->mask->key.field = value; \
} else { \
(match)->key->field = value; \
} \
do { \
update_range__(match, offsetof(struct sw_flow_key, field), \
len, is_mask); \
- if (is_mask && match->mask != NULL) { \
- memcpy(&(match)->mask->key.field, value_p, len); \
+ if (is_mask) { \
+ if ((match)->mask) \
+ memcpy(&(match)->mask->key.field, value_p, len);\
} else { \
memcpy(&(match)->key->field, value_p, len); \
} \
| (1ULL << OVS_KEY_ATTR_ARP)
| (1ULL << OVS_KEY_ATTR_ND));
+ /* Tunnel mask is always allowed. */
+ mask_allowed |= (1ULL << OVS_KEY_ATTR_TUNNEL);
+
if (match->key->phy.in_port == DP_MAX_PORTS &&
match->mask && (match->mask->key.phy.in_port == 0xffff))
mask_allowed |= (1ULL << OVS_KEY_ATTR_IN_PORT);
struct flex_array *buckets;
int i, err;
- buckets = flex_array_alloc(sizeof(struct hlist_head *),
+ buckets = flex_array_alloc(sizeof(struct hlist_head),
n_buckets, GFP_KERNEL);
if (!buckets)
return NULL;
int ver = table->node_ver;
hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
- hlist_del_rcu(&flow->hash_node[ver]);
+ hlist_del(&flow->hash_node[ver]);
ovs_flow_free(flow, false);
}
}
if (!flow)
return;
- ovs_sw_flow_mask_del_ref((struct sw_flow_mask __force *)flow->mask,
- deferred);
+ ovs_sw_flow_mask_del_ref(flow->mask, deferred);
if (deferred)
call_rcu(&flow->rcu, rcu_free_flow_callback);
hash = ovs_flow_hash(&masked_key, key_start, key_len);
head = find_bucket(table, hash);
hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
- if (__flow_cmp_key(flow, &masked_key, key_start, key_len))
+ if (flow->mask == mask &&
+ __flow_cmp_key(flow, &masked_key, key_start, key_len))
return flow;
}
return NULL;
void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow)
{
- flow->hash = ovs_flow_hash(&flow->key,
- ovsl_dereference(flow->mask)->range.start,
- ovsl_dereference(flow->mask)->range.end);
+ flow->hash = ovs_flow_hash(&flow->key, flow->mask->range.start,
+ flow->mask->range.end);
__tbl_insert(table, flow);
}
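/*
 * Illustrative sketch, not part of the patch: the lookup and insert hunks
 * above hash and compare a flow only over the byte range covered by its
 * mask, and a lookup hit additionally requires the same mask identity
 * (flow->mask == mask).  A simplified userspace rendering of that idea,
 * with stand-in names (flow_key_ex, flow_mask_ex) and FNV-1a in place of
 * jhash2:
 */
#include <stdint.h>

struct flow_key_ex  { uint8_t bytes[64]; };
struct flow_mask_ex { struct flow_key_ex key; int start, end; };

/* dst = src & mask over [start, end); bytes outside the range are never
 * read by the hash below, so they are left untouched in this sketch. */
static void mask_key_ex(struct flow_key_ex *dst, const struct flow_key_ex *src,
			const struct flow_mask_ex *mask)
{
	for (int i = mask->start; i < mask->end; i++)
		dst->bytes[i] = src->bytes[i] & mask->key.bytes[i];
}

/* Hash only the masked range, so flows sharing a mask land in comparable
 * buckets regardless of the wildcarded bytes. */
static uint32_t hash_masked_range_ex(const struct flow_key_ex *key,
				     int start, int end)
{
	uint32_t h = 2166136261u;		/* FNV-1a offset basis */
	for (int i = start; i < end; i++)
		h = (h ^ key->bytes[i]) * 16777619u;
	return h;
}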
__be16 tci;
tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
- if (!is_mask)
- if (!(tci & htons(VLAN_TAG_PRESENT))) {
+ if (!(tci & htons(VLAN_TAG_PRESENT))) {
+ if (is_mask)
+ OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n");
+ else
OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n");
- return -EINVAL;
- }
+
+ return -EINVAL;
+ }
SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
attrs &= ~(1ULL << OVS_KEY_ATTR_VLAN);
- }
+ } else if (!is_mask)
+ SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);
if (attrs & (1ULL << OVS_KEY_ATTR_ETHERTYPE)) {
__be16 eth_type;
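/*
 * Illustrative sketch, not part of the patch: in the VLAN hunk above, the
 * "else if (!is_mask)" branch calls SW_FLOW_KEY_PUT with is_mask == true,
 * i.e. when the flow key carries no OVS_KEY_ATTR_VLAN attribute the TCI
 * *mask* is forced to htons(0xffff), so the flow exact-matches an untagged
 * frame instead of wildcarding the tag.  A stand-alone rendering of that
 * rule with made-up names (tci_match_ex, set_vlan_ex):
 */
#include <stdint.h>
#include <stdbool.h>
#include <arpa/inet.h>			/* htons() */

#define TAG_PRESENT_EX 0x1000		/* stand-in for VLAN_TAG_PRESENT */

struct tci_match_ex { uint16_t key_tci; uint16_t mask_tci; };

static int set_vlan_ex(struct tci_match_ex *m, bool attr_present,
		       uint16_t tci_be, bool is_mask)
{
	if (attr_present) {
		/* Key and mask must both carry the presence bit exactly. */
		if (!(tci_be & htons(TAG_PRESENT_EX)))
			return -1;
		if (is_mask)
			m->mask_tci = tci_be;
		else
			m->key_tci = tci_be;
	} else if (!is_mask) {
		/* No VLAN attribute in the key: exact-match "no tag". */
		m->mask_tci = htons(0xffff);
	}
	return 0;
}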
struct ovs_key_ethernet *eth_key;
struct nlattr *nla, *encap;
- if (output->phy.priority &&
- nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
+ if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
goto nla_put_failure;
if (swkey->tun_key.ipv4_dst &&
goto nla_put_failure;
}
- if (output->phy.skb_mark &&
- nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
+ if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
goto nla_put_failure;
nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));