static bool flow_equal_except(const struct flow *, const struct flow *,
const struct flow_wildcards *);
-static void zero_wildcards(struct flow *, const struct flow_wildcards *);
/* Iterates RULE over HEAD and all of the cls_rules on HEAD->list. */
#define FOR_EACH_RULE_IN_LIST(RULE, HEAD) \
void
cls_rule_zero_wildcarded_fields(struct cls_rule *rule)
{
- zero_wildcards(&rule->flow, &rule->wc);
+ flow_zero_wildcards(&rule->flow, &rule->wc);
}
void
memcpy(rule->flow.dl_src, dl_src, ETH_ADDR_LEN);
}
+/* Modifies 'rule' so that the destination Ethernet address must match
+ * 'dl_dst' exactly. */
void
cls_rule_set_dl_dst(struct cls_rule *rule, const uint8_t dl_dst[ETH_ADDR_LEN])
{
memcpy(rule->flow.dl_dst, dl_dst, ETH_ADDR_LEN);
}
+/* Modifies 'rule' so that the destination Ethernet address must match
+ * 'dl_dst' after each byte is ANDed with the appropriate byte in 'mask'.
+ *
+ * This function will assert-fail if 'mask' is invalid. Only 'mask' values
+ * accepted by flow_wildcards_is_dl_dst_mask_valid() are allowed. */
+void
+cls_rule_set_dl_dst_masked(struct cls_rule *rule,
+ const uint8_t dl_dst[ETH_ADDR_LEN],
+ const uint8_t mask[ETH_ADDR_LEN])
+{
+ flow_wildcards_t *wc = &rule->wc.wildcards;
+ size_t i;
+
+ *wc = flow_wildcards_set_dl_dst_mask(*wc, mask);
+ for (i = 0; i < ETH_ADDR_LEN; i++) {
+ rule->flow.dl_dst[i] = dl_dst[i] & mask[i];
+ }
+}
+
void
cls_rule_set_dl_tci(struct cls_rule *rule, ovs_be16 tci)
{
}
void
-cls_rule_set_nd_target(struct cls_rule *rule, const struct in6_addr target)
+cls_rule_set_nd_target(struct cls_rule *rule, const struct in6_addr *target)
{
rule->wc.wildcards &= ~FWW_ND_TARGET;
- rule->flow.nd_target = target;
+ rule->flow.nd_target = *target;
}
/* Returns true if 'a' and 'b' have the same priority, wildcard the same
&& flow_equal(&a->flow, &b->flow));
}
+/* Returns a hash value for the flow, wildcards, and priority in 'rule',
+ * starting from 'basis'. */
+uint32_t
+cls_rule_hash(const struct cls_rule *rule, uint32_t basis)
+{
+ uint32_t h0 = flow_hash(&rule->flow, basis);
+ uint32_t h1 = flow_wildcards_hash(&rule->wc, h0);
+ return hash_int(rule->priority, h1);
+}
+
static void
format_ip_netmask(struct ds *s, const char *name, ovs_be32 ip,
ovs_be32 netmask)
{
if (netmask) {
- ds_put_format(s, "%s="IP_FMT, name, IP_ARGS(&ip));
- if (netmask != htonl(UINT32_MAX)) {
- if (ip_is_cidr(netmask)) {
- int wcbits = ofputil_netmask_to_wcbits(netmask);
- ds_put_format(s, "/%d", 32 - wcbits);
- } else {
- ds_put_format(s, "/"IP_FMT, IP_ARGS(&netmask));
- }
- }
+ ds_put_format(s, "%s=", name);
+ ip_format_masked(ip, netmask, s);
ds_put_char(s, ',');
}
}
{
if (!ipv6_mask_is_any(netmask)) {
ds_put_format(s, "%s=", name);
- print_ipv6_addr(s, addr);
- if (!ipv6_mask_is_exact(netmask)) {
- if (ipv6_is_cidr(netmask)) {
- int cidr_bits = ipv6_count_cidr_bits(netmask);
- ds_put_format(s, "/%d", cidr_bits);
- } else {
- ds_put_char(s, '/');
- print_ipv6_addr(s, netmask);
- }
- }
+ print_ipv6_masked(s, addr, netmask);
ds_put_char(s, ',');
}
}
int i;
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 1);
+
if (rule->priority != OFP_DEFAULT_PRIORITY) {
ds_put_format(s, "priority=%d,", rule->priority);
}
}
if (!skip_proto && !(w & FWW_NW_PROTO)) {
if (f->dl_type == htons(ETH_TYPE_ARP)) {
- ds_put_format(s, "opcode=%"PRIu8",", f->nw_proto);
+ ds_put_format(s, "arp_op=%"PRIu8",", f->nw_proto);
} else {
ds_put_format(s, "nw_proto=%"PRIu8",", f->nw_proto);
}
ds_put_char(s, ',');
}
if (!(w & FWW_ARP_SHA)) {
- ds_put_format(s, "nd_sll="ETH_ADDR_FMT",",
+ ds_put_format(s, "nd_sll="ETH_ADDR_FMT",",
ETH_ADDR_ARGS(f->arp_sha));
}
if (!(w & FWW_ARP_THA)) {
- ds_put_format(s, "nd_tll="ETH_ADDR_FMT",",
+ ds_put_format(s, "nd_tll="ETH_ADDR_FMT",",
ETH_ADDR_ARGS(f->arp_tha));
}
} else {
* rule, even rules that cannot have any effect because the new rule matches a
* superset of their flows and has higher priority. */
struct cls_rule *
-classifier_insert(struct classifier *cls, struct cls_rule *rule)
+classifier_replace(struct classifier *cls, struct cls_rule *rule)
{
struct cls_rule *old_rule;
struct cls_table *table;
return old_rule;
}
+/* Inserts 'rule' into 'cls'. Until 'rule' is removed from 'cls', the caller
+ * must not modify or free it.
+ *
+ * 'cls' must not contain an identical rule (including wildcards, values of
+ * fixed fields, and priority). Use classifier_find_rule_exactly() to find
+ * such a rule. */
+void
+classifier_insert(struct classifier *cls, struct cls_rule *rule)
+{
+ struct cls_rule *displaced_rule = classifier_replace(cls, rule);
+ assert(!displaced_rule);
+}
+
/* Removes 'rule' from 'cls'. It is the caller's responsibility to free
* 'rule', if this is desirable. */
void
/* Finds and returns a rule in 'cls' with exactly the same priority and
* matching criteria as 'target'. Returns a null pointer if 'cls' doesn't
- * contain an exact match.
- *
- * Priority is ignored for exact-match rules (because OpenFlow 1.0 always
- * treats exact-match rules as highest priority). */
+ * contain an exact match. */
struct cls_rule *
classifier_find_rule_exactly(const struct classifier *cls,
const struct cls_rule *target)
}
head = find_equal(table, &target->flow, flow_hash(&target->flow, 0));
- if (flow_wildcards_is_exact(&target->wc)) {
- return head;
- }
FOR_EACH_RULE_IN_LIST (rule, head) {
if (target->priority >= rule->priority) {
return target->priority == rule->priority ? rule : NULL;
{
struct cls_table *table;
- HMAP_FOR_EACH_IN_BUCKET (table, hmap_node, flow_wildcards_hash(wc),
+ HMAP_FOR_EACH_IN_BUCKET (table, hmap_node, flow_wildcards_hash(wc, 0),
&cls->tables) {
if (flow_wildcards_equal(wc, &table->wc)) {
return table;
table = xzalloc(sizeof *table);
hmap_init(&table->rules);
table->wc = *wc;
- hmap_insert(&cls->tables, &table->hmap_node, flow_wildcards_hash(wc));
+ hmap_insert(&cls->tables, &table->hmap_node, flow_wildcards_hash(wc, 0));
return table;
}
struct flow f;
f = *flow;
- zero_wildcards(&f, &table->wc);
+ flow_zero_wildcards(&f, &table->wc);
HMAP_FOR_EACH_WITH_HASH (rule, hmap_node, flow_hash(&f, 0),
&table->rules) {
if (flow_equal(&f, &rule->flow)) {
const flow_wildcards_t wc = wildcards->wildcards;
int i;
- BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 100 + FLOW_N_REGS * 4);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 1);
for (i = 0; i < FLOW_N_REGS; i++) {
if ((a->regs[i] ^ b->regs[i]) & wildcards->reg_masks[i]) {
&wildcards->ipv6_src_mask)
&& ipv6_equal_except(&a->ipv6_dst, &b->ipv6_dst,
&wildcards->ipv6_dst_mask)
- && (wc & FWW_ND_TARGET
+ && (wc & FWW_ND_TARGET
|| ipv6_addr_equals(&a->nd_target, &b->nd_target)));
}
-
-static void
-zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards)
-{
- const flow_wildcards_t wc = wildcards->wildcards;
- int i;
-
- BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 100 + 4 * FLOW_N_REGS);
-
- for (i = 0; i < FLOW_N_REGS; i++) {
- flow->regs[i] &= wildcards->reg_masks[i];
- }
- flow->tun_id &= wildcards->tun_id_mask;
- flow->nw_src &= wildcards->nw_src_mask;
- flow->nw_dst &= wildcards->nw_dst_mask;
- if (wc & FWW_IN_PORT) {
- flow->in_port = 0;
- }
- flow->vlan_tci &= wildcards->vlan_tci_mask;
- if (wc & FWW_DL_TYPE) {
- flow->dl_type = 0;
- }
- if (wc & FWW_TP_SRC) {
- flow->tp_src = 0;
- }
- if (wc & FWW_TP_DST) {
- flow->tp_dst = 0;
- }
- if (wc & FWW_DL_SRC) {
- memset(flow->dl_src, 0, sizeof flow->dl_src);
- }
- if (wc & FWW_DL_DST) {
- flow->dl_dst[0] &= 0x01;
- memset(&flow->dl_dst[1], 0, 5);
- }
- if (wc & FWW_ETH_MCAST) {
- flow->dl_dst[0] &= 0xfe;
- }
- if (wc & FWW_NW_PROTO) {
- flow->nw_proto = 0;
- }
- if (wc & FWW_NW_TOS) {
- flow->nw_tos = 0;
- }
- if (wc & FWW_ARP_SHA) {
- memset(flow->arp_sha, 0, sizeof flow->arp_sha);
- }
- if (wc & FWW_ARP_THA) {
- memset(flow->arp_tha, 0, sizeof flow->arp_tha);
- }
- flow->ipv6_src = ipv6_addr_bitand(&flow->ipv6_src,
- &wildcards->ipv6_src_mask);
- flow->ipv6_dst = ipv6_addr_bitand(&flow->ipv6_dst,
- &wildcards->ipv6_dst_mask);
- if (wc & FWW_ND_TARGET) {
- memset(&flow->nd_target, 0, sizeof flow->nd_target);
- }
-}