const struct mf_subfield *fields,
size_t n_fields);
-static void oftable_remove_rule(struct rule *);
+static void oftable_remove_rule(struct rule *rule) OVS_RELEASES(rule->evict);
+static void oftable_remove_rule__(struct ofproto *ofproto,
+ struct classifier *cls, struct rule *rule)
+ OVS_REQ_WRLOCK(cls->rwlock) OVS_RELEASES(rule->evict);
static struct rule *oftable_replace_rule(struct rule *);
static void oftable_substitute_rule(struct rule *old, struct rule *new);
struct heap rules; /* Contains "struct rule"s. */
};
-static struct rule *choose_rule_to_evict(struct oftable *);
+static bool choose_rule_to_evict(struct oftable *table, struct rule **rulep)
+ OVS_TRY_WRLOCK(true, (*rulep)->evict);
static void ofproto_evict(struct ofproto *);
static uint32_t rule_eviction_priority(struct rule *);
static enum ofperr add_flow(struct ofproto *, struct ofconn *,
struct ofputil_flow_mod *,
const struct ofp_header *);
-static void delete_flow__(struct rule *, struct ofopgroup *,
- enum ofp_flow_removed_reason);
+static void delete_flow__(struct rule *rule, struct ofopgroup *,
+ enum ofp_flow_removed_reason)
+ OVS_RELEASES(rule->evict);
static bool handle_openflow(struct ofconn *, const struct ofpbuf *);
static enum ofperr handle_flow_mod__(struct ofproto *, struct ofconn *,
struct ofputil_flow_mod *,
}
table->max_flows = s->max_flows;
+ ovs_rwlock_rdlock(&table->cls.rwlock);
if (classifier_count(&table->cls) > table->max_flows
&& table->eviction_fields) {
/* 'table' contains more flows than allowed. We might not be able to
break;
}
}
+ ovs_rwlock_unlock(&table->cls.rwlock);
}
\f
bool
continue;
}
+ ovs_rwlock_wrlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, NULL);
CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
if (!rule->pending) {
ofoperation_create(group, rule, OFOPERATION_DELETE,
OFPRR_DELETE);
- oftable_remove_rule(rule);
+ ovs_rwlock_wrlock(&rule->evict);
+ oftable_remove_rule__(ofproto, &table->cls, rule);
ofproto->ofproto_class->rule_destruct(rule);
}
}
+ ovs_rwlock_unlock(&table->cls.rwlock);
}
ofopgroup_submit(group);
}
n_rules = 0;
OFPROTO_FOR_EACH_TABLE (table, ofproto) {
+ ovs_rwlock_rdlock(&table->cls.rwlock);
n_rules += classifier_count(&table->cls);
+ ovs_rwlock_unlock(&table->cls.rwlock);
}
simap_increase(usage, "rules", n_rules);
{
const struct rule *rule;
+ ovs_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
rule = rule_from_cls_rule(classifier_find_match_exactly(
&ofproto->tables[0].cls, match, priority));
+ ovs_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
if (!rule || !ofpacts_equal(rule->ofpacts, rule->ofpacts_len,
ofpacts, ofpacts_len)) {
struct ofputil_flow_mod fm;
{
struct rule *rule;
+ ovs_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
rule = rule_from_cls_rule(classifier_find_match_exactly(
&ofproto->tables[0].cls, target, priority));
+ ovs_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
if (!rule) {
/* No such rule -> success. */
return true;
/* Initiate deletion -> success. */
struct ofopgroup *group = ofopgroup_create_unattached(ofproto);
ofoperation_create(group, rule, OFOPERATION_DELETE, OFPRR_DELETE);
+ ovs_rwlock_wrlock(&rule->evict);
oftable_remove_rule(rule);
ofproto->ofproto_class->rule_destruct(rule);
ofopgroup_submit(group);
cls_rule_destroy(&rule->cr);
free(rule->ofpacts);
ovs_mutex_destroy(&rule->timeout_mutex);
+ ovs_rwlock_destroy(&rule->evict);
rule->ofproto->ofproto_class->rule_dealloc(rule);
}
}
* This function should only be called from an ofproto implementation's
* ->destruct() function. It is not suitable elsewhere. */
void
-ofproto_rule_destroy(struct rule *rule)
+ofproto_rule_destroy(struct ofproto *ofproto, struct classifier *cls,
+ struct rule *rule) OVS_REQ_WRLOCK(cls->rwlock)
{
ovs_assert(!rule->pending);
- oftable_remove_rule(rule);
+ if (!ovs_rwlock_trywrlock(&rule->evict)) {
+ oftable_remove_rule__(ofproto, cls, rule);
+ } else {
+ NOT_REACHED();
+ }
ofproto_rule_destroy__(rule);
}
ots[i].instructions = htonl(OFPIT11_ALL);
ots[i].config = htonl(OFPTC11_TABLE_MISS_MASK);
ots[i].max_entries = htonl(1000000); /* An arbitrary big number. */
+ ovs_rwlock_rdlock(&p->tables[i].cls.rwlock);
ots[i].active_count = htonl(classifier_count(&p->tables[i].cls));
+ ovs_rwlock_unlock(&p->tables[i].cls.rwlock);
}
p->ofproto_class->get_tables(p, ots);
struct cls_cursor cursor;
struct rule *rule;
+ ovs_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, &cr);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
if (rule->pending) {
+ ovs_rwlock_unlock(&table->cls.rwlock);
error = OFPROTO_POSTPONE;
goto exit;
}
list_push_back(rules, &rule->ofproto_node);
}
}
+ ovs_rwlock_unlock(&table->cls.rwlock);
}
exit:
FOR_EACH_MATCHING_TABLE (table, table_id, ofproto) {
struct rule *rule;
+ ovs_rwlock_rdlock(&table->cls.rwlock);
rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls,
&cr));
+ ovs_rwlock_unlock(&table->cls.rwlock);
if (rule) {
if (rule->pending) {
error = OFPROTO_POSTPONE;
struct cls_cursor cursor;
struct rule *rule;
+ ovs_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, NULL);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
flow_stats_ds(rule, results);
}
+ ovs_rwlock_unlock(&table->cls.rwlock);
}
}
struct rule *victim;
struct rule *rule;
uint8_t table_id;
+ bool overlaps;
int error;
error = check_table_id(ofproto, fm->table_id);
}
/* Check for overlap, if requested. */
- if (fm->flags & OFPFF_CHECK_OVERLAP
- && classifier_rule_overlaps(&table->cls, &rule->cr)) {
+ ovs_rwlock_rdlock(&table->cls.rwlock);
+ overlaps = classifier_rule_overlaps(&table->cls, &rule->cr);
+ ovs_rwlock_unlock(&table->cls.rwlock);
+ if (fm->flags & OFPFF_CHECK_OVERLAP && overlaps) {
cls_rule_destroy(&rule->cr);
ofproto->ofproto_class->rule_dealloc(rule);
return OFPERR_OFPFMFC_OVERLAP;
rule->ofpacts_len = fm->ofpacts_len;
rule->meter_id = find_meter(rule->ofpacts, rule->ofpacts_len);
list_init(&rule->meter_list_node);
- rule->evictable = true;
rule->eviction_group = NULL;
list_init(&rule->expirable);
rule->monitor_flags = 0;
rule->add_seqno = 0;
rule->modify_seqno = 0;
+ ovs_rwlock_init(&rule->evict);
/* Insert new rule. */
victim = oftable_replace_rule(rule);
} else {
struct ofoperation *op;
struct rule *evict;
-
- if (classifier_count(&table->cls) > table->max_flows) {
- bool was_evictable;
-
- was_evictable = rule->evictable;
- rule->evictable = false;
- evict = choose_rule_to_evict(table);
- rule->evictable = was_evictable;
-
- if (!evict) {
+ size_t n_rules;
+
+ ovs_rwlock_rdlock(&table->cls.rwlock);
+ n_rules = classifier_count(&table->cls);
+ ovs_rwlock_unlock(&table->cls.rwlock);
+ if (n_rules > table->max_flows) {
+ ovs_rwlock_rdlock(&rule->evict);
+ if (choose_rule_to_evict(table, &evict)) {
+ ovs_rwlock_unlock(&rule->evict);
+ ovs_rwlock_unlock(&evict->evict);
+ if (evict->pending) {
+ error = OFPROTO_POSTPONE;
+ goto exit;
+ }
+ } else {
+ ovs_rwlock_unlock(&rule->evict);
error = OFPERR_OFPFMFC_TABLE_FULL;
goto exit;
- } else if (evict->pending) {
- error = OFPROTO_POSTPONE;
- goto exit;
}
} else {
evict = NULL;
op->group->n_running--;
ofoperation_destroy(rule->pending);
} else if (evict) {
+            /* It would be better to keep holding the 'evict' lock acquired
+             * in choose_rule_to_evict() until this point, but doing so
+             * confuses the thread safety analysis, and this code is fragile
+             * enough that we really need that analysis.  In the worst case,
+             * we block for a little while before performing the eviction,
+             * which does not seem like a big problem. */
+ ovs_rwlock_wrlock(&evict->evict);
delete_flow__(evict, group, OFPRR_EVICTION);
}
ofopgroup_submit(group);
group = ofopgroup_create(ofproto, ofconn, request, UINT32_MAX);
LIST_FOR_EACH_SAFE (rule, next, ofproto_node, rules) {
+ ovs_rwlock_wrlock(&rule->evict);
delete_flow__(rule, group, reason);
}
ofopgroup_submit(group);
struct cls_cursor cursor;
struct rule *rule;
+ ovs_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, &target);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
ovs_assert(!rule->pending); /* XXX */
ofproto_collect_ofmonitor_refresh_rule(m, rule, seqno, rules);
}
+ ovs_rwlock_unlock(&table->cls.rwlock);
}
HMAP_FOR_EACH (op, hmap_node, &ofproto->deletions) {
\f
/* Table overflow policy. */
-/* Chooses and returns a rule to evict from 'table'. Returns NULL if the table
- * is not configured to evict rules or if the table contains no evictable
- * rules. (Rules with 'evictable' set to false or with no timeouts are not
- * evictable.) */
-static struct rule *
-choose_rule_to_evict(struct oftable *table)
+/* Chooses a rule to evict from 'table' and stores it in '*rulep'.  On
+ * success, returns true with the chosen rule's 'evict' write-lock held; the
+ * caller is responsible for releasing it.  Returns false, setting '*rulep'
+ * to NULL, if 'table' is not configured to evict rules or contains no
+ * evictable rules.  (Rules whose 'evict' rwlock is already held in any mode,
+ * or that have no timeouts, are not evictable.) */
+static bool
+choose_rule_to_evict(struct oftable *table, struct rule **rulep)
{
struct eviction_group *evg;
+ *rulep = NULL;
if (!table->eviction_fields) {
- return NULL;
+ return false;
}
/* In the common case, the outer and inner loops here will each be entered
struct rule *rule;
HEAP_FOR_EACH (rule, evg_node, &evg->rules) {
- if (rule->evictable) {
- return rule;
+ if (!ovs_rwlock_trywrlock(&rule->evict)) {
+ *rulep = rule;
+ return true;
}
}
}
- return NULL;
+ return false;
}
/* Searches 'ofproto' for tables that have more flows than their configured
group = ofopgroup_create_unattached(ofproto);
OFPROTO_FOR_EACH_TABLE (table, ofproto) {
- while (classifier_count(&table->cls) > table->max_flows
- && table->eviction_fields) {
+ while (table->eviction_fields) {
struct rule *rule;
+ size_t n_rules;
- rule = choose_rule_to_evict(table);
- if (!rule || rule->pending) {
+ ovs_rwlock_rdlock(&table->cls.rwlock);
+ n_rules = classifier_count(&table->cls);
+ ovs_rwlock_unlock(&table->cls.rwlock);
+
+ if (n_rules <= table->max_flows) {
+ break;
+ }
+
+ if (!choose_rule_to_evict(table, &rule)) {
+ break;
+ }
+
+ if (rule->pending) {
+ ovs_rwlock_unlock(&rule->evict);
break;
}
static void
oftable_destroy(struct oftable *table)
{
+ ovs_rwlock_rdlock(&table->cls.rwlock);
ovs_assert(classifier_is_empty(&table->cls));
+ ovs_rwlock_unlock(&table->cls.rwlock);
oftable_disable_eviction(table);
classifier_destroy(&table->cls);
free(table->name);
hmap_init(&table->eviction_groups_by_id);
heap_init(&table->eviction_groups_by_size);
+ ovs_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, NULL);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
eviction_group_add_rule(rule);
}
+ ovs_rwlock_unlock(&table->cls.rwlock);
}
/* Removes 'rule' from the oftable that contains it. */
static void
-oftable_remove_rule(struct rule *rule)
+oftable_remove_rule__(struct ofproto *ofproto, struct classifier *cls,
+ struct rule *rule)
+ OVS_REQ_WRLOCK(cls->rwlock) OVS_RELEASES(rule->evict)
{
- struct ofproto *ofproto = rule->ofproto;
- struct oftable *table = &ofproto->tables[rule->table_id];
-
- classifier_remove(&table->cls, &rule->cr);
+ classifier_remove(cls, &rule->cr);
if (rule->meter_id) {
list_remove(&rule->meter_list_node);
}
if (!list_is_empty(&rule->meter_list_node)) {
list_remove(&rule->meter_list_node);
}
+ ovs_rwlock_unlock(&rule->evict);
+}
+
+static void
+oftable_remove_rule(struct rule *rule)
+{
+ struct ofproto *ofproto = rule->ofproto;
+ struct oftable *table = &ofproto->tables[rule->table_id];
+
+ ovs_rwlock_wrlock(&table->cls.rwlock);
+ oftable_remove_rule__(ofproto, &table->cls, rule);
+ ovs_rwlock_unlock(&table->cls.rwlock);
}
/* Inserts 'rule' into its oftable. Removes any existing rule from 'rule''s
struct meter *meter = ofproto->meters[rule->meter_id];
list_insert(&meter->rules, &rule->meter_list_node);
}
+ ovs_rwlock_wrlock(&table->cls.rwlock);
victim = rule_from_cls_rule(classifier_replace(&table->cls, &rule->cr));
+ ovs_rwlock_unlock(&table->cls.rwlock);
if (victim) {
if (victim->meter_id) {
list_remove(&victim->meter_list_node);
if (new) {
oftable_replace_rule(new);
} else {
+ ovs_rwlock_wrlock(&old->evict);
oftable_remove_rule(old);
}
}