#include "odp-util.h"
#include "ofp-util.h"
#include "packets.h"
+#include "ovs-thread.h"
static struct cls_table *find_table(const struct classifier *,
const struct minimask *);
cls->n_rules = 0;
hmap_init(&cls->tables);
list_init(&cls->tables_priority);
+ ovs_rwlock_init(&cls->rwlock);
}
/* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the
destroy_table(cls, table);
}
hmap_destroy(&cls->tables);
+ ovs_rwlock_destroy(&cls->rwlock);
}
}
* a hash map from fixed field values to "struct cls_rule",
* which can contain a list of otherwise identical rules
* with lower priorities.
- */
+ *
+ * Thread-safety
+ * =============
+ *
+ * When properly locked, the classifier is thread safe as long as the
+ * following conditions are satisfied:
+ * - Only the main thread calls functions requiring a write lock.
+ * - Only the main thread is allowed to iterate over rules. */
#include "flow.h"
#include "hmap.h"
#include "match.h"
#include "openflow/nicira-ext.h"
#include "openflow/openflow.h"
+#include "ovs-thread.h"
+#include "util.h"
#ifdef __cplusplus
extern "C" {
int n_rules; /* Total number of rules. */
struct hmap tables; /* Contains "struct cls_table"s. */
struct list tables_priority; /* Tables in descending priority order */
+ struct ovs_rwlock rwlock;
};
/* A set of rules that all have the same fields wildcarded. */
bool cls_rule_is_loose_match(const struct cls_rule *rule,
const struct minimatch *criteria);
-void classifier_init(struct classifier *);
+void classifier_init(struct classifier *cls);
void classifier_destroy(struct classifier *);
-bool classifier_is_empty(const struct classifier *);
-int classifier_count(const struct classifier *);
-void classifier_insert(struct classifier *, struct cls_rule *);
-struct cls_rule *classifier_replace(struct classifier *, struct cls_rule *);
-void classifier_remove(struct classifier *, struct cls_rule *);
-struct cls_rule *classifier_lookup(const struct classifier *,
+bool classifier_is_empty(const struct classifier *cls)
+ OVS_REQ_RDLOCK(cls->rwlock);
+int classifier_count(const struct classifier *cls)
+ OVS_REQ_RDLOCK(cls->rwlock);
+void classifier_insert(struct classifier *cls, struct cls_rule *)
+ OVS_REQ_WRLOCK(cls->rwlock);
+struct cls_rule *classifier_replace(struct classifier *cls, struct cls_rule *)
+ OVS_REQ_WRLOCK(cls->rwlock);
+void classifier_remove(struct classifier *cls, struct cls_rule *)
+ OVS_REQ_WRLOCK(cls->rwlock);
+struct cls_rule *classifier_lookup(const struct classifier *cls,
const struct flow *,
- struct flow_wildcards *);
-bool classifier_rule_overlaps(const struct classifier *,
- const struct cls_rule *);
+ struct flow_wildcards *)
+ OVS_REQ_RDLOCK(cls->rwlock);
+bool classifier_rule_overlaps(const struct classifier *cls,
+ const struct cls_rule *)
+ OVS_REQ_RDLOCK(cls->rwlock);
typedef void cls_cb_func(struct cls_rule *, void *aux);
-struct cls_rule *classifier_find_rule_exactly(const struct classifier *,
- const struct cls_rule *);
-struct cls_rule *classifier_find_match_exactly(const struct classifier *,
+struct cls_rule *classifier_find_rule_exactly(const struct classifier *cls,
+ const struct cls_rule *)
+ OVS_REQ_RDLOCK(cls->rwlock);
+struct cls_rule *classifier_find_match_exactly(const struct classifier *cls,
const struct match *,
- unsigned int priority);
+ unsigned int priority)
+ OVS_REQ_RDLOCK(cls->rwlock);
\f
/* Iteration. */
const struct cls_rule *target;
};
-void cls_cursor_init(struct cls_cursor *, const struct classifier *,
- const struct cls_rule *match);
-struct cls_rule *cls_cursor_first(struct cls_cursor *);
-struct cls_rule *cls_cursor_next(struct cls_cursor *, struct cls_rule *);
+void cls_cursor_init(struct cls_cursor *cursor, const struct classifier *cls,
+ const struct cls_rule *match) OVS_REQ_RDLOCK(cls->rwlock);
+struct cls_rule *cls_cursor_first(struct cls_cursor *cursor);
+struct cls_rule *cls_cursor_next(struct cls_cursor *cursor, struct cls_rule *);
#define CLS_CURSOR_FOR_EACH(RULE, MEMBER, CURSOR) \
for (ASSIGN_CONTAINER(RULE, cls_cursor_first(CURSOR), MEMBER); \
}
ovs_rwlock_unlock(&xlate_rwlock);
+ /* Only ofproto-dpif cares about the facet classifier so we just
+ * lock cls_cursor_init() to appease the thread safety analysis. */
+ ovs_rwlock_rdlock(&ofproto->facets.rwlock);
cls_cursor_init(&cursor, &ofproto->facets, NULL);
+ ovs_rwlock_unlock(&ofproto->facets.rwlock);
CLS_CURSOR_FOR_EACH_SAFE (facet, next, cr, &cursor) {
facet_revalidate(facet);
run_fast_rl();
OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
struct cls_cursor cursor;
+ ovs_rwlock_wrlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, NULL);
CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
- ofproto_rule_destroy(&rule->up);
+ ofproto_rule_destroy(&ofproto->up, &table->cls, &rule->up);
}
+ ovs_rwlock_unlock(&table->cls.rwlock);
}
ovs_mutex_lock(&ofproto->flow_mod_mutex);
ovs_rwlock_unlock(&ofproto->ml->rwlock);
/* Check the consistency of a random facet, to aid debugging. */
+ ovs_rwlock_rdlock(&ofproto->facets.rwlock);
if (time_msec() >= ofproto->consistency_rl
&& !classifier_is_empty(&ofproto->facets)
&& !ofproto->backer->need_revalidate) {
ofproto->backer->need_revalidate = REV_INCONSISTENCY;
}
}
+ ovs_rwlock_unlock(&ofproto->facets.rwlock);
return 0;
}
size_t n_subfacets = 0;
struct facet *facet;
+ ovs_rwlock_rdlock(&ofproto->facets.rwlock);
simap_increase(usage, "facets", classifier_count(&ofproto->facets));
+ ovs_rwlock_unlock(&ofproto->facets.rwlock);
+ ovs_rwlock_rdlock(&ofproto->facets.rwlock);
cls_cursor_init(&cursor, &ofproto->facets, NULL);
CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
n_subfacets += list_size(&facet->subfacets);
}
+ ovs_rwlock_unlock(&ofproto->facets.rwlock);
simap_increase(usage, "subfacets", n_subfacets);
}
match_init(&match, &facet->flow, &facet->xout.wc);
cls_rule_init(&facet->cr, &match, OFP_DEFAULT_PRIORITY);
+ ovs_rwlock_wrlock(&ofproto->facets.rwlock);
classifier_insert(&ofproto->facets, &facet->cr);
+ ovs_rwlock_unlock(&ofproto->facets.rwlock);
facet->nf_flow.output_iface = facet->xout.nf_output_iface;
facet->fail_open = rule->up.cr.priority == FAIL_OPEN_PRIORITY;
&facet->subfacets) {
subfacet_destroy__(subfacet);
}
+ ovs_rwlock_wrlock(&facet->ofproto->facets.rwlock);
classifier_remove(&facet->ofproto->facets, &facet->cr);
+ ovs_rwlock_unlock(&facet->ofproto->facets.rwlock);
cls_rule_destroy(&facet->cr);
facet_free(facet);
}
static struct facet *
facet_find(struct ofproto_dpif *ofproto, const struct flow *flow)
{
- struct cls_rule *cr = classifier_lookup(&ofproto->facets, flow, NULL);
+ struct cls_rule *cr;
+
+ /* classifier_lookup() is annotated OVS_REQ_RDLOCK(cls->rwlock), so take
+ * the facet classifier's read lock just for the duration of the lookup. */
+ ovs_rwlock_rdlock(&ofproto->facets.rwlock);
+ cr = classifier_lookup(&ofproto->facets, flow, NULL);
+ ovs_rwlock_unlock(&ofproto->facets.rwlock);
return cr ? CONTAINER_OF(cr, struct facet, cr) : NULL;
}
struct cls_cursor cursor;
struct facet *facet;
+ ovs_rwlock_rdlock(&ofproto->facets.rwlock);
cls_cursor_init(&cursor, &ofproto->facets, NULL);
CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
facet_push_stats(facet, false);
run_fast_rl();
}
}
+ ovs_rwlock_unlock(&ofproto->facets.rwlock);
}
rl = time_msec() + 100;
struct flow ofpc_normal_flow = *flow;
ofpc_normal_flow.tp_src = htons(0);
ofpc_normal_flow.tp_dst = htons(0);
+ ovs_rwlock_rdlock(&cls->rwlock);
cls_rule = classifier_lookup(cls, &ofpc_normal_flow, wc);
+ ovs_rwlock_unlock(&cls->rwlock);
} else if (frag && ofproto->up.frag_handling == OFPC_FRAG_DROP) {
cls_rule = &ofproto->drop_frags_rule->up.cr;
if (wc) {
flow_wildcards_init_exact(wc);
}
} else {
+ ovs_rwlock_rdlock(&cls->rwlock);
cls_rule = classifier_lookup(cls, flow, wc);
+ ovs_rwlock_unlock(&cls->rwlock);
}
return rule_dpif_cast(rule_from_cls_rule(cls_rule));
}
struct cls_cursor cursor;
struct facet *facet;
+ ovs_rwlock_rdlock(&ofproto->facets.rwlock);
cls_cursor_init(&cursor, &ofproto->facets, NULL);
CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
send_active_timeout(ofproto, facet);
}
+ ovs_rwlock_unlock(&ofproto->facets.rwlock);
}
\f
static struct ofproto_dpif *
int errors;
errors = 0;
+ ovs_rwlock_rdlock(&ofproto->facets.rwlock);
cls_cursor_init(&cursor, &ofproto->facets, NULL);
CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
if (!facet_check_consistency(facet)) {
errors++;
}
}
+ ovs_rwlock_unlock(&ofproto->facets.rwlock);
if (errors) {
ofproto->backer->need_revalidate = REV_INCONSISTENCY;
}
return;
}
+ ovs_rwlock_rdlock(&ofproto->facets.rwlock);
cls_cursor_init(&cursor, &ofproto->facets, NULL);
CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
cls_rule_format(&facet->cr, &ds);
}
ds_put_cstr(&ds, "\n");
}
+ ovs_rwlock_unlock(&ofproto->facets.rwlock);
ds_chomp(&ds, '\n');
unixctl_command_reply(conn, ds_cstr(&ds));
void ofproto_rule_update_used(struct rule *, long long int used);
void ofproto_rule_expire(struct rule *, uint8_t reason);
-void ofproto_rule_destroy(struct rule *);
+void ofproto_rule_destroy(struct ofproto *, struct classifier *cls,
+ struct rule *) OVS_REQ_WRLOCK(cls->rwlock);
bool ofproto_rule_has_out_port(const struct rule *, ofp_port_t out_port);
size_t n_fields);
static void oftable_remove_rule(struct rule *);
+static void oftable_remove_rule__(struct ofproto *ofproto,
+ struct classifier *cls, struct rule *rule)
+ OVS_REQ_WRLOCK(cls->rwlock);
static struct rule *oftable_replace_rule(struct rule *);
static void oftable_substitute_rule(struct rule *old, struct rule *new);
}
table->max_flows = s->max_flows;
+ ovs_rwlock_rdlock(&table->cls.rwlock);
if (classifier_count(&table->cls) > table->max_flows
&& table->eviction_fields) {
/* 'table' contains more flows than allowed. We might not be able to
break;
}
}
+ ovs_rwlock_unlock(&table->cls.rwlock);
}
\f
bool
continue;
}
+ ovs_rwlock_wrlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, NULL);
CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
if (!rule->pending) {
ofoperation_create(group, rule, OFOPERATION_DELETE,
OFPRR_DELETE);
- oftable_remove_rule(rule);
+ oftable_remove_rule__(ofproto, &table->cls, rule);
ofproto->ofproto_class->rule_destruct(rule);
}
}
+ ovs_rwlock_unlock(&table->cls.rwlock);
}
ofopgroup_submit(group);
}
n_rules = 0;
OFPROTO_FOR_EACH_TABLE (table, ofproto) {
+ ovs_rwlock_rdlock(&table->cls.rwlock);
n_rules += classifier_count(&table->cls);
+ ovs_rwlock_unlock(&table->cls.rwlock);
}
simap_increase(usage, "rules", n_rules);
{
const struct rule *rule;
+ ovs_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
rule = rule_from_cls_rule(classifier_find_match_exactly(
&ofproto->tables[0].cls, match, priority));
+ ovs_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
if (!rule || !ofpacts_equal(rule->ofpacts, rule->ofpacts_len,
ofpacts, ofpacts_len)) {
struct ofputil_flow_mod fm;
{
struct rule *rule;
+ ovs_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
rule = rule_from_cls_rule(classifier_find_match_exactly(
&ofproto->tables[0].cls, target, priority));
+ ovs_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
if (!rule) {
/* No such rule -> success. */
return true;
* This function should only be called from an ofproto implementation's
* ->destruct() function. It is not suitable elsewhere. */
void
-ofproto_rule_destroy(struct rule *rule)
+ofproto_rule_destroy(struct ofproto *ofproto, struct classifier *cls,
+ struct rule *rule) OVS_REQ_WRLOCK(cls->rwlock)
{
ovs_assert(!rule->pending);
- oftable_remove_rule(rule);
+ /* The OVS_REQ_WRLOCK annotation above means the caller already holds
+ * 'cls->rwlock' for writing, so use the already-locked variant rather
+ * than oftable_remove_rule(), which would try to take the lock again. */
+ oftable_remove_rule__(ofproto, cls, rule);
ofproto_rule_destroy__(rule);
}
ots[i].instructions = htonl(OFPIT11_ALL);
ots[i].config = htonl(OFPTC11_TABLE_MISS_MASK);
ots[i].max_entries = htonl(1000000); /* An arbitrary big number. */
+ ovs_rwlock_rdlock(&p->tables[i].cls.rwlock);
ots[i].active_count = htonl(classifier_count(&p->tables[i].cls));
+ ovs_rwlock_unlock(&p->tables[i].cls.rwlock);
}
p->ofproto_class->get_tables(p, ots);
struct cls_cursor cursor;
struct rule *rule;
+ ovs_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, &cr);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
if (rule->pending) {
+ ovs_rwlock_unlock(&table->cls.rwlock);
error = OFPROTO_POSTPONE;
goto exit;
}
list_push_back(rules, &rule->ofproto_node);
}
}
+ ovs_rwlock_unlock(&table->cls.rwlock);
}
exit:
FOR_EACH_MATCHING_TABLE (table, table_id, ofproto) {
struct rule *rule;
+ ovs_rwlock_rdlock(&table->cls.rwlock);
rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls,
&cr));
+ ovs_rwlock_unlock(&table->cls.rwlock);
if (rule) {
if (rule->pending) {
error = OFPROTO_POSTPONE;
struct cls_cursor cursor;
struct rule *rule;
+ ovs_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, NULL);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
flow_stats_ds(rule, results);
}
+ ovs_rwlock_unlock(&table->cls.rwlock);
}
}
struct rule *victim;
struct rule *rule;
uint8_t table_id;
+ bool overlaps;
int error;
error = check_table_id(ofproto, fm->table_id);
}
/* Check for overlap, if requested. */
- if (fm->flags & OFPFF_CHECK_OVERLAP
- && classifier_rule_overlaps(&table->cls, &rule->cr)) {
+ ovs_rwlock_rdlock(&table->cls.rwlock);
+ overlaps = classifier_rule_overlaps(&table->cls, &rule->cr);
+ ovs_rwlock_unlock(&table->cls.rwlock);
+ if (fm->flags & OFPFF_CHECK_OVERLAP && overlaps) {
cls_rule_destroy(&rule->cr);
ofproto->ofproto_class->rule_dealloc(rule);
return OFPERR_OFPFMFC_OVERLAP;
} else {
struct ofoperation *op;
struct rule *evict;
+ size_t n_rules;
- if (classifier_count(&table->cls) > table->max_flows) {
+ ovs_rwlock_rdlock(&table->cls.rwlock);
+ n_rules = classifier_count(&table->cls);
+ ovs_rwlock_unlock(&table->cls.rwlock);
+ if (n_rules > table->max_flows) {
bool was_evictable;
was_evictable = rule->evictable;
struct cls_cursor cursor;
struct rule *rule;
+ ovs_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, &target);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
ovs_assert(!rule->pending); /* XXX */
ofproto_collect_ofmonitor_refresh_rule(m, rule, seqno, rules);
}
+ ovs_rwlock_unlock(&table->cls.rwlock);
}
HMAP_FOR_EACH (op, hmap_node, &ofproto->deletions) {
group = ofopgroup_create_unattached(ofproto);
OFPROTO_FOR_EACH_TABLE (table, ofproto) {
- while (classifier_count(&table->cls) > table->max_flows
- && table->eviction_fields) {
+ while (table->eviction_fields) {
struct rule *rule;
+ size_t n_rules;
+
+ ovs_rwlock_rdlock(&table->cls.rwlock);
+ n_rules = classifier_count(&table->cls);
+ ovs_rwlock_unlock(&table->cls.rwlock);
+
+ if (n_rules <= table->max_flows) {
+ break;
+ }
rule = choose_rule_to_evict(table);
if (!rule || rule->pending) {
static void
oftable_destroy(struct oftable *table)
{
+ ovs_rwlock_rdlock(&table->cls.rwlock);
ovs_assert(classifier_is_empty(&table->cls));
+ ovs_rwlock_unlock(&table->cls.rwlock);
oftable_disable_eviction(table);
classifier_destroy(&table->cls);
free(table->name);
hmap_init(&table->eviction_groups_by_id);
heap_init(&table->eviction_groups_by_size);
+ ovs_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, NULL);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
eviction_group_add_rule(rule);
}
+ ovs_rwlock_unlock(&table->cls.rwlock);
}
/* Removes 'rule' from the oftable that contains it. */
static void
-oftable_remove_rule(struct rule *rule)
+oftable_remove_rule__(struct ofproto *ofproto, struct classifier *cls,
+ struct rule *rule) OVS_REQ_WRLOCK(cls->rwlock)
{
- struct ofproto *ofproto = rule->ofproto;
- struct oftable *table = &ofproto->tables[rule->table_id];
-
- classifier_remove(&table->cls, &rule->cr);
+ classifier_remove(cls, &rule->cr);
if (rule->meter_id) {
list_remove(&rule->meter_list_node);
}
}
}
+/* Removes 'rule' from the oftable that contains it.  Thin locking wrapper
+ * around oftable_remove_rule__(): takes the classifier's write lock for the
+ * duration of the removal, for callers that do not already hold it. */
+static void
+oftable_remove_rule(struct rule *rule)
+{
+ struct ofproto *ofproto = rule->ofproto;
+ struct oftable *table = &ofproto->tables[rule->table_id];
+
+ ovs_rwlock_wrlock(&table->cls.rwlock);
+ oftable_remove_rule__(ofproto, &table->cls, rule);
+ ovs_rwlock_unlock(&table->cls.rwlock);
+}
+
/* Inserts 'rule' into its oftable. Removes any existing rule from 'rule''s
* oftable that has an identical cls_rule. Returns the rule that was removed,
* if any, and otherwise NULL. */
struct meter *meter = ofproto->meters[rule->meter_id];
list_insert(&meter->rules, &rule->meter_list_node);
}
+ ovs_rwlock_wrlock(&table->cls.rwlock);
victim = rule_from_cls_rule(classifier_replace(&table->cls, &rule->cr));
+ ovs_rwlock_unlock(&table->cls.rwlock);
if (victim) {
if (victim->meter_id) {
list_remove(&victim->meter_list_node);
static void
compare_classifiers(struct classifier *cls, struct tcls *tcls)
+ OVS_REQ_RDLOCK(cls->rwlock)
{
static const int confidence = 500;
unsigned int i;
struct test_rule *rule, *next_rule;
struct cls_cursor cursor;
+ ovs_rwlock_wrlock(&cls->rwlock);
cls_cursor_init(&cursor, cls, NULL);
CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cls_rule, &cursor) {
classifier_remove(cls, &rule->cls_rule);
free_rule(rule);
}
+ ovs_rwlock_unlock(&cls->rwlock);
classifier_destroy(cls);
}
static void
-check_tables(const struct classifier *cls,
- int n_tables, int n_rules, int n_dups)
+check_tables(const struct classifier *cls, int n_tables, int n_rules,
+ int n_dups) OVS_REQ_RDLOCK(cls->rwlock)
{
const struct cls_table *table;
struct test_rule *test_rule;
struct tcls tcls;
classifier_init(&cls);
+ ovs_rwlock_rdlock(&cls.rwlock);
tcls_init(&tcls);
assert(classifier_is_empty(&cls));
assert(tcls_is_empty(&tcls));
compare_classifiers(&cls, &tcls);
+ ovs_rwlock_unlock(&cls.rwlock);
classifier_destroy(&cls);
tcls_destroy(&tcls);
}
hash_bytes(&wc_fields, sizeof wc_fields, 0), 0);
classifier_init(&cls);
+ ovs_rwlock_wrlock(&cls.rwlock);
tcls_init(&tcls);
tcls_rule = tcls_insert(&tcls, rule);
compare_classifiers(&cls, &tcls);
free_rule(rule);
+ ovs_rwlock_unlock(&cls.rwlock);
classifier_destroy(&cls);
tcls_destroy(&tcls);
}
rule2->aux += 5;
classifier_init(&cls);
+ ovs_rwlock_wrlock(&cls.rwlock);
tcls_init(&tcls);
tcls_insert(&tcls, rule1);
classifier_insert(&cls, &rule1->cls_rule);
check_tables(&cls, 1, 1, 0);
compare_classifiers(&cls, &tcls);
tcls_destroy(&tcls);
+ ovs_rwlock_unlock(&cls.rwlock);
destroy_classifier(&cls);
}
}
}
classifier_init(&cls);
+ ovs_rwlock_wrlock(&cls.rwlock);
tcls_init(&tcls);
for (i = 0; i < ARRAY_SIZE(ops); i++) {
compare_classifiers(&cls, &tcls);
}
+ ovs_rwlock_unlock(&cls.rwlock);
classifier_destroy(&cls);
tcls_destroy(&tcls);
} while ((1 << count_ones(value_mask)) < N_RULES);
classifier_init(&cls);
+ ovs_rwlock_wrlock(&cls.rwlock);
tcls_init(&tcls);
for (i = 0; i < N_RULES; i++) {
compare_classifiers(&cls, &tcls);
}
+ ovs_rwlock_unlock(&cls.rwlock);
classifier_destroy(&cls);
tcls_destroy(&tcls);
}
shuffle(priorities, ARRAY_SIZE(priorities));
classifier_init(&cls);
+ ovs_rwlock_wrlock(&cls.rwlock);
tcls_init(&tcls);
for (i = 0; i < MAX_RULES; i++) {
free_rule(target);
}
+ ovs_rwlock_unlock(&cls.rwlock);
destroy_classifier(&cls);
tcls_destroy(&tcls);
}
struct cls_cursor cursor;
struct fte *fte, *next;
+ ovs_rwlock_wrlock(&cls->rwlock);
cls_cursor_init(&cursor, cls, NULL);
CLS_CURSOR_FOR_EACH_SAFE (fte, next, rule, &cursor) {
classifier_remove(cls, &fte->rule);
fte_free(fte);
}
+ ovs_rwlock_unlock(&cls->rwlock);
classifier_destroy(cls);
}
cls_rule_init(&fte->rule, match, priority);
fte->versions[index] = version;
+ ovs_rwlock_wrlock(&cls->rwlock);
old = fte_from_cls_rule(classifier_replace(cls, &fte->rule));
+ ovs_rwlock_unlock(&cls->rwlock);
if (old) {
fte_version_free(old->versions[index]);
fte->versions[!index] = old->versions[!index];
list_init(&requests);
/* Delete flows that exist on the switch but not in the file. */
+ ovs_rwlock_rdlock(&cls.rwlock);
cls_cursor_init(&cursor, &cls, NULL);
CLS_CURSOR_FOR_EACH (fte, rule, &cursor) {
struct fte_version *file_ver = fte->versions[FILE_IDX];
fte_make_flow_mod(fte, FILE_IDX, OFPFC_ADD, protocol, &requests);
}
}
+ ovs_rwlock_unlock(&cls.rwlock);
transact_multiple_noreply(vconn, &requests);
vconn_close(vconn);
ds_init(&a_s);
ds_init(&b_s);
+ ovs_rwlock_rdlock(&cls.rwlock);
cls_cursor_init(&cursor, &cls, NULL);
CLS_CURSOR_FOR_EACH (fte, rule, &cursor) {
struct fte_version *a = fte->versions[0];
}
}
}
+ ovs_rwlock_unlock(&cls.rwlock);
ds_destroy(&a_s);
ds_destroy(&b_s);