Merge branch 'mainstream'
[sliver-openvswitch.git] / ofproto / ofproto.c
index 522c839..709fd07 100644
@@ -21,6 +21,7 @@
 #include <inttypes.h>
 #include <stdbool.h>
 #include <stdlib.h>
+#include <unistd.h>
 #include "bitmap.h"
 #include "byte-order.h"
 #include "classifier.h"
@@ -152,7 +153,10 @@ static void oftable_enable_eviction(struct oftable *,
                                     const struct mf_subfield *fields,
                                     size_t n_fields);
 
-static void oftable_remove_rule(struct rule *);
+static void oftable_remove_rule(struct rule *rule) OVS_RELEASES(rule->evict);
+static void oftable_remove_rule__(struct ofproto *ofproto,
+                                  struct classifier *cls, struct rule *rule)
+    OVS_REQ_WRLOCK(cls->rwlock) OVS_RELEASES(rule->evict);
 static struct rule *oftable_replace_rule(struct rule *);
 static void oftable_substitute_rule(struct rule *old, struct rule *new);
 
@@ -178,7 +182,8 @@ struct eviction_group {
     struct heap rules;          /* Contains "struct rule"s. */
 };
 
-static struct rule *choose_rule_to_evict(struct oftable *);
+static bool choose_rule_to_evict(struct oftable *table, struct rule **rulep)
+    OVS_TRY_WRLOCK(true, (*rulep)->evict);
 static void ofproto_evict(struct ofproto *);
 static uint32_t rule_eviction_priority(struct rule *);
 
@@ -199,8 +204,9 @@ static bool rule_is_modifiable(const struct rule *);
 static enum ofperr add_flow(struct ofproto *, struct ofconn *,
                             struct ofputil_flow_mod *,
                             const struct ofp_header *);
-static void delete_flow__(struct rule *, struct ofopgroup *,
-                          enum ofp_flow_removed_reason);
+static void delete_flow__(struct rule *rule, struct ofopgroup *,
+                          enum ofp_flow_removed_reason)
+    OVS_RELEASES(rule->evict);
 static bool handle_openflow(struct ofconn *, const struct ofpbuf *);
 static enum ofperr handle_flow_mod__(struct ofproto *, struct ofconn *,
                                      struct ofputil_flow_mod *,
@@ -213,6 +219,7 @@ static uint64_t pick_datapath_id(const struct ofproto *);
 static uint64_t pick_fallback_dpid(void);
 static void ofproto_destroy__(struct ofproto *);
 static void update_mtu(struct ofproto *, struct ofport *);
+static void meter_delete(struct ofproto *, uint32_t first, uint32_t last);
 
 /* unixctl. */
 static void ofproto_unixctl_init(void);
@@ -223,6 +230,7 @@ static size_t n_ofproto_classes;
 static size_t allocated_ofproto_classes;
 
 unsigned flow_eviction_threshold = OFPROTO_FLOW_EVICTION_THRESHOLD_DEFAULT;
+unsigned n_handler_threads;
 enum ofproto_flow_miss_model flow_miss_model = OFPROTO_HANDLE_MISS_AUTO;
 
 /* Map from datapath name to struct ofproto, for use by unixctl commands. */
@@ -424,11 +432,12 @@ ofproto_create(const char *datapath_name, const char *datapath_type,
     hmap_init(&ofproto->ports);
     shash_init(&ofproto->port_by_name);
     simap_init(&ofproto->ofp_requests);
-    ofproto->max_ports = OFPP_MAX;
+    ofproto->max_ports = ofp_to_u16(OFPP_MAX);
     ofproto->tables = NULL;
     ofproto->n_tables = 0;
     hindex_init(&ofproto->cookies);
     list_init(&ofproto->expirable);
+    ovs_mutex_init_recursive(&ofproto->expirable_mutex);
     ofproto->connmgr = connmgr_create(ofproto, datapath_name, datapath_name);
     ofproto->state = S_OPENFLOW;
     list_init(&ofproto->pending);
@@ -452,7 +461,7 @@ ofproto_create(const char *datapath_name, const char *datapath_type,
 
     /* The "max_ports" member should have been set by ->construct(ofproto).
      * Port 0 is not a valid OpenFlow port, so mark that as unavailable. */
-    ofproto->ofp_port_ids = bitmap_allocate(ofp_to_u16(ofproto->max_ports));
+    ofproto->ofp_port_ids = bitmap_allocate(ofproto->max_ports);
     bitmap_set1(ofproto->ofp_port_ids, 0);
 
     /* Check that hidden tables, if any, are at the end. */
@@ -511,9 +520,9 @@ ofproto_init_tables(struct ofproto *ofproto, int n_tables)
  * Reserved ports numbered OFPP_MAX and higher are special and not subject to
  * the 'max_ports' restriction. */
 void
-ofproto_init_max_ports(struct ofproto *ofproto, ofp_port_t max_ports)
+ofproto_init_max_ports(struct ofproto *ofproto, uint16_t max_ports)
 {
-    ovs_assert(ofp_to_u16(max_ports) <= ofp_to_u16(OFPP_MAX));
+    ovs_assert(max_ports <= ofp_to_u16(OFPP_MAX));
     ofproto->max_ports = max_ports;
 }
 
@@ -621,6 +630,18 @@ ofproto_set_mac_table_config(struct ofproto *ofproto, unsigned idle_time,
     }
 }
 
+/* Sets the number of upcall handler threads.  The default is
+ * (number of online cores - 1). */
+void
+ofproto_set_n_handler_threads(unsigned limit)
+{
+    if (limit) {
+        n_handler_threads = limit;
+    } else {
+        n_handler_threads = MAX(1, sysconf(_SC_NPROCESSORS_ONLN) - 1);
+    }
+}
+
 void
 ofproto_set_dp_desc(struct ofproto *p, const char *dp_desc)
 {
@@ -1016,6 +1037,7 @@ ofproto_configure_table(struct ofproto *ofproto, int table_id,
     }
 
     table->max_flows = s->max_flows;
+    ovs_rwlock_rdlock(&table->cls.rwlock);
     if (classifier_count(&table->cls) > table->max_flows
         && table->eviction_fields) {
         /* 'table' contains more flows than allowed.  We might not be able to
@@ -1031,6 +1053,7 @@ ofproto_configure_table(struct ofproto *ofproto, int table_id,
             break;
         }
     }
+    ovs_rwlock_unlock(&table->cls.rwlock);
 }
 \f
 bool
@@ -1064,15 +1087,18 @@ ofproto_flush__(struct ofproto *ofproto)
             continue;
         }
 
+        ovs_rwlock_wrlock(&table->cls.rwlock);
         cls_cursor_init(&cursor, &table->cls, NULL);
         CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
             if (!rule->pending) {
                 ofoperation_create(group, rule, OFOPERATION_DELETE,
                                    OFPRR_DELETE);
-                oftable_remove_rule(rule);
+                ovs_rwlock_wrlock(&rule->evict);
+                oftable_remove_rule__(ofproto, &table->cls, rule);
                 ofproto->ofproto_class->rule_destruct(rule);
             }
         }
+        ovs_rwlock_unlock(&table->cls.rwlock);
     }
     ofopgroup_submit(group);
 }
@@ -1085,6 +1111,11 @@ ofproto_destroy__(struct ofproto *ofproto)
     ovs_assert(list_is_empty(&ofproto->pending));
     ovs_assert(!ofproto->n_pending);
 
+    if (ofproto->meters) {
+        meter_delete(ofproto, 1, ofproto->meter_features.max_meters);
+        free(ofproto->meters);
+    }
+
     connmgr_destroy(ofproto->connmgr);
 
     hmap_remove(&all_ofprotos, &ofproto->hmap_node);
@@ -1109,6 +1140,7 @@ ofproto_destroy__(struct ofproto *ofproto)
 
     free(ofproto->vlan_bitmap);
 
+    ovs_mutex_destroy(&ofproto->expirable_mutex);
     ofproto->ofproto_class->dealloc(ofproto);
 }
 
@@ -1379,7 +1411,9 @@ ofproto_get_memory_usage(const struct ofproto *ofproto, struct simap *usage)
 
     n_rules = 0;
     OFPROTO_FOR_EACH_TABLE (table, ofproto) {
+        ovs_rwlock_rdlock(&table->cls.rwlock);
         n_rules += classifier_count(&table->cls);
+        ovs_rwlock_unlock(&table->cls.rwlock);
     }
     simap_increase(usage, "rules", n_rules);
 
@@ -1606,8 +1640,10 @@ ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
 {
     const struct rule *rule;
 
+    ovs_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
     rule = rule_from_cls_rule(classifier_find_match_exactly(
                                   &ofproto->tables[0].cls, match, priority));
+    ovs_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
     if (!rule || !ofpacts_equal(rule->ofpacts, rule->ofpacts_len,
                                 ofpacts, ofpacts_len)) {
         struct ofputil_flow_mod fm;
@@ -1644,8 +1680,10 @@ ofproto_delete_flow(struct ofproto *ofproto,
 {
     struct rule *rule;
 
+    ovs_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
     rule = rule_from_cls_rule(classifier_find_match_exactly(
                                   &ofproto->tables[0].cls, target, priority));
+    ovs_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
     if (!rule) {
         /* No such rule -> success. */
         return true;
@@ -1657,6 +1695,7 @@ ofproto_delete_flow(struct ofproto *ofproto,
         /* Initiate deletion -> success. */
         struct ofopgroup *group = ofopgroup_create_unattached(ofproto);
         ofoperation_create(group, rule, OFOPERATION_DELETE, OFPRR_DELETE);
+        ovs_rwlock_wrlock(&rule->evict);
         oftable_remove_rule(rule);
         ofproto->ofproto_class->rule_destruct(rule);
         ofopgroup_submit(group);
@@ -1703,32 +1742,28 @@ reinit_ports(struct ofproto *p)
 static ofp_port_t
 alloc_ofp_port(struct ofproto *ofproto, const char *netdev_name)
 {
-    uint16_t max_ports = ofp_to_u16(ofproto->max_ports);
     uint16_t port_idx;
 
     port_idx = simap_get(&ofproto->ofp_requests, netdev_name);
-    if (!port_idx) {
-        port_idx = UINT16_MAX;
-    }
+    port_idx = port_idx ? port_idx : UINT16_MAX;
 
-    if (port_idx >= max_ports
+    if (port_idx >= ofproto->max_ports
         || bitmap_is_set(ofproto->ofp_port_ids, port_idx)) {
-        uint16_t end_port_no = ofp_to_u16(ofproto->alloc_port_no);
-        uint16_t alloc_port_no = end_port_no;
+        uint16_t end_port_no = ofproto->alloc_port_no;
 
         /* Search for a free OpenFlow port number.  We try not to
          * immediately reuse them to prevent problems due to old
          * flows. */
         for (;;) {
-            if (++alloc_port_no >= max_ports) {
-                alloc_port_no = 0;
+            if (++ofproto->alloc_port_no >= ofproto->max_ports) {
+                ofproto->alloc_port_no = 0;
             }
-            if (!bitmap_is_set(ofproto->ofp_port_ids, alloc_port_no)) {
-                port_idx = alloc_port_no;
-                ofproto->alloc_port_no = u16_to_ofp(alloc_port_no);
+            if (!bitmap_is_set(ofproto->ofp_port_ids,
+                               ofproto->alloc_port_no)) {
+                port_idx = ofproto->alloc_port_no;
                 break;
             }
-            if (alloc_port_no == end_port_no) {
+            if (ofproto->alloc_port_no == end_port_no) {
                 return OFPP_NONE;
             }
         }
@@ -1740,7 +1775,7 @@ alloc_ofp_port(struct ofproto *ofproto, const char *netdev_name)
 static void
 dealloc_ofp_port(const struct ofproto *ofproto, ofp_port_t ofp_port)
 {
-    if (ofp_to_u16(ofp_port) < ofp_to_u16(ofproto->max_ports)) {
+    if (ofp_to_u16(ofp_port) < ofproto->max_ports) {
         bitmap_set0(ofproto->ofp_port_ids, ofp_to_u16(ofp_port));
     }
 }
@@ -2064,6 +2099,10 @@ init_ports(struct ofproto *p)
             netdev = ofport_open(p, &ofproto_port, &pp);
             if (netdev) {
                 ofport_install(p, netdev, &pp);
+                if (ofp_to_u16(ofproto_port.ofp_port) < p->max_ports) {
+                    p->alloc_port_no = MAX(p->alloc_port_no,
+                                           ofp_to_u16(ofproto_port.ofp_port));
+                }
             }
         }
     }
@@ -2159,6 +2198,8 @@ ofproto_rule_destroy__(struct rule *rule)
     if (rule) {
         cls_rule_destroy(&rule->cr);
         free(rule->ofpacts);
+        ovs_mutex_destroy(&rule->timeout_mutex);
+        ovs_rwlock_destroy(&rule->evict);
         rule->ofproto->ofproto_class->rule_dealloc(rule);
     }
 }
@@ -2172,10 +2213,15 @@ ofproto_rule_destroy__(struct rule *rule)
  * This function should only be called from an ofproto implementation's
  * ->destruct() function.  It is not suitable elsewhere. */
 void
-ofproto_rule_destroy(struct rule *rule)
+ofproto_rule_destroy(struct ofproto *ofproto, struct classifier *cls,
+                     struct rule *rule) OVS_REQ_WRLOCK(cls->rwlock)
 {
     ovs_assert(!rule->pending);
-    oftable_remove_rule(rule);
+    if (!ovs_rwlock_trywrlock(&rule->evict)) {
+        oftable_remove_rule__(ofproto, cls, rule);
+    } else {
+        NOT_REACHED();
+    }
     ofproto_rule_destroy__(rule);
 }
 
@@ -2416,8 +2462,8 @@ ofproto_check_ofpacts(struct ofproto *ofproto,
     enum ofperr error;
     uint32_t mid;
 
-    error = ofpacts_check(ofpacts, ofpacts_len, flow, ofproto->max_ports,
-                          table_id);
+    error = ofpacts_check(ofpacts, ofpacts_len, flow,
+                          u16_to_ofp(ofproto->max_ports), table_id);
     if (error) {
         return error;
     }
@@ -2454,7 +2500,7 @@ handle_packet_out(struct ofconn *ofconn, const struct ofp_header *oh)
     if (error) {
         goto exit_free_ofpacts;
     }
-    if (ofp_to_u16(po.in_port) >= ofp_to_u16(p->max_ports)
+    if (ofp_to_u16(po.in_port) >= p->max_ports
         && ofp_to_u16(po.in_port) < ofp_to_u16(OFPP_MAX)) {
         error = OFPERR_OFPBRC_BAD_PORT;
         goto exit_free_ofpacts;
@@ -2607,7 +2653,9 @@ handle_table_stats_request(struct ofconn *ofconn,
         ots[i].instructions = htonl(OFPIT11_ALL);
         ots[i].config = htonl(OFPTC11_TABLE_MISS_MASK);
         ots[i].max_entries = htonl(1000000); /* An arbitrary big number. */
+        ovs_rwlock_rdlock(&p->tables[i].cls.rwlock);
         ots[i].active_count = htonl(classifier_count(&p->tables[i].cls));
+        ovs_rwlock_unlock(&p->tables[i].cls.rwlock);
     }
 
     p->ofproto_class->get_tables(p, ots);
@@ -2874,9 +2922,11 @@ collect_rules_loose(struct ofproto *ofproto, uint8_t table_id,
         struct cls_cursor cursor;
         struct rule *rule;
 
+        ovs_rwlock_rdlock(&table->cls.rwlock);
         cls_cursor_init(&cursor, &table->cls, &cr);
         CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
             if (rule->pending) {
+                ovs_rwlock_unlock(&table->cls.rwlock);
                 error = OFPROTO_POSTPONE;
                 goto exit;
             }
@@ -2886,6 +2936,7 @@ collect_rules_loose(struct ofproto *ofproto, uint8_t table_id,
                 list_push_back(rules, &rule->ofproto_node);
             }
         }
+        ovs_rwlock_unlock(&table->cls.rwlock);
     }
 
 exit:
@@ -2950,8 +3001,10 @@ collect_rules_strict(struct ofproto *ofproto, uint8_t table_id,
     FOR_EACH_MATCHING_TABLE (table, table_id, ofproto) {
         struct rule *rule;
 
+        ovs_rwlock_rdlock(&table->cls.rwlock);
         rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls,
                                                                &cr));
+        ovs_rwlock_unlock(&table->cls.rwlock);
         if (rule) {
             if (rule->pending) {
                 error = OFPROTO_POSTPONE;
@@ -3013,8 +3066,6 @@ handle_flow_stats_request(struct ofconn *ofconn,
         fs.cookie = rule->flow_cookie;
         fs.table_id = rule->table_id;
         calc_duration(rule->created, now, &fs.duration_sec, &fs.duration_nsec);
-        fs.idle_timeout = rule->idle_timeout;
-        fs.hard_timeout = rule->hard_timeout;
         fs.idle_age = age_secs(now - rule->used);
         fs.hard_age = age_secs(now - rule->modified);
         ofproto->ofproto_class->rule_get_stats(rule, &fs.packet_count,
@@ -3022,6 +3073,12 @@ handle_flow_stats_request(struct ofconn *ofconn,
         fs.ofpacts = rule->ofpacts;
         fs.ofpacts_len = rule->ofpacts_len;
         fs.flags = 0;
+
+        ovs_mutex_lock(&rule->timeout_mutex);
+        fs.idle_timeout = rule->idle_timeout;
+        fs.hard_timeout = rule->hard_timeout;
+        ovs_mutex_unlock(&rule->timeout_mutex);
+
         if (rule->send_flow_removed) {
             fs.flags |= OFPFF_SEND_FLOW_REM;
             /* FIXME: Implement OF 1.3 flags OFPFF13_NO_PKT_COUNTS
@@ -3067,10 +3124,12 @@ ofproto_get_all_flows(struct ofproto *p, struct ds *results)
         struct cls_cursor cursor;
         struct rule *rule;
 
+        ovs_rwlock_rdlock(&table->cls.rwlock);
         cls_cursor_init(&cursor, &table->cls, NULL);
         CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
             flow_stats_ds(rule, results);
         }
+        ovs_rwlock_unlock(&table->cls.rwlock);
     }
 }
 
@@ -3088,8 +3147,7 @@ ofproto_get_netflow_ids(const struct ofproto *ofproto,
  * Returns false if the port did not have CFM configured, in which case
  * '*status' is indeterminate.
  *
- * The caller must provide and owns '*status', but it does not own and must not
- * modify or free the array returned in 'status->rmps'. */
+ * The caller must provide and owns '*status', and must free 'status->rmps'. */
 bool
 ofproto_port_get_cfm_status(const struct ofproto *ofproto, ofp_port_t ofp_port,
                             struct ofproto_cfm_status *status)
@@ -3164,18 +3222,26 @@ handle_aggregate_stats_request(struct ofconn *ofconn,
 struct queue_stats_cbdata {
     struct ofport *ofport;
     struct list replies;
+    long long int now;
 };
 
 static void
 put_queue_stats(struct queue_stats_cbdata *cbdata, uint32_t queue_id,
                 const struct netdev_queue_stats *stats)
 {
+    struct ofputil_queue_stats oqs;
 
-    struct ofputil_queue_stats oqs = {
-        .port_no = cbdata->ofport->pp.port_no,
-        .queue_id = queue_id,
-        .stats = *stats,
-    };
+    oqs.port_no = cbdata->ofport->pp.port_no;
+    oqs.queue_id = queue_id;
+    oqs.tx_bytes = stats->tx_bytes;
+    oqs.tx_packets = stats->tx_packets;
+    oqs.tx_errors = stats->tx_errors;
+    if (stats->created != LLONG_MIN) {
+        calc_duration(stats->created, cbdata->now,
+                      &oqs.duration_sec, &oqs.duration_nsec);
+    } else {
+        oqs.duration_sec = oqs.duration_nsec = UINT32_MAX;
+    }
     ofputil_append_queue_stat(&cbdata->replies, &oqs);
 }
 
@@ -3222,6 +3288,7 @@ handle_queue_stats_request(struct ofconn *ofconn,
     COVERAGE_INC(ofproto_queue_req);
 
     ofpmp_init(&cbdata.replies, rq);
+    cbdata.now = time_msec();
 
     error = ofputil_decode_queue_stats_request(rq, &oqsr);
     if (error) {
@@ -3348,11 +3415,18 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
     }
 
     /* Check for overlap, if requested. */
-    if (fm->flags & OFPFF_CHECK_OVERLAP
-        && classifier_rule_overlaps(&table->cls, &rule->cr)) {
-        cls_rule_destroy(&rule->cr);
-        ofproto->ofproto_class->rule_dealloc(rule);
-        return OFPERR_OFPFMFC_OVERLAP;
+    if (fm->flags & OFPFF_CHECK_OVERLAP) {
+        bool overlaps;
+
+        ovs_rwlock_rdlock(&table->cls.rwlock);
+        overlaps = classifier_rule_overlaps(&table->cls, &rule->cr);
+        ovs_rwlock_unlock(&table->cls.rwlock);
+
+        if (overlaps) {
+            cls_rule_destroy(&rule->cr);
+            ofproto->ofproto_class->rule_dealloc(rule);
+            return OFPERR_OFPFMFC_OVERLAP;
+        }
     }
 
     /* FIXME: Implement OFPFF12_RESET_COUNTS */
@@ -3361,8 +3435,13 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
     rule->pending = NULL;
     rule->flow_cookie = fm->new_cookie;
     rule->created = rule->modified = rule->used = time_msec();
+
+    ovs_mutex_init(&rule->timeout_mutex);
+    ovs_mutex_lock(&rule->timeout_mutex);
     rule->idle_timeout = fm->idle_timeout;
     rule->hard_timeout = fm->hard_timeout;
+    ovs_mutex_unlock(&rule->timeout_mutex);
+
     rule->table_id = table - ofproto->tables;
     rule->send_flow_removed = (fm->flags & OFPFF_SEND_FLOW_REM) != 0;
     /* FIXME: Implement OF 1.3 flags OFPFF13_NO_PKT_COUNTS
@@ -3371,12 +3450,12 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
     rule->ofpacts_len = fm->ofpacts_len;
     rule->meter_id = find_meter(rule->ofpacts, rule->ofpacts_len);
     list_init(&rule->meter_list_node);
-    rule->evictable = true;
     rule->eviction_group = NULL;
     list_init(&rule->expirable);
     rule->monitor_flags = 0;
     rule->add_seqno = 0;
     rule->modify_seqno = 0;
+    ovs_rwlock_init(&rule->evict);
 
     /* Insert new rule. */
     victim = oftable_replace_rule(rule);
@@ -3387,21 +3466,24 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
     } else {
         struct ofoperation *op;
         struct rule *evict;
-
-        if (classifier_count(&table->cls) > table->max_flows) {
-            bool was_evictable;
-
-            was_evictable = rule->evictable;
-            rule->evictable = false;
-            evict = choose_rule_to_evict(table);
-            rule->evictable = was_evictable;
-
-            if (!evict) {
+        size_t n_rules;
+
+        ovs_rwlock_rdlock(&table->cls.rwlock);
+        n_rules = classifier_count(&table->cls);
+        ovs_rwlock_unlock(&table->cls.rwlock);
+        if (n_rules > table->max_flows) {
+            ovs_rwlock_rdlock(&rule->evict);
+            if (choose_rule_to_evict(table, &evict)) {
+                ovs_rwlock_unlock(&rule->evict);
+                ovs_rwlock_unlock(&evict->evict);
+                if (evict->pending) {
+                    error = OFPROTO_POSTPONE;
+                    goto exit;
+                }
+            } else {
+                ovs_rwlock_unlock(&rule->evict);
                 error = OFPERR_OFPFMFC_TABLE_FULL;
                 goto exit;
-            } else if (evict->pending) {
-                error = OFPROTO_POSTPONE;
-                goto exit;
             }
         } else {
             evict = NULL;
@@ -3416,6 +3498,13 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
             op->group->n_running--;
             ofoperation_destroy(rule->pending);
         } else if (evict) {
+            /* It would be better if we maintained the lock we took in
+             * choose_rule_to_evict() earlier, but that confuses the thread
+             * safety analysis, and this code is fragile enough that we really
+             * need that analysis.  In the worst case, we'll have to block a
+             * little while before we perform the eviction, which doesn't seem
+             * like a big problem. */
+            ovs_rwlock_wrlock(&evict->evict);
             delete_flow__(evict, group, OFPRR_EVICTION);
         }
         ofopgroup_submit(group);
@@ -3465,7 +3554,7 @@ modify_flows__(struct ofproto *ofproto, struct ofconn *ofconn,
 
         /* Verify actions. */
         error = ofpacts_check(fm->ofpacts, fm->ofpacts_len, &fm->match.flow,
-                              ofproto->max_ports, rule->table_id);
+                              u16_to_ofp(ofproto->max_ports), rule->table_id);
         if (error) {
             return error;
         }
@@ -3475,7 +3564,7 @@ modify_flows__(struct ofproto *ofproto, struct ofconn *ofconn,
 
         op = ofoperation_create(group, rule, OFOPERATION_MODIFY, 0);
 
-        if (fm->new_cookie != htonll(UINT64_MAX)) {
+        if (fm->modify_cookie && fm->new_cookie != htonll(UINT64_MAX)) {
             ofproto_rule_change_cookie(ofproto, rule, fm->new_cookie);
         }
         if (actions_changed) {
@@ -3586,6 +3675,7 @@ delete_flows__(struct ofproto *ofproto, struct ofconn *ofconn,
 
     group = ofopgroup_create(ofproto, ofconn, request, UINT32_MAX);
     LIST_FOR_EACH_SAFE (rule, next, ofproto_node, rules) {
+        ovs_rwlock_wrlock(&rule->evict);
         delete_flow__(rule, group, reason);
     }
     ofopgroup_submit(group);
@@ -3646,8 +3736,10 @@ ofproto_rule_send_removed(struct rule *rule, uint8_t reason)
     fr.table_id = rule->table_id;
     calc_duration(rule->created, time_msec(),
                   &fr.duration_sec, &fr.duration_nsec);
+    ovs_mutex_lock(&rule->timeout_mutex);
     fr.idle_timeout = rule->idle_timeout;
     fr.hard_timeout = rule->hard_timeout;
+    ovs_mutex_unlock(&rule->timeout_mutex);
     rule->ofproto->ofproto_class->rule_get_stats(rule, &fr.packet_count,
                                                  &fr.byte_count);
 
@@ -3953,8 +4045,10 @@ ofproto_compose_flow_refresh_update(const struct rule *rule,
     fu.event = (flags & (NXFMF_INITIAL | NXFMF_ADD)
                 ? NXFME_ADDED : NXFME_MODIFIED);
     fu.reason = 0;
+    ovs_mutex_lock(&rule->timeout_mutex);
     fu.idle_timeout = rule->idle_timeout;
     fu.hard_timeout = rule->hard_timeout;
+    ovs_mutex_unlock(&rule->timeout_mutex);
     fu.table_id = rule->table_id;
     fu.cookie = rule->flow_cookie;
     minimatch_expand(&rule->cr.match, &match);
@@ -4069,11 +4163,13 @@ ofproto_collect_ofmonitor_refresh_rules(const struct ofmonitor *m,
         struct cls_cursor cursor;
         struct rule *rule;
 
+        ovs_rwlock_rdlock(&table->cls.rwlock);
         cls_cursor_init(&cursor, &table->cls, &target);
         CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
             ovs_assert(!rule->pending); /* XXX */
             ofproto_collect_ofmonitor_refresh_rule(m, rule, seqno, rules);
         }
+        ovs_rwlock_unlock(&table->cls.rwlock);
     }
 
     HMAP_FOR_EACH (op, hmap_node, &ofproto->deletions) {
@@ -4255,6 +4351,22 @@ meter_create(const struct ofputil_meter_config *config,
     return meter;
 }
 
+static void
+meter_delete(struct ofproto *ofproto, uint32_t first, uint32_t last)
+{
+    uint32_t mid;
+    for (mid = first; mid <= last; ++mid) {
+        struct meter *meter = ofproto->meters[mid];
+        if (meter) {
+            ofproto->meters[mid] = NULL;
+            ofproto->ofproto_class->meter_del(ofproto,
+                                              meter->provider_meter_id);
+            free(meter->bands);
+            free(meter);
+        }
+    }
+}
+
 static enum ofperr
 handle_add_meter(struct ofproto *ofproto, struct ofputil_meter_mod *mm)
 {
@@ -4335,16 +4447,7 @@ handle_delete_meter(struct ofconn *ofconn, const struct ofp_header *oh,
     }
 
     /* Delete the meters. */
-    for (meter_id = first; meter_id <= last; ++meter_id) {
-        struct meter *meter = ofproto->meters[meter_id];
-        if (meter) {
-            ofproto->meters[meter_id] = NULL;
-            ofproto->ofproto_class->meter_del(ofproto,
-                                              meter->provider_meter_id);
-            free(meter->bands);
-            free(meter);
-        }
-    }
+    meter_delete(ofproto, first, last);
 
     return 0;
 }
@@ -4458,7 +4561,7 @@ handle_meter_request(struct ofconn *ofconn, const struct ofp_header *request,
         if (!meter) {
             continue; /* Skip non-existing meters. */
         }
-        if (type == OFPTYPE_METER_REQUEST) {
+        if (type == OFPTYPE_METER_STATS_REQUEST) {
             struct ofputil_meter_stats stats;
 
             stats.meter_id = meter_id;
@@ -4590,20 +4693,20 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg)
     case OFPTYPE_FLOW_MONITOR_STATS_REQUEST:
         return handle_flow_monitor_request(ofconn, oh);
 
-    case OFPTYPE_METER_REQUEST:
-    case OFPTYPE_METER_CONFIG_REQUEST:
+    case OFPTYPE_METER_STATS_REQUEST:
+    case OFPTYPE_METER_CONFIG_STATS_REQUEST:
         return handle_meter_request(ofconn, oh, type);
 
-    case OFPTYPE_METER_FEATURES_REQUEST:
+    case OFPTYPE_METER_FEATURES_STATS_REQUEST:
         return handle_meter_features_request(ofconn, oh);
 
         /* FIXME: Change the following once they are implemented: */
     case OFPTYPE_QUEUE_GET_CONFIG_REQUEST:
     case OFPTYPE_GET_ASYNC_REQUEST:
-    case OFPTYPE_GROUP_REQUEST:
-    case OFPTYPE_GROUP_DESC_REQUEST:
-    case OFPTYPE_GROUP_FEATURES_REQUEST:
-    case OFPTYPE_TABLE_FEATURES_REQUEST:
+    case OFPTYPE_GROUP_STATS_REQUEST:
+    case OFPTYPE_GROUP_DESC_STATS_REQUEST:
+    case OFPTYPE_GROUP_FEATURES_STATS_REQUEST:
+    case OFPTYPE_TABLE_FEATURES_STATS_REQUEST:
         return OFPERR_OFPBRC_BAD_TYPE;
 
     case OFPTYPE_HELLO:
@@ -4627,13 +4730,13 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg)
     case OFPTYPE_FLOW_MONITOR_RESUMED:
     case OFPTYPE_FLOW_MONITOR_STATS_REPLY:
     case OFPTYPE_GET_ASYNC_REPLY:
-    case OFPTYPE_GROUP_REPLY:
-    case OFPTYPE_GROUP_DESC_REPLY:
-    case OFPTYPE_GROUP_FEATURES_REPLY:
-    case OFPTYPE_METER_REPLY:
-    case OFPTYPE_METER_CONFIG_REPLY:
-    case OFPTYPE_METER_FEATURES_REPLY:
-    case OFPTYPE_TABLE_FEATURES_REPLY:
+    case OFPTYPE_GROUP_STATS_REPLY:
+    case OFPTYPE_GROUP_DESC_STATS_REPLY:
+    case OFPTYPE_GROUP_FEATURES_STATS_REPLY:
+    case OFPTYPE_METER_STATS_REPLY:
+    case OFPTYPE_METER_CONFIG_STATS_REPLY:
+    case OFPTYPE_METER_FEATURES_STATS_REPLY:
+    case OFPTYPE_TABLE_FEATURES_STATS_REPLY:
     default:
         return OFPERR_OFPBRC_BAD_TYPE;
     }
@@ -4996,17 +5099,18 @@ pick_fallback_dpid(void)
 \f
 /* Table overflow policy. */
 
-/* Chooses and returns a rule to evict from 'table'.  Returns NULL if the table
- * is not configured to evict rules or if the table contains no evictable
- * rules.  (Rules with 'evictable' set to false or with no timeouts are not
- * evictable.) */
-static struct rule *
-choose_rule_to_evict(struct oftable *table)
+/* Chooses a rule to evict from 'table' and stores it in '*rulep', returning
+ * true on success.  Returns false and sets '*rulep' to NULL if the table is
+ * not configured to evict rules or contains no evictable rules.  (Rules with
+ * a read lock held on 'evict', or with no timeouts, are not evictable.) */
+static bool
+choose_rule_to_evict(struct oftable *table, struct rule **rulep)
 {
     struct eviction_group *evg;
 
+    *rulep = NULL;
     if (!table->eviction_fields) {
-        return NULL;
+        return false;
     }
 
     /* In the common case, the outer and inner loops here will each be entered
@@ -5020,18 +5124,19 @@ choose_rule_to_evict(struct oftable *table)
      *     group has no evictable rules.
      *
      *   - The outer loop can exit only if table's 'max_flows' is all filled up
-     *     by unevictable rules'. */
+     *     by unevictable rules. */
     HEAP_FOR_EACH (evg, size_node, &table->eviction_groups_by_size) {
         struct rule *rule;
 
         HEAP_FOR_EACH (rule, evg_node, &evg->rules) {
-            if (rule->evictable) {
-                return rule;
+            if (!ovs_rwlock_trywrlock(&rule->evict)) {
+                *rulep = rule;
+                return true;
             }
         }
     }
 
-    return NULL;
+    return false;
 }
 
 /* Searches 'ofproto' for tables that have more flows than their configured
@@ -5048,12 +5153,24 @@ ofproto_evict(struct ofproto *ofproto)
 
     group = ofopgroup_create_unattached(ofproto);
     OFPROTO_FOR_EACH_TABLE (table, ofproto) {
-        while (classifier_count(&table->cls) > table->max_flows
-               && table->eviction_fields) {
+        while (table->eviction_fields) {
             struct rule *rule;
+            size_t n_rules;
+
+            ovs_rwlock_rdlock(&table->cls.rwlock);
+            n_rules = classifier_count(&table->cls);
+            ovs_rwlock_unlock(&table->cls.rwlock);
 
-            rule = choose_rule_to_evict(table);
-            if (!rule || rule->pending) {
+            if (n_rules <= table->max_flows) {
+                break;
+            }
+
+            if (!choose_rule_to_evict(table, &rule)) {
+                break;
+            }
+
+            if (rule->pending) {
+                ovs_rwlock_unlock(&rule->evict);
                 break;
             }
 
@@ -5197,6 +5314,7 @@ rule_eviction_priority(struct rule *rule)
     uint32_t expiration_offset;
 
     /* Calculate time of expiration. */
+    ovs_mutex_lock(&rule->timeout_mutex);
     hard_expiration = (rule->hard_timeout
                        ? rule->modified + rule->hard_timeout * 1000
                        : LLONG_MAX);
@@ -5204,6 +5322,7 @@ rule_eviction_priority(struct rule *rule)
                        ? rule->used + rule->idle_timeout * 1000
                        : LLONG_MAX);
     expiration = MIN(hard_expiration, idle_expiration);
+    ovs_mutex_unlock(&rule->timeout_mutex);
     if (expiration == LLONG_MAX) {
         return 0;
     }
@@ -5230,9 +5349,13 @@ eviction_group_add_rule(struct rule *rule)
 {
     struct ofproto *ofproto = rule->ofproto;
     struct oftable *table = &ofproto->tables[rule->table_id];
+    bool has_timeout;
 
-    if (table->eviction_fields
-        && (rule->hard_timeout || rule->idle_timeout)) {
+    ovs_mutex_lock(&rule->timeout_mutex);
+    has_timeout = rule->hard_timeout || rule->idle_timeout;
+    ovs_mutex_unlock(&rule->timeout_mutex);
+
+    if (table->eviction_fields && has_timeout) {
         struct eviction_group *evg;
 
         evg = eviction_group_find(table, eviction_group_hash_rule(rule));
@@ -5261,7 +5384,9 @@ oftable_init(struct oftable *table)
 static void
 oftable_destroy(struct oftable *table)
 {
+    ovs_rwlock_rdlock(&table->cls.rwlock);
     ovs_assert(classifier_is_empty(&table->cls));
+    ovs_rwlock_unlock(&table->cls.rwlock);
     oftable_disable_eviction(table);
     classifier_destroy(&table->cls);
     free(table->name);
@@ -5341,31 +5466,46 @@ oftable_enable_eviction(struct oftable *table,
     hmap_init(&table->eviction_groups_by_id);
     heap_init(&table->eviction_groups_by_size);
 
+    ovs_rwlock_rdlock(&table->cls.rwlock);
     cls_cursor_init(&cursor, &table->cls, NULL);
     CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
         eviction_group_add_rule(rule);
     }
+    ovs_rwlock_unlock(&table->cls.rwlock);
 }
 
 /* Removes 'rule' from the oftable that contains it. */
 static void
-oftable_remove_rule(struct rule *rule)
+oftable_remove_rule__(struct ofproto *ofproto, struct classifier *cls,
+                      struct rule *rule)
+    OVS_REQ_WRLOCK(cls->rwlock) OVS_RELEASES(rule->evict)
 {
-    struct ofproto *ofproto = rule->ofproto;
-    struct oftable *table = &ofproto->tables[rule->table_id];
-
-    classifier_remove(&table->cls, &rule->cr);
+    classifier_remove(cls, &rule->cr);
     if (rule->meter_id) {
         list_remove(&rule->meter_list_node);
     }
     cookies_remove(ofproto, rule);
     eviction_group_remove_rule(rule);
+    ovs_mutex_lock(&ofproto->expirable_mutex);
     if (!list_is_empty(&rule->expirable)) {
         list_remove(&rule->expirable);
     }
+    ovs_mutex_unlock(&ofproto->expirable_mutex);
     if (!list_is_empty(&rule->meter_list_node)) {
         list_remove(&rule->meter_list_node);
     }
+    ovs_rwlock_unlock(&rule->evict);
+}
+
+static void
+oftable_remove_rule(struct rule *rule)
+{
+    struct ofproto *ofproto = rule->ofproto;
+    struct oftable *table = &ofproto->tables[rule->table_id];
+
+    ovs_rwlock_wrlock(&table->cls.rwlock);
+    oftable_remove_rule__(ofproto, &table->cls, rule);
+    ovs_rwlock_unlock(&table->cls.rwlock);
 }
 
 /* Inserts 'rule' into its oftable.  Removes any existing rule from 'rule''s
@@ -5377,26 +5517,36 @@ oftable_replace_rule(struct rule *rule)
     struct ofproto *ofproto = rule->ofproto;
     struct oftable *table = &ofproto->tables[rule->table_id];
     struct rule *victim;
-    bool may_expire = rule->hard_timeout || rule->idle_timeout;
+    bool may_expire;
+
+    ovs_mutex_lock(&rule->timeout_mutex);
+    may_expire = rule->hard_timeout || rule->idle_timeout;
+    ovs_mutex_unlock(&rule->timeout_mutex);
 
     if (may_expire) {
+        ovs_mutex_lock(&ofproto->expirable_mutex);
         list_insert(&ofproto->expirable, &rule->expirable);
+        ovs_mutex_unlock(&ofproto->expirable_mutex);
     }
     cookies_insert(ofproto, rule);
     if (rule->meter_id) {
         struct meter *meter = ofproto->meters[rule->meter_id];
         list_insert(&meter->rules, &rule->meter_list_node);
     }
+    ovs_rwlock_wrlock(&table->cls.rwlock);
     victim = rule_from_cls_rule(classifier_replace(&table->cls, &rule->cr));
+    ovs_rwlock_unlock(&table->cls.rwlock);
     if (victim) {
         if (victim->meter_id) {
             list_remove(&victim->meter_list_node);
         }
         cookies_remove(ofproto, victim);
 
+        ovs_mutex_lock(&ofproto->expirable_mutex);
         if (!list_is_empty(&victim->expirable)) {
             list_remove(&victim->expirable);
         }
+        ovs_mutex_unlock(&ofproto->expirable_mutex);
         eviction_group_remove_rule(victim);
     }
     eviction_group_add_rule(rule);
@@ -5410,6 +5560,7 @@ oftable_substitute_rule(struct rule *old, struct rule *new)
     if (new) {
         oftable_replace_rule(new);
     } else {
+        ovs_rwlock_wrlock(&old->evict);
         oftable_remove_rule(old);
     }
 }