ofproto: Reduce default number of miss handlers.
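
The default number of upcall handler threads drops from one less than the
number of online cores to two less, with a floor of one thread (see
ofproto_set_n_handler_threads() below).  A minimal standalone sketch of the
new default computation; the helper name default_n_handler_threads() is
hypothetical, and only the POSIX sysconf(_SC_NPROCESSORS_ONLN) call is
assumed to report the number of online cores:

    #include <unistd.h>

    /* Hypothetical helper mirroring the default chosen by
     * ofproto_set_n_handler_threads() when 'limit' is zero. */
    static unsigned int
    default_n_handler_threads(void)
    {
        long n_proc = sysconf(_SC_NPROCESSORS_ONLN);    /* online cores */

        /* Use all online cores except two, but never fewer than one. */
        return n_proc > 2 ? n_proc - 2 : 1;
    }
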
[sliver-openvswitch.git] / ofproto / ofproto.c
index 623039b..abf14f2 100644
@@ -155,10 +155,10 @@ static void oftable_enable_eviction(struct oftable *,
                                     const struct mf_subfield *fields,
                                     size_t n_fields);
 
-static void oftable_remove_rule(struct rule *rule) OVS_RELEASES(rule->evict);
+static void oftable_remove_rule(struct rule *rule) OVS_RELEASES(rule->rwlock);
 static void oftable_remove_rule__(struct ofproto *ofproto,
                                   struct classifier *cls, struct rule *rule)
-    OVS_REQ_WRLOCK(cls->rwlock) OVS_RELEASES(rule->evict);
+    OVS_REQ_WRLOCK(cls->rwlock) OVS_RELEASES(rule->rwlock);
 static void oftable_insert_rule(struct rule *);
 
 /* A set of rules within a single OpenFlow table (oftable) that have the same
@@ -184,7 +184,7 @@ struct eviction_group {
 };
 
 static bool choose_rule_to_evict(struct oftable *table, struct rule **rulep)
-    OVS_TRY_WRLOCK(true, (*rulep)->evict);
+    OVS_TRY_WRLOCK(true, (*rulep)->rwlock);
 static void ofproto_evict(struct ofproto *);
 static uint32_t rule_eviction_priority(struct rule *);
 static void eviction_group_add_rule(struct rule *);
@@ -199,6 +199,7 @@ static int init_ports(struct ofproto *);
 static void reinit_ports(struct ofproto *);
 
 /* rule. */
+static void ofproto_rule_destroy(struct rule *);
 static void ofproto_rule_destroy__(struct rule *);
 static void ofproto_rule_send_removed(struct rule *, uint8_t reason);
 static bool rule_is_modifiable(const struct rule *);
@@ -212,7 +213,8 @@ static enum ofperr modify_flows__(struct ofproto *, struct ofconn *,
                                   const struct ofp_header *, struct list *);
 static void delete_flow__(struct rule *rule, struct ofopgroup *,
                           enum ofp_flow_removed_reason)
-    OVS_RELEASES(rule->evict);
+    OVS_RELEASES(rule->rwlock);
+static enum ofperr add_group(struct ofproto *, struct ofputil_group_mod *);
 static bool handle_openflow(struct ofconn *, const struct ofpbuf *);
 static enum ofperr handle_flow_mod__(struct ofproto *, struct ofconn *,
                                      struct ofputil_flow_mod *,
@@ -439,6 +441,7 @@ ofproto_create(const char *datapath_name, const char *datapath_type,
     shash_init(&ofproto->port_by_name);
     simap_init(&ofproto->ofp_requests);
     ofproto->max_ports = ofp_to_u16(OFPP_MAX);
+    ofproto->eviction_group_timer = LLONG_MIN;
     ofproto->tables = NULL;
     ofproto->n_tables = 0;
     hindex_init(&ofproto->cookies);
@@ -456,6 +459,8 @@ ofproto_create(const char *datapath_name, const char *datapath_type,
     ofproto->vlan_bitmap = NULL;
     ofproto->vlans_changed = false;
     ofproto->min_mtu = INT_MAX;
+    ovs_rwlock_init(&ofproto->groups_rwlock);
+    hmap_init(&ofproto->groups);
 
     error = ofproto->ofproto_class->construct(ofproto);
     if (error) {
@@ -637,14 +642,15 @@ ofproto_set_mac_table_config(struct ofproto *ofproto, unsigned idle_time,
 }
 
 /* Sets number of upcall handler threads.  The default is
- * (number of online cores - 1). */
+ * (number of online cores - 2). */
 void
 ofproto_set_n_handler_threads(unsigned limit)
 {
     if (limit) {
         n_handler_threads = limit;
     } else {
-        n_handler_threads = MAX(1, sysconf(_SC_NPROCESSORS_ONLN) - 1);
+        int n_proc = sysconf(_SC_NPROCESSORS_ONLN);
+        n_handler_threads = n_proc > 2 ? n_proc - 2 : 1;
     }
 }
 
@@ -1074,17 +1080,37 @@ ofproto_get_snoops(const struct ofproto *ofproto, struct sset *snoops)
     connmgr_get_snoops(ofproto->connmgr, snoops);
 }
 
+/* Deletes 'rule' from 'cls' within 'ofproto'.
+ *
+ * The 'cls' argument is redundant (it is &ofproto->tables[rule->table_id].cls)
+ * but it allows Clang to do better checking. */
 static void
-ofproto_flush__(struct ofproto *ofproto)
+ofproto_delete_rule(struct ofproto *ofproto, struct classifier *cls,
+                    struct rule *rule)
+    OVS_REQ_WRLOCK(cls->rwlock)
 {
     struct ofopgroup *group;
+
+    ovs_assert(!rule->pending);
+    ovs_assert(cls == &ofproto->tables[rule->table_id].cls);
+
+    group = ofopgroup_create_unattached(ofproto);
+    ofoperation_create(group, rule, OFOPERATION_DELETE, OFPRR_DELETE);
+    ovs_rwlock_wrlock(&rule->rwlock);
+    oftable_remove_rule__(ofproto, cls, rule);
+    ofproto->ofproto_class->rule_delete(rule);
+    ofopgroup_submit(group);
+}
+
+static void
+ofproto_flush__(struct ofproto *ofproto)
+{
     struct oftable *table;
 
     if (ofproto->ofproto_class->flush) {
         ofproto->ofproto_class->flush(ofproto);
     }
 
-    group = ofopgroup_create_unattached(ofproto);
     OFPROTO_FOR_EACH_TABLE (table, ofproto) {
         struct rule *rule, *next_rule;
         struct cls_cursor cursor;
@@ -1097,18 +1123,15 @@ ofproto_flush__(struct ofproto *ofproto)
         cls_cursor_init(&cursor, &table->cls, NULL);
         CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
             if (!rule->pending) {
-                ofoperation_create(group, rule, OFOPERATION_DELETE,
-                                   OFPRR_DELETE);
-                ovs_rwlock_wrlock(&rule->evict);
-                oftable_remove_rule__(ofproto, &table->cls, rule);
-                ofproto->ofproto_class->rule_destruct(rule);
+                ofproto_delete_rule(ofproto, &table->cls, rule);
             }
         }
         ovs_rwlock_unlock(&table->cls.rwlock);
     }
-    ofopgroup_submit(group);
 }
 
+static void delete_group(struct ofproto *ofproto, uint32_t group_id);
+
 static void
 ofproto_destroy__(struct ofproto *ofproto)
 {
@@ -1122,6 +1145,10 @@ ofproto_destroy__(struct ofproto *ofproto)
         free(ofproto->meters);
     }
 
+    delete_group(ofproto, OFPG_ALL);
+    ovs_rwlock_destroy(&ofproto->groups_rwlock);
+    hmap_destroy(&ofproto->groups);
+
     connmgr_destroy(ofproto->connmgr);
 
     hmap_remove(&all_ofprotos, &ofproto->hmap_node);
@@ -1254,6 +1281,38 @@ ofproto_run(struct ofproto *p)
         VLOG_ERR_RL(&rl, "%s: run failed (%s)", p->name, ovs_strerror(error));
     }
 
+    /* Restore the eviction group heap invariant occasionally. */
+    if (p->eviction_group_timer < time_msec()) {
+        size_t i;
+
+        p->eviction_group_timer = time_msec() + 1000;
+
+        for (i = 0; i < p->n_tables; i++) {
+            struct oftable *table = &p->tables[i];
+            struct eviction_group *evg;
+            struct cls_cursor cursor;
+            struct rule *rule;
+
+            if (!table->eviction_fields) {
+                continue;
+            }
+
+            HEAP_FOR_EACH (evg, size_node, &table->eviction_groups_by_size) {
+                heap_rebuild(&evg->rules);
+            }
+
+            ovs_rwlock_rdlock(&table->cls.rwlock);
+            cls_cursor_init(&cursor, &table->cls, NULL);
+            CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
+                if (!rule->eviction_group
+                    && (rule->idle_timeout || rule->hard_timeout)) {
+                    eviction_group_add_rule(rule);
+                }
+            }
+            ovs_rwlock_unlock(&table->cls.rwlock);
+        }
+    }
+
     if (p->ofproto_class->port_poll) {
         char *devname;
 
@@ -1684,12 +1743,13 @@ bool
 ofproto_delete_flow(struct ofproto *ofproto,
                     const struct match *target, unsigned int priority)
 {
+    struct classifier *cls = &ofproto->tables[0].cls;
     struct rule *rule;
 
-    ovs_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
-    rule = rule_from_cls_rule(classifier_find_match_exactly(
-                                  &ofproto->tables[0].cls, target, priority));
-    ovs_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
+    ovs_rwlock_rdlock(&cls->rwlock);
+    rule = rule_from_cls_rule(classifier_find_match_exactly(cls, target,
+                                                            priority));
+    ovs_rwlock_unlock(&cls->rwlock);
     if (!rule) {
         /* No such rule -> success. */
         return true;
@@ -1699,12 +1759,10 @@ ofproto_delete_flow(struct ofproto *ofproto,
         return false;
     } else {
         /* Initiate deletion -> success. */
-        struct ofopgroup *group = ofopgroup_create_unattached(ofproto);
-        ofoperation_create(group, rule, OFOPERATION_DELETE, OFPRR_DELETE);
-        ovs_rwlock_wrlock(&rule->evict);
-        oftable_remove_rule(rule);
-        ofproto->ofproto_class->rule_destruct(rule);
-        ofopgroup_submit(group);
+        ovs_rwlock_wrlock(&cls->rwlock);
+        ofproto_delete_rule(ofproto, cls, rule);
+        ovs_rwlock_unlock(&cls->rwlock);
+
         return true;
     }
 
@@ -1824,8 +1882,8 @@ ofport_open(struct ofproto *ofproto,
     pp->state = netdev_get_carrier(netdev) ? 0 : OFPUTIL_PS_LINK_DOWN;
     netdev_get_features(netdev, &pp->curr, &pp->advertised,
                         &pp->supported, &pp->peer);
-    pp->curr_speed = netdev_features_to_bps(pp->curr, 0);
-    pp->max_speed = netdev_features_to_bps(pp->supported, 0);
+    pp->curr_speed = netdev_features_to_bps(pp->curr, 0) / 1000;
+    pp->max_speed = netdev_features_to_bps(pp->supported, 0) / 1000;
 
     return netdev;
 }
@@ -2199,36 +2257,37 @@ update_mtu(struct ofproto *p, struct ofport *port)
 }
 \f
 static void
-ofproto_rule_destroy__(struct rule *rule)
+ofproto_rule_destroy(struct rule *rule)
 {
     if (rule) {
-        cls_rule_destroy(&rule->cr);
-        free(rule->ofpacts);
-        ovs_mutex_destroy(&rule->timeout_mutex);
-        ovs_rwlock_destroy(&rule->evict);
-        rule->ofproto->ofproto_class->rule_dealloc(rule);
+        rule->ofproto->ofproto_class->rule_destruct(rule);
+        ofproto_rule_destroy__(rule);
     }
 }
 
+static void
+ofproto_rule_destroy__(struct rule *rule)
+{
+    cls_rule_destroy(&rule->cr);
+    free(rule->ofpacts);
+    ovs_mutex_destroy(&rule->timeout_mutex);
+    ovs_rwlock_destroy(&rule->rwlock);
+    rule->ofproto->ofproto_class->rule_dealloc(rule);
+}
+
 /* This function allows an ofproto implementation to destroy any rules that
- * remain when its ->destruct() function is called.  The caller must have
- * already uninitialized any derived members of 'rule' (step 5 described in the
- * large comment in ofproto/ofproto-provider.h titled "Life Cycle").
- * This function implements steps 6 and 7.
+ * remain when its ->destruct() function is called.  This function implements
+ * steps 4.4 and 4.5 in the section titled "Rule Life Cycle" in
+ * ofproto-provider.h.
  *
  * This function should only be called from an ofproto implementation's
  * ->destruct() function.  It is not suitable elsewhere. */
 void
-ofproto_rule_destroy(struct ofproto *ofproto, struct classifier *cls,
-                     struct rule *rule) OVS_REQ_WRLOCK(cls->rwlock)
+ofproto_rule_delete(struct ofproto *ofproto, struct classifier *cls,
+                    struct rule *rule)
+    OVS_REQ_WRLOCK(cls->rwlock)
 {
-    ovs_assert(!rule->pending);
-    if (!ovs_rwlock_trywrlock(&rule->evict)) {
-        oftable_remove_rule__(ofproto, cls, rule);
-    } else {
-        NOT_REACHED();
-    }
-    ofproto_rule_destroy__(rule);
+    ofproto_delete_rule(ofproto, cls, rule);
 }
 
 /* Returns true if 'rule' has an OpenFlow OFPAT_OUTPUT or OFPAT_ENQUEUE action
@@ -2240,6 +2299,14 @@ ofproto_rule_has_out_port(const struct rule *rule, ofp_port_t port)
             || ofpacts_output_to_port(rule->ofpacts, rule->ofpacts_len, port));
 }
 
+/* Returns true if 'group_id' is OFPG11_ANY or if 'rule' has an action that
+ * outputs to group 'group_id'. */
+bool
+ofproto_rule_has_out_group(const struct rule *rule, uint32_t group_id)
+{
+    return (group_id == OFPG11_ANY
+            || ofpacts_output_to_group(rule->ofpacts, rule->ofpacts_len, group_id));
+}
+
 /* Returns true if a rule related to 'op' has an OpenFlow OFPAT_OUTPUT or
  * OFPAT_ENQUEUE action that outputs to 'out_port'. */
 bool
@@ -2263,8 +2330,7 @@ ofoperation_has_out_port(const struct ofoperation *op, ofp_port_t out_port)
 }
 
 /* Executes the actions indicated by 'rule' on 'packet' and credits 'rule''s
- * statistics appropriately.  'packet' must have at least sizeof(struct
- * ofp10_packet_in) bytes of headroom.
+ * statistics appropriately.
  *
  * 'packet' doesn't necessarily have to match 'rule'.  'rule' will be credited
  * with statistics for 'packet' either way.
@@ -2276,8 +2342,6 @@ rule_execute(struct rule *rule, ofp_port_t in_port, struct ofpbuf *packet)
     struct flow flow;
     union flow_in_port in_port_;
 
-    ovs_assert(ofpbuf_headroom(packet) >= sizeof(struct ofp10_packet_in));
-
     in_port_.ofp_port = in_port;
     flow_extract(packet, 0, 0, NULL, &in_port_, &flow);
     return rule->ofproto->ofproto_class->rule_execute(rule, &flow, packet);
@@ -2519,8 +2583,8 @@ handle_packet_out(struct ofconn *ofconn, const struct ofp_header *oh)
             goto exit_free_ofpacts;
         }
     } else {
-        payload = xmalloc(sizeof *payload);
-        ofpbuf_use_const(payload, po.packet, po.packet_len);
+        /* Ensure that the L3 header is 32-bit aligned. */
+        payload = ofpbuf_clone_data_with_headroom(po.packet, po.packet_len, 2);
     }
 
     /* Verify actions against packet, then send packet if successful. */
@@ -2786,7 +2850,9 @@ ofproto_rule_change_cookie(struct ofproto *ofproto, struct rule *rule,
     if (new_cookie != rule->flow_cookie) {
         cookies_remove(ofproto, rule);
 
+        ovs_rwlock_wrlock(&rule->rwlock);
         rule->flow_cookie = new_cookie;
+        ovs_rwlock_unlock(&rule->rwlock);
 
         cookies_insert(ofproto, rule);
     }
@@ -2884,7 +2950,8 @@ static enum ofperr
 collect_rules_loose(struct ofproto *ofproto, uint8_t table_id,
                     const struct match *match,
                     ovs_be64 cookie, ovs_be64 cookie_mask,
-                    ofp_port_t out_port, struct list *rules)
+                    ofp_port_t out_port, uint32_t out_group,
+                    struct list *rules)
 {
     struct oftable *table;
     struct cls_rule cr;
@@ -2915,7 +2982,8 @@ collect_rules_loose(struct ofproto *ofproto, uint8_t table_id,
                     goto exit;
                 }
                 if (rule->flow_cookie == cookie /* Hash collisions possible. */
-                    && ofproto_rule_has_out_port(rule, out_port)) {
+                    && ofproto_rule_has_out_port(rule, out_port)
+                    && ofproto_rule_has_out_group(rule, out_group)) {
                     list_push_back(rules, &rule->ofproto_node);
                 }
             }
@@ -2937,6 +3005,7 @@ collect_rules_loose(struct ofproto *ofproto, uint8_t table_id,
             }
             if (!ofproto_rule_is_hidden(rule)
                 && ofproto_rule_has_out_port(rule, out_port)
+                && ofproto_rule_has_out_group(rule, out_group)
                     && !((rule->flow_cookie ^ cookie) & cookie_mask)) {
                 list_push_back(rules, &rule->ofproto_node);
             }
@@ -2964,7 +3033,8 @@ static enum ofperr
 collect_rules_strict(struct ofproto *ofproto, uint8_t table_id,
                      const struct match *match, unsigned int priority,
                      ovs_be64 cookie, ovs_be64 cookie_mask,
-                     ofp_port_t out_port, struct list *rules)
+                     ofp_port_t out_port, uint32_t out_group,
+                     struct list *rules)
 {
     struct oftable *table;
     struct cls_rule cr;
@@ -2995,7 +3065,8 @@ collect_rules_strict(struct ofproto *ofproto, uint8_t table_id,
                     goto exit;
                 }
                 if (rule->flow_cookie == cookie /* Hash collisions possible. */
-                    && ofproto_rule_has_out_port(rule, out_port)) {
+                    && ofproto_rule_has_out_port(rule, out_port)
+                    && ofproto_rule_has_out_group(rule, out_group)) {
                     list_push_back(rules, &rule->ofproto_node);
                 }
             }
@@ -3017,6 +3088,7 @@ collect_rules_strict(struct ofproto *ofproto, uint8_t table_id,
             }
             if (!ofproto_rule_is_hidden(rule)
                 && ofproto_rule_has_out_port(rule, out_port)
+                && ofproto_rule_has_out_group(rule, out_group)
                     && !((rule->flow_cookie ^ cookie) & cookie_mask)) {
                 list_push_back(rules, &rule->ofproto_node);
             }
@@ -3056,7 +3128,7 @@ handle_flow_stats_request(struct ofconn *ofconn,
 
     error = collect_rules_loose(ofproto, fsr.table_id, &fsr.match,
                                 fsr.cookie, fsr.cookie_mask,
-                                fsr.out_port, &rules);
+                                fsr.out_port, fsr.out_group, &rules);
     if (error) {
         return error;
     }
@@ -3183,7 +3255,7 @@ handle_aggregate_stats_request(struct ofconn *ofconn,
 
     error = collect_rules_loose(ofproto, request.table_id, &request.match,
                                 request.cookie, request.cookie_mask,
-                                request.out_port, &rules);
+                                request.out_port, request.out_group, &rules);
     if (error) {
         return error;
     }
@@ -3342,6 +3414,34 @@ is_flow_deletion_pending(const struct ofproto *ofproto,
     return false;
 }
 
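+/* If 'table' has reached its maximum number of flows, chooses a rule to evict
+ * and deletes it.  Returns 0 on success (including when no eviction was
+ * needed), OFPERR_OFPFMFC_TABLE_FULL if no rule could be chosen for eviction,
+ * or OFPROTO_POSTPONE if the chosen rule still has an operation pending. */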
+static enum ofperr
+evict_rule_from_table(struct ofproto *ofproto, struct oftable *table)
+{
+    struct rule *rule;
+    size_t n_rules;
+
+    ovs_rwlock_rdlock(&table->cls.rwlock);
+    n_rules = classifier_count(&table->cls);
+    ovs_rwlock_unlock(&table->cls.rwlock);
+
+    if (n_rules < table->max_flows) {
+        return 0;
+    } else if (!choose_rule_to_evict(table, &rule)) {
+        return OFPERR_OFPFMFC_TABLE_FULL;
+    } else if (rule->pending) {
+        ovs_rwlock_unlock(&rule->rwlock);
+        return OFPROTO_POSTPONE;
+    } else {
+        struct ofopgroup *group;
+
+        group = ofopgroup_create_unattached(ofproto);
+        delete_flow__(rule, group, OFPRR_EVICTION);
+        ofopgroup_submit(group);
+
+        return 0;
+    }
+}
+
 /* Implements OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT
  * in which no matching flow already exists in the flow table.
  *
@@ -3361,10 +3461,8 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
 {
     struct oftable *table;
     struct ofopgroup *group;
-    struct ofoperation *op;
-    struct rule *evict;
+    struct cls_rule cr;
     struct rule *rule;
-    size_t n_rules;
     uint8_t table_id;
     int error;
 
@@ -3398,13 +3496,14 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
         return OFPERR_OFPBRC_EPERM;
     }
 
+    cls_rule_init(&cr, &fm->match, fm->priority);
+
     /* Transform "add" into "modify" if there's an existing identical flow. */
     ovs_rwlock_rdlock(&table->cls.rwlock);
-    rule = rule_from_cls_rule(classifier_find_match_exactly(&table->cls,
-                                                            &fm->match,
-                                                            fm->priority));
+    rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls, &cr));
     ovs_rwlock_unlock(&table->cls.rwlock);
     if (rule) {
+        cls_rule_destroy(&cr);
         if (!rule_is_modifiable(rule)) {
             return OFPERR_OFPBRC_EPERM;
         } else if (rule->pending) {
@@ -3423,22 +3522,13 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
     error = ofproto_check_ofpacts(ofproto, fm->ofpacts, fm->ofpacts_len,
                                   &fm->match.flow, table_id);
     if (error) {
+        cls_rule_destroy(&cr);
         return error;
     }
 
-    /* Allocate new rule and initialize classifier rule. */
-    rule = ofproto->ofproto_class->rule_alloc();
-    if (!rule) {
-        VLOG_WARN_RL(&rl, "%s: failed to create rule (%s)",
-                     ofproto->name, ovs_strerror(error));
-        return ENOMEM;
-    }
-    cls_rule_init(&rule->cr, &fm->match, fm->priority);
-
     /* Serialize against pending deletion. */
-    if (is_flow_deletion_pending(ofproto, &rule->cr, table_id)) {
-        cls_rule_destroy(&rule->cr);
-        ofproto->ofproto_class->rule_dealloc(rule);
+    if (is_flow_deletion_pending(ofproto, &cr, table_id)) {
+        cls_rule_destroy(&cr);
         return OFPROTO_POSTPONE;
     }
 
@@ -3447,19 +3537,34 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
         bool overlaps;
 
         ovs_rwlock_rdlock(&table->cls.rwlock);
-        overlaps = classifier_rule_overlaps(&table->cls, &rule->cr);
+        overlaps = classifier_rule_overlaps(&table->cls, &cr);
         ovs_rwlock_unlock(&table->cls.rwlock);
 
         if (overlaps) {
-            cls_rule_destroy(&rule->cr);
-            ofproto->ofproto_class->rule_dealloc(rule);
+            cls_rule_destroy(&cr);
             return OFPERR_OFPFMFC_OVERLAP;
         }
     }
 
-    /* FIXME: Implement OFPFF12_RESET_COUNTS */
+    /* If necessary, evict an existing rule to clear out space. */
+    error = evict_rule_from_table(ofproto, table);
+    if (error) {
+        cls_rule_destroy(&cr);
+        return error;
+    }
+
+    /* Allocate new rule. */
+    rule = ofproto->ofproto_class->rule_alloc();
+    if (!rule) {
+        cls_rule_destroy(&cr);
+        VLOG_WARN_RL(&rl, "%s: failed to create rule (%s)",
+                     ofproto->name, ovs_strerror(error));
+        return ENOMEM;
+    }
 
+    /* Initialize base state. */
     rule->ofproto = ofproto;
+    cls_rule_move(&rule->cr, &cr);
     rule->pending = NULL;
     rule->flow_cookie = fm->new_cookie;
     rule->created = rule->modified = rule->used = time_msec();
@@ -3481,58 +3586,23 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
     rule->monitor_flags = 0;
     rule->add_seqno = 0;
     rule->modify_seqno = 0;
-    ovs_rwlock_init(&rule->evict);
-
-    /* Insert new rule. */
-    oftable_insert_rule(rule);
-
-    ovs_rwlock_rdlock(&table->cls.rwlock);
-    n_rules = classifier_count(&table->cls);
-    ovs_rwlock_unlock(&table->cls.rwlock);
-    if (n_rules > table->max_flows) {
-        ovs_rwlock_rdlock(&rule->evict);
-        if (choose_rule_to_evict(table, &evict)) {
-            ovs_rwlock_unlock(&rule->evict);
-            ovs_rwlock_unlock(&evict->evict);
-            if (evict->pending) {
-                error = OFPROTO_POSTPONE;
-                goto exit;
-            }
-        } else {
-            ovs_rwlock_unlock(&rule->evict);
-            error = OFPERR_OFPFMFC_TABLE_FULL;
-            goto exit;
-        }
-    } else {
-        evict = NULL;
-    }
-
-    group = ofopgroup_create(ofproto, ofconn, request, fm->buffer_id);
-    op = ofoperation_create(group, rule, OFOPERATION_ADD, 0);
+    ovs_rwlock_init(&rule->rwlock);
 
+    /* Construct rule, initializing derived state. */
     error = ofproto->ofproto_class->rule_construct(rule);
     if (error) {
-        op->group->n_running--;
-        ofoperation_destroy(rule->pending);
-    } else if (evict) {
-        /* It would be better if we maintained the lock we took in
-         * choose_rule_to_evict() earlier, but that confuses the thread
-         * safety analysis, and this code is fragile enough that we really
-         * need it.  In the worst case, we'll have to block a little while
-         * before we perform the eviction, which doesn't seem like a big
-         * problem. */
-        ovs_rwlock_wrlock(&evict->evict);
-        delete_flow__(evict, group, OFPRR_EVICTION);
+        ofproto_rule_destroy__(rule);
+        return error;
     }
+
+    /* Insert rule. */
+    oftable_insert_rule(rule);
+
+    group = ofopgroup_create(ofproto, ofconn, request, fm->buffer_id);
+    ofoperation_create(group, rule, OFOPERATION_ADD, 0);
+    ofproto->ofproto_class->rule_insert(rule);
     ofopgroup_submit(group);
 
-exit:
-    /* Back out if an error occurred. */
-    if (error) {
-        ovs_rwlock_wrlock(&rule->evict);
-        oftable_remove_rule(rule);
-        ofproto_rule_destroy__(rule);
-    }
     return error;
 }
 \f
@@ -3610,8 +3680,12 @@ modify_flows__(struct ofproto *ofproto, struct ofconn *ofconn,
             op->ofpacts = rule->ofpacts;
             op->ofpacts_len = rule->ofpacts_len;
             op->meter_id = rule->meter_id;
+
+            ovs_rwlock_wrlock(&rule->rwlock);
             rule->ofpacts = xmemdup(fm->ofpacts, fm->ofpacts_len);
             rule->ofpacts_len = fm->ofpacts_len;
+            ovs_rwlock_unlock(&rule->rwlock);
+
             rule->meter_id = find_meter(rule->ofpacts, rule->ofpacts_len);
             rule->ofproto->ofproto_class->rule_modify_actions(rule,
                                                               reset_counters);
@@ -3649,7 +3723,7 @@ modify_flows_loose(struct ofproto *ofproto, struct ofconn *ofconn,
 
     error = collect_rules_loose(ofproto, fm->table_id, &fm->match,
                                 fm->cookie, fm->cookie_mask,
-                                OFPP_ANY, &rules);
+                                OFPP_ANY, OFPG11_ANY, &rules);
     if (error) {
         return error;
     } else if (list_is_empty(&rules)) {
@@ -3674,8 +3748,7 @@ modify_flow_strict(struct ofproto *ofproto, struct ofconn *ofconn,
 
     error = collect_rules_strict(ofproto, fm->table_id, &fm->match,
                                  fm->priority, fm->cookie, fm->cookie_mask,
-                                 OFPP_ANY, &rules);
-
+                                 OFPP_ANY, OFPG11_ANY, &rules);
     if (error) {
         return error;
     } else if (list_is_empty(&rules)) {
@@ -3699,7 +3772,7 @@ delete_flow__(struct rule *rule, struct ofopgroup *group,
 
     ofoperation_create(group, rule, OFOPERATION_DELETE, reason);
     oftable_remove_rule(rule);
-    ofproto->ofproto_class->rule_destruct(rule);
+    ofproto->ofproto_class->rule_delete(rule);
 }
 
 /* Deletes the rules listed in 'rules'.
@@ -3715,7 +3788,7 @@ delete_flows__(struct ofproto *ofproto, struct ofconn *ofconn,
 
     group = ofopgroup_create(ofproto, ofconn, request, UINT32_MAX);
     LIST_FOR_EACH_SAFE (rule, next, ofproto_node, rules) {
-        ovs_rwlock_wrlock(&rule->evict);
+        ovs_rwlock_wrlock(&rule->rwlock);
         delete_flow__(rule, group, reason);
     }
     ofopgroup_submit(group);
@@ -3734,7 +3807,7 @@ delete_flows_loose(struct ofproto *ofproto, struct ofconn *ofconn,
 
     error = collect_rules_loose(ofproto, fm->table_id, &fm->match,
                                 fm->cookie, fm->cookie_mask,
-                                fm->out_port, &rules);
+                                fm->out_port, fm->out_group, &rules);
     return (error ? error
             : !list_is_empty(&rules) ? delete_flows__(ofproto, ofconn, request,
                                                       &rules, OFPRR_DELETE)
@@ -3752,7 +3825,7 @@ delete_flow_strict(struct ofproto *ofproto, struct ofconn *ofconn,
 
     error = collect_rules_strict(ofproto, fm->table_id, &fm->match,
                                  fm->priority, fm->cookie, fm->cookie_mask,
-                                 fm->out_port, &rules);
+                                 fm->out_port, fm->out_group, &rules);
     return (error ? error
             : list_is_singleton(&rules) ? delete_flows__(ofproto, ofconn,
                                                          request, &rules,
@@ -3786,20 +3859,6 @@ ofproto_rule_send_removed(struct rule *rule, uint8_t reason)
     connmgr_send_flow_removed(rule->ofproto->connmgr, &fr);
 }
 
-void
-ofproto_rule_update_used(struct rule *rule, long long int used)
-{
-    if (used > rule->used) {
-        struct eviction_group *evg = rule->eviction_group;
-
-        rule->used = used;
-        if (evg) {
-            heap_change(&evg->rules, &rule->evg_node,
-                        rule_eviction_priority(rule));
-        }
-    }
-}
-
 /* Sends an OpenFlow "flow removed" message with the given 'reason' (either
  * OFPRR_HARD_TIMEOUT or OFPRR_IDLE_TIMEOUT), and then removes 'rule' from its
  * ofproto.
@@ -3813,17 +3872,15 @@ void
 ofproto_rule_expire(struct rule *rule, uint8_t reason)
 {
     struct ofproto *ofproto = rule->ofproto;
-    struct ofopgroup *group;
-
-    ovs_assert(reason == OFPRR_HARD_TIMEOUT || reason == OFPRR_IDLE_TIMEOUT);
+    struct classifier *cls = &ofproto->tables[rule->table_id].cls;
 
+    ovs_assert(reason == OFPRR_HARD_TIMEOUT || reason == OFPRR_IDLE_TIMEOUT
+               || reason == OFPRR_DELETE || reason == OFPRR_GROUP_DELETE);
     ofproto_rule_send_removed(rule, reason);
 
-    group = ofopgroup_create_unattached(ofproto);
-    ofoperation_create(group, rule, OFOPERATION_DELETE, reason);
-    oftable_remove_rule(rule);
-    ofproto->ofproto_class->rule_destruct(rule);
-    ofopgroup_submit(group);
+    ovs_rwlock_wrlock(&cls->rwlock);
+    ofproto_delete_rule(ofproto, cls, rule);
+    ovs_rwlock_unlock(&cls->rwlock);
 }
 
 /* Reduces '*timeout' to no more than 'max'.  A value of zero in either case
@@ -3860,10 +3917,6 @@ ofproto_rule_reduce_timeouts(struct rule *rule,
     reduce_timeout(idle_timeout, &rule->idle_timeout);
     reduce_timeout(hard_timeout, &rule->hard_timeout);
     ovs_mutex_unlock(&rule->timeout_mutex);
-
-    if (!rule->eviction_group) {
-        eviction_group_add_rule(rule);
-    }
 }
 \f
 static enum ofperr
@@ -4077,6 +4130,31 @@ handle_nxt_set_async_config(struct ofconn *ofconn, const struct ofp_header *oh)
     return 0;
 }
 
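+/* Handles an OFPTYPE_GET_ASYNC_REQUEST message by replying with the
+ * connection's current asynchronous message configuration. */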
+static enum ofperr
+handle_nxt_get_async_request(struct ofconn *ofconn, const struct ofp_header *oh)
+{
+    struct ofpbuf *buf;
+    uint32_t master[OAM_N_TYPES];
+    uint32_t slave[OAM_N_TYPES];
+    struct nx_async_config *msg;
+
+    ofconn_get_async_config(ofconn, master, slave);
+    buf = ofpraw_alloc_reply(OFPRAW_OFPT13_GET_ASYNC_REPLY, oh, 0);
+    msg = ofpbuf_put_zeros(buf, sizeof *msg);
+
+    msg->packet_in_mask[0] = htonl(master[OAM_PACKET_IN]);
+    msg->port_status_mask[0] = htonl(master[OAM_PORT_STATUS]);
+    msg->flow_removed_mask[0] = htonl(master[OAM_FLOW_REMOVED]);
+
+    msg->packet_in_mask[1] = htonl(slave[OAM_PACKET_IN]);
+    msg->port_status_mask[1] = htonl(slave[OAM_PORT_STATUS]);
+    msg->flow_removed_mask[1] = htonl(slave[OAM_FLOW_REMOVED]);
+
+    ofconn_send_reply(ofconn, buf);
+
+    return 0;
+}
+
 static enum ofperr
 handle_nxt_set_controller_id(struct ofconn *ofconn,
                              const struct ofp_header *oh)
@@ -4675,6 +4753,426 @@ handle_meter_request(struct ofconn *ofconn, const struct ofp_header *request,
     return 0;
 }
 
+bool
+ofproto_group_lookup(const struct ofproto *ofproto, uint32_t group_id,
+                     struct ofgroup **group)
+    OVS_TRY_RDLOCK(true, (*group)->rwlock)
+{
+    ovs_rwlock_rdlock(&ofproto->groups_rwlock);
+    HMAP_FOR_EACH_IN_BUCKET (*group, hmap_node,
+                             hash_int(group_id, 0), &ofproto->groups) {
+        if ((*group)->group_id == group_id) {
+            ovs_rwlock_rdlock(&(*group)->rwlock);
+            ovs_rwlock_unlock(&ofproto->groups_rwlock);
+            return true;
+        }
+    }
+    ovs_rwlock_unlock(&ofproto->groups_rwlock);
+    return false;
+}
+
+void
+ofproto_group_release(struct ofgroup *group)
+    OVS_RELEASES(group->rwlock)
+{
+    ovs_rwlock_unlock(&group->rwlock);
+}
+
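+/* Like ofproto_group_lookup(), but write-locks the group and, on success,
+ * leaves 'ofproto->groups_rwlock' held for writing as well; the caller must
+ * release both locks. */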
+static bool
+ofproto_group_write_lookup(const struct ofproto *ofproto, uint32_t group_id,
+                           struct ofgroup **group)
+    OVS_TRY_WRLOCK(true, ofproto->groups_rwlock)
+    OVS_TRY_WRLOCK(true, (*group)->rwlock)
+{
+    ovs_rwlock_wrlock(&ofproto->groups_rwlock);
+    HMAP_FOR_EACH_IN_BUCKET (*group, hmap_node,
+                             hash_int(group_id, 0), &ofproto->groups) {
+        if ((*group)->group_id == group_id) {
+            ovs_rwlock_wrlock(&(*group)->rwlock);
+            return true;
+        }
+    }
+    ovs_rwlock_unlock(&ofproto->groups_rwlock);
+    return false;
+}
+
+static bool
+ofproto_group_exists(const struct ofproto *ofproto, uint32_t group_id)
+    OVS_REQ_RDLOCK(ofproto->groups_rwlock)
+{
+    struct ofgroup *grp;
+
+    HMAP_FOR_EACH_IN_BUCKET (grp, hmap_node,
+                             hash_int(group_id, 0), &ofproto->groups) {
+        if (grp->group_id == group_id) {
+            return true;
+        }
+    }
+    return false;
+}
+
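+/* Appends statistics for 'group' to 'replies', substituting all-ones values
+ * when the ofproto implementation cannot provide them. */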
+static void
+append_group_stats(struct ofgroup *group, struct list *replies)
+    OVS_REQ_RDLOCK(group->rwlock)
+{
+    struct ofputil_group_stats ogs;
+    struct ofproto *ofproto = group->ofproto;
+    long long int now = time_msec();
+    int error;
+
+    ogs.bucket_stats = xmalloc(group->n_buckets * sizeof *ogs.bucket_stats);
+
+    error = (ofproto->ofproto_class->group_get_stats
+             ? ofproto->ofproto_class->group_get_stats(group, &ogs)
+             : EOPNOTSUPP);
+    if (error) {
+        ogs.ref_count = UINT32_MAX;
+        ogs.packet_count = UINT64_MAX;
+        ogs.byte_count = UINT64_MAX;
+        ogs.n_buckets = group->n_buckets;
+        memset(ogs.bucket_stats, 0xff,
+               ogs.n_buckets * sizeof *ogs.bucket_stats);
+    }
+
+    ogs.group_id = group->group_id;
+    calc_duration(group->created, now, &ogs.duration_sec, &ogs.duration_nsec);
+
+    ofputil_append_group_stats(replies, &ogs);
+
+    free(ogs.bucket_stats);
+}
+
+static enum ofperr
+handle_group_stats_request(struct ofconn *ofconn,
+                           const struct ofp_header *request)
+{
+    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
+    struct list replies;
+    enum ofperr error;
+    struct ofgroup *group;
+    uint32_t group_id;
+
+    error = ofputil_decode_group_stats_request(request, &group_id);
+    if (error) {
+        return error;
+    }
+
+    ofpmp_init(&replies, request);
+
+    if (group_id == OFPG_ALL) {
+        ovs_rwlock_rdlock(&ofproto->groups_rwlock);
+        HMAP_FOR_EACH (group, hmap_node, &ofproto->groups) {
+            ovs_rwlock_rdlock(&group->rwlock);
+            append_group_stats(group, &replies);
+            ovs_rwlock_unlock(&group->rwlock);
+        }
+        ovs_rwlock_unlock(&ofproto->groups_rwlock);
+    } else {
+        if (ofproto_group_lookup(ofproto, group_id, &group)) {
+            append_group_stats(group, &replies);
+            ofproto_group_release(group);
+        }
+    }
+
+    ofconn_send_replies(ofconn, &replies);
+
+    return 0;
+}
+
+static enum ofperr
+handle_group_desc_stats_request(struct ofconn *ofconn,
+                                const struct ofp_header *request)
+{
+    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
+    struct list replies;
+    struct ofputil_group_desc gds;
+    struct ofgroup *group;
+
+    ofpmp_init(&replies, request);
+
+    ovs_rwlock_rdlock(&ofproto->groups_rwlock);
+    HMAP_FOR_EACH (group, hmap_node, &ofproto->groups) {
+        gds.group_id = group->group_id;
+        gds.type = group->type;
+        ofputil_append_group_desc_reply(&gds, &group->buckets, &replies);
+    }
+    ovs_rwlock_unlock(&ofproto->groups_rwlock);
+
+    ofconn_send_replies(ofconn, &replies);
+
+    return 0;
+}
+
+static enum ofperr
+handle_group_features_stats_request(struct ofconn *ofconn,
+                                    const struct ofp_header *request)
+{
+    struct ofproto *p = ofconn_get_ofproto(ofconn);
+    struct ofpbuf *msg;
+
+    msg = ofputil_encode_group_features_reply(&p->ogf, request);
+    if (msg) {
+        ofconn_send_reply(ofconn, msg);
+    }
+
+    return 0;
+}
+
+/* Implements OFPGC11_ADD.
+ *
+ * Adds the group specified by 'gm' to 'ofproto''s group table.  Returns 0 on
+ * success or an OpenFlow error code on failure. */
+static enum ofperr
+add_group(struct ofproto *ofproto, struct ofputil_group_mod *gm)
+{
+    struct ofgroup *ofgroup;
+    enum ofperr error;
+
+    if (gm->group_id > OFPG_MAX) {
+        return OFPERR_OFPGMFC_INVALID_GROUP;
+    }
+    if (gm->type > OFPGT11_FF) {
+        return OFPERR_OFPGMFC_BAD_TYPE;
+    }
+
+    /* Allocate new group and initialize it. */
+    ofgroup = ofproto->ofproto_class->group_alloc();
+    if (!ofgroup) {
+        VLOG_WARN_RL(&rl, "%s: failed to create group", ofproto->name);
+        return OFPERR_OFPGMFC_OUT_OF_GROUPS;
+    }
+
+    ovs_rwlock_init(&ofgroup->rwlock);
+    ofgroup->ofproto  = ofproto;
+    ofgroup->group_id = gm->group_id;
+    ofgroup->type     = gm->type;
+    ofgroup->created = ofgroup->modified = time_msec();
+
+    list_move(&ofgroup->buckets, &gm->buckets);
+    ofgroup->n_buckets = list_size(&ofgroup->buckets);
+
+    /* Construct called BEFORE any locks are held. */
+    error = ofproto->ofproto_class->group_construct(ofgroup);
+    if (error) {
+        goto free_out;
+    }
+
+    /* We wrlock as late as possible to minimize the time we jam any other
+     * threads: No visible state changes before acquiring the lock. */
+    ovs_rwlock_wrlock(&ofproto->groups_rwlock);
+
+    if (ofproto->n_groups[gm->type] >= ofproto->ogf.max_groups[gm->type]) {
+        error = OFPERR_OFPGMFC_OUT_OF_GROUPS;
+        goto unlock_out;
+    }
+
+    if (ofproto_group_exists(ofproto, gm->group_id)) {
+        error = OFPERR_OFPGMFC_GROUP_EXISTS;
+        goto unlock_out;
+    }
+
+    if (!error) {
+        /* Insert new group. */
+        hmap_insert(&ofproto->groups, &ofgroup->hmap_node,
+                    hash_int(ofgroup->group_id, 0));
+        ofproto->n_groups[ofgroup->type]++;
+
+        ovs_rwlock_unlock(&ofproto->groups_rwlock);
+        return error;
+    }
+
+ unlock_out:
+    ovs_rwlock_unlock(&ofproto->groups_rwlock);
+    ofproto->ofproto_class->group_destruct(ofgroup);
+ free_out:
+    ofputil_bucket_list_destroy(&ofgroup->buckets);
+    ofproto->ofproto_class->group_dealloc(ofgroup);
+
+    return error;
+}
+
+/* Implements OFPGC11_MODIFY.  Returns 0 on success or an OpenFlow error code
+ * on failure. */
+static enum ofperr
+modify_group(struct ofproto *ofproto, struct ofputil_group_mod *gm)
+{
+    struct ofgroup *ofgroup;
+    struct ofgroup *victim;
+    enum ofperr error;
+
+    if (gm->group_id > OFPG_MAX) {
+        return OFPERR_OFPGMFC_INVALID_GROUP;
+    }
+
+    if (gm->type > OFPGT11_FF) {
+        return OFPERR_OFPGMFC_BAD_TYPE;
+    }
+
+    victim = ofproto->ofproto_class->group_alloc();
+    if (!victim) {
+        VLOG_WARN_RL(&rl, "%s: failed to allocate group", ofproto->name);
+        return OFPERR_OFPGMFC_OUT_OF_GROUPS;
+    }
+
+    if (!ofproto_group_write_lookup(ofproto, gm->group_id, &ofgroup)) {
+        error = OFPERR_OFPGMFC_UNKNOWN_GROUP;
+        goto free_out;
+    }
+    /* Both the group's and its container's write locks are held now.
+     * Also, n_groups[] is protected by ofproto->groups_rwlock. */
+    if (ofgroup->type != gm->type
+        && ofproto->n_groups[gm->type] >= ofproto->ogf.max_groups[gm->type]) {
+        error = OFPERR_OFPGMFC_OUT_OF_GROUPS;
+        goto unlock_out;
+    }
+
+    *victim = *ofgroup;
+    list_move(&victim->buckets, &ofgroup->buckets);
+
+    ofgroup->type = gm->type;
+    list_move(&ofgroup->buckets, &gm->buckets);
+    ofgroup->n_buckets = list_size(&ofgroup->buckets);
+
+    error = ofproto->ofproto_class->group_modify(ofgroup, victim);
+    if (!error) {
+        ofputil_bucket_list_destroy(&victim->buckets);
+        ofproto->n_groups[victim->type]--;
+        ofproto->n_groups[ofgroup->type]++;
+        ofgroup->modified = time_msec();
+    } else {
+        ofputil_bucket_list_destroy(&ofgroup->buckets);
+
+        *ofgroup = *victim;
+        list_move(&ofgroup->buckets, &victim->buckets);
+    }
+
+ unlock_out:
+    ovs_rwlock_unlock(&ofgroup->rwlock);
+    ovs_rwlock_unlock(&ofproto->groups_rwlock);
+ free_out:
+    ofproto->ofproto_class->group_dealloc(victim);
+    return error;
+}
+
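+/* Removes 'ofgroup' from 'ofproto''s group table and destroys it, releasing
+ * 'ofproto->groups_rwlock' (which the caller must hold for writing) in the
+ * process. */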
+static void
+delete_group__(struct ofproto *ofproto, struct ofgroup *ofgroup)
+    OVS_RELEASES(ofproto->groups_rwlock)
+{
+    /* Must wait until existing readers are done,
+     * while holding the container's write lock at the same time. */
+    ovs_rwlock_wrlock(&ofgroup->rwlock);
+    hmap_remove(&ofproto->groups, &ofgroup->hmap_node);
+    /* No-one can find this group any more. */
+    ofproto->n_groups[ofgroup->type]--;
+    ovs_rwlock_unlock(&ofproto->groups_rwlock);
+
+    ofproto->ofproto_class->group_destruct(ofgroup);
+    ofputil_bucket_list_destroy(&ofgroup->buckets);
+    ovs_rwlock_unlock(&ofgroup->rwlock);
+    ovs_rwlock_destroy(&ofgroup->rwlock);
+    ofproto->ofproto_class->group_dealloc(ofgroup);
+}
+
+/* Implements OFPGC11_DELETE. */
+static void
+delete_group(struct ofproto *ofproto, uint32_t group_id)
+{
+    struct ofgroup *ofgroup;
+
+    ovs_rwlock_wrlock(&ofproto->groups_rwlock);
+    if (group_id == OFPG_ALL) {
+        for (;;) {
+            struct hmap_node *node = hmap_first(&ofproto->groups);
+            if (!node) {
+                break;
+            }
+            ofgroup = CONTAINER_OF(node, struct ofgroup, hmap_node);
+            delete_group__(ofproto, ofgroup);
+            /* Re-acquire the lock for each node separately, so that we do
+             * not block the other threads for too long. */
+            ovs_rwlock_wrlock(&ofproto->groups_rwlock);
+        }
+    } else {
+        HMAP_FOR_EACH_IN_BUCKET (ofgroup, hmap_node,
+                                 hash_int(group_id, 0), &ofproto->groups) {
+            if (ofgroup->group_id == group_id) {
+                delete_group__(ofproto, ofgroup);
+                return;
+            }
+        }
+    }
+    ovs_rwlock_unlock(&ofproto->groups_rwlock);
+}
+
+static enum ofperr
+handle_group_mod(struct ofconn *ofconn, const struct ofp_header *oh)
+{
+    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
+    struct ofputil_group_mod gm;
+    enum ofperr error;
+
+    error = reject_slave_controller(ofconn);
+    if (error) {
+        return error;
+    }
+
+    error = ofputil_decode_group_mod(oh, &gm);
+    if (error) {
+        return error;
+    }
+
+    switch (gm.command) {
+    case OFPGC11_ADD:
+        return add_group(ofproto, &gm);
+
+    case OFPGC11_MODIFY:
+        return modify_group(ofproto, &gm);
+
+    case OFPGC11_DELETE:
+        delete_group(ofproto, gm.group_id);
+        return 0;
+
+    default:
+        if (gm.command > OFPGC11_DELETE) {
+            VLOG_WARN_RL(&rl, "%s: Invalid group_mod command type %d",
+                         ofproto->name, gm.command);
+        }
+        return OFPERR_OFPGMFC_BAD_COMMAND;
+    }
+}
+
+static enum ofperr
+handle_table_mod(struct ofconn *ofconn, const struct ofp_header *oh)
+{
+    struct ofputil_table_mod tm;
+    enum ofperr error;
+
+    error = reject_slave_controller(ofconn);
+    if (error) {
+        return error;
+    }
+
+    error = ofputil_decode_table_mod(oh, &tm);
+    if (error) {
+        return error;
+    }
+
+    /* XXX Actual table mod support is not implemented yet. */
+    return 0;
+}
+
 static enum ofperr
 handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg)
 {
@@ -4710,6 +5208,12 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg)
     case OFPTYPE_FLOW_MOD:
         return handle_flow_mod(ofconn, oh);
 
+    case OFPTYPE_GROUP_MOD:
+        return handle_group_mod(ofconn, oh);
+
+    case OFPTYPE_TABLE_MOD:
+        return handle_table_mod(ofconn, oh);
+
     case OFPTYPE_METER_MOD:
         return handle_meter_mod(ofconn, oh);
 
@@ -4746,6 +5250,9 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg)
     case OFPTYPE_SET_ASYNC_CONFIG:
         return handle_nxt_set_async_config(ofconn, oh);
 
+    case OFPTYPE_GET_ASYNC_REQUEST:
+        return handle_nxt_get_async_request(ofconn, oh);
+
         /* Statistics requests. */
     case OFPTYPE_DESC_STATS_REQUEST:
         return handle_desc_stats_request(ofconn, oh);
@@ -4778,12 +5285,17 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg)
     case OFPTYPE_METER_FEATURES_STATS_REQUEST:
         return handle_meter_features_request(ofconn, oh);
 
-        /* FIXME: Change the following once they are implemented: */
-    case OFPTYPE_QUEUE_GET_CONFIG_REQUEST:
-    case OFPTYPE_GET_ASYNC_REQUEST:
     case OFPTYPE_GROUP_STATS_REQUEST:
+        return handle_group_stats_request(ofconn, oh);
+
     case OFPTYPE_GROUP_DESC_STATS_REQUEST:
+        return handle_group_desc_stats_request(ofconn, oh);
+
     case OFPTYPE_GROUP_FEATURES_STATS_REQUEST:
+        return handle_group_features_stats_request(ofconn, oh);
+
+        /* FIXME: Change the following once they are implemented: */
+    case OFPTYPE_QUEUE_GET_CONFIG_REQUEST:
     case OFPTYPE_TABLE_FEATURES_STATS_REQUEST:
         return OFPERR_OFPBRC_BAD_TYPE;
 
@@ -5003,15 +5515,15 @@ ofopgroup_complete(struct ofopgroup *group)
                     }
                 }
             } else {
-                ovs_rwlock_wrlock(&rule->evict);
+                ovs_rwlock_wrlock(&rule->rwlock);
                 oftable_remove_rule(rule);
-                ofproto_rule_destroy__(rule);
+                ofproto_rule_destroy(rule);
             }
             break;
 
         case OFOPERATION_DELETE:
             ovs_assert(!op->error);
-            ofproto_rule_destroy__(rule);
+            ofproto_rule_destroy(rule);
             op->rule = NULL;
             break;
 
@@ -5032,8 +5544,12 @@ ofopgroup_complete(struct ofopgroup *group)
                 ovs_mutex_unlock(&rule->timeout_mutex);
                 if (op->ofpacts) {
                     free(rule->ofpacts);
+
+                    ovs_rwlock_wrlock(&rule->rwlock);
                     rule->ofpacts = op->ofpacts;
                     rule->ofpacts_len = op->ofpacts_len;
+                    ovs_rwlock_unlock(&rule->rwlock);
+
                     op->ofpacts = NULL;
                     op->ofpacts_len = 0;
                 }
@@ -5221,7 +5737,7 @@ choose_rule_to_evict(struct oftable *table, struct rule **rulep)
         struct rule *rule;
 
         HEAP_FOR_EACH (rule, evg_node, &evg->rules) {
-            if (!ovs_rwlock_trywrlock(&rule->evict)) {
+            if (!ovs_rwlock_trywrlock(&rule->rwlock)) {
                 *rulep = rule;
                 return true;
             }
@@ -5262,14 +5778,14 @@ ofproto_evict(struct ofproto *ofproto)
             }
 
             if (rule->pending) {
-                ovs_rwlock_unlock(&rule->evict);
+                ovs_rwlock_unlock(&rule->rwlock);
                 break;
             }
 
             ofoperation_create(group, rule,
                                OFOPERATION_DELETE, OFPRR_EVICTION);
             oftable_remove_rule(rule);
-            ofproto->ofproto_class->rule_destruct(rule);
+            ofproto->ofproto_class->rule_delete(rule);
         }
     }
     ofopgroup_submit(group);
@@ -5570,12 +6086,9 @@ oftable_enable_eviction(struct oftable *table,
 static void
 oftable_remove_rule__(struct ofproto *ofproto, struct classifier *cls,
                       struct rule *rule)
-    OVS_REQ_WRLOCK(cls->rwlock) OVS_RELEASES(rule->evict)
+    OVS_REQ_WRLOCK(cls->rwlock) OVS_RELEASES(rule->rwlock)
 {
     classifier_remove(cls, &rule->cr);
-    if (rule->meter_id) {
-        list_remove(&rule->meter_list_node);
-    }
     cookies_remove(ofproto, rule);
     eviction_group_remove_rule(rule);
     ovs_mutex_lock(&ofproto->expirable_mutex);
@@ -5585,8 +6098,9 @@ oftable_remove_rule__(struct ofproto *ofproto, struct classifier *cls,
     ovs_mutex_unlock(&ofproto->expirable_mutex);
     if (!list_is_empty(&rule->meter_list_node)) {
         list_remove(&rule->meter_list_node);
+        list_init(&rule->meter_list_node);
     }
-    ovs_rwlock_unlock(&rule->evict);
+    ovs_rwlock_unlock(&rule->rwlock);
 }
 
 static void