ofproto: Move 'rule->used' to the provider.
[sliver-openvswitch.git] ofproto/ofproto.c
index 6d25dab..62c97a2 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
  * Copyright (c) 2010 Jean Tourrilhes - HP-Labs.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -182,7 +182,7 @@ struct eviction_group {
 
 static bool choose_rule_to_evict(struct oftable *table, struct rule **rulep);
 static void ofproto_evict(struct ofproto *) OVS_EXCLUDED(ofproto_mutex);
-static uint32_t rule_eviction_priority(struct rule *);
+static uint32_t rule_eviction_priority(struct ofproto *ofproto, struct rule *);
 static void eviction_group_add_rule(struct rule *);
 static void eviction_group_remove_rule(struct rule *);
 
@@ -306,10 +306,11 @@ static size_t allocated_ofproto_classes;
 /* Global lock that protects all flow table operations. */
 struct ovs_mutex ofproto_mutex = OVS_MUTEX_INITIALIZER;
 
-unsigned flow_eviction_threshold = OFPROTO_FLOW_EVICTION_THRESHOLD_DEFAULT;
-unsigned n_handler_threads;
+unsigned ofproto_flow_limit = OFPROTO_FLOW_LIMIT_DEFAULT;
 enum ofproto_flow_miss_model flow_miss_model = OFPROTO_HANDLE_MISS_AUTO;
 
+size_t n_handlers, n_revalidators;
+
 /* Map from datapath name to struct ofproto, for use by unixctl commands. */
 static struct hmap all_ofprotos = HMAP_INITIALIZER(&all_ofprotos);
 
@@ -692,10 +693,9 @@ ofproto_set_in_band_queue(struct ofproto *ofproto, int queue_id)
 /* Sets the number of flows at which eviction from the kernel flow table
  * will occur. */
 void
-ofproto_set_flow_eviction_threshold(unsigned threshold)
+ofproto_set_flow_limit(unsigned limit)
 {
-    flow_eviction_threshold = MAX(OFPROTO_FLOW_EVICTION_THRESHOLD_MIN,
-                                  threshold);
+    ofproto_flow_limit = limit;
 }
 
 /* Sets the path for handling flow misses. */
@@ -733,16 +733,22 @@ ofproto_set_mac_table_config(struct ofproto *ofproto, unsigned idle_time,
     }
 }
 
-/* Sets number of upcall handler threads.  The default is
- * (number of online cores - 2). */
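+/* Sets the number of upcall handler and flow revalidator threads.  Zero or
+ * a negative value for either parameter selects a default based on the
+ * number of CPU cores (treated as at least 2). */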
 void
-ofproto_set_n_handler_threads(unsigned limit)
+ofproto_set_threads(int n_handlers_, int n_revalidators_)
 {
-    if (limit) {
-        n_handler_threads = limit;
-    } else {
-        int n_proc = count_cpu_cores();
-        n_handler_threads = n_proc > 2 ? n_proc - 2 : 1;
+    int threads = MAX(count_cpu_cores(), 2);
+
+    n_revalidators = MAX(n_revalidators_, 0);
+    n_handlers = MAX(n_handlers_, 0);
+
+    if (!n_revalidators) {
+        n_revalidators = n_handlers
+            ? MAX(threads - (int) n_handlers, 1)
+            : threads / 4 + 1;
+    }
+
+    if (!n_handlers) {
+        n_handlers = MAX(threads - (int) n_revalidators, 1);
     }
 }
 
@@ -1162,7 +1168,7 @@ ofproto_configure_table(struct ofproto *ofproto, int table_id,
     }
 
     table->max_flows = s->max_flows;
-    ovs_rwlock_wrlock(&table->cls.rwlock);
+    fat_rwlock_wrlock(&table->cls.rwlock);
     if (classifier_count(&table->cls) > table->max_flows
         && table->eviction_fields) {
         /* 'table' contains more flows than allowed.  We might not be able to
@@ -1182,7 +1188,7 @@ ofproto_configure_table(struct ofproto *ofproto, int table_id,
     classifier_set_prefix_fields(&table->cls,
                                  s->prefix_fields, s->n_prefix_fields);
 
-    ovs_rwlock_unlock(&table->cls.rwlock);
+    fat_rwlock_unlock(&table->cls.rwlock);
 }
 \f
 bool
@@ -1211,7 +1217,7 @@ ofproto_rule_delete__(struct ofproto *ofproto, struct rule *rule,
     ofopgroup_submit(group);
 }
 
-/* Deletes 'rule' from 'cls' within 'ofproto'.
+/* Deletes 'rule' from 'ofproto'.
  *
  * Within an ofproto implementation, this function allows an ofproto
  * implementation to destroy any rules that remain when its ->destruct()
@@ -1257,9 +1263,9 @@ ofproto_flush__(struct ofproto *ofproto)
             continue;
         }
 
-        ovs_rwlock_rdlock(&table->cls.rwlock);
+        fat_rwlock_rdlock(&table->cls.rwlock);
         cls_cursor_init(&cursor, &table->cls, NULL);
-        ovs_rwlock_unlock(&table->cls.rwlock);
+        fat_rwlock_unlock(&table->cls.rwlock);
         CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
             if (!rule->pending) {
                 ofproto_rule_delete__(ofproto, rule, OFPRR_DELETE);
@@ -1448,7 +1454,7 @@ ofproto_run(struct ofproto *p)
                 heap_rebuild(&evg->rules);
             }
 
-            ovs_rwlock_rdlock(&table->cls.rwlock);
+            fat_rwlock_rdlock(&table->cls.rwlock);
             cls_cursor_init(&cursor, &table->cls, NULL);
             CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
                 if (!rule->eviction_group
@@ -1456,7 +1462,7 @@ ofproto_run(struct ofproto *p)
                     eviction_group_add_rule(rule);
                 }
             }
-            ovs_rwlock_unlock(&table->cls.rwlock);
+            fat_rwlock_unlock(&table->cls.rwlock);
             ovs_mutex_unlock(&ofproto_mutex);
         }
     }
@@ -1516,7 +1522,7 @@ ofproto_run(struct ofproto *p)
         break;
 
     default:
-        NOT_REACHED();
+        OVS_NOT_REACHED();
     }
 
     if (time_msec() >= p->next_op_report) {
@@ -1606,9 +1612,9 @@ ofproto_get_memory_usage(const struct ofproto *ofproto, struct simap *usage)
 
     n_rules = 0;
     OFPROTO_FOR_EACH_TABLE (table, ofproto) {
-        ovs_rwlock_rdlock(&table->cls.rwlock);
+        fat_rwlock_rdlock(&table->cls.rwlock);
         n_rules += classifier_count(&table->cls);
-        ovs_rwlock_unlock(&table->cls.rwlock);
+        fat_rwlock_unlock(&table->cls.rwlock);
     }
     simap_increase(usage, "rules", n_rules);
 
@@ -1895,7 +1901,7 @@ ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
 
     /* First do a cheap check whether the rule we're looking for already exists
      * with the actions that we want.  If it does, then we're done. */
-    ovs_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
+    fat_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
     rule = rule_from_cls_rule(classifier_find_match_exactly(
                                   &ofproto->tables[0].cls, match, priority));
     if (rule) {
@@ -1907,7 +1913,7 @@ ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
     } else {
         must_add = true;
     }
-    ovs_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
+    fat_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
 
     /* If there's no such rule or the rule doesn't have the actions we want,
      * fall back to executing a full flow mod.  We can't optimize this at
@@ -1929,6 +1935,46 @@ int
 ofproto_flow_mod(struct ofproto *ofproto, struct ofputil_flow_mod *fm)
     OVS_EXCLUDED(ofproto_mutex)
 {
+    /* Optimize for the most common case of a repeated learn action.
+     * If an identical flow already exists we only need to update its
+     * 'modified' time. */
+    if (fm->command == OFPFC_MODIFY_STRICT && fm->table_id != OFPTT_ALL
+        && !(fm->flags & OFPUTIL_FF_RESET_COUNTS)) {
+        struct oftable *table = &ofproto->tables[fm->table_id];
+        struct cls_rule match_rule;
+        struct rule *rule;
+        bool done = false;
+
+        cls_rule_init(&match_rule, &fm->match, fm->priority);
+        fat_rwlock_rdlock(&table->cls.rwlock);
+        rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls,
+                                                               &match_rule));
+        if (rule) {
+            /* Reading many of the rule fields and writing to 'modified'
+             * requires the rule->mutex.  Also, rule->actions may change
+             * if rule->mutex is not held. */
+            ovs_mutex_lock(&rule->mutex);
+            if (rule->idle_timeout == fm->idle_timeout
+                && rule->hard_timeout == fm->hard_timeout
+                && rule->flags == (fm->flags & OFPUTIL_FF_STATE)
+                && (!fm->modify_cookie || (fm->new_cookie == rule->flow_cookie))
+                && ofpacts_equal(fm->ofpacts, fm->ofpacts_len,
+                                 rule->actions->ofpacts,
+                                 rule->actions->ofpacts_len)) {
+                /* Rule already exists and need not change, only update the
+                 * modified timestamp. */
+                rule->modified = time_msec();
+                done = true;
+            }
+            ovs_mutex_unlock(&rule->mutex);
+        }
+        fat_rwlock_unlock(&table->cls.rwlock);
+
+        if (done) {
+            return 0;
+        }
+    }
+
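+    /* Fast path did not apply; do a full flow_mod. */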
     return handle_flow_mod__(ofproto, NULL, fm, NULL);
 }
 
@@ -1946,10 +1992,10 @@ ofproto_delete_flow(struct ofproto *ofproto,
 
     /* First do a cheap check whether the rule we're looking for has already
      * been deleted.  If so, then we're done. */
-    ovs_rwlock_rdlock(&cls->rwlock);
+    fat_rwlock_rdlock(&cls->rwlock);
     rule = rule_from_cls_rule(classifier_find_match_exactly(cls, target,
                                                             priority));
-    ovs_rwlock_unlock(&cls->rwlock);
+    fat_rwlock_unlock(&cls->rwlock);
     if (!rule) {
         return true;
     }
@@ -2521,26 +2567,16 @@ void
 ofproto_rule_ref(struct rule *rule)
 {
     if (rule) {
-        unsigned int orig;
-
-        atomic_add(&rule->ref_count, 1, &orig);
-        ovs_assert(orig != 0);
+        ovs_refcount_ref(&rule->ref_count);
     }
 }
 
 void
 ofproto_rule_unref(struct rule *rule)
 {
-    if (rule) {
-        unsigned int orig;
-
-        atomic_sub(&rule->ref_count, 1, &orig);
-        if (orig == 1) {
-            rule->ofproto->ofproto_class->rule_destruct(rule);
-            ofproto_rule_destroy__(rule);
-        } else {
-            ovs_assert(orig != 0);
-        }
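+    /* ovs_refcount_unref() returns the count before the decrement, so 1
+     * means this was the last reference. */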
+    if (rule && ovs_refcount_unref(&rule->ref_count) == 1) {
+        rule->ofproto->ofproto_class->rule_destruct(rule);
+        ofproto_rule_destroy__(rule);
     }
 }
 
@@ -2572,6 +2608,7 @@ ofproto_rule_destroy__(struct rule *rule)
     cls_rule_destroy(CONST_CAST(struct cls_rule *, &rule->cr));
     rule_actions_unref(rule->actions);
     ovs_mutex_destroy(&rule->mutex);
+    ovs_refcount_destroy(&rule->ref_count);
     rule->ofproto->ofproto_class->rule_dealloc(rule);
 }
 
@@ -2587,7 +2624,7 @@ rule_actions_create(const struct ofproto *ofproto,
     struct rule_actions *actions;
 
     actions = xmalloc(sizeof *actions);
-    atomic_init(&actions->ref_count, 1);
+    ovs_refcount_init(&actions->ref_count);
     actions->ofpacts = xmemdup(ofpacts, ofpacts_len);
     actions->ofpacts_len = ofpacts_len;
     actions->provider_meter_id
@@ -2602,10 +2639,7 @@ void
 rule_actions_ref(struct rule_actions *actions)
 {
     if (actions) {
-        unsigned int orig;
-
-        atomic_add(&actions->ref_count, 1, &orig);
-        ovs_assert(orig != 0);
+        ovs_refcount_ref(&actions->ref_count);
     }
 }
 
@@ -2614,16 +2648,10 @@ rule_actions_ref(struct rule_actions *actions)
 void
 rule_actions_unref(struct rule_actions *actions)
 {
-    if (actions) {
-        unsigned int orig;
-
-        atomic_sub(&actions->ref_count, 1, &orig);
-        if (orig == 1) {
-            free(actions->ofpacts);
-            free(actions);
-        } else {
-            ovs_assert(orig != 0);
-        }
+    if (actions && ovs_refcount_unref(&actions->ref_count) == 1) {
+        ovs_refcount_destroy(&actions->ref_count);
+        free(actions->ofpacts);
+        free(actions);
     }
 }
 
@@ -2669,7 +2697,7 @@ ofoperation_has_out_port(const struct ofoperation *op, ofp_port_t out_port)
                                       op->actions->ofpacts_len, out_port);
     }
 
-    NOT_REACHED();
+    OVS_NOT_REACHED();
 }
 
 static void
@@ -3072,9 +3100,9 @@ handle_table_stats_request(struct ofconn *ofconn,
         ots[i].instructions = htonl(OFPIT11_ALL);
         ots[i].config = htonl(OFPTC11_TABLE_MISS_MASK);
         ots[i].max_entries = htonl(1000000); /* An arbitrary big number. */
-        ovs_rwlock_rdlock(&p->tables[i].cls.rwlock);
+        fat_rwlock_rdlock(&p->tables[i].cls.rwlock);
         ots[i].active_count = htonl(classifier_count(&p->tables[i].cls));
-        ovs_rwlock_unlock(&p->tables[i].cls.rwlock);
+        fat_rwlock_unlock(&p->tables[i].cls.rwlock);
     }
 
     p->ofproto_class->get_tables(p, ots);
@@ -3221,14 +3249,11 @@ calc_duration(long long int start, long long int now,
 }
 
 /* Checks whether 'table_id' is 0xff or a valid table ID in 'ofproto'.  Returns
- * 0 if 'table_id' is OK, otherwise an OpenFlow error code.  */
-static enum ofperr
+ * true if 'table_id' is OK, false otherwise.  */
+static bool
 check_table_id(const struct ofproto *ofproto, uint8_t table_id)
 {
-    return (table_id == 0xff || table_id < ofproto->n_tables
-            ? 0
-            : OFPERR_OFPBRC_BAD_TABLE_ID);
-
+    return table_id == OFPTT_ALL || table_id < ofproto->n_tables;
 }
 
 static struct oftable *
@@ -3412,12 +3437,12 @@ collect_rules_loose(struct ofproto *ofproto,
     OVS_REQUIRES(ofproto_mutex)
 {
     struct oftable *table;
-    enum ofperr error;
+    enum ofperr error = 0;
 
     rule_collection_init(rules);
 
-    error = check_table_id(ofproto, criteria->table_id);
-    if (error) {
+    if (!check_table_id(ofproto, criteria->table_id)) {
+        error = OFPERR_OFPBRC_BAD_TABLE_ID;
         goto exit;
     }
 
@@ -3439,7 +3464,7 @@ collect_rules_loose(struct ofproto *ofproto,
             struct cls_cursor cursor;
             struct rule *rule;
 
-            ovs_rwlock_rdlock(&table->cls.rwlock);
+            fat_rwlock_rdlock(&table->cls.rwlock);
             cls_cursor_init(&cursor, &table->cls, &criteria->cr);
             CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
                 error = collect_rule(rule, criteria, rules);
@@ -3447,7 +3472,7 @@ collect_rules_loose(struct ofproto *ofproto,
                     break;
                 }
             }
-            ovs_rwlock_unlock(&table->cls.rwlock);
+            fat_rwlock_unlock(&table->cls.rwlock);
         }
     }
 
@@ -3473,12 +3498,12 @@ collect_rules_strict(struct ofproto *ofproto,
     OVS_REQUIRES(ofproto_mutex)
 {
     struct oftable *table;
-    int error;
+    int error = 0;
 
     rule_collection_init(rules);
 
-    error = check_table_id(ofproto, criteria->table_id);
-    if (error) {
+    if (!check_table_id(ofproto, criteria->table_id)) {
+        error = OFPERR_OFPBRC_BAD_TABLE_ID;
         goto exit;
     }
 
@@ -3499,10 +3524,10 @@ collect_rules_strict(struct ofproto *ofproto,
         FOR_EACH_MATCHING_TABLE (table, criteria->table_id, ofproto) {
             struct rule *rule;
 
-            ovs_rwlock_rdlock(&table->cls.rwlock);
+            fat_rwlock_rdlock(&table->cls.rwlock);
             rule = rule_from_cls_rule(classifier_find_rule_exactly(
                                           &table->cls, &criteria->cr));
-            ovs_rwlock_unlock(&table->cls.rwlock);
+            fat_rwlock_unlock(&table->cls.rwlock);
             if (rule) {
                 error = collect_rule(rule, criteria, rules);
                 if (error) {
@@ -3576,20 +3601,20 @@ handle_flow_stats_request(struct ofconn *ofconn,
         fs.idle_timeout = rule->idle_timeout;
         fs.hard_timeout = rule->hard_timeout;
         created = rule->created;
-        used = rule->used;
         modified = rule->modified;
         actions = rule_get_actions__(rule);
         flags = rule->flags;
         ovs_mutex_unlock(&rule->mutex);
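+        /* Packet and byte counts and the flow's last-used time are
+         * maintained by the provider. */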
 
+        ofproto->ofproto_class->rule_get_stats(rule, &fs.packet_count,
+                                               &fs.byte_count, &used);
+
         minimatch_expand(&rule->cr.match, &fs.match);
         fs.table_id = rule->table_id;
         calc_duration(created, now, &fs.duration_sec, &fs.duration_nsec);
         fs.priority = rule->cr.priority;
         fs.idle_age = age_secs(now - used);
         fs.hard_age = age_secs(now - modified);
-        ofproto->ofproto_class->rule_get_stats(rule, &fs.packet_count,
-                                               &fs.byte_count);
         fs.ofpacts = actions->ofpacts;
         fs.ofpacts_len = actions->ofpacts_len;
 
@@ -3612,10 +3637,10 @@ flow_stats_ds(struct rule *rule, struct ds *results)
 {
     uint64_t packet_count, byte_count;
     struct rule_actions *actions;
-    long long int created;
+    long long int created, used;
 
-    rule->ofproto->ofproto_class->rule_get_stats(rule,
-                                                 &packet_count, &byte_count);
+    rule->ofproto->ofproto_class->rule_get_stats(rule, &packet_count,
+                                                 &byte_count, &used);
 
     ovs_mutex_lock(&rule->mutex);
     actions = rule_get_actions__(rule);
@@ -3650,12 +3675,12 @@ ofproto_get_all_flows(struct ofproto *p, struct ds *results)
         struct cls_cursor cursor;
         struct rule *rule;
 
-        ovs_rwlock_rdlock(&table->cls.rwlock);
+        fat_rwlock_rdlock(&table->cls.rwlock);
         cls_cursor_init(&cursor, &table->cls, NULL);
         CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
             flow_stats_ds(rule, results);
         }
-        ovs_rwlock_unlock(&table->cls.rwlock);
+        fat_rwlock_unlock(&table->cls.rwlock);
     }
 }
 
@@ -3726,9 +3751,10 @@ handle_aggregate_stats_request(struct ofconn *ofconn,
         struct rule *rule = rules.rules[i];
         uint64_t packet_count;
         uint64_t byte_count;
+        long long int used;
 
         ofproto->ofproto_class->rule_get_stats(rule, &packet_count,
-                                               &byte_count);
+                                               &byte_count, &used);
 
         if (packet_count == UINT64_MAX) {
             unknown_packets = true;
@@ -3931,10 +3957,10 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
     struct cls_rule cr;
     struct rule *rule;
     uint8_t table_id;
-    int error;
+    int error = 0;
 
-    error = check_table_id(ofproto, fm->table_id);
-    if (error) {
+    if (!check_table_id(ofproto, fm->table_id)) {
+        error = OFPERR_OFPBRC_BAD_TABLE_ID;
         return error;
     }
 
@@ -3966,9 +3992,9 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
     cls_rule_init(&cr, &fm->match, fm->priority);
 
     /* Transform "add" into "modify" if there's an existing identical flow. */
-    ovs_rwlock_rdlock(&table->cls.rwlock);
+    fat_rwlock_rdlock(&table->cls.rwlock);
     rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls, &cr));
-    ovs_rwlock_unlock(&table->cls.rwlock);
+    fat_rwlock_unlock(&table->cls.rwlock);
     if (rule) {
         cls_rule_destroy(&cr);
         if (!rule_is_modifiable(rule)) {
@@ -3998,9 +4024,9 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
     if (fm->flags & OFPUTIL_FF_CHECK_OVERLAP) {
         bool overlaps;
 
-        ovs_rwlock_rdlock(&table->cls.rwlock);
+        fat_rwlock_rdlock(&table->cls.rwlock);
         overlaps = classifier_rule_overlaps(&table->cls, &cr);
-        ovs_rwlock_unlock(&table->cls.rwlock);
+        fat_rwlock_unlock(&table->cls.rwlock);
 
         if (overlaps) {
             cls_rule_destroy(&cr);
@@ -4027,10 +4053,10 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
     /* Initialize base state. */
     *CONST_CAST(struct ofproto **, &rule->ofproto) = ofproto;
     cls_rule_move(CONST_CAST(struct cls_rule *, &rule->cr), &cr);
-    atomic_init(&rule->ref_count, 1);
+    ovs_refcount_init(&rule->ref_count);
     rule->pending = NULL;
     rule->flow_cookie = fm->new_cookie;
-    rule->created = rule->modified = rule->used = time_msec();
+    rule->created = rule->modified = time_msec();
 
     ovs_mutex_init(&rule->mutex);
     ovs_mutex_lock(&rule->mutex);
@@ -4320,6 +4346,7 @@ ofproto_rule_send_removed(struct rule *rule, uint8_t reason)
     OVS_REQUIRES(ofproto_mutex)
 {
     struct ofputil_flow_removed fr;
+    long long int used;
 
     if (ofproto_rule_is_hidden(rule) ||
         !(rule->flags & OFPUTIL_FF_SEND_FLOW_REM)) {
@@ -4338,7 +4365,7 @@ ofproto_rule_send_removed(struct rule *rule, uint8_t reason)
     fr.hard_timeout = rule->hard_timeout;
     ovs_mutex_unlock(&rule->mutex);
     rule->ofproto->ofproto_class->rule_get_stats(rule, &fr.packet_count,
-                                                 &fr.byte_count);
+                                                 &fr.byte_count, &used);
 
     connmgr_send_flow_removed(rule->ofproto->connmgr, &fr);
 }
@@ -4725,7 +4752,7 @@ ofproto_compose_flow_refresh_update(const struct rule *rule,
          * actions, so that when the operation commits we report the change. */
         switch (op->type) {
         case OFOPERATION_ADD:
-            NOT_REACHED();
+            OVS_NOT_REACHED();
 
         case OFOPERATION_MODIFY:
         case OFOPERATION_REPLACE:
@@ -4737,7 +4764,7 @@ ofproto_compose_flow_refresh_update(const struct rule *rule,
             break;
 
         default:
-            NOT_REACHED();
+            OVS_NOT_REACHED();
         }
     }
     fu.ofpacts = actions ? actions->ofpacts : NULL;
@@ -4821,13 +4848,13 @@ ofproto_collect_ofmonitor_refresh_rules(const struct ofmonitor *m,
         struct cls_cursor cursor;
         struct rule *rule;
 
-        ovs_rwlock_rdlock(&table->cls.rwlock);
+        fat_rwlock_rdlock(&table->cls.rwlock);
         cls_cursor_init(&cursor, &table->cls, &target);
         CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
             ovs_assert(!rule->pending); /* XXX */
             ofproto_collect_ofmonitor_refresh_rule(m, rule, seqno, rules);
         }
-        ovs_rwlock_unlock(&table->cls.rwlock);
+        fat_rwlock_unlock(&table->cls.rwlock);
     }
 
     HMAP_FOR_EACH (op, hmap_node, &ofproto->deletions) {
@@ -5775,9 +5802,32 @@ handle_group_mod(struct ofconn *ofconn, const struct ofp_header *oh)
     }
 }
 
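+/* Applies the table configuration in 'tm' to 'ofproto'.  Currently rejects
+ * every configuration (see the XXX comment below). */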
+static enum ofperr
+table_mod(struct ofproto *ofproto, const struct ofputil_table_mod *tm)
+{
+    /* XXX Reject all configurations because none are currently supported. */
+    return OFPERR_OFPTMFC_BAD_CONFIG;
+
+    if (tm->table_id == OFPTT_ALL) {
+        int i;
+        for (i = 0; i < ofproto->n_tables; i++) {
+            atomic_store(&ofproto->tables[i].config,
+                         (unsigned int)tm->config);
+        }
+    } else if (!check_table_id(ofproto, tm->table_id)) {
+        return OFPERR_OFPTMFC_BAD_TABLE;
+    } else {
+        atomic_store(&ofproto->tables[tm->table_id].config,
+                     (unsigned int)tm->config);
+    }
+
+    return 0;
+}
+
 static enum ofperr
 handle_table_mod(struct ofconn *ofconn, const struct ofp_header *oh)
 {
+    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
     struct ofputil_table_mod tm;
     enum ofperr error;
 
@@ -5791,8 +5841,7 @@ handle_table_mod(struct ofconn *ofconn, const struct ofp_header *oh)
         return error;
     }
 
-    /* XXX Actual table mod support is not implemented yet. */
-    return 0;
+    return table_mod(ofproto, &tm);
 }
 
 static enum ofperr
@@ -6142,7 +6191,7 @@ ofopgroup_complete(struct ofopgroup *group)
                 break;
 
             default:
-                NOT_REACHED();
+                OVS_NOT_REACHED();
             }
 
             ofmonitor_report(ofproto->connmgr, rule, event_type,
@@ -6185,10 +6234,12 @@ ofopgroup_complete(struct ofopgroup *group)
             if (!op->error) {
                 long long int now = time_msec();
 
+                ovs_mutex_lock(&rule->mutex);
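+                /* 'rule->modified' must be written under 'rule->mutex'. */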
                 rule->modified = now;
                 if (op->type == OFOPERATION_REPLACE) {
-                    rule->created = rule->used = now;
+                    rule->created = now;
                 }
+                ovs_mutex_unlock(&rule->mutex);
             } else {
                 ofproto_rule_change_cookie(ofproto, rule, op->flow_cookie);
                 ovs_mutex_lock(&rule->mutex);
@@ -6211,7 +6262,7 @@ ofopgroup_complete(struct ofopgroup *group)
             break;
 
         default:
-            NOT_REACHED();
+            OVS_NOT_REACHED();
         }
 
         ofoperation_destroy(op);
@@ -6551,26 +6602,34 @@ eviction_group_find(struct oftable *table, uint32_t id)
 
 /* Returns an eviction priority for 'rule'.  The return value should be
  * interpreted so that higher priorities make a rule more attractive candidates
- * for eviction. */
+ * for eviction.
+ * Called only if 'rule' has a timeout. */
 static uint32_t
-rule_eviction_priority(struct rule *rule)
+rule_eviction_priority(struct ofproto *ofproto, struct rule *rule)
     OVS_REQUIRES(ofproto_mutex)
 {
-    long long int hard_expiration;
-    long long int idle_expiration;
-    long long int expiration;
+    long long int expiration = LLONG_MAX;
+    long long int modified;
     uint32_t expiration_offset;
 
-    /* Calculate time of expiration. */
+    /* 'modified' needs protection even when we hold 'ofproto_mutex'. */
     ovs_mutex_lock(&rule->mutex);
-    hard_expiration = (rule->hard_timeout
-                       ? rule->modified + rule->hard_timeout * 1000
-                       : LLONG_MAX);
-    idle_expiration = (rule->idle_timeout
-                       ? rule->used + rule->idle_timeout * 1000
-                       : LLONG_MAX);
-    expiration = MIN(hard_expiration, idle_expiration);
+    modified = rule->modified;
     ovs_mutex_unlock(&rule->mutex);
+
+    if (rule->hard_timeout) {
+        expiration = modified + rule->hard_timeout * 1000;
+    }
+    if (rule->idle_timeout) {
+        uint64_t packets, bytes;
+        long long int used;
+        long long int idle_expiration;
+
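+        /* The last-used time is kept by the provider, so fetch it along
+         * with the rule's statistics. */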
+        ofproto->ofproto_class->rule_get_stats(rule, &packets, &bytes, &used);
+        idle_expiration = used + rule->idle_timeout * 1000;
+        expiration = MIN(expiration, idle_expiration);
+    }
+
     if (expiration == LLONG_MAX) {
         return 0;
     }
@@ -6600,9 +6659,9 @@ eviction_group_add_rule(struct rule *rule)
     struct oftable *table = &ofproto->tables[rule->table_id];
     bool has_timeout;
 
-    ovs_mutex_lock(&rule->mutex);
+    /* Timeouts may be modified only when holding 'ofproto_mutex'.  We hold
+     * it, so no additional protection is needed. */
     has_timeout = rule->hard_timeout || rule->idle_timeout;
-    ovs_mutex_unlock(&rule->mutex);
 
     if (table->eviction_fields && has_timeout) {
         struct eviction_group *evg;
@@ -6611,7 +6670,7 @@ eviction_group_add_rule(struct rule *rule)
 
         rule->eviction_group = evg;
         heap_insert(&evg->rules, &rule->evg_node,
-                    rule_eviction_priority(rule));
+                    rule_eviction_priority(ofproto, rule));
         eviction_group_resized(table, evg);
     }
 }
@@ -6625,6 +6684,7 @@ oftable_init(struct oftable *table)
     memset(table, 0, sizeof *table);
     classifier_init(&table->cls, flow_segment_u32s);
     table->max_flows = UINT_MAX;
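+    /* Table-miss packets go to the controller by default. */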
+    atomic_init(&table->config, (unsigned int)OFPTC11_TABLE_MISS_CONTROLLER);
 }
 
 /* Destroys 'table', including its classifier and eviction groups.
@@ -6633,12 +6693,13 @@ oftable_init(struct oftable *table)
 static void
 oftable_destroy(struct oftable *table)
 {
-    ovs_rwlock_rdlock(&table->cls.rwlock);
+    fat_rwlock_rdlock(&table->cls.rwlock);
     ovs_assert(classifier_is_empty(&table->cls));
-    ovs_rwlock_unlock(&table->cls.rwlock);
+    fat_rwlock_unlock(&table->cls.rwlock);
     oftable_disable_eviction(table);
     classifier_destroy(&table->cls);
     free(table->name);
+    atomic_destroy(&table->config);
 }
 
 /* Changes the name of 'table' to 'name'.  If 'name' is NULL or the empty
@@ -6717,12 +6778,12 @@ oftable_enable_eviction(struct oftable *table,
     hmap_init(&table->eviction_groups_by_id);
     heap_init(&table->eviction_groups_by_size);
 
-    ovs_rwlock_rdlock(&table->cls.rwlock);
+    fat_rwlock_rdlock(&table->cls.rwlock);
     cls_cursor_init(&cursor, &table->cls, NULL);
     CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
         eviction_group_add_rule(rule);
     }
-    ovs_rwlock_unlock(&table->cls.rwlock);
+    fat_rwlock_unlock(&table->cls.rwlock);
 }
 
 /* Removes 'rule' from the oftable that contains it. */
@@ -6732,9 +6793,9 @@ oftable_remove_rule__(struct ofproto *ofproto, struct rule *rule)
 {
     struct classifier *cls = &ofproto->tables[rule->table_id].cls;
 
-    ovs_rwlock_wrlock(&cls->rwlock);
+    fat_rwlock_wrlock(&cls->rwlock);
     classifier_remove(cls, CONST_CAST(struct cls_rule *, &rule->cr));
-    ovs_rwlock_unlock(&cls->rwlock);
+    fat_rwlock_unlock(&cls->rwlock);
 
     cookies_remove(ofproto, rule);
 
@@ -6781,9 +6842,9 @@ oftable_insert_rule(struct rule *rule)
         struct meter *meter = ofproto->meters[meter_id];
         list_insert(&meter->rules, &rule->meter_list_node);
     }
-    ovs_rwlock_wrlock(&table->cls.rwlock);
+    fat_rwlock_wrlock(&table->cls.rwlock);
     classifier_insert(&table->cls, CONST_CAST(struct cls_rule *, &rule->cr));
-    ovs_rwlock_unlock(&table->cls.rwlock);
+    fat_rwlock_unlock(&table->cls.rwlock);
     eviction_group_add_rule(rule);
 }
 \f
@@ -6852,7 +6913,7 @@ ofproto_get_vlan_usage(struct ofproto *ofproto, unsigned long int *vlan_bitmap)
     OFPROTO_FOR_EACH_TABLE (oftable, ofproto) {
         const struct cls_subtable *table;
 
-        ovs_rwlock_rdlock(&oftable->cls.rwlock);
+        fat_rwlock_rdlock(&oftable->cls.rwlock);
         HMAP_FOR_EACH (table, hmap_node, &oftable->cls.subtables) {
             if (minimask_get_vid_mask(&table->mask) == VLAN_VID_MASK) {
                 const struct cls_rule *rule;
@@ -6864,7 +6925,7 @@ ofproto_get_vlan_usage(struct ofproto *ofproto, unsigned long int *vlan_bitmap)
                 }
             }
         }
-        ovs_rwlock_unlock(&oftable->cls.rwlock);
+        fat_rwlock_unlock(&oftable->cls.rwlock);
     }
 }