+/* Main loop of the flow-dumper thread.
+ *
+ * Repeatedly dumps every flow installed in the datapath and hands each
+ * dumped flow off to one of the revalidator threads (chosen by hashing the
+ * flow key), then waits for all revalidators to finish processing the batch
+ * before adjusting the datapath flow limit based on how long the pass took.
+ *
+ * 'arg' is the owning 'struct udpif'.  Runs until 'udpif->exit_latch' is
+ * set; always returns NULL. */
+static void *
+udpif_flow_dumper(void *arg)
+{
+ struct udpif *udpif = arg;
+
+ set_subprogram_name("flow_dumper");
+ while (!latch_is_set(&udpif->exit_latch)) {
+ const struct dpif_flow_stats *stats;
+ long long int start_time, duration;
+ const struct nlattr *key, *mask;
+ struct dpif_flow_dump dump;
+ size_t key_len, mask_len;
+ unsigned int flow_limit;
+ bool need_revalidate;
+ uint64_t reval_seq;
+ size_t n_flows, i;
+
+ /* A change in 'reval_seq' since the previous pass means something
+ * (e.g. configuration) changed, so every flow dumped in this pass
+ * must be fully revalidated rather than just having its stats
+ * examined. */
+ reval_seq = seq_read(udpif->reval_seq);
+ need_revalidate = udpif->last_reval_seq != reval_seq;
+ udpif->last_reval_seq = reval_seq;
+
+ /* Track the high-water mark and a crude running average of the
+ * datapath flow count (each pass averages the new sample with the
+ * previous average). */
+ n_flows = udpif_get_n_flows(udpif);
+ udpif->max_n_flows = MAX(n_flows, udpif->max_n_flows);
+ udpif->avg_n_flows = (udpif->avg_n_flows + n_flows) / 2;
+
+ start_time = time_msec();
+ dpif_flow_dump_start(&dump, udpif->dpif);
+ while (dpif_flow_dump_next(&dump, &key, &key_len, &mask, &mask_len,
+ NULL, NULL, &stats)
+ && !latch_is_set(&udpif->exit_latch)) {
+ struct udpif_flow_dump *udump = xmalloc(sizeof *udump);
+ struct revalidator *revalidator;
+
+ /* Deep-copy the key, mask, and stats into 'udump': the pointers
+ * returned by dpif_flow_dump_next() presumably remain valid only
+ * until the next iteration -- TODO confirm against the dpif
+ * contract.  NOTE(review): no visible check that 'key_len' /
+ * 'mask_len' fit in 'key_buf' / 'mask_buf'; verify the buffers
+ * are sized for the maximum possible flow key. */
+ udump->key_hash = hash_bytes(key, key_len, udpif->secret);
+ memcpy(&udump->key_buf, key, key_len);
+ udump->key = (struct nlattr *) &udump->key_buf;
+ udump->key_len = key_len;
+
+ memcpy(&udump->mask_buf, mask, mask_len);
+ udump->mask = (struct nlattr *) &udump->mask_buf;
+ udump->mask_len = mask_len;
+
+ udump->stats = *stats;
+ udump->need_revalidate = need_revalidate;
+
+ /* Hashing on the flow key guarantees that a given flow always
+ * lands on the same revalidator thread. */
+ revalidator = &udpif->revalidators[udump->key_hash
+ % udpif->n_revalidators];
+
+ /* Back-pressure: block while this revalidator's queue already
+ * holds three full batches, unless we are shutting down. */
+ ovs_mutex_lock(&revalidator->mutex);
+ while (revalidator->n_udumps >= REVALIDATE_MAX_BATCH * 3
+ && !latch_is_set(&udpif->exit_latch)) {
+ ovs_mutex_cond_wait(&revalidator->wake_cond,
+ &revalidator->mutex);
+ }
+ list_push_back(&revalidator->udumps, &udump->list_node);
+ revalidator->n_udumps++;
+ xpthread_cond_signal(&revalidator->wake_cond);
+ ovs_mutex_unlock(&revalidator->mutex);
+ }
+ dpif_flow_dump_done(&dump);
+
+ /* Let all the revalidators finish and garbage collect. */
+ seq_change(udpif->dump_seq);
+ /* Wake every revalidator so each notices the 'dump_seq' change even
+ * if its queue is empty. */
+ for (i = 0; i < udpif->n_revalidators; i++) {
+ struct revalidator *revalidator = &udpif->revalidators[i];
+ ovs_mutex_lock(&revalidator->mutex);
+ xpthread_cond_signal(&revalidator->wake_cond);
+ ovs_mutex_unlock(&revalidator->mutex);
+ }
+
+ /* Wait until every revalidator has caught up to the new 'dump_seq'
+ * (or we are told to exit), so the next pass starts clean. */
+ for (i = 0; i < udpif->n_revalidators; i++) {
+ struct revalidator *revalidator = &udpif->revalidators[i];
+
+ ovs_mutex_lock(&revalidator->mutex);
+ while (revalidator->dump_seq != seq_read(udpif->dump_seq)
+ && !latch_is_set(&udpif->exit_latch)) {
+ ovs_mutex_cond_wait(&revalidator->wake_cond,
+ &revalidator->mutex);
+ }
+ ovs_mutex_unlock(&revalidator->mutex);
+ }
+
+ /* Floor of 1 ms avoids a zero divisor in the limit heuristic
+ * below. */
+ duration = MAX(time_msec() - start_time, 1);
+ udpif->dump_duration = duration;
+ atomic_read(&udpif->flow_limit, &flow_limit);
+ /* Feedback control on the datapath flow limit: shrink it sharply
+ * when a dump pass is slow (> 2 s), trim it when moderately slow
+ * (> 1.3 s), and grow it gradually when dumps are fast and the
+ * datapath is busy.  The result is clamped to
+ * [1000, ofproto_flow_limit]. */
+ if (duration > 2000) {
+ flow_limit /= duration / 1000;
+ } else if (duration > 1300) {
+ flow_limit = flow_limit * 3 / 4;
+ } else if (duration < 1000 && n_flows > 2000
+ && flow_limit < n_flows * 1000 / duration) {
+ flow_limit += 1000;
+ }
+ flow_limit = MIN(ofproto_flow_limit, MAX(flow_limit, 1000));
+ atomic_store(&udpif->flow_limit, flow_limit);
+
+ if (duration > 2000) {
+ VLOG_INFO("Spent an unreasonably long %lldms dumping flows",
+ duration);
+ }
+
+ /* Sleep until the earliest of: the 500 ms (or MAX_IDLE, if smaller)
+ * periodic tick, a revalidation request ('reval_seq' change), or a
+ * shutdown request ('exit_latch'). */
+ poll_timer_wait_until(start_time + MIN(MAX_IDLE, 500));
+ seq_wait(udpif->reval_seq, udpif->last_reval_seq);
+ latch_wait(&udpif->exit_latch);
+ poll_block();
+ }
+
+ return NULL;
+}
+
+/* The miss handler thread is responsible for processing miss upcalls retrieved