#define MAX_QUEUE_LENGTH 512
#define FLOW_MISS_MAX_BATCH 50
#define REVALIDATE_MAX_BATCH 50
-#define MAX_IDLE 1500
VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);
udpif_destroy(struct udpif *udpif)
{
udpif_set_threads(udpif, 0, 0);
- udpif_flush();
+ udpif_flush(udpif);
list_remove(&udpif->list_node);
latch_destroy(&udpif->exit_latch);
seq_destroy(udpif->reval_seq);
seq_destroy(udpif->dump_seq);
- atomic_destroy(&udpif->flow_limit);
- atomic_destroy(&udpif->n_flows);
- atomic_destroy(&udpif->n_flows_timestamp);
ovs_mutex_destroy(&udpif->n_flows_mutex);
free(udpif);
}
}
}
-/* Removes all flows from all datapaths. */
+/* Removes all flows from 'udpif''s datapath.
+ *
+ * NOTE: this temporarily stops 'udpif''s handler and revalidator threads and
+ * restarts them after the flush, so the operation is not transparent to
+ * concurrent upcall processing. */
void
-udpif_flush(void)
+udpif_flush(struct udpif *udpif)
+{
+ size_t n_handlers, n_revalidators;
+
+ n_handlers = udpif->n_handlers;
+ n_revalidators = udpif->n_revalidators;
+
+ udpif_set_threads(udpif, 0, 0);
+ dpif_flow_flush(udpif->dpif);
+ udpif_set_threads(udpif, n_handlers, n_revalidators);
+}
+
+/* Removes all flows from all datapaths.  See udpif_flush() for the side
+ * effects this has on each udpif's handler and revalidator threads. */
+static void
+udpif_flush_all_datapaths(void)
{
struct udpif *udpif;
LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
-        dpif_flow_flush(udpif->dpif);
+        udpif_flush(udpif);
}
}
+
\f
/* Destroys and deallocates 'upcall'. */
static void
bool need_revalidate;
uint64_t reval_seq;
size_t n_flows, i;
+ int error;
+ void *state = NULL;
reval_seq = seq_read(udpif->reval_seq);
need_revalidate = udpif->last_reval_seq != reval_seq;
udpif->avg_n_flows = (udpif->avg_n_flows + n_flows) / 2;
start_time = time_msec();
- dpif_flow_dump_start(&dump, udpif->dpif);
- while (dpif_flow_dump_next(&dump, &key, &key_len, &mask, &mask_len,
- NULL, NULL, &stats)
+ error = dpif_flow_dump_start(&dump, udpif->dpif);
+ if (error) {
+ VLOG_INFO("Failed to start flow dump (%s)", ovs_strerror(error));
+ goto skip;
+ }
+ dpif_flow_dump_state_init(udpif->dpif, &state);
+ while (dpif_flow_dump_next(&dump, state, &key, &key_len,
+ &mask, &mask_len, NULL, NULL, &stats)
&& !latch_is_set(&udpif->exit_latch)) {
struct udpif_flow_dump *udump = xmalloc(sizeof *udump);
struct revalidator *revalidator;
xpthread_cond_signal(&revalidator->wake_cond);
ovs_mutex_unlock(&revalidator->mutex);
}
+ dpif_flow_dump_state_uninit(udpif->dpif, state);
dpif_flow_dump_done(&dump);
/* Let all the revalidators finish and garbage collect. */
duration);
}
- poll_timer_wait_until(start_time + MIN(MAX_IDLE, 500));
+skip:
+ poll_timer_wait_until(start_time + MIN(ofproto_max_idle, 500));
seq_wait(udpif->reval_seq, udpif->last_reval_seq);
latch_wait(&udpif->exit_latch);
poll_block();
type = classify_upcall(upcall);
if (type == MISS_UPCALL) {
uint32_t hash;
+ struct pkt_metadata md;
- flow_extract(packet, flow.skb_priority, flow.pkt_mark,
- &flow.tunnel, &flow.in_port, &miss->flow);
+ pkt_metadata_from_flow(&md, &flow);
+ flow_extract(packet, &md, &miss->flow);
hash = flow_hash(&miss->flow, 0);
existing_miss = flow_miss_find(&misses, ofproto, &miss->flow,
n_flows = udpif_get_n_flows(udpif);
must_del = false;
- max_idle = MAX_IDLE;
+ max_idle = ofproto_max_idle;
if (n_flows > flow_limit) {
must_del = n_flows > 2 * flow_limit;
max_idle = 100;
void *aux OVS_UNUSED)
{
atomic_store(&enable_megaflows, false);
- udpif_flush();
+ udpif_flush_all_datapaths();
unixctl_command_reply(conn, "megaflows disabled");
}
void *aux OVS_UNUSED)
{
atomic_store(&enable_megaflows, true);
- udpif_flush();
+ udpif_flush_all_datapaths();
unixctl_command_reply(conn, "megaflows enabled");
}