VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);
+COVERAGE_DEFINE(upcall_duplicate_flow);
+
/* A thread that reads upcalls from dpif, forwards each upcall's packet,
* and possibly sets up a kernel flow as a cache. */
struct handler {
    long long int created;         /* Estimate of creation time. */
bool mark; /* Used by mark and sweep GC algorithm. */
+ bool flow_exists; /* Ensures flows are only deleted once. */
struct odputil_keybuf key_buf; /* Memory for 'key'. */
struct xlate_cache *xcache; /* Cache for xlate entries that
static void
udpif_stop_threads(struct udpif *udpif)
{
- if (udpif->handlers &&
- (udpif->n_handlers != n_handlers
- || udpif->n_revalidators != n_revalidators)) {
+ if (udpif && (udpif->n_handlers != 0 || udpif->n_revalidators != 0)) {
size_t i;
latch_set(&udpif->exit_latch);
udpif_start_threads(struct udpif *udpif, size_t n_handlers,
size_t n_revalidators)
{
- if (!udpif->handlers && n_handlers) {
+ if (udpif && (!udpif->handlers && !udpif->revalidators)) {
size_t i;
udpif->n_handlers = n_handlers;
{
int error;
+ ovs_assert(udpif);
ovs_assert(n_handlers && n_revalidators);
ovsrcu_quiesce_start();
- udpif_stop_threads(udpif);
+ if (udpif->n_handlers != n_handlers
+ || udpif->n_revalidators != n_revalidators) {
+ udpif_stop_threads(udpif);
+ }
error = dpif_handlers_set(udpif->dpif, n_handlers);
if (error) {
return;
}
- udpif_start_threads(udpif, n_handlers, n_revalidators);
+ if (!udpif->handlers && !udpif->revalidators) {
+ udpif_start_threads(udpif, n_handlers, n_revalidators);
+ }
ovsrcu_quiesce_end();
}
ukey->key_len = key_len;
ukey->mark = false;
+ ukey->flow_exists = true;
ukey->created = used ? used : time_msec();
memset(&ukey->stats, 0, sizeof ukey->stats);
ukey->xcache = NULL;
used = ukey->created;
}
+ if (ukey && (ukey->mark || !ukey->flow_exists)) {
+ /* The flow has already been dumped. This can occasionally occur
+ * if the datapath is changed in the middle of a flow dump. Rather
+ * than perform the same work twice, skip the flow this time. */
+ COVERAGE_INC(upcall_duplicate_flow);
+ continue;
+ }
+
if (must_del || (used && used < now - max_idle)) {
struct dump_op *dop = &ops[n_ops++];
+ if (ukey) {
+ ukey->flow_exists = false;
+ }
dump_op_init(dop, udump->key, udump->key_len, ukey, udump);
continue;
}
ukey->mark = true;
if (!revalidate_ukey(udpif, udump, ukey)) {
+ ukey->flow_exists = false;
dpif_flow_del(udpif->dpif, udump->key, udump->key_len, NULL);
- ukey_delete(revalidator, ukey);
+ /* The ukey will be cleaned up by revalidator_sweep().
+ * This helps to avoid deleting the same flow twice. */
}
list_remove(&udump->list_node);
HMAP_FOR_EACH_SAFE (ukey, next, hmap_node, &revalidator->ukeys) {
if (!purge && ukey->mark) {
ukey->mark = false;
+ } else if (!ukey->flow_exists) {
+ ukey_delete(revalidator, ukey);
} else {
struct dump_op *op = &ops[n_ops++];