+ ofpbuf_use_stack(&xout_actions, slow_path_buf, sizeof slow_path_buf);
+ compose_slow_path(udpif, &xout, odp_in_port, &xout_actions);
+ }
+
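+ /* The actions currently installed in the datapath must match the newly
+ * translated actions exactly; otherwise the flow is no longer valid. */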
+ if (!ofpbuf_equal(&xout_actions, actions)) {
+ goto exit;
+ }
+
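+ /* Parse the mask the datapath reported for this flow; a mask that can't
+ * be parsed can't be validated either. */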
+ if (odp_flow_key_to_mask(udump->mask, udump->mask_len, &udump_mask, &flow)
+ == ODP_FIT_ERROR) {
+ goto exit;
+ }
+
+ /* Since the kernel is free to ignore wildcarded bits in the mask, we can't
+ * directly check that the masks are the same. Instead we check that the
+ * mask in the kernel is more specific, i.e. less wildcarded, than what
+ * we've calculated here. This guarantees we don't catch any packets we
+ * shouldn't with the megaflow. */
+ udump32 = (uint32_t *) &udump_mask;
+ xout32 = (uint32_t *) &xout.wc.masks;
+ for (i = 0; i < FLOW_U32S; i++) {
+ if ((udump32[i] | xout32[i]) != udump32[i]) {
+ goto exit;
+ }
+ }
+ ok = true;
+
+exit:
+ ofpbuf_delete(actions);
+ xlate_out_uninit(xoutp);
+ return ok;
+}
+
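+/* Processes each flow dump in 'udumps': deletes flows that have idled out or
+ * failed revalidation, creates or refreshes a ukey for each surviving flow,
+ * and finally pushes the unaccounted stats of deleted flows through
+ * translation for side effects such as learning and NetFlow. */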
+static void
+revalidate_udumps(struct revalidator *revalidator, struct list *udumps)
+{
+ struct udpif *udpif = revalidator->udpif;
+
+ struct {
+ struct dpif_flow_stats ukey_stats; /* Stats stored in the ukey. */
+ struct dpif_flow_stats stats; /* Stats for 'op'. */
+ struct dpif_op op; /* Flow del operation. */
+ } ops[REVALIDATE_MAX_BATCH];
+
+ struct dpif_op *opsp[REVALIDATE_MAX_BATCH];
+ struct udpif_flow_dump *udump, *next_udump;
+ size_t n_ops, i, n_flows;
+ unsigned int flow_limit;
+ long long int max_idle;
+ bool must_del;
+
+ atomic_read(&udpif->max_idle, &max_idle);
+ atomic_read(&udpif->flow_limit, &flow_limit);
+
+ n_flows = udpif_get_n_flows(udpif);
+
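+ /* Once the datapath holds more than 'flow_limit' flows, age them out
+ * after 100 ms of idleness; past twice the limit, delete flows
+ * unconditionally. */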
+ must_del = false;
+ if (n_flows > flow_limit) {
+ must_del = n_flows > 2 * flow_limit;
+ max_idle = 100;
+ }
+
+ n_ops = 0;
+ LIST_FOR_EACH_SAFE (udump, next_udump, list_node, udumps) {
+ long long int used, now;
+ struct udpif_key *ukey;
+
+ now = time_msec();
+ ukey = ukey_lookup(revalidator, udump);
+
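+ /* Prefer the last-used time reported by the datapath; if the datapath
+ * has none, fall back to the time we first saw the flow. */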
+ used = udump->stats.used;
+ if (!used && ukey) {
+ used = ukey->created;
+ }
+
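+ /* Queue a delete operation for this flow, saving the stats already
+ * attributed to its ukey so that only the difference is pushed below. */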
+ if (must_del || (used && used < now - max_idle)) {
+ struct dpif_flow_stats *ukey_stats = &ops[n_ops].ukey_stats;
+ struct dpif_op *op = &ops[n_ops].op;
+
+ op->type = DPIF_OP_FLOW_DEL;
+ op->u.flow_del.key = udump->key;
+ op->u.flow_del.key_len = udump->key_len;
+ op->u.flow_del.stats = &ops[n_ops].stats;
+ n_ops++;
+
+ if (ukey) {
+ *ukey_stats = ukey->stats;
+ ukey_delete(revalidator, ukey);
+ } else {
+ memset(ukey_stats, 0, sizeof *ukey_stats);
+ }
+
+ continue;
+ }
+
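+ /* First sighting of this flow: create a ukey to track its key and the
+ * stats attributed to it so far. */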
+ if (!ukey) {
+ ukey = xmalloc(sizeof *ukey);
+
+ ukey->key = (struct nlattr *) &ukey->key_buf;
+ memcpy(ukey->key, udump->key, udump->key_len);
+ ukey->key_len = udump->key_len;
+
+ ukey->created = used ? used : now;
+ memset(&ukey->stats, 0, sizeof ukey->stats);
+
+ ukey->mark = false;
+
+ hmap_insert(&revalidator->ukeys, &ukey->hmap_node,
+ udump->key_hash);
+ }
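+ /* Mark the ukey so that revalidator_sweep() knows this flow still
+ * exists in the datapath. */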
+ ukey->mark = true;
+
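+ /* If the flow's actions or mask no longer check out, remove it from
+ * the datapath immediately. */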
+ if (!revalidate_ukey(udpif, udump, ukey)) {
+ dpif_flow_del(udpif->dpif, udump->key, udump->key_len, NULL);
+ ukey_delete(revalidator, ukey);
+ }
+
+ list_remove(&udump->list_node);
+ free(udump);
+ }
+
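+ /* Execute all queued flow deletions in one batch. */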
+ for (i = 0; i < n_ops; i++) {
+ opsp[i] = &ops[i].op;
+ }
+ dpif_operate(udpif->dpif, opsp, n_ops);
+
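+ /* Attribute the final stats of each deleted flow, counting only the
+ * packets and bytes not already accounted to its ukey. */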
+ for (i = 0; i < n_ops; i++) {
+ struct dpif_flow_stats push, *stats, *ukey_stats;
+
+ ukey_stats = &ops[i].ukey_stats;
+ stats = ops[i].op.u.flow_del.stats;
+ push.used = MAX(stats->used, ukey_stats->used);
+ push.tcp_flags = stats->tcp_flags | ukey_stats->tcp_flags;
+ push.n_packets = stats->n_packets - ukey_stats->n_packets;
+ push.n_bytes = stats->n_bytes - ukey_stats->n_bytes;
+
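+ /* Translate the flow one last time purely for its side effects, e.g.
+ * MAC learning and NetFlow expiration. */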
+ if (push.n_packets || netflow_exists()) {
+ struct ofproto_dpif *ofproto;
+ struct netflow *netflow;
+ struct flow flow;
+
+ if (!xlate_receive(udpif->backer, NULL, ops[i].op.u.flow_del.key,
+ ops[i].op.u.flow_del.key_len, &flow, NULL,
+ &ofproto, NULL, NULL, &netflow, NULL)) {
+ struct xlate_in xin;
+
+ xlate_in_init(&xin, ofproto, &flow, NULL, push.tcp_flags,
+ NULL);
+ xin.resubmit_stats = push.n_packets ? &push : NULL;
+ xin.may_learn = push.n_packets > 0;
+ xin.skip_wildcards = true;
+ xlate_actions_for_side_effects(&xin);
+
+ if (netflow) {
+ netflow_expire(netflow, &flow);
+ netflow_flow_clear(netflow, &flow);
+ netflow_unref(netflow);
+ }
+ }
+ }
+ }
+
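+ /* The dumps queued for deletion above were left on the list; free
+ * them now. */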
+ LIST_FOR_EACH_SAFE (udump, next_udump, list_node, udumps) {
+ list_remove(&udump->list_node);
+ free(udump);
+ }
+}
+
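+/* Deletes any ukey that was not marked during the most recent round of flow
+ * dumps, i.e. whose datapath flow no longer exists. */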
+static void
+revalidator_sweep(struct revalidator *revalidator)
+{
+ struct udpif_key *ukey, *next;
+
+ HMAP_FOR_EACH_SAFE (ukey, next, hmap_node, &revalidator->ukeys) {
+ if (ukey->mark) {
+ ukey->mark = false;
+ } else {
+ ukey_delete(revalidator, ukey);
+ }
+ }
+}