+/* Destroys all of 'dp''s per-handler upcall queues.  Queued packets are
+ * purged first; then each queue's mutex and seq are released along with
+ * the queue array itself.  Requires 'dp->queue_rwlock' held for writing. */
+static void
+dp_netdev_destroy_all_queues(struct dp_netdev *dp)
+    OVS_REQ_WRLOCK(dp->queue_rwlock)
+{
+    size_t i;
+
+    dp_netdev_purge_queues(dp);
+
+    for (i = 0; i < dp->n_handlers; i++) {
+        struct dp_netdev_queue *q = &dp->handler_queues[i];
+
+        ovs_mutex_destroy(&q->mutex);
+        seq_destroy(q->seq);
+    }
+    free(dp->handler_queues);
+    dp->handler_queues = NULL;
+    dp->n_handlers = 0;
+}
+
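+/* For reference: the teardown above only assumes that each per-handler
+ * queue carries its own 'mutex' and 'seq' (the two members destroyed in
+ * the loop).  A minimal sketch of such a struct, with the packet storage
+ * left as an assumption rather than taken from this patch:
+ *
+ *     struct dp_netdev_queue {
+ *         struct ovs_mutex mutex;   // serializes enqueue/dequeue
+ *         struct seq *seq;          // changed when a packet is queued
+ *         // ... ring of queued upcalls, guarded by 'mutex' ...
+ *     };
+ */
+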
+/* Adjusts 'dp' to have 'n_handlers' per-handler upcall queues.  If the
+ * count already matches, this is a no-op; otherwise the old queues (and
+ * any packets queued in them) are destroyed before the new, zeroed array
+ * is allocated.  Requires 'dp->queue_rwlock' held for writing. */
+static void
+dp_netdev_refresh_queues(struct dp_netdev *dp, uint32_t n_handlers)
+    OVS_REQ_WRLOCK(dp->queue_rwlock)
+{
+    if (dp->n_handlers != n_handlers) {
+        size_t i;
+
+        dp_netdev_destroy_all_queues(dp);
+
+        dp->n_handlers = n_handlers;
+        dp->handler_queues = xzalloc(n_handlers * sizeof *dp->handler_queues);
+
+        for (i = 0; i < n_handlers; i++) {
+            struct dp_netdev_queue *q = &dp->handler_queues[i];
+
+            ovs_mutex_init(&q->mutex);
+            q->seq = seq_create();
+        }
+    }
+}
+
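+/* Illustrative caller (a sketch under the same locking assumptions, not
+ * code from this hunk): a dpif_netdev_handlers_set() implementation would
+ * be expected to take the same write lock and resize only while upcall
+ * queuing is enabled, i.e. while 'handler_queues' is nonnull:
+ *
+ *     fat_rwlock_wrlock(&dp->queue_rwlock);
+ *     if (dp->handler_queues) {
+ *         dp_netdev_refresh_queues(dp, n_handlers);
+ *     }
+ *     fat_rwlock_unlock(&dp->queue_rwlock);
+ */
+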
+/* Enables or disables receipt of upcalls for 'dpif'.  Enabling starts with
+ * a single queue; disabling tears all queues down. */
+static int
+dpif_netdev_recv_set(struct dpif *dpif, bool enable)
+{
+    struct dp_netdev *dp = get_dp_netdev(dpif);
+
+    /* A nonnull 'handler_queues' is the definition of "enabled", so there
+     * is nothing to do when the current state already matches 'enable'. */
+    if ((dp->handler_queues != NULL) == enable) {
+        return 0;
+    }
+
+    fat_rwlock_wrlock(&dp->queue_rwlock);
+    if (!enable) {
+        dp_netdev_destroy_all_queues(dp);
+    } else {
+        dp_netdev_refresh_queues(dp, 1);
+    }
+    fat_rwlock_unlock(&dp->queue_rwlock);
+
+    return 0;
+}
+
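+/* Usage sketch: from the client side this is reached through the generic
+ * dpif layer.  Enabling receipt allocates a single queue; a client that
+ * wants one queue per handler thread would presumably follow up with a
+ * handlers-set call.  The sequence below is illustrative, not taken from
+ * this patch:
+ *
+ *     dpif_recv_set(dpif, true);    // -> dpif_netdev_recv_set(.., true)
+ *     dpif_handlers_set(dpif, 4);   // resize to 4 per-handler queues
+ *     ...
+ *     dpif_recv_set(dpif, false);   // tears down all queues
+ */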