+
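+/* dpif 'wait' for the userspace datapath: registers a wakeup for every rx
+ * queue of the non-pmd ports.  Pmd ports are serviced by the dedicated
+ * polling threads and so need no wakeup here. */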
+static void
+dpif_netdev_wait(struct dpif *dpif)
+{
+    struct dp_netdev_port *port;
+    struct dp_netdev *dp = get_dp_netdev(dpif);
+
+    ovs_rwlock_rdlock(&dp->port_rwlock);
+
+    HMAP_FOR_EACH (port, node, &dp->ports) {
+        if (!netdev_is_pmd(port->netdev)) {
+            int i;
+
+            for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
+                netdev_rxq_wait(port->rxq[i]);
+            }
+        }
+    }
+    ovs_rwlock_unlock(&dp->port_rwlock);
+}
+
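+/* One entry in a pmd thread's poll list: an rx queue and the port that owns
+ * it. */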
+struct rxq_poll {
+    struct dp_netdev_port *port;
+    struct netdev_rxq *rx;
+};
+
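+/* Rebuilds the poll list for pmd thread 'f': drops the references held for
+ * the old entries, then walks the rx queues of all pmd ports and assigns them
+ * round-robin across the pmd threads, referencing the ports whose queues fall
+ * to this thread's 'id'.  Returns the new number of entries in '*ppoll_list'. */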
+static int
+pmd_load_queues(struct pmd_thread *f,
+                struct rxq_poll **ppoll_list, int poll_cnt)
+{
+    struct dp_netdev *dp = f->dp;
+    struct rxq_poll *poll_list = *ppoll_list;
+    struct dp_netdev_port *port;
+    int id = f->id;
+    int index;
+    int i;
+
+    /* Simple scheduler for netdev rx polling. */
+    ovs_rwlock_rdlock(&dp->port_rwlock);
+    for (i = 0; i < poll_cnt; i++) {
+        port_unref(poll_list[i].port);
+    }
+
+    poll_cnt = 0;
+    index = 0;
+
+    HMAP_FOR_EACH (port, node, &f->dp->ports) {
+        if (netdev_is_pmd(port->netdev)) {
+            int i;
+
+            for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
+                if ((index % dp->n_pmd_threads) == id) {
+                    poll_list = xrealloc(poll_list,
+                                         sizeof *poll_list * (poll_cnt + 1));
+
+                    port_ref(port);
+                    poll_list[poll_cnt].port = port;
+                    poll_list[poll_cnt].rx = port->rxq[i];
+                    poll_cnt++;
+                }
+                index++;
+            }
+        }
+    }
+
+    ovs_rwlock_unlock(&dp->port_rwlock);
+    *ppoll_list = poll_list;
+    return poll_cnt;
+}
+