+ struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+ int read_tries = 0;
+
+ if (dpif->epoll_fd < 0) {
+ return EAGAIN;
+ }
+
+ if (dpif->event_offset >= dpif->n_events) {
+ int retval;
+
+ dpif->event_offset = dpif->n_events = 0;
+
+ do {
+ retval = epoll_wait(dpif->epoll_fd, dpif->epoll_events,
+ dpif->uc_array_size, 0);
+ } while (retval < 0 && errno == EINTR);
+ if (retval < 0) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+ VLOG_WARN_RL(&rl, "epoll_wait failed (%s)", ovs_strerror(errno));
+ } else if (retval > 0) {
+ dpif->n_events = retval;
+ }
+ }
+
+ while (dpif->event_offset < dpif->n_events) {
+ int idx = dpif->epoll_events[dpif->event_offset].data.u32;
+ struct dpif_channel *ch = &dpif->channels[idx];
+
+ dpif->event_offset++;
+
+ for (;;) {
+ int dp_ifindex;
+ int error;
+
+ if (++read_tries > 50) {
+ return EAGAIN;
+ }
+
+ error = nl_sock_recv(ch->sock, buf, false);
+ if (error == ENOBUFS) {
+ /* ENOBUFS typically means that we've received so many
+ * packets that the buffer overflowed. Try again
+ * immediately because there's almost certainly a packet
+ * waiting for us. */
+ report_loss(dpif_, ch);
+ continue;
+ }
+
+ ch->last_poll = time_msec();
+ if (error) {
+ if (error == EAGAIN) {
+ break;
+ }
+ return error;
+ }
+
+ error = parse_odp_packet(buf, upcall, &dp_ifindex);
+ if (!error && dp_ifindex == dpif->dp_ifindex) {
+ return 0;
+ } else if (error) {
+ return error;
+ }
+ }
+ }
+
+ return EAGAIN;
+}
+
+static int
+dpif_linux_recv(struct dpif *dpif_, struct dpif_upcall *upcall,
+ struct ofpbuf *buf)
+{
+ struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+ int error;
+
+ ovs_mutex_lock(&dpif->upcall_lock);
+ error = dpif_linux_recv__(dpif_, upcall, buf);
+ ovs_mutex_unlock(&dpif->upcall_lock);
+
+ return error;
+}
+
+static void
+dpif_linux_recv_wait(struct dpif *dpif_)
+{
+ struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+
+ ovs_mutex_lock(&dpif->upcall_lock);
+ if (dpif->epoll_fd >= 0) {
+ poll_fd_wait(dpif->epoll_fd, POLLIN);
+ }
+ ovs_mutex_unlock(&dpif->upcall_lock);
+}
+
+static void
+dpif_linux_recv_purge(struct dpif *dpif_)
+{
+ struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+
+ ovs_mutex_lock(&dpif->upcall_lock);
+ if (dpif->epoll_fd >= 0) {
+ struct dpif_channel *ch;
+
+ for (ch = dpif->channels; ch < &dpif->channels[dpif->uc_array_size];
+ ch++) {
+ if (ch->sock) {
+ nl_sock_drain(ch->sock);
+ }
+ }
+ }
+ ovs_mutex_unlock(&dpif->upcall_lock);
+}
+
/* The "system" datapath provider: implements the dpif interface by
 * talking to the Open vSwitch kernel module.  Positional initializer;
 * slot order must match struct dpif_class exactly.  The callback names
 * mirror their slots (e.g. dpif_linux_port_add fills the port_add hook);
 * the two explicit NULLs are the optional run/wait hooks, which this
 * provider does not need. */
const struct dpif_class dpif_linux_class = {
    "system",                        /* Datapath type name. */
    dpif_linux_enumerate,
    NULL,                            /* NOTE(review): undocumented slot --
                                      * confirm against struct dpif_class. */
    dpif_linux_open,
    dpif_linux_close,
    dpif_linux_destroy,
    NULL,                            /* run */
    NULL,                            /* wait */
    dpif_linux_get_stats,
    dpif_linux_port_add,
    dpif_linux_port_del,
    dpif_linux_port_query_by_number,
    dpif_linux_port_query_by_name,
    dpif_linux_get_max_ports,
    dpif_linux_port_get_pid,
    dpif_linux_port_dump_start,
    dpif_linux_port_dump_next,
    dpif_linux_port_dump_done,
    dpif_linux_port_poll,
    dpif_linux_port_poll_wait,
    dpif_linux_flow_get,
    dpif_linux_flow_put,
    dpif_linux_flow_del,
    dpif_linux_flow_flush,
    dpif_linux_flow_dump_start,
    dpif_linux_flow_dump_next,
    dpif_linux_flow_dump_done,
    dpif_linux_execute,
    dpif_linux_operate,
    dpif_linux_recv_set,
    dpif_linux_queue_to_priority,
    dpif_linux_recv,
    dpif_linux_recv_wait,
    dpif_linux_recv_purge,
};
+\f
+static int
+dpif_linux_init(void)
+{
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
+ static int error;
+
+ if (ovsthread_once_start(&once)) {
+ error = nl_lookup_genl_family(OVS_DATAPATH_FAMILY,
+ &ovs_datapath_family);
+ if (error) {
+ VLOG_ERR("Generic Netlink family '%s' does not exist. "
+ "The Open vSwitch kernel module is probably not loaded.",
+ OVS_DATAPATH_FAMILY);
+ }