+ struct dpif_linux_port_state *state = state_;
+ struct dpif_linux_vport vport;
+ int error;
+
+ error = dpif_linux_port_dump_next__(dpif, &state->dump, &vport,
+ &state->buf);
+ if (error) {
+ return error;
+ }
+ dpif_port->name = CONST_CAST(char *, vport.name);
+ dpif_port->type = CONST_CAST(char *, get_vport_type(&vport));
+ dpif_port->port_no = vport.port_no;
+ return 0;
+}
+
+static int
+dpif_linux_port_dump_done(const struct dpif *dpif_ OVS_UNUSED, void *state_)
+{
+ struct dpif_linux_port_state *state = state_;
+ int error = nl_dump_done(&state->dump);
+
+ ofpbuf_uninit(&state->buf);
+ free(state);
+ return error;
+}
+
/* Polls for a vport change notification on 'dpif_'.  On success, stores the
 * name of a changed port in '*devnamep' (which the caller must free) and
 * returns 0.  Returns EAGAIN if no notification is pending, ENOBUFS if
 * notifications may have been lost (the caller should assume every port
 * changed), or another positive errno value on error. */
static int
dpif_linux_port_poll(const struct dpif *dpif_, char **devnamep)
{
    struct dpif_linux *dpif = dpif_linux_cast(dpif_);

    /* Lazily create the Netlink socket to listen for notifications. */
    if (!dpif->port_notifier) {
        struct nl_sock *sock;
        int error;

        error = nl_sock_create(NETLINK_GENERIC, &sock);
        if (error) {
            return error;
        }

        /* Subscribe to vport multicast notifications. */
        error = nl_sock_join_mcgroup(sock, ovs_vport_mcgroup);
        if (error) {
            nl_sock_destroy(sock);
            return error;
        }
        dpif->port_notifier = sock;

        /* We have no idea of the current state so report that everything
         * changed. */
        return ENOBUFS;
    }

    /* Read notifications until one matches this datapath or the socket
     * runs dry. */
    for (;;) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        uint64_t buf_stub[4096 / 8];
        struct ofpbuf buf;
        int error;

        ofpbuf_use_stub(&buf, buf_stub, sizeof buf_stub);
        error = nl_sock_recv(dpif->port_notifier, &buf, false);
        if (!error) {
            struct dpif_linux_vport vport;

            error = dpif_linux_vport_from_ofpbuf(&vport, &buf);
            if (!error) {
                /* Only report changes for our own datapath, and only for
                 * the port lifecycle commands. */
                if (vport.dp_ifindex == dpif->dp_ifindex
                    && (vport.cmd == OVS_VPORT_CMD_NEW
                        || vport.cmd == OVS_VPORT_CMD_DEL
                        || vport.cmd == OVS_VPORT_CMD_SET)) {
                    VLOG_DBG("port_changed: dpif:%s vport:%s cmd:%"PRIu8,
                             dpif->dpif.full_name, vport.name, vport.cmd);
                    /* A deleted port invalidates its upcall channels. */
                    if (vport.cmd == OVS_VPORT_CMD_DEL && dpif->handlers) {
                        dpif->refresh_channels = true;
                    }
                    *devnamep = xstrdup(vport.name);
                    ofpbuf_uninit(&buf);
                    return 0;
                }
            }
        } else if (error != EAGAIN) {
            /* Receive failure other than "would block": drain the socket
             * and report ENOBUFS so the caller rescans all ports. */
            VLOG_WARN_RL(&rl, "error reading or parsing netlink (%s)",
                         ovs_strerror(error));
            nl_sock_drain(dpif->port_notifier);
            error = ENOBUFS;
        }

        ofpbuf_uninit(&buf);
        if (error) {
            return error;
        }
    }
}
+
+static void
+dpif_linux_port_poll_wait(const struct dpif *dpif_)
+{
+ const struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+
+ if (dpif->port_notifier) {
+ nl_sock_wait(dpif->port_notifier, POLLIN);
+ } else {
+ poll_immediate_wake();
+ }
+}
+
+static int
+dpif_linux_flow_get__(const struct dpif_linux *dpif,
+ const struct nlattr *key, size_t key_len,
+ struct dpif_linux_flow *reply, struct ofpbuf **bufp)
+{
+ struct dpif_linux_flow request;
+
+ dpif_linux_flow_init(&request);
+ request.cmd = OVS_FLOW_CMD_GET;
+ request.dp_ifindex = dpif->dp_ifindex;
+ request.key = key;
+ request.key_len = key_len;
+ return dpif_linux_flow_transact(&request, reply, bufp);
+}
+
+static int
+dpif_linux_flow_get(const struct dpif *dpif_,
+ const struct nlattr *key, size_t key_len,
+ struct ofpbuf **actionsp, struct dpif_flow_stats *stats)
+{
+ const struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+ struct dpif_linux_flow reply;
+ struct ofpbuf *buf;
+ int error;
+
+ error = dpif_linux_flow_get__(dpif, key, key_len, &reply, &buf);
+ if (!error) {
+ if (stats) {
+ dpif_linux_flow_get_stats(&reply, stats);
+ }
+ if (actionsp) {
+ ofpbuf_set_data(buf, CONST_CAST(struct nlattr *, reply.actions));
+ ofpbuf_set_size(buf, reply.actions_len);
+ *actionsp = buf;
+ } else {
+ ofpbuf_delete(buf);
+ }
+ }
+ return error;
+}
+
+static void
+dpif_linux_init_flow_put(struct dpif_linux *dpif, const struct dpif_flow_put *put,
+ struct dpif_linux_flow *request)
+{
+ static const struct nlattr dummy_action;
+
+ dpif_linux_flow_init(request);
+ request->cmd = (put->flags & DPIF_FP_CREATE
+ ? OVS_FLOW_CMD_NEW : OVS_FLOW_CMD_SET);
+ request->dp_ifindex = dpif->dp_ifindex;
+ request->key = put->key;
+ request->key_len = put->key_len;
+ request->mask = put->mask;
+ request->mask_len = put->mask_len;
+ /* Ensure that OVS_FLOW_ATTR_ACTIONS will always be included. */
+ request->actions = (put->actions
+ ? put->actions
+ : CONST_CAST(struct nlattr *, &dummy_action));
+ request->actions_len = put->actions_len;
+ if (put->flags & DPIF_FP_ZERO_STATS) {
+ request->clear = true;
+ }
+ request->nlmsg_flags = put->flags & DPIF_FP_MODIFY ? 0 : NLM_F_CREATE;
+}
+
+static int
+dpif_linux_flow_put(struct dpif *dpif_, const struct dpif_flow_put *put)
+{
+ struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+ struct dpif_linux_flow request, reply;
+ struct ofpbuf *buf;
+ int error;
+
+ dpif_linux_init_flow_put(dpif, put, &request);
+ error = dpif_linux_flow_transact(&request,
+ put->stats ? &reply : NULL,
+ put->stats ? &buf : NULL);
+ if (!error && put->stats) {
+ dpif_linux_flow_get_stats(&reply, put->stats);
+ ofpbuf_delete(buf);
+ }
+ return error;
+}
+
+static void
+dpif_linux_init_flow_del(struct dpif_linux *dpif, const struct dpif_flow_del *del,
+ struct dpif_linux_flow *request)
+{
+ dpif_linux_flow_init(request);
+ request->cmd = OVS_FLOW_CMD_DEL;
+ request->dp_ifindex = dpif->dp_ifindex;
+ request->key = del->key;
+ request->key_len = del->key_len;
+}
+
+static int
+dpif_linux_flow_del(struct dpif *dpif_, const struct dpif_flow_del *del)
+{
+ struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+ struct dpif_linux_flow request, reply;
+ struct ofpbuf *buf;
+ int error;
+
+ dpif_linux_init_flow_del(dpif, del, &request);
+ error = dpif_linux_flow_transact(&request,
+ del->stats ? &reply : NULL,
+ del->stats ? &buf : NULL);
+ if (!error && del->stats) {
+ dpif_linux_flow_get_stats(&reply, del->stats);
+ ofpbuf_delete(buf);
+ }
+ return error;
+}
+
/* Per-thread state for a flow dump: the most recently parsed flow plus the
 * buffers that back its attribute pointers. */
struct dpif_linux_flow_state {
    struct dpif_linux_flow flow;   /* Most recently dumped flow; its key,
                                    * mask, and actions point into the
                                    * buffers below. */
    struct dpif_flow_stats stats;  /* Scratch space for translated stats. */
    struct ofpbuf buffer;          /* Always used to store flows. */
    struct ofpbuf *tmp;            /* Used if kernel does not supply actions. */
};

/* Shared iterator for a flow dump. */
struct dpif_linux_flow_iter {
    struct nl_dump dump;           /* Underlying Netlink dump. */
    atomic_int status;             /* First parse error seen, or 0. */
};
+
+static void
+dpif_linux_flow_dump_state_init(void **statep)
+{
+ struct dpif_linux_flow_state *state;
+
+ *statep = state = xmalloc(sizeof *state);
+ ofpbuf_init(&state->buffer, NL_DUMP_BUFSIZE);
+ state->tmp = NULL;
+}
+
+static void
+dpif_linux_flow_dump_state_uninit(void *state_)
+{
+ struct dpif_linux_flow_state *state = state_;
+
+ ofpbuf_uninit(&state->buffer);
+ ofpbuf_delete(state->tmp);
+ free(state);
+}
+
+static int
+dpif_linux_flow_dump_start(const struct dpif *dpif_, void **iterp)
+{
+ const struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+ struct dpif_linux_flow_iter *iter;
+ struct dpif_linux_flow request;
+ struct ofpbuf *buf;
+
+ *iterp = iter = xmalloc(sizeof *iter);
+
+ dpif_linux_flow_init(&request);
+ request.cmd = OVS_FLOW_CMD_GET;
+ request.dp_ifindex = dpif->dp_ifindex;
+
+ buf = ofpbuf_new(1024);
+ dpif_linux_flow_to_ofpbuf(&request, buf);
+ nl_dump_start(&iter->dump, NETLINK_GENERIC, buf);
+ ofpbuf_delete(buf);
+ atomic_init(&iter->status, 0);
+
+ return 0;
+}
+
/* Fetches the next flow from the dump in 'iter_', parsing it into 'state_'
 * and exposing its pieces through the optional output parameters.  Returns
 * 0 on success, EOF when the dump is exhausted, or a positive errno value
 * on a parse error.  The returned pointers reference 'state_' and remain
 * valid only until the next call on the same state. */
static int
dpif_linux_flow_dump_next(const struct dpif *dpif_, void *iter_, void *state_,
                          const struct nlattr **key, size_t *key_len,
                          const struct nlattr **mask, size_t *mask_len,
                          const struct nlattr **actions, size_t *actions_len,
                          const struct dpif_flow_stats **stats)
{
    const struct dpif_linux *dpif = dpif_linux_cast(dpif_);
    struct dpif_linux_flow_iter *iter = iter_;
    struct dpif_linux_flow_state *state = state_;
    struct ofpbuf buf;
    int error;

    do {
        /* Drop any buffer left over from a previous flow_get__ fallback. */
        ofpbuf_delete(state->tmp);
        state->tmp = NULL;

        if (!nl_dump_next(&iter->dump, &buf, &state->buffer)) {
            return EOF;
        }

        error = dpif_linux_flow_from_ofpbuf(&state->flow, &buf);
        if (error) {
            /* Record the first error so flow_dump_done() can report it. */
            atomic_store(&iter->status, error);
            return error;
        }

        /* Some kernels omit actions from dump replies; fetch them with an
         * explicit get.  On failure, loop and try the next dumped flow. */
        if (actions && !state->flow.actions) {
            error = dpif_linux_flow_get__(dpif, state->flow.key,
                                          state->flow.key_len,
                                          &state->flow, &state->tmp);
            if (error == ENOENT) {
                VLOG_DBG("dumped flow disappeared on get");
            } else if (error) {
                VLOG_WARN("error fetching dumped flow: %s",
                          ovs_strerror(error));
            }
        }
    } while (error);

    if (actions) {
        *actions = state->flow.actions;
        *actions_len = state->flow.actions_len;
    }
    if (key) {
        *key = state->flow.key;
        *key_len = state->flow.key_len;
    }
    if (mask) {
        *mask = state->flow.mask;
        *mask_len = state->flow.mask ? state->flow.mask_len : 0;
    }
    if (stats) {
        dpif_linux_flow_get_stats(&state->flow, &state->stats);
        *stats = &state->stats;
    }
    /* 'error' is necessarily 0 here. */
    return error;
}
+
+static bool
+dpif_linux_flow_dump_next_may_destroy_keys(void *state_)
+{
+ struct dpif_linux_flow_state *state = state_;
+
+ return ofpbuf_size(&state->buffer) ? false : true;
+}
+
+static int
+dpif_linux_flow_dump_done(const struct dpif *dpif OVS_UNUSED, void *iter_)
+{
+ struct dpif_linux_flow_iter *iter = iter_;
+ int dump_status;
+ unsigned int nl_status = nl_dump_done(&iter->dump);
+
+ atomic_read(&iter->status, &dump_status);
+ free(iter);
+ return dump_status ? dump_status : nl_status;
+}
+
/* Appends to 'buf' a Generic Netlink OVS_PACKET_CMD_EXECUTE request for
 * datapath 'dp_ifindex' that carries the packet, metadata key, and actions
 * described by 'd_exec'. */
static void
dpif_linux_encode_execute(int dp_ifindex, const struct dpif_execute *d_exec,
                          struct ofpbuf *buf)
{
    struct ovs_header *k_exec;
    size_t key_ofs;

    /* Reserve room up front: headers (est. 64 bytes) + packet + metadata
     * key + actions. */
    ofpbuf_prealloc_tailroom(buf, (64
                                   + ofpbuf_size(d_exec->packet)
                                   + ODP_KEY_METADATA_SIZE
                                   + d_exec->actions_len));

    nl_msg_put_genlmsghdr(buf, 0, ovs_packet_family, NLM_F_REQUEST,
                          OVS_PACKET_CMD_EXECUTE, OVS_PACKET_VERSION);

    k_exec = ofpbuf_put_uninit(buf, sizeof *k_exec);
    k_exec->dp_ifindex = dp_ifindex;

    nl_msg_put_unspec(buf, OVS_PACKET_ATTR_PACKET,
                      ofpbuf_data(d_exec->packet),
                      ofpbuf_size(d_exec->packet));

    /* Packet metadata goes in a nested OVS_PACKET_ATTR_KEY attribute. */
    key_ofs = nl_msg_start_nested(buf, OVS_PACKET_ATTR_KEY);
    odp_key_from_pkt_metadata(buf, &d_exec->md);
    nl_msg_end_nested(buf, key_ofs);

    nl_msg_put_unspec(buf, OVS_PACKET_ATTR_ACTIONS,
                      d_exec->actions, d_exec->actions_len);
}
+
+static int
+dpif_linux_execute__(int dp_ifindex, const struct dpif_execute *execute)
+{
+ uint64_t request_stub[1024 / 8];
+ struct ofpbuf request;
+ int error;
+
+ ofpbuf_use_stub(&request, request_stub, sizeof request_stub);
+ dpif_linux_encode_execute(dp_ifindex, execute, &request);
+ error = nl_transact(NETLINK_GENERIC, &request, NULL);
+ ofpbuf_uninit(&request);
+
+ return error;
+}
+
+static int
+dpif_linux_execute(struct dpif *dpif_, struct dpif_execute *execute)
+{
+ const struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+
+ return dpif_linux_execute__(dpif->dp_ifindex, execute);
+}
+
/* Maximum number of operations per batch in dpif_linux_operate__(). */
#define MAX_OPS 50

/* Executes the 'n_ops' operations in 'ops' (at most MAX_OPS) against 'dpif'
 * as one multi-message Netlink transaction.  Each op's outcome is stored in
 * op->error; for flow put/del ops that requested stats, the stats are
 * filled in from the kernel's echoed reply (or zeroed on error). */
static void
dpif_linux_operate__(struct dpif_linux *dpif, struct dpif_op **ops, size_t n_ops)
{

    /* Per-op scratch: the transaction plus stub-backed request/reply
     * buffers so small ops avoid heap allocation. */
    struct op_auxdata {
        struct nl_transaction txn;

        struct ofpbuf request;
        uint64_t request_stub[1024 / 8];

        struct ofpbuf reply;
        uint64_t reply_stub[1024 / 8];
    } auxes[MAX_OPS];

    struct nl_transaction *txnsp[MAX_OPS];
    size_t i;

    ovs_assert(n_ops <= MAX_OPS);

    /* Pass 1: encode each op as a Netlink request. */
    for (i = 0; i < n_ops; i++) {
        struct op_auxdata *aux = &auxes[i];
        struct dpif_op *op = ops[i];
        struct dpif_flow_put *put;
        struct dpif_flow_del *del;
        struct dpif_execute *execute;
        struct dpif_linux_flow flow;

        ofpbuf_use_stub(&aux->request,
                        aux->request_stub, sizeof aux->request_stub);
        aux->txn.request = &aux->request;

        ofpbuf_use_stub(&aux->reply, aux->reply_stub, sizeof aux->reply_stub);
        aux->txn.reply = NULL;

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            put = &op->u.flow_put;
            dpif_linux_init_flow_put(dpif, put, &flow);
            if (put->stats) {
                /* NLM_F_ECHO makes the kernel send back the flow, from
                 * which stats are extracted in pass 2. */
                flow.nlmsg_flags |= NLM_F_ECHO;
                aux->txn.reply = &aux->reply;
            }
            dpif_linux_flow_to_ofpbuf(&flow, &aux->request);
            break;

        case DPIF_OP_FLOW_DEL:
            del = &op->u.flow_del;
            dpif_linux_init_flow_del(dpif, del, &flow);
            if (del->stats) {
                flow.nlmsg_flags |= NLM_F_ECHO;
                aux->txn.reply = &aux->reply;
            }
            dpif_linux_flow_to_ofpbuf(&flow, &aux->request);
            break;

        case DPIF_OP_EXECUTE:
            execute = &op->u.execute;
            dpif_linux_encode_execute(dpif->dp_ifindex, execute,
                                      &aux->request);
            break;

        default:
            OVS_NOT_REACHED();
        }
    }

    /* Run all transactions in one batch. */
    for (i = 0; i < n_ops; i++) {
        txnsp[i] = &auxes[i].txn;
    }
    nl_transact_multiple(NETLINK_GENERIC, txnsp, n_ops);

    /* Pass 2: decode replies, fill in stats, and release buffers. */
    for (i = 0; i < n_ops; i++) {
        struct op_auxdata *aux = &auxes[i];
        struct nl_transaction *txn = &auxes[i].txn;
        struct dpif_op *op = ops[i];
        struct dpif_flow_put *put;
        struct dpif_flow_del *del;

        op->error = txn->error;

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            put = &op->u.flow_put;
            if (put->stats) {
                if (!op->error) {
                    struct dpif_linux_flow reply;

                    op->error = dpif_linux_flow_from_ofpbuf(&reply,
                                                            txn->reply);
                    if (!op->error) {
                        dpif_linux_flow_get_stats(&reply, put->stats);
                    }
                }

                /* Never leave requested stats uninitialized. */
                if (op->error) {
                    memset(put->stats, 0, sizeof *put->stats);
                }
            }
            break;

        case DPIF_OP_FLOW_DEL:
            del = &op->u.flow_del;
            if (del->stats) {
                if (!op->error) {
                    struct dpif_linux_flow reply;

                    op->error = dpif_linux_flow_from_ofpbuf(&reply,
                                                            txn->reply);
                    if (!op->error) {
                        dpif_linux_flow_get_stats(&reply, del->stats);
                    }
                }

                if (op->error) {
                    memset(del->stats, 0, sizeof *del->stats);
                }
            }
            break;

        case DPIF_OP_EXECUTE:
            break;

        default:
            OVS_NOT_REACHED();
        }

        ofpbuf_uninit(&aux->request);
        ofpbuf_uninit(&aux->reply);
    }
}
+
+static void
+dpif_linux_operate(struct dpif *dpif_, struct dpif_op **ops, size_t n_ops)
+{
+ struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+
+ while (n_ops > 0) {
+ size_t chunk = MIN(n_ops, MAX_OPS);
+ dpif_linux_operate__(dpif, ops, chunk);
+ ops += chunk;
+ n_ops -= chunk;
+ }
+}
+
/* Synchronizes 'channels' in 'dpif->handlers' with the set of vports
 * currently in 'dpif' in the kernel, by adding a new set of channels for
 * any kernel vport that lacks one and deleting any channels that have no
 * backing kernel vports.
 *
 * Also (re)creates the 'n_handlers' handler epoll sets when the handler
 * count changes, and updates each vport's upcall PIDs in the kernel when
 * they do not match.  Returns 0 on success, otherwise the last positive
 * errno value encountered. */
static int
dpif_linux_refresh_channels(struct dpif_linux *dpif, uint32_t n_handlers)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    unsigned long int *keep_channels;
    struct dpif_linux_vport vport;
    size_t keep_channels_nbits;
    struct nl_dump dump;
    uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
    struct ofpbuf buf;
    int retval = 0;
    size_t i;

    /* Handler count changed: tear everything down and build 'n_handlers'
     * fresh handlers, each with its own epoll set. */
    if (dpif->n_handlers != n_handlers) {
        destroy_all_channels(dpif);
        dpif->handlers = xzalloc(n_handlers * sizeof *dpif->handlers);
        for (i = 0; i < n_handlers; i++) {
            struct dpif_handler *handler = &dpif->handlers[i];

            handler->epoll_fd = epoll_create(10);
            if (handler->epoll_fd < 0) {
                size_t j;

                /* Roll back the epoll fds created so far. */
                for (j = 0; j < i; j++) {
                    close(dpif->handlers[j].epoll_fd);
                }
                free(dpif->handlers);
                dpif->handlers = NULL;

                return errno;
            }
        }
        dpif->n_handlers = n_handlers;
    }

    /* Discard any queued events; channels may be about to change. */
    for (i = 0; i < n_handlers; i++) {
        struct dpif_handler *handler = &dpif->handlers[i];

        handler->event_offset = handler->n_events = 0;
    }

    /* Bitmap of existing channels still backed by a kernel vport. */
    keep_channels_nbits = dpif->uc_array_size;
    keep_channels = bitmap_allocate(keep_channels_nbits);

    ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
    dpif_linux_port_dump_start__(dpif, &dump);
    while (!dpif_linux_port_dump_next__(dpif, &dump, &vport, &buf)) {
        uint32_t port_no = odp_to_u32(vport.port_no);
        uint32_t *upcall_pids = NULL;
        int error;

        /* Vport has no channels yet: create one socket per handler. */
        if (port_no >= dpif->uc_array_size
            || !vport_get_pids(dpif, port_no, &upcall_pids)) {
            struct nl_sock **socksp = vport_create_socksp(dpif->n_handlers,
                                                          &error);

            if (!socksp) {
                goto error;
            }

            error = vport_add_channels(dpif, vport.port_no, socksp);
            if (error) {
                VLOG_INFO("%s: could not add channels for port %s",
                          dpif_name(&dpif->dpif), vport.name);
                vport_del_socksp(socksp, dpif->n_handlers);
                retval = error;
                goto error;
            }
            upcall_pids = vport_socksp_to_pids(socksp, dpif->n_handlers);
            free(socksp);
        }

        /* Configure the vport to deliver misses to 'sock'. */
        if (vport.upcall_pids[0] == 0
            || vport.n_upcall_pids != dpif->n_handlers
            || memcmp(upcall_pids, vport.upcall_pids, n_handlers * sizeof
                      *upcall_pids)) {
            struct dpif_linux_vport vport_request;

            dpif_linux_vport_init(&vport_request);
            vport_request.cmd = OVS_VPORT_CMD_SET;
            vport_request.dp_ifindex = dpif->dp_ifindex;
            vport_request.port_no = vport.port_no;
            vport_request.n_upcall_pids = dpif->n_handlers;
            vport_request.upcall_pids = upcall_pids;
            error = dpif_linux_vport_transact(&vport_request, NULL, NULL);
            if (error) {
                VLOG_WARN_RL(&error_rl,
                             "%s: failed to set upcall pid on port: %s",
                             dpif_name(&dpif->dpif), ovs_strerror(error));

                if (error != ENODEV && error != ENOENT) {
                    retval = error;
                } else {
                    /* The vport isn't really there, even though the dump says
                     * it is.  Probably we just hit a race after a port
                     * disappeared. */
                }
                goto error;
            }
        }

        /* This port's channels are still valid. */
        if (port_no < keep_channels_nbits) {
            bitmap_set1(keep_channels, port_no);
        }
        free(upcall_pids);
        continue;

    error:
        /* Per-port failure: drop this port's channels and keep dumping. */
        free(upcall_pids);
        vport_del_channels(dpif, vport.port_no);
    }
    nl_dump_done(&dump);
    ofpbuf_uninit(&buf);

    /* Discard any saved channels that we didn't reuse. */
    for (i = 0; i < keep_channels_nbits; i++) {
        if (!bitmap_is_set(keep_channels, i)) {
            vport_del_channels(dpif, u32_to_odp(i));
        }
    }
    free(keep_channels);

    return retval;
}
+
+static int
+dpif_linux_recv_set__(struct dpif_linux *dpif, bool enable)
+ OVS_REQ_WRLOCK(dpif->upcall_lock)
+{
+ if ((dpif->handlers != NULL) == enable) {
+ return 0;
+ } else if (!enable) {
+ destroy_all_channels(dpif);
+ return 0;
+ } else {
+ return dpif_linux_refresh_channels(dpif, 1);
+ }
+}
+
+static int
+dpif_linux_recv_set(struct dpif *dpif_, bool enable)
+{
+ struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+ int error;
+
+ fat_rwlock_wrlock(&dpif->upcall_lock);
+ error = dpif_linux_recv_set__(dpif, enable);
+ fat_rwlock_unlock(&dpif->upcall_lock);
+
+ return error;
+}
+
+static int
+dpif_linux_handlers_set(struct dpif *dpif_, uint32_t n_handlers)
+{
+ struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+ int error = 0;
+
+ fat_rwlock_wrlock(&dpif->upcall_lock);
+ if (dpif->handlers) {
+ error = dpif_linux_refresh_channels(dpif, n_handlers);
+ }
+ fat_rwlock_unlock(&dpif->upcall_lock);
+
+ return error;
+}
+
+static int
+dpif_linux_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
+ uint32_t queue_id, uint32_t *priority)
+{
+ if (queue_id < 0xf000) {
+ *priority = TC_H_MAKE(1 << 16, queue_id + 1);
+ return 0;
+ } else {
+ return EINVAL;
+ }
+}
+
/* Parses the OVS_PACKET_CMD_MISS/ACTION Netlink message in 'buf' into
 * '*upcall' and stores the originating datapath's ifindex in '*dp_ifindex'.
 * Returns 0 on success, EINVAL if the message is malformed or not an
 * ovs_packet message.  On success, '*upcall' points into 'buf', so 'buf'
 * must outlive it. */
static int
parse_odp_packet(struct ofpbuf *buf, struct dpif_upcall *upcall,
                 int *dp_ifindex)
{
    static const struct nl_policy ovs_packet_policy[] = {
        /* Always present. */
        [OVS_PACKET_ATTR_PACKET] = { .type = NL_A_UNSPEC,
                                     .min_len = ETH_HEADER_LEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NL_A_NESTED },

        /* OVS_PACKET_CMD_ACTION only. */
        [OVS_PACKET_ATTR_USERDATA] = { .type = NL_A_UNSPEC, .optional = true },
    };

    struct ovs_header *ovs_header;
    struct nlattr *a[ARRAY_SIZE(ovs_packet_policy)];
    struct nlmsghdr *nlmsg;
    struct genlmsghdr *genl;
    struct ofpbuf b;
    int type;

    /* Walk a read-only view of 'buf' so 'buf' itself is untouched. */
    ofpbuf_use_const(&b, ofpbuf_data(buf), ofpbuf_size(buf));

    /* Peel off the netlink, genetlink, and OVS headers, then validate the
     * attributes against the policy. */
    nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    genl = ofpbuf_try_pull(&b, sizeof *genl);
    ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_packet_family
        || !nl_policy_parse(&b, 0, ovs_packet_policy, a,
                            ARRAY_SIZE(ovs_packet_policy))) {
        return EINVAL;
    }

    type = (genl->cmd == OVS_PACKET_CMD_MISS ? DPIF_UC_MISS
            : genl->cmd == OVS_PACKET_CMD_ACTION ? DPIF_UC_ACTION
            : -1);
    if (type < 0) {
        return EINVAL;
    }

    /* (Re)set ALL fields of '*upcall' on successful return. */
    upcall->type = type;
    upcall->key = CONST_CAST(struct nlattr *,
                             nl_attr_get(a[OVS_PACKET_ATTR_KEY]));
    upcall->key_len = nl_attr_get_size(a[OVS_PACKET_ATTR_KEY]);
    upcall->userdata = a[OVS_PACKET_ATTR_USERDATA];

    /* Allow overwriting the netlink attribute header without reallocating. */
    ofpbuf_use_stub(&upcall->packet,
                    CONST_CAST(struct nlattr *,
                               nl_attr_get(a[OVS_PACKET_ATTR_PACKET])) - 1,
                    nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]) +
                    sizeof(struct nlattr));
    ofpbuf_set_data(&upcall->packet,
                    (char *)ofpbuf_data(&upcall->packet) + sizeof(struct nlattr));
    ofpbuf_set_size(&upcall->packet, nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]));

    *dp_ifindex = ovs_header->dp_ifindex;

    return 0;
}
+
/* Receives one upcall for handler 'handler_id' into '*upcall' (backed by
 * 'buf').  Returns 0 on success, EAGAIN if no upcall is currently ready
 * (or after 50 consecutive reads, to bound latency), or another positive
 * errno value on error. */
static int
dpif_linux_recv__(struct dpif_linux *dpif, uint32_t handler_id,
                  struct dpif_upcall *upcall, struct ofpbuf *buf)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    struct dpif_handler *handler;
    int read_tries = 0;

    if (!dpif->handlers || handler_id >= dpif->n_handlers) {
        return EAGAIN;
    }

    handler = &dpif->handlers[handler_id];
    if (handler->event_offset >= handler->n_events) {
        int retval;

        handler->event_offset = handler->n_events = 0;

        /* Non-blocking poll (timeout 0) for ready channel sockets,
         * retrying on EINTR. */
        do {
            retval = epoll_wait(handler->epoll_fd, handler->epoll_events,
                                dpif->uc_array_size, 0);
        } while (retval < 0 && errno == EINTR);
        if (retval < 0) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
            VLOG_WARN_RL(&rl, "epoll_wait failed (%s)", ovs_strerror(errno));
        } else if (retval > 0) {
            handler->n_events = retval;
        }
    }

    while (handler->event_offset < handler->n_events) {
        /* epoll data.u32 carries the channel index for this event. */
        int idx = handler->epoll_events[handler->event_offset].data.u32;
        struct dpif_channel *ch = &dpif->handlers[handler_id].channels[idx];

        handler->event_offset++;

        for (;;) {
            int dp_ifindex;
            int error;

            /* Cap total reads per call so one busy call cannot starve the
             * caller indefinitely. */
            if (++read_tries > 50) {
                return EAGAIN;
            }

            error = nl_sock_recv(ch->sock, buf, false);
            if (error == ENOBUFS) {
                /* ENOBUFS typically means that we've received so many
                 * packets that the buffer overflowed.  Try again
                 * immediately because there's almost certainly a packet
                 * waiting for us. */
                report_loss(dpif, ch, idx, handler_id);
                continue;
            }

            ch->last_poll = time_msec();
            if (error) {
                if (error == EAGAIN) {
                    /* This channel is drained; move to the next event. */
                    break;
                }
                return error;
            }

            error = parse_odp_packet(buf, upcall, &dp_ifindex);
            if (!error && dp_ifindex == dpif->dp_ifindex) {
                return 0;
            } else if (error) {
                return error;
            }
            /* Parsed OK but for another datapath: keep reading. */
        }
    }

    return EAGAIN;
}
+
+static int
+dpif_linux_recv(struct dpif *dpif_, uint32_t handler_id,
+ struct dpif_upcall *upcall, struct ofpbuf *buf)
+{
+ struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+ int error;
+
+ fat_rwlock_rdlock(&dpif->upcall_lock);
+ error = dpif_linux_recv__(dpif, handler_id, upcall, buf);
+ fat_rwlock_unlock(&dpif->upcall_lock);
+
+ return error;
+}
+
+static void
+dpif_linux_recv_wait__(struct dpif_linux *dpif, uint32_t handler_id)
+ OVS_REQ_RDLOCK(dpif->upcall_lock)
+{
+ if (dpif->handlers && handler_id < dpif->n_handlers) {
+ struct dpif_handler *handler = &dpif->handlers[handler_id];
+
+ poll_fd_wait(handler->epoll_fd, POLLIN);
+ }
+}
+
+static void
+dpif_linux_recv_wait(struct dpif *dpif_, uint32_t handler_id)
+{
+ struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+
+ fat_rwlock_rdlock(&dpif->upcall_lock);
+ dpif_linux_recv_wait__(dpif, handler_id);
+ fat_rwlock_unlock(&dpif->upcall_lock);
+}
+
+static void
+dpif_linux_recv_purge__(struct dpif_linux *dpif)
+ OVS_REQ_WRLOCK(dpif->upcall_lock)
+{
+ if (dpif->handlers) {
+ size_t i, j;
+
+ for (i = 0; i < dpif->uc_array_size; i++ ) {
+ if (!dpif->handlers[0].channels[i].sock) {
+ continue;
+ }
+
+ for (j = 0; j < dpif->n_handlers; j++) {
+ nl_sock_drain(dpif->handlers[j].channels[i].sock);
+ }
+ }
+ }
+}
+
+static void
+dpif_linux_recv_purge(struct dpif *dpif_)
+{
+ struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+
+ fat_rwlock_wrlock(&dpif->upcall_lock);
+ dpif_linux_recv_purge__(dpif);
+ fat_rwlock_unlock(&dpif->upcall_lock);