+ struct dpif_linux_flow reply;
+ struct ofpbuf *buf;
+ int error;
+
+ error = dpif_linux_flow_get__(dpif_, key, key_len, &reply, &buf);
+ if (!error) {
+ if (stats) {
+ dpif_linux_flow_get_stats(&reply, stats);
+ }
+ if (actionsp) {
+ buf->data = CONST_CAST(struct nlattr *, reply.actions);
+ buf->size = reply.actions_len;
+ *actionsp = buf;
+ } else {
+ ofpbuf_delete(buf);
+ }
+ }
+ return error;
+}
+
+static void
+dpif_linux_init_flow_put(struct dpif *dpif_, const struct dpif_flow_put *put,
+ struct dpif_linux_flow *request)
+{
+ static const struct nlattr dummy_action;
+
+ const struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+
+ dpif_linux_flow_init(request);
+ request->cmd = (put->flags & DPIF_FP_CREATE
+ ? OVS_FLOW_CMD_NEW : OVS_FLOW_CMD_SET);
+ request->dp_ifindex = dpif->dp_ifindex;
+ request->key = put->key;
+ request->key_len = put->key_len;
+ request->mask = put->mask;
+ request->mask_len = put->mask_len;
+ /* Ensure that OVS_FLOW_ATTR_ACTIONS will always be included. */
+ request->actions = (put->actions
+ ? put->actions
+ : CONST_CAST(struct nlattr *, &dummy_action));
+ request->actions_len = put->actions_len;
+ if (put->flags & DPIF_FP_ZERO_STATS) {
+ request->clear = true;
+ }
+ request->nlmsg_flags = put->flags & DPIF_FP_MODIFY ? 0 : NLM_F_CREATE;
+}
+
+static int
+dpif_linux_flow_put(struct dpif *dpif_, const struct dpif_flow_put *put)
+{
+ struct dpif_linux_flow request, reply;
+ struct ofpbuf *buf;
+ int error;
+
+ dpif_linux_init_flow_put(dpif_, put, &request);
+ error = dpif_linux_flow_transact(&request,
+ put->stats ? &reply : NULL,
+ put->stats ? &buf : NULL);
+ if (!error && put->stats) {
+ dpif_linux_flow_get_stats(&reply, put->stats);
+ ofpbuf_delete(buf);
+ }
+ return error;
+}
+
+static void
+dpif_linux_init_flow_del(struct dpif *dpif_, const struct dpif_flow_del *del,
+ struct dpif_linux_flow *request)
+{
+ const struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+
+ dpif_linux_flow_init(request);
+ request->cmd = OVS_FLOW_CMD_DEL;
+ request->dp_ifindex = dpif->dp_ifindex;
+ request->key = del->key;
+ request->key_len = del->key_len;
+}
+
+static int
+dpif_linux_flow_del(struct dpif *dpif_, const struct dpif_flow_del *del)
+{
+ struct dpif_linux_flow request, reply;
+ struct ofpbuf *buf;
+ int error;
+
+ dpif_linux_init_flow_del(dpif_, del, &request);
+ error = dpif_linux_flow_transact(&request,
+ del->stats ? &reply : NULL,
+ del->stats ? &buf : NULL);
+ if (!error && del->stats) {
+ dpif_linux_flow_get_stats(&reply, del->stats);
+ ofpbuf_delete(buf);
+ }
+ return error;
+}
+
/* Per-thread state carried across dpif_linux_flow_dump_next() calls. */
struct dpif_linux_flow_state {
    struct dpif_linux_flow flow;  /* Most recently parsed flow. */
    struct dpif_flow_stats stats; /* Stats for 'flow', filled on request. */
    struct ofpbuf buffer;         /* Always used to store flows. */
    struct ofpbuf *tmp;           /* Used if kernel does not supply actions. */
};
+
/* Iterator shared by a whole flow dump.  'status' records the first error
 * hit during iteration so dpif_linux_flow_dump_done() can report it. */
struct dpif_linux_flow_iter {
    struct nl_dump dump;    /* Underlying Netlink dump session. */
    atomic_int status;      /* 0, or the first positive errno encountered. */
};
+
+static void
+dpif_linux_flow_dump_state_init(void **statep)
+{
+ struct dpif_linux_flow_state *state;
+
+ *statep = state = xmalloc(sizeof *state);
+ ofpbuf_init(&state->buffer, NL_DUMP_BUFSIZE);
+ state->tmp = NULL;
+}
+
+static void
+dpif_linux_flow_dump_state_uninit(void *state_)
+{
+ struct dpif_linux_flow_state *state = state_;
+
+ ofpbuf_uninit(&state->buffer);
+ ofpbuf_delete(state->tmp);
+ free(state);
+}
+
/* Starts a dump of all flows in 'dpif_', storing the shared iterator in
 * '*iterp'.  Always returns 0; errors surface later, from
 * dpif_linux_flow_dump_next() or dpif_linux_flow_dump_done(). */
static int
dpif_linux_flow_dump_start(const struct dpif *dpif_, void **iterp)
{
    const struct dpif_linux *dpif = dpif_linux_cast(dpif_);
    struct dpif_linux_flow_iter *iter;
    struct dpif_linux_flow request;
    struct ofpbuf *buf;

    *iterp = iter = xmalloc(sizeof *iter);

    /* An OVS_FLOW_CMD_GET request with no key dumps every flow in the
     * datapath. */
    dpif_linux_flow_init(&request);
    request.cmd = OVS_FLOW_CMD_GET;
    request.dp_ifindex = dpif->dp_ifindex;

    buf = ofpbuf_new(1024);
    dpif_linux_flow_to_ofpbuf(&request, buf);
    /* NOTE(review): 'buf' is freed immediately after nl_dump_start(), which
     * implies nl_dump_start() copies or consumes the request -- confirm
     * against its contract. */
    nl_dump_start(&iter->dump, NETLINK_GENERIC, buf);
    ofpbuf_delete(buf);
    atomic_init(&iter->status, 0);

    return 0;
}
+
/* Advances a flow dump: parses the next flow into 'state' and exposes its
 * key, mask, actions, and stats through the non-NULL output parameters.
 * Returns 0 on success, EOF when the dump is exhausted, or a positive errno
 * on a parse error.  Output pointers reference memory owned by 'state' and
 * remain valid only until the next call. */
static int
dpif_linux_flow_dump_next(const struct dpif *dpif_, void *iter_, void *state_,
                          const struct nlattr **key, size_t *key_len,
                          const struct nlattr **mask, size_t *mask_len,
                          const struct nlattr **actions, size_t *actions_len,
                          const struct dpif_flow_stats **stats)
{
    struct dpif_linux_flow_iter *iter = iter_;
    struct dpif_linux_flow_state *state = state_;
    struct ofpbuf buf;
    int error;

    do {
        /* Discard actions fetched separately for the previous flow. */
        ofpbuf_delete(state->tmp);
        state->tmp = NULL;

        if (!nl_dump_next(&iter->dump, &buf, &state->buffer)) {
            return EOF;
        }

        error = dpif_linux_flow_from_ofpbuf(&state->flow, &buf);
        if (error) {
            /* Record the first error so dpif_linux_flow_dump_done() can
             * report it even if another thread finishes the dump. */
            atomic_store(&iter->status, error);
            return error;
        }

        if (actions && !state->flow.actions) {
            /* The kernel did not include actions in the dump reply; fetch
             * them with a separate request.  The reply buffer is kept in
             * 'state->tmp' so the actions stay live until the next call. */
            error = dpif_linux_flow_get__(dpif_, state->flow.key,
                                          state->flow.key_len,
                                          &state->flow, &state->tmp);
            if (error == ENOENT) {
                /* Flow vanished between dump and get (benign race); loop
                 * around to the next flow. */
                VLOG_DBG("dumped flow disappeared on get");
            } else if (error) {
                VLOG_WARN("error fetching dumped flow: %s",
                          ovs_strerror(error));
            }
        }
    } while (error);

    if (actions) {
        *actions = state->flow.actions;
        *actions_len = state->flow.actions_len;
    }
    if (key) {
        *key = state->flow.key;
        *key_len = state->flow.key_len;
    }
    if (mask) {
        /* A flow without a mask reports zero mask length. */
        *mask = state->flow.mask;
        *mask_len = state->flow.mask ? state->flow.mask_len : 0;
    }
    if (stats) {
        dpif_linux_flow_get_stats(&state->flow, &state->stats);
        *stats = &state->stats;
    }
    return error;
}
+
+static bool
+dpif_linux_flow_dump_next_may_destroy_keys(void *state_)
+{
+ struct dpif_linux_flow_state *state = state_;
+
+ return state->buffer.size ? false : true;
+}
+
+static int
+dpif_linux_flow_dump_done(const struct dpif *dpif OVS_UNUSED, void *iter_)
+{
+ struct dpif_linux_flow_iter *iter = iter_;
+ int dump_status;
+ unsigned int nl_status = nl_dump_done(&iter->dump);
+
+ atomic_read(&iter->status, &dump_status);
+ atomic_destroy(&iter->status);
+ free(iter);
+ return dump_status ? dump_status : nl_status;
+}
+
/* Serializes 'd_exec' into 'buf' as an OVS_PACKET_CMD_EXECUTE Generic
 * Netlink request for datapath 'dp_ifindex'.  The attribute order below is
 * the wire format the kernel expects: packet, metadata key, then actions. */
static void
dpif_linux_encode_execute(int dp_ifindex, const struct dpif_execute *d_exec,
                          struct ofpbuf *buf)
{
    struct ovs_header *k_exec;
    size_t key_ofs;

    /* Reserve everything up front: 64 bytes covers the Netlink and Generic
     * Netlink headers plus attribute framing, followed by the packet, its
     * metadata key, and the action list. */
    ofpbuf_prealloc_tailroom(buf, (64
                                   + d_exec->packet->size
                                   + ODP_KEY_METADATA_SIZE
                                   + d_exec->actions_len));

    nl_msg_put_genlmsghdr(buf, 0, ovs_packet_family, NLM_F_REQUEST,
                          OVS_PACKET_CMD_EXECUTE, OVS_PACKET_VERSION);

    /* Fixed ovs_header follows the Genl header. */
    k_exec = ofpbuf_put_uninit(buf, sizeof *k_exec);
    k_exec->dp_ifindex = dp_ifindex;

    /* Raw packet payload the actions will be applied to. */
    nl_msg_put_unspec(buf, OVS_PACKET_ATTR_PACKET,
                      d_exec->packet->data, d_exec->packet->size);

    /* Flow key derived from the packet's metadata, as a nested attribute. */
    key_ofs = nl_msg_start_nested(buf, OVS_PACKET_ATTR_KEY);
    odp_key_from_pkt_metadata(buf, &d_exec->md);
    nl_msg_end_nested(buf, key_ofs);

    nl_msg_put_unspec(buf, OVS_PACKET_ATTR_ACTIONS,
                      d_exec->actions, d_exec->actions_len);
}
+
+static int
+dpif_linux_execute__(int dp_ifindex, const struct dpif_execute *execute)
+{
+ uint64_t request_stub[1024 / 8];
+ struct ofpbuf request;
+ int error;
+
+ ofpbuf_use_stub(&request, request_stub, sizeof request_stub);
+ dpif_linux_encode_execute(dp_ifindex, execute, &request);
+ error = nl_transact(NETLINK_GENERIC, &request, NULL);
+ ofpbuf_uninit(&request);
+
+ return error;
+}
+
+static int
+dpif_linux_execute(struct dpif *dpif_, struct dpif_execute *execute)
+{
+ const struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+
+ return dpif_linux_execute__(dpif->dp_ifindex, execute);
+}
+
/* Maximum number of operations dpif_linux_operate__() handles per batch;
 * bounds the stack-allocated auxiliary arrays. */
#define MAX_OPS 50
+
/* Executes up to MAX_OPS operations from 'ops' as one batched Netlink
 * exchange: pass 1 encodes every operation into its own request, a single
 * nl_transact_multiple() sends them all, and pass 2 decodes replies and
 * stores each result in ops[i]->error (plus stats where requested). */
static void
dpif_linux_operate__(struct dpif *dpif_, struct dpif_op **ops, size_t n_ops)
{
    const struct dpif_linux *dpif = dpif_linux_cast(dpif_);

    /* Per-operation transaction plus stack-backed request/reply buffers. */
    struct op_auxdata {
        struct nl_transaction txn;

        struct ofpbuf request;
        uint64_t request_stub[1024 / 8];

        struct ofpbuf reply;
        uint64_t reply_stub[1024 / 8];
    } auxes[MAX_OPS];

    struct nl_transaction *txnsp[MAX_OPS];
    size_t i;

    ovs_assert(n_ops <= MAX_OPS);
    /* Pass 1: encode each operation into auxes[i].request. */
    for (i = 0; i < n_ops; i++) {
        struct op_auxdata *aux = &auxes[i];
        struct dpif_op *op = ops[i];
        struct dpif_flow_put *put;
        struct dpif_flow_del *del;
        struct dpif_execute *execute;
        struct dpif_linux_flow flow;

        ofpbuf_use_stub(&aux->request,
                        aux->request_stub, sizeof aux->request_stub);
        aux->txn.request = &aux->request;

        /* txn.reply stays NULL unless the operation wants stats back. */
        ofpbuf_use_stub(&aux->reply, aux->reply_stub, sizeof aux->reply_stub);
        aux->txn.reply = NULL;

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            put = &op->u.flow_put;
            dpif_linux_init_flow_put(dpif_, put, &flow);
            if (put->stats) {
                /* NLM_F_ECHO makes the kernel echo the flow back so its
                 * stats can be read from the reply. */
                flow.nlmsg_flags |= NLM_F_ECHO;
                aux->txn.reply = &aux->reply;
            }
            dpif_linux_flow_to_ofpbuf(&flow, &aux->request);
            break;

        case DPIF_OP_FLOW_DEL:
            del = &op->u.flow_del;
            dpif_linux_init_flow_del(dpif_, del, &flow);
            if (del->stats) {
                flow.nlmsg_flags |= NLM_F_ECHO;
                aux->txn.reply = &aux->reply;
            }
            dpif_linux_flow_to_ofpbuf(&flow, &aux->request);
            break;

        case DPIF_OP_EXECUTE:
            execute = &op->u.execute;
            dpif_linux_encode_execute(dpif->dp_ifindex, execute,
                                      &aux->request);
            break;

        default:
            OVS_NOT_REACHED();
        }
    }

    /* Send the whole batch in one go. */
    for (i = 0; i < n_ops; i++) {
        txnsp[i] = &auxes[i].txn;
    }
    nl_transact_multiple(NETLINK_GENERIC, txnsp, n_ops);

    /* Pass 2: record per-operation results and decode echoed stats. */
    for (i = 0; i < n_ops; i++) {
        struct op_auxdata *aux = &auxes[i];
        struct nl_transaction *txn = &auxes[i].txn;
        struct dpif_op *op = ops[i];
        struct dpif_flow_put *put;
        struct dpif_flow_del *del;

        op->error = txn->error;

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            put = &op->u.flow_put;
            if (put->stats) {
                if (!op->error) {
                    struct dpif_linux_flow reply;

                    op->error = dpif_linux_flow_from_ofpbuf(&reply,
                                                            txn->reply);
                    if (!op->error) {
                        dpif_linux_flow_get_stats(&reply, put->stats);
                    }
                }

                /* On any failure, report zeroed stats rather than leaving
                 * the caller's struct uninitialized. */
                if (op->error) {
                    memset(put->stats, 0, sizeof *put->stats);
                }
            }
            break;

        case DPIF_OP_FLOW_DEL:
            del = &op->u.flow_del;
            if (del->stats) {
                if (!op->error) {
                    struct dpif_linux_flow reply;

                    op->error = dpif_linux_flow_from_ofpbuf(&reply,
                                                            txn->reply);
                    if (!op->error) {
                        dpif_linux_flow_get_stats(&reply, del->stats);
                    }
                }

                if (op->error) {
                    memset(del->stats, 0, sizeof *del->stats);
                }
            }
            break;

        case DPIF_OP_EXECUTE:
            break;

        default:
            OVS_NOT_REACHED();
        }

        ofpbuf_uninit(&aux->request);
        ofpbuf_uninit(&aux->reply);
    }
}
+
+static void
+dpif_linux_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
+{
+ while (n_ops > 0) {
+ size_t chunk = MIN(n_ops, MAX_OPS);
+ dpif_linux_operate__(dpif, ops, chunk);
+ ops += chunk;
+ n_ops -= chunk;
+ }
+}
+
/* Synchronizes 'dpif->channels' with the set of vports currently in 'dpif' in
 * the kernel, by adding a new channel for any kernel vport that lacks one and
 * deleting any channels that have no backing kernel vports.
 *
 * Returns 0 on success, otherwise the first positive errno that aborted
 * processing of a vport; channels synchronized before the failure are kept. */
static int
dpif_linux_refresh_channels(struct dpif *dpif_)
{
    struct dpif_linux *dpif = dpif_linux_cast(dpif_);
    unsigned long int *keep_channels;
    struct dpif_linux_vport vport;
    size_t keep_channels_nbits;
    struct nl_dump dump;
    uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
    struct ofpbuf buf;
    int retval = 0;
    size_t i;

    /* To start with, we need an epoll fd. */
    if (dpif->epoll_fd < 0) {
        dpif->epoll_fd = epoll_create(10);
        if (dpif->epoll_fd < 0) {
            return errno;
        }
    }

    /* Bitmap of channel slots confirmed to still have a kernel vport;
     * unmarked slots are torn down after the dump. */
    keep_channels_nbits = dpif->uc_array_size;
    keep_channels = bitmap_allocate(keep_channels_nbits);

    /* Reset event iteration state before the channel set changes. */
    dpif->n_events = dpif->event_offset = 0;

    ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
    dpif_linux_port_dump_start__(dpif_, &dump);
    while (!dpif_linux_port_dump_next__(dpif_, &dump, &vport, &buf)) {
        uint32_t port_no = odp_to_u32(vport.port_no);
        /* Reuse the existing channel's socket if this port already has one. */
        struct nl_sock *sock = (port_no < dpif->uc_array_size
                                ? dpif->channels[port_no].sock
                                : NULL);
        bool new_sock = !sock;
        int error;

        if (new_sock) {
            error = nl_sock_create(NETLINK_GENERIC, &sock);
            if (error) {
                retval = error;
                goto error;
            }
        }

        /* Configure the vport to deliver misses to 'sock'. */
        if (!vport.upcall_pid || *vport.upcall_pid != nl_sock_pid(sock)) {
            uint32_t upcall_pid = nl_sock_pid(sock);
            struct dpif_linux_vport vport_request;

            dpif_linux_vport_init(&vport_request);
            vport_request.cmd = OVS_VPORT_CMD_SET;
            vport_request.dp_ifindex = dpif->dp_ifindex;
            vport_request.port_no = vport.port_no;
            vport_request.upcall_pid = &upcall_pid;
            error = dpif_linux_vport_transact(&vport_request, NULL, NULL);
            if (!error) {
                VLOG_DBG("%s: assigning port %"PRIu32" to netlink pid %"PRIu32,
                         dpif_name(&dpif->dpif), vport_request.port_no,
                         upcall_pid);
            } else {
                VLOG_WARN_RL(&error_rl,
                             "%s: failed to set upcall pid on port: %s",
                             dpif_name(&dpif->dpif), ovs_strerror(error));

                if (error != ENODEV && error != ENOENT) {
                    retval = error;
                } else {
                    /* The vport isn't really there, even though the dump says
                     * it is.  Probably we just hit a race after a port
                     * disappeared. */
                }
                /* NOTE(review): this 'goto error' destroys 'sock' even when
                 * it was taken from an existing channel, leaving
                 * dpif->channels[port_no].sock dangling -- confirm whether
                 * the teardown path accounts for that. */
                goto error;
            }
        }

        if (new_sock) {
            error = add_channel(dpif, vport.port_no, sock);
            if (error) {
                VLOG_INFO("%s: could not add channel for port %s",
                          dpif_name(dpif_), vport.name);
                retval = error;
                goto error;
            }
        }

        /* Mark this channel as still backed by a kernel vport. */
        if (port_no < keep_channels_nbits) {
            bitmap_set1(keep_channels, port_no);
        }
        continue;

    error:
        nl_sock_destroy(sock);
    }
    nl_dump_done(&dump);
    ofpbuf_uninit(&buf);

    /* Discard any saved channels that we didn't reuse. */
    for (i = 0; i < keep_channels_nbits; i++) {
        if (!bitmap_is_set(keep_channels, i)) {
            nl_sock_destroy(dpif->channels[i].sock);
            dpif->channels[i].sock = NULL;
        }
    }
    free(keep_channels);

    return retval;
}
+
+static int
+dpif_linux_recv_set__(struct dpif *dpif_, bool enable)
+{
+ struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+
+ if ((dpif->epoll_fd >= 0) == enable) {
+ return 0;
+ } else if (!enable) {
+ destroy_channels(dpif);