#include "list.h"
#include "meta-flow.h"
#include "netdev.h"
+#include "netdev-dpdk.h"
#include "netdev-vport.h"
#include "netlink.h"
#include "odp-execute.h"
/* By default, choose a priority in the middle. */
#define NETDEV_RULE_PRIORITY 0x8000
+#define NR_THREADS 1
+/* Use a per-thread recirc_depth to prevent recirculation loops. */
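+/* Each packet enters the datapath at depth 0 (dp_netdev_port_input() resets
+ * the counter); the OVS_ACTION_ATTR_RECIRC handler increments it around its
+ * re-entrant call into dp_netdev_input(), so a packet is dropped once it
+ * has recirculated MAX_RECIRC_DEPTH times. */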
+#define MAX_RECIRC_DEPTH 5
+DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0)
+
/* Configuration parameters. */
enum { MAX_FLOWS = 65536 }; /* Maximum number of flows in flow table. */
/* Forwarding threads. */
struct latch exit_latch;
- struct dp_forwarder *forwarders;
- size_t n_forwarders;
+ struct pmd_thread *pmd_threads;
+ size_t n_pmd_threads;
+ int pmd_count;
};
static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
odp_port_t port_no;
struct netdev *netdev;
struct netdev_saved_flags *sf;
- struct netdev_rx *rx;
+ struct netdev_rxq **rxq;
struct ovs_refcount ref_cnt;
char *type; /* Port type as requested by user. */
};
const struct dp_netdev_flow *);
static void dp_netdev_actions_free(struct dp_netdev_actions *);
-/* A thread that receives packets from some ports, looks them up in the flow
- * table, and executes the actions it finds. */
-struct dp_forwarder {
+/* PMD: Poll-mode drivers.  A PMD accesses devices via polling to eliminate
+ * the performance overhead of interrupt processing.  Therefore, netdev
+ * cannot implement rx-wait for these devices; dpif-netdev must poll them
+ * to check for received packets, and each pmd thread polls the devices
+ * assigned to it.
+ *
+ * DPDK uses PMDs to access NICs.
+ *
+ * A thread that receives packets from PMD ports, looks them up in the flow
+ * table, and executes the actions it finds. */
+struct pmd_thread {
struct dp_netdev *dp;
pthread_t thread;
+ int id;
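+    /* 'change_seq' doubles as a reload signal:
+     * dp_netdev_reload_pmd_threads() increments it, and the thread compares
+     * it against the value cached when it last loaded its queues, leaving
+     * the polling loop to reload (or exit) when the two differ. */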
+ atomic_uint change_seq;
char *name;
- uint32_t min_hash, max_hash;
};
/* Interface to netdev-based datapath. */
bool create, struct dpif **);
static int dp_netdev_output_userspace(struct dp_netdev *dp, struct ofpbuf *,
int queue_no, int type,
- const struct flow *,
- const struct nlattr *userdata)
- OVS_EXCLUDED(dp->queue_rwlock);
+ const struct miniflow *,
+ const struct nlattr *userdata);
static void dp_netdev_execute_actions(struct dp_netdev *dp,
- const struct flow *, struct ofpbuf *, bool may_steal,
+ const struct miniflow *,
+ struct ofpbuf *, bool may_steal,
struct pkt_metadata *,
const struct nlattr *actions,
- size_t actions_len)
- OVS_REQ_RDLOCK(dp->port_rwlock);
+ size_t actions_len);
static void dp_netdev_port_input(struct dp_netdev *dp, struct ofpbuf *packet,
- struct pkt_metadata *)
- OVS_REQ_RDLOCK(dp->port_rwlock);
-static void dp_netdev_set_threads(struct dp_netdev *, int n);
+ struct pkt_metadata *);
+
+static void dp_netdev_set_pmd_threads(struct dp_netdev *, int n);
static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
dp_netdev_free(dp);
return error;
}
- dp_netdev_set_threads(dp, 2);
*dpp = dp;
return 0;
shash_find_and_delete(&dp_netdevs, dp->name);
- dp_netdev_set_threads(dp, 0);
- free(dp->forwarders);
+ dp_netdev_set_pmd_threads(dp, 0);
+ free(dp->pmd_threads);
dp_netdev_flow_flush(dp);
ovs_rwlock_wrlock(&dp->port_rwlock);
return 0;
}
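+/* Asks each pmd thread to reload its rx-queue assignment. */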
+static void
+dp_netdev_reload_pmd_threads(struct dp_netdev *dp)
+{
+ int i;
+
+ for (i = 0; i < dp->n_pmd_threads; i++) {
+ struct pmd_thread *f = &dp->pmd_threads[i];
+ int id;
+
+ atomic_add(&f->change_seq, 1, &id);
+ }
+}
+
static int
do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
odp_port_t port_no)
struct netdev_saved_flags *sf;
struct dp_netdev_port *port;
struct netdev *netdev;
- struct netdev_rx *rx;
enum netdev_flags flags;
const char *open_type;
int error;
+ int i;
/* XXX reject devices already in some dp_netdev. */
return EINVAL;
}
- error = netdev_rx_open(netdev, &rx);
- if (error
- && !(error == EOPNOTSUPP && dpif_netdev_class_is_dummy(dp->class))) {
- VLOG_ERR("%s: cannot receive packets on this network device (%s)",
- devname, ovs_strerror(errno));
- netdev_close(netdev);
- return error;
+ port = xzalloc(sizeof *port);
+ port->port_no = port_no;
+ port->netdev = netdev;
+ port->rxq = xmalloc(sizeof *port->rxq * netdev_n_rxq(netdev));
+ port->type = xstrdup(type);
+ for (i = 0; i < netdev_n_rxq(netdev); i++) {
+ error = netdev_rxq_open(netdev, &port->rxq[i], i);
+        if (error
+            && !(error == EOPNOTSUPP && dpif_netdev_class_is_dummy(dp->class))) {
+            VLOG_ERR("%s: cannot receive packets on this network device (%s)",
+                     devname, ovs_strerror(error));
+            /* Close any rx-queues opened so far and free 'port'. */
+            while (i--) {
+                netdev_rxq_close(port->rxq[i]);
+            }
+            netdev_close(netdev);
+            free(port->type);
+            free(port->rxq);
+            free(port);
+            return error;
+        }
}
error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, &sf);
if (error) {
- netdev_rx_close(rx);
+ for (i = 0; i < netdev_n_rxq(netdev); i++) {
+ netdev_rxq_close(port->rxq[i]);
+ }
netdev_close(netdev);
+ free(port->rxq);
+ free(port);
return error;
}
-
- port = xmalloc(sizeof *port);
- port->port_no = port_no;
- port->netdev = netdev;
port->sf = sf;
- port->rx = rx;
- port->type = xstrdup(type);
+
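+    /* Adding the first PMD device starts the pmd threads;
+     * dp_netdev_set_pmd_threads() is a no-op once they are running, so
+     * later additions just trigger an rx-queue reload. */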
+ if (netdev_is_pmd(netdev)) {
+ dp->pmd_count++;
+ dp_netdev_set_pmd_threads(dp, NR_THREADS);
+ dp_netdev_reload_pmd_threads(dp);
+ }
+ ovs_refcount_init(&port->ref_cnt);
hmap_insert(&dp->ports, &port->node, hash_int(odp_to_u32(port_no), 0));
seq_change(dp->port_seq);
- ovs_refcount_init(&port->ref_cnt);
return 0;
}
port_unref(struct dp_netdev_port *port)
{
if (port && ovs_refcount_unref(&port->ref_cnt) == 1) {
+ int i;
+
netdev_close(port->netdev);
netdev_restore_flags(port->sf);
- netdev_rx_close(port->rx);
+
+ for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
+ netdev_rxq_close(port->rxq[i]);
+ }
free(port->type);
free(port);
}
hmap_remove(&dp->ports, &port->node);
seq_change(dp->port_seq);
+ if (netdev_is_pmd(port->netdev)) {
+ dp_netdev_reload_pmd_threads(dp);
+ }
port_unref(port);
return 0;
}
static struct dp_netdev_flow *
-dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct flow *flow)
+dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct miniflow *key)
OVS_EXCLUDED(dp->cls.rwlock)
{
struct dp_netdev_flow *netdev_flow;
+ struct cls_rule *rule;
fat_rwlock_rdlock(&dp->cls.rwlock);
- netdev_flow = dp_netdev_flow_cast(classifier_lookup(&dp->cls, flow, NULL));
+ rule = classifier_lookup_miniflow_first(&dp->cls, key);
+ netdev_flow = dp_netdev_flow_cast(rule);
fat_rwlock_unlock(&dp->cls.rwlock);
return netdev_flow;
return EINVAL;
}
- /* Force unwildcard the in_port. */
- mask->in_port.odp_port = u32_to_odp(UINT32_MAX);
} else {
enum mf_field_id id;
/* No mask key, unwildcard everything except fields whose
}
}
+    /* Force unwildcarding of the in_port.
+ *
+ * We need to do this even in the case where we unwildcard "everything"
+ * above because "everything" only includes the 16-bit OpenFlow port number
+ * mask->in_port.ofp_port, which only covers half of the 32-bit datapath
+ * port number mask->in_port.odp_port. */
+ mask->in_port.odp_port = u32_to_odp(UINT32_MAX);
+
return 0;
}
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_flow *netdev_flow;
struct flow flow;
+ struct miniflow miniflow;
struct flow_wildcards wc;
int error;
if (error) {
return error;
}
+ miniflow_init(&miniflow, &flow);
ovs_mutex_lock(&dp->flow_mutex);
- netdev_flow = dp_netdev_lookup_flow(dp, &flow);
+ netdev_flow = dp_netdev_lookup_flow(dp, &miniflow);
if (!netdev_flow) {
if (put->flags & DPIF_FP_CREATE) {
if (hmap_count(&dp->flow_table) < MAX_FLOWS) {
struct dp_netdev_flow_state *state = state_;
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_flow *netdev_flow;
+ struct flow_wildcards wc;
int error;
ovs_mutex_lock(&iter->mutex);
return error;
}
+ minimask_expand(&netdev_flow->cr.match.mask, &wc);
+
if (key) {
struct ofpbuf buf;
ofpbuf_use_stack(&buf, &state->keybuf, sizeof state->keybuf);
- odp_flow_key_from_flow(&buf, &netdev_flow->flow,
+ odp_flow_key_from_flow(&buf, &netdev_flow->flow, &wc.masks,
netdev_flow->flow.in_port.odp_port);
- *key = buf.data;
- *key_len = buf.size;
+ *key = ofpbuf_data(&buf);
+ *key_len = ofpbuf_size(&buf);
}
if (key && mask) {
struct ofpbuf buf;
- struct flow_wildcards wc;
ofpbuf_use_stack(&buf, &state->maskbuf, sizeof state->maskbuf);
- minimask_expand(&netdev_flow->cr.match.mask, &wc);
odp_flow_key_from_mask(&buf, &wc.masks, &netdev_flow->flow,
odp_to_u32(wc.masks.in_port.odp_port),
SIZE_MAX);
- *mask = buf.data;
- *mask_len = buf.size;
+ *mask = ofpbuf_data(&buf);
+ *mask_len = ofpbuf_size(&buf);
}
if (actions || stats) {
{
struct dp_netdev *dp = get_dp_netdev(dpif);
struct pkt_metadata *md = &execute->md;
- struct flow key;
+ struct miniflow key;
+ uint32_t buf[FLOW_U32S];
- if (execute->packet->size < ETH_HEADER_LEN ||
- execute->packet->size > UINT16_MAX) {
+ if (ofpbuf_size(execute->packet) < ETH_HEADER_LEN ||
+ ofpbuf_size(execute->packet) > UINT16_MAX) {
return EINVAL;
}
/* Extract flow key. */
- flow_extract(execute->packet, md, &key);
+ miniflow_initialize(&key, buf);
+ miniflow_extract(execute->packet, md, &key);
ovs_rwlock_rdlock(&dp->port_rwlock);
dp_netdev_execute_actions(dp, &key, execute->packet, false, md,
free(actions);
}
\f
+
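+/* Receives a batch of packets from 'rxq' and runs each one through the
+ * datapath.  EAGAIN (nothing to receive) and EOPNOTSUPP are expected and
+ * ignored; any other error is logged, rate-limited. */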
+static void
+dp_netdev_process_rxq_port(struct dp_netdev *dp,
+ struct dp_netdev_port *port,
+ struct netdev_rxq *rxq)
+{
+ struct ofpbuf *packet[NETDEV_MAX_RX_BATCH];
+ int error, c;
+
+ error = netdev_rxq_recv(rxq, packet, &c);
+ if (!error) {
+ struct pkt_metadata md = PKT_METADATA_INITIALIZER(port->port_no);
+ int i;
+
+ for (i = 0; i < c; i++) {
+ dp_netdev_port_input(dp, packet[i], &md);
+ }
+ } else if (error != EAGAIN && error != EOPNOTSUPP) {
+ static struct vlog_rate_limit rl
+ = VLOG_RATE_LIMIT_INIT(1, 5);
+
+ VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
+ netdev_get_name(port->netdev),
+ ovs_strerror(error));
+ }
+}
+
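+/* Polls the rx-queues of every non-PMD port from the main thread.  PMD
+ * ports are polled by the pmd threads instead. */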
+static void
+dpif_netdev_run(struct dpif *dpif)
+{
+ struct dp_netdev_port *port;
+ struct dp_netdev *dp = get_dp_netdev(dpif);
+
+ ovs_rwlock_rdlock(&dp->port_rwlock);
+
+ HMAP_FOR_EACH (port, node, &dp->ports) {
+ if (!netdev_is_pmd(port->netdev)) {
+ int i;
+
+ for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
+ dp_netdev_process_rxq_port(dp, port, port->rxq[i]);
+ }
+ }
+ }
+
+ ovs_rwlock_unlock(&dp->port_rwlock);
+}
+
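+/* Arranges to wake up when a non-PMD rx-queue may have packets to receive.
+ * pmd threads poll continuously and never sleep, so PMD ports need no
+ * wakeup here. */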
+static void
+dpif_netdev_wait(struct dpif *dpif)
+{
+ struct dp_netdev_port *port;
+ struct dp_netdev *dp = get_dp_netdev(dpif);
+
+ ovs_rwlock_rdlock(&dp->port_rwlock);
+
+ HMAP_FOR_EACH (port, node, &dp->ports) {
+ if (!netdev_is_pmd(port->netdev)) {
+ int i;
+
+ for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
+ netdev_rxq_wait(port->rxq[i]);
+ }
+ }
+ }
+ ovs_rwlock_unlock(&dp->port_rwlock);
+}
+
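+/* One rx-queue polled by a pmd thread, paired with its port so that the
+ * thread can hold a reference on the port for as long as it polls it. */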
+struct rxq_poll {
+ struct dp_netdev_port *port;
+ struct netdev_rxq *rx;
+};
+
+static int
+pmd_load_queues(struct pmd_thread *f,
+ struct rxq_poll **ppoll_list, int poll_cnt)
+{
+ struct dp_netdev *dp = f->dp;
+ struct rxq_poll *poll_list = *ppoll_list;
+ struct dp_netdev_port *port;
+ int id = f->id;
+ int index;
+ int i;
+
+ /* Simple scheduler for netdev rx polling. */
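+    /* Rx-queues are numbered consecutively across all PMD devices; the
+     * queue with number 'index' goes to the pmd thread whose id equals
+     * index % n_pmd_threads. */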
+ ovs_rwlock_rdlock(&dp->port_rwlock);
+ for (i = 0; i < poll_cnt; i++) {
+ port_unref(poll_list[i].port);
+ }
+
+ poll_cnt = 0;
+ index = 0;
+
+ HMAP_FOR_EACH (port, node, &f->dp->ports) {
+ if (netdev_is_pmd(port->netdev)) {
+ int i;
+
+ for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
+ if ((index % dp->n_pmd_threads) == id) {
+                    poll_list = xrealloc(poll_list,
+                                         sizeof *poll_list * (poll_cnt + 1));
+
+ port_ref(port);
+ poll_list[poll_cnt].port = port;
+ poll_list[poll_cnt].rx = port->rxq[i];
+ poll_cnt++;
+ }
+ index++;
+ }
+ }
+ }
+
+ ovs_rwlock_unlock(&dp->port_rwlock);
+ *ppoll_list = poll_list;
+ return poll_cnt;
+}
+
static void *
-dp_forwarder_main(void *f_)
+pmd_thread_main(void *f_)
{
- struct dp_forwarder *f = f_;
+ struct pmd_thread *f = f_;
struct dp_netdev *dp = f->dp;
+ unsigned int lc = 0;
+ struct rxq_poll *poll_list;
+ unsigned int port_seq;
+ int poll_cnt;
+ int i;
- f->name = xasprintf("forwarder_%u", ovsthread_id_self());
+ f->name = xasprintf("pmd_%u", ovsthread_id_self());
set_subprogram_name("%s", f->name);
+ poll_cnt = 0;
+ poll_list = NULL;
- while (!latch_is_set(&dp->exit_latch)) {
- bool received_anything;
+ pmd_thread_setaffinity_cpu(f->id);
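+
+    /* Poll the assigned rx-queues until 'change_seq' moves, then come back
+     * here to pick up the new queue assignment (or to exit). */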
+reload:
+ poll_cnt = pmd_load_queues(f, &poll_list, poll_cnt);
+ atomic_read(&f->change_seq, &port_seq);
+
+ for (;;) {
+ unsigned int c_port_seq;
int i;
- ovs_rwlock_rdlock(&dp->port_rwlock);
- for (i = 0; i < 50; i++) {
- struct dp_netdev_port *port;
-
- received_anything = false;
- HMAP_FOR_EACH (port, node, &f->dp->ports) {
- if (port->rx
- && port->node.hash >= f->min_hash
- && port->node.hash <= f->max_hash) {
- struct ofpbuf *packets[NETDEV_MAX_RX_BATCH];
- int count;
- int error;
-
- error = netdev_rx_recv(port->rx, packets, &count);
- if (!error) {
- int i;
- struct pkt_metadata md
- = PKT_METADATA_INITIALIZER(port->port_no);
-
- for (i = 0; i < count; i++) {
- dp_netdev_port_input(dp, packets[i], &md);
- }
- received_anything = true;
- } else if (error != EAGAIN && error != EOPNOTSUPP) {
- static struct vlog_rate_limit rl
- = VLOG_RATE_LIMIT_INIT(1, 5);
-
- VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
- netdev_get_name(port->netdev),
- ovs_strerror(error));
- }
- }
- }
+ for (i = 0; i < poll_cnt; i++) {
+ dp_netdev_process_rxq_port(dp, poll_list[i].port, poll_list[i].rx);
+ }
+
+ if (lc++ > 1024) {
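+            /* This thread never blocks, so it must quiesce explicitly from
+             * time to time to let RCU-protected memory be reclaimed. */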
+ ovsrcu_quiesce();
- if (!received_anything) {
+            /* TODO: We need a completely userspace-based signaling method
+             * to keep this thread entirely in userspace.  For now, use an
+             * atomic counter. */
+ lc = 0;
+            atomic_read_explicit(&f->change_seq, &c_port_seq,
+                                 memory_order_consume);
+ if (c_port_seq != port_seq) {
break;
}
}
+ }
- if (received_anything) {
- poll_immediate_wake();
- } else {
- struct dp_netdev_port *port;
-
- HMAP_FOR_EACH (port, node, &f->dp->ports)
- if (port->rx
- && port->node.hash >= f->min_hash
- && port->node.hash <= f->max_hash) {
- netdev_rx_wait(port->rx);
- }
- seq_wait(dp->port_seq, seq_read(dp->port_seq));
- latch_wait(&dp->exit_latch);
- }
- ovs_rwlock_unlock(&dp->port_rwlock);
+    if (!latch_is_set(&f->dp->exit_latch)) {
+ goto reload;
+ }
- poll_block();
+ for (i = 0; i < poll_cnt; i++) {
+ port_unref(poll_list[i].port);
}
+ free(poll_list);
free(f->name);
-
return NULL;
}
static void
-dp_netdev_set_threads(struct dp_netdev *dp, int n)
+dp_netdev_set_pmd_threads(struct dp_netdev *dp, int n)
{
int i;
- if (n == dp->n_forwarders) {
+ if (n == dp->n_pmd_threads) {
return;
}
/* Stop existing threads. */
latch_set(&dp->exit_latch);
- for (i = 0; i < dp->n_forwarders; i++) {
- struct dp_forwarder *f = &dp->forwarders[i];
+ dp_netdev_reload_pmd_threads(dp);
+ for (i = 0; i < dp->n_pmd_threads; i++) {
+ struct pmd_thread *f = &dp->pmd_threads[i];
xpthread_join(f->thread, NULL);
}
latch_poll(&dp->exit_latch);
- free(dp->forwarders);
+ free(dp->pmd_threads);
/* Start new threads. */
- dp->forwarders = xmalloc(n * sizeof *dp->forwarders);
- dp->n_forwarders = n;
+ dp->pmd_threads = xmalloc(n * sizeof *dp->pmd_threads);
+ dp->n_pmd_threads = n;
+
for (i = 0; i < n; i++) {
- struct dp_forwarder *f = &dp->forwarders[i];
+ struct pmd_thread *f = &dp->pmd_threads[i];
f->dp = dp;
- f->min_hash = UINT32_MAX / n * i;
- f->max_hash = UINT32_MAX / n * (i + 1) - 1;
- if (i == n - 1) {
- f->max_hash = UINT32_MAX;
- }
- xpthread_create(&f->thread, NULL, dp_forwarder_main, f);
+ f->id = i;
+ atomic_store(&f->change_seq, 1);
+
+        /* The pmd threads will distribute the rx-queues of all PMD devices
+         * among themselves. */
+ xpthread_create(&f->thread, NULL, pmd_thread_main, f);
}
}
+
\f
static void *
dp_netdev_flow_stats_new_cb(void)
static void
dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow,
const struct ofpbuf *packet,
- const struct flow *key)
+ const struct miniflow *key)
{
- uint16_t tcp_flags = ntohs(key->tcp_flags);
+ uint16_t tcp_flags = miniflow_get_tcp_flags(key);
long long int now = time_msec();
struct dp_netdev_flow_stats *bucket;
ovs_mutex_lock(&bucket->mutex);
bucket->used = MAX(now, bucket->used);
bucket->packet_count++;
- bucket->byte_count += packet->size;
+ bucket->byte_count += ofpbuf_size(packet);
bucket->tcp_flags |= tcp_flags;
ovs_mutex_unlock(&bucket->mutex);
}
}
static void
-dp_netdev_port_input(struct dp_netdev *dp, struct ofpbuf *packet,
- struct pkt_metadata *md)
+dp_netdev_input(struct dp_netdev *dp, struct ofpbuf *packet,
+ struct pkt_metadata *md)
OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_flow *netdev_flow;
- struct flow key;
+ struct miniflow key;
+ uint32_t buf[FLOW_U32S];
- if (packet->size < ETH_HEADER_LEN) {
+ if (ofpbuf_size(packet) < ETH_HEADER_LEN) {
ofpbuf_delete(packet);
return;
}
- flow_extract(packet, md, &key);
+ miniflow_initialize(&key, buf);
+ miniflow_extract(packet, md, &key);
+
netdev_flow = dp_netdev_lookup_flow(dp, &key);
if (netdev_flow) {
struct dp_netdev_actions *actions;
} else if (dp->handler_queues) {
dp_netdev_count_packet(dp, DP_STAT_MISS);
dp_netdev_output_userspace(dp, packet,
- flow_hash_5tuple(&key, 0) % dp->n_handlers,
+ miniflow_hash_5tuple(&key, 0)
+ % dp->n_handlers,
DPIF_UC_MISS, &key, NULL);
ofpbuf_delete(packet);
}
}
+static void
+dp_netdev_port_input(struct dp_netdev *dp, struct ofpbuf *packet,
+ struct pkt_metadata *md)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
+{
+ uint32_t *recirc_depth = recirc_depth_get();
+
+ *recirc_depth = 0;
+ dp_netdev_input(dp, packet, md);
+}
+
static int
dp_netdev_output_userspace(struct dp_netdev *dp, struct ofpbuf *packet,
- int queue_no, int type, const struct flow *flow,
+ int queue_no, int type, const struct miniflow *key,
const struct nlattr *userdata)
- OVS_EXCLUDED(dp->queue_rwlock)
{
struct dp_netdev_queue *q;
int error;
struct dpif_upcall *upcall = &u->upcall;
struct ofpbuf *buf = &u->buf;
size_t buf_size;
+ struct flow flow;
upcall->type = type;
if (userdata) {
buf_size += NLA_ALIGN(userdata->nla_len);
}
- buf_size += packet->size;
+ buf_size += ofpbuf_size(packet);
ofpbuf_init(buf, buf_size);
/* Put ODP flow. */
- odp_flow_key_from_flow(buf, flow, flow->in_port.odp_port);
- upcall->key = buf->data;
- upcall->key_len = buf->size;
+ miniflow_expand(key, &flow);
+ odp_flow_key_from_flow(buf, &flow, NULL, flow.in_port.odp_port);
+ upcall->key = ofpbuf_data(buf);
+ upcall->key_len = ofpbuf_size(buf);
/* Put userdata. */
if (userdata) {
NLA_ALIGN(userdata->nla_len));
}
- upcall->packet.data = ofpbuf_put(buf, packet->data, packet->size);
- upcall->packet.size = packet->size;
+ ofpbuf_set_data(&upcall->packet,
+ ofpbuf_put(buf, ofpbuf_data(packet), ofpbuf_size(packet)));
+ ofpbuf_set_size(&upcall->packet, ofpbuf_size(packet));
seq_change(q->seq);
struct dp_netdev_execute_aux {
struct dp_netdev *dp;
- const struct flow *key;
+ const struct miniflow *key;
};
static void
dp_execute_cb(void *aux_, struct ofpbuf *packet,
- const struct pkt_metadata *md OVS_UNUSED,
+ struct pkt_metadata *md,
const struct nlattr *a, bool may_steal)
OVS_NO_THREAD_SAFETY_ANALYSIS
{
struct dp_netdev_execute_aux *aux = aux_;
int type = nl_attr_type(a);
struct dp_netdev_port *p;
+ uint32_t *depth = recirc_depth_get();
switch ((enum ovs_action_attr)type) {
case OVS_ACTION_ATTR_OUTPUT:
userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
dp_netdev_output_userspace(aux->dp, packet,
- flow_hash_5tuple(aux->key, 0)
+ miniflow_hash_5tuple(aux->key, 0)
% aux->dp->n_handlers,
DPIF_UC_ACTION, aux->key,
userdata);
}
break;
}
+
+ case OVS_ACTION_ATTR_HASH: {
+ const struct ovs_action_hash *hash_act;
+ uint32_t hash;
+
+ hash_act = nl_attr_get(a);
+ if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
+ /* Hash need not be symmetric, nor does it need to include
+ * L2 fields. */
+ hash = miniflow_hash_5tuple(aux->key, hash_act->hash_basis);
+ if (!hash) {
+ hash = 1; /* 0 is not valid */
+ }
+ } else {
+ VLOG_WARN("Unknown hash algorithm specified for the hash action.");
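+            /* Fall back to an arbitrary nonzero value. */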
+ hash = 2;
+ }
+
+ md->dp_hash = hash;
+ break;
+ }
+
+ case OVS_ACTION_ATTR_RECIRC:
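+        /* Re-enter the datapath for another lookup.  The per-thread depth
+         * counter bounds the recursion; the packet is cloned unless this
+         * action may steal it. */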
+ if (*depth < MAX_RECIRC_DEPTH) {
+ struct pkt_metadata recirc_md = *md;
+ struct ofpbuf *recirc_packet;
+
+ recirc_packet = may_steal ? packet : ofpbuf_clone(packet);
+ recirc_md.recirc_id = nl_attr_get_u32(a);
+
+ (*depth)++;
+ dp_netdev_input(aux->dp, recirc_packet, &recirc_md);
+ (*depth)--;
+
+ break;
+ } else {
+ VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
+ }
+ break;
+
case OVS_ACTION_ATTR_PUSH_VLAN:
case OVS_ACTION_ATTR_POP_VLAN:
case OVS_ACTION_ATTR_PUSH_MPLS:
case __OVS_ACTION_ATTR_MAX:
OVS_NOT_REACHED();
}
-
}
static void
-dp_netdev_execute_actions(struct dp_netdev *dp, const struct flow *key,
+dp_netdev_execute_actions(struct dp_netdev *dp, const struct miniflow *key,
struct ofpbuf *packet, bool may_steal,
struct pkt_metadata *md,
const struct nlattr *actions, size_t actions_len)
- OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_execute_aux aux = {dp, key};
dpif_netdev_open,
dpif_netdev_close,
dpif_netdev_destroy,
- NULL, /* run */
- NULL, /* wait */
+ dpif_netdev_run,
+ dpif_netdev_wait,
dpif_netdev_get_stats,
dpif_netdev_port_add,
dpif_netdev_port_del,