#include "dynamic-string.h"
#include "flow.h"
#include "hmap.h"
+#include "latch.h"
#include "list.h"
#include "meta-flow.h"
#include "netdev.h"
struct ovs_rwlock port_rwlock;
struct hmap ports OVS_GUARDED;
struct seq *port_seq; /* Incremented whenever a port changes. */
+
+ /* Forwarding threads. */
+ struct latch exit_latch;
+ struct dp_forwarder *forwarders;
+ size_t n_forwarders;
};
static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
                                                    odp_port_t);
struct dp_netdev_actions *dp_netdev_actions_ref(
    const struct dp_netdev_actions *);
void dp_netdev_actions_unref(struct dp_netdev_actions *);
+/* A thread that receives packets from a subset of the datapath's ports,
+ * looks them up in the flow table, and executes the actions it finds. */
+struct dp_forwarder {
+ struct dp_netdev *dp;
+ pthread_t thread;
+ char *name;
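+    /* Inclusive range of port hash values served by this thread.  See
+     * dp_netdev_set_threads() for how the ranges are assigned. */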
+ uint32_t min_hash, max_hash;
+};
+
/* Interface to netdev-based datapath. */
struct dpif_netdev {
struct dpif dpif;
static void dp_netdev_port_input(struct dp_netdev *dp, struct ofpbuf *packet,
struct pkt_metadata *)
OVS_REQ_RDLOCK(dp->port_rwlock);
+static void dp_netdev_set_threads(struct dp_netdev *, int n);
static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
ovs_rwlock_init(&dp->port_rwlock);
hmap_init(&dp->ports);
dp->port_seq = seq_create();
+ latch_init(&dp->exit_latch);
ovs_rwlock_wrlock(&dp->port_rwlock);
error = do_add_port(dp, name, "internal", ODPP_LOCAL);
dp_netdev_free(dp);
return error;
}
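+    /* Start the forwarding threads.  (Two threads is just an initial
+     * default.) */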
+ dp_netdev_set_threads(dp, 2);
*dpp = dp;
return 0;
shash_find_and_delete(&dp_netdevs, dp->name);
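+    /* Stop the forwarding threads first, so that none of them can be using
+     * the datapath while we tear it down. */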
+ dp_netdev_set_threads(dp, 0);
+ free(dp->forwarders);
+
dp_netdev_flow_flush(dp);
ovs_rwlock_wrlock(&dp->port_rwlock);
HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) {
hmap_destroy(&dp->ports);
atomic_flag_destroy(&dp->destroyed);
ovs_refcount_destroy(&dp->ref_cnt);
+ latch_destroy(&dp->exit_latch);
free(CONST_CAST(char *, dp->name));
free(dp);
}
{
struct dp_netdev *dp = get_dp_netdev(dpif);
- ovs_rwlock_rdlock(&dp->cls.rwlock);
+ fat_rwlock_rdlock(&dp->cls.rwlock);
stats->n_flows = hmap_count(&dp->flow_table);
- ovs_rwlock_unlock(&dp->cls.rwlock);
+ fat_rwlock_unlock(&dp->cls.rwlock);
stats->n_hit = ovsthread_counter_read(dp->n_hit);
stats->n_missed = ovsthread_counter_read(dp->n_missed);
struct dp_netdev_flow *netdev_flow, *next;
ovs_mutex_lock(&dp->flow_mutex);
- ovs_rwlock_wrlock(&dp->cls.rwlock);
+ fat_rwlock_wrlock(&dp->cls.rwlock);
HMAP_FOR_EACH_SAFE (netdev_flow, next, node, &dp->flow_table) {
dp_netdev_remove_flow(dp, netdev_flow);
}
- ovs_rwlock_unlock(&dp->cls.rwlock);
+ fat_rwlock_unlock(&dp->cls.rwlock);
ovs_mutex_unlock(&dp->flow_mutex);
}
{
struct dp_netdev_flow *netdev_flow;
- ovs_rwlock_rdlock(&dp->cls.rwlock);
+ fat_rwlock_rdlock(&dp->cls.rwlock);
netdev_flow = dp_netdev_flow_cast(classifier_lookup(&dp->cls, flow, NULL));
dp_netdev_flow_ref(netdev_flow);
- ovs_rwlock_unlock(&dp->cls.rwlock);
+ fat_rwlock_unlock(&dp->cls.rwlock);
return netdev_flow;
}
struct flow *mask)
{
if (mask_key_len) {
- if (odp_flow_key_to_mask(mask_key, mask_key_len, mask, flow)) {
+ enum odp_key_fitness fitness;
+
+ fitness = odp_flow_key_to_mask(mask_key, mask_key_len, mask, flow);
+ if (fitness) {
/* This should not happen: it indicates that
* odp_flow_key_from_mask() and odp_flow_key_to_mask()
* disagree on the acceptable form of a mask. Log the problem
ds_init(&s);
odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s,
true);
- VLOG_ERR("internal error parsing flow mask %s", ds_cstr(&s));
+ VLOG_ERR("internal error parsing flow mask %s (%s)",
+ ds_cstr(&s), odp_key_fitness_to_string(fitness));
ds_destroy(&s);
}
return error;
}
- ovs_rwlock_rdlock(&dp->cls.rwlock);
+ fat_rwlock_rdlock(&dp->cls.rwlock);
netdev_flow = dp_netdev_find_flow(dp, &key);
- ovs_rwlock_unlock(&dp->cls.rwlock);
+ fat_rwlock_unlock(&dp->cls.rwlock);
if (netdev_flow) {
struct dp_netdev_actions *actions = NULL;
match_init(&match, flow, wc);
cls_rule_init(CONST_CAST(struct cls_rule *, &netdev_flow->cr),
&match, NETDEV_RULE_PRIORITY);
- ovs_rwlock_wrlock(&dp->cls.rwlock);
+ fat_rwlock_wrlock(&dp->cls.rwlock);
classifier_insert(&dp->cls,
CONST_CAST(struct cls_rule *, &netdev_flow->cr));
hmap_insert(&dp->flow_table,
CONST_CAST(struct hmap_node *, &netdev_flow->node),
flow_hash(flow, 0));
- ovs_rwlock_unlock(&dp->cls.rwlock);
+ fat_rwlock_unlock(&dp->cls.rwlock);
ovs_mutex_unlock(&netdev_flow->mutex);
}
ovs_mutex_lock(&dp->flow_mutex);
- ovs_rwlock_wrlock(&dp->cls.rwlock);
+ fat_rwlock_wrlock(&dp->cls.rwlock);
netdev_flow = dp_netdev_find_flow(dp, &key);
if (netdev_flow) {
if (del->stats) {
} else {
error = ENOENT;
}
- ovs_rwlock_unlock(&dp->cls.rwlock);
+ fat_rwlock_unlock(&dp->cls.rwlock);
ovs_mutex_unlock(&dp->flow_mutex);
return error;
struct dp_netdev_flow *netdev_flow;
struct hmap_node *node;
- ovs_rwlock_rdlock(&dp->cls.rwlock);
+ fat_rwlock_rdlock(&dp->cls.rwlock);
node = hmap_at_position(&dp->flow_table, &state->bucket, &state->offset);
if (node) {
netdev_flow = CONTAINER_OF(node, struct dp_netdev_flow, node);
dp_netdev_flow_ref(netdev_flow);
}
- ovs_rwlock_unlock(&dp->cls.rwlock);
+ fat_rwlock_unlock(&dp->cls.rwlock);
if (!node) {
return EOF;
}
ofpbuf_use_stack(&buf, &state->maskbuf, sizeof state->maskbuf);
minimask_expand(&netdev_flow->cr.match.mask, &wc);
odp_flow_key_from_mask(&buf, &wc.masks, &netdev_flow->flow,
- odp_to_u32(wc.masks.in_port.odp_port));
+ odp_to_u32(wc.masks.in_port.odp_port),
+ SIZE_MAX);
*mask = buf.data;
*mask_len = buf.size;
}
}
\f
+static void *
+dp_forwarder_main(void *f_)
+{
+ struct dp_forwarder *f = f_;
+ struct dp_netdev *dp = f->dp;
+ struct ofpbuf packet;
+
+ f->name = xasprintf("forwarder_%u", ovsthread_id_self());
+ set_subprogram_name("%s", f->name);
+
+ ofpbuf_init(&packet, 0);
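+
+    /* Loop until dp_netdev_set_threads() signals us to exit via the
+     * latch. */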
+ while (!latch_is_set(&dp->exit_latch)) {
+ bool received_anything;
+ int i;
+
+ ovs_rwlock_rdlock(&dp->port_rwlock);
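+        /* Make up to 50 passes over this thread's ports, then go back to
+         * poll_block(), so that a busy thread still periodically releases
+         * port_rwlock and rechecks the exit latch. */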
+ for (i = 0; i < 50; i++) {
+ struct dp_netdev_port *port;
+
+ received_anything = false;
+            HMAP_FOR_EACH (port, node, &dp->ports) {
+ if (port->rx
+ && port->node.hash >= f->min_hash
+ && port->node.hash <= f->max_hash) {
+ int buf_size;
+ int error;
+ int mtu;
+
+ if (netdev_get_mtu(port->netdev, &mtu)) {
+ mtu = ETH_PAYLOAD_MAX;
+ }
+ buf_size = DP_NETDEV_HEADROOM + VLAN_ETH_HEADER_LEN + mtu;
+
+ ofpbuf_clear(&packet);
+ ofpbuf_reserve_with_tailroom(&packet, DP_NETDEV_HEADROOM,
+ buf_size);
+
+ error = netdev_rx_recv(port->rx, &packet);
+ if (!error) {
+ struct pkt_metadata md
+ = PKT_METADATA_INITIALIZER(port->port_no);
+ dp_netdev_port_input(dp, &packet, &md);
+
+ received_anything = true;
+ } else if (error != EAGAIN && error != EOPNOTSUPP) {
+ static struct vlog_rate_limit rl
+ = VLOG_RATE_LIMIT_INIT(1, 5);
+
+ VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
+ netdev_get_name(port->netdev),
+ ovs_strerror(error));
+ }
+ }
+ }
+
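+            /* Give up early once a full pass over the ports yields no
+             * packets. */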
+ if (!received_anything) {
+ break;
+ }
+ }
+
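+        /* If this pass received anything, more packets may already be
+         * waiting, so ask to be woken up again immediately.  Otherwise,
+         * sleep until a port has traffic, the set of ports changes, or the
+         * exit latch is set. */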
+ if (received_anything) {
+ poll_immediate_wake();
+ } else {
+ struct dp_netdev_port *port;
+
+            HMAP_FOR_EACH (port, node, &dp->ports) {
+                if (port->rx
+                    && port->node.hash >= f->min_hash
+                    && port->node.hash <= f->max_hash) {
+                    netdev_rx_wait(port->rx);
+                }
+            }
+ seq_wait(dp->port_seq, seq_read(dp->port_seq));
+ latch_wait(&dp->exit_latch);
+ }
+ ovs_rwlock_unlock(&dp->port_rwlock);
+
+ poll_block();
+ }
+ ofpbuf_uninit(&packet);
+
+ free(f->name);
+
+ return NULL;
+}
+
+static void
+dp_netdev_set_threads(struct dp_netdev *dp, int n)
+{
+ int i;
+
+ if (n == dp->n_forwarders) {
+ return;
+ }
+
+ /* Stop existing threads. */
+ latch_set(&dp->exit_latch);
+ for (i = 0; i < dp->n_forwarders; i++) {
+ struct dp_forwarder *f = &dp->forwarders[i];
+
+ xpthread_join(f->thread, NULL);
+ }
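+    /* Clear the latch so that any replacement threads started below do not
+     * exit immediately. */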
+ latch_poll(&dp->exit_latch);
+ free(dp->forwarders);
+
+ /* Start new threads. */
+ dp->forwarders = xmalloc(n * sizeof *dp->forwarders);
+ dp->n_forwarders = n;
+ for (i = 0; i < n; i++) {
+ struct dp_forwarder *f = &dp->forwarders[i];
+
+ f->dp = dp;
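+        /* Divide the 32-bit port hash space evenly among the threads; the
+         * last thread takes any remainder up to UINT32_MAX. */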
+ f->min_hash = UINT32_MAX / n * i;
+ f->max_hash = UINT32_MAX / n * (i + 1) - 1;
+ if (i == n - 1) {
+ f->max_hash = UINT32_MAX;
+ }
+ xpthread_create(&f->thread, NULL, dp_forwarder_main, f);
+ }
+}
+\f
static void
dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow,
const struct ofpbuf *packet)
}
}
-static void
-dpif_netdev_run(struct dpif *dpif)
-{
- struct dp_netdev_port *port;
- struct dp_netdev *dp;
- struct ofpbuf packet;
-
- dp = get_dp_netdev(dpif);
- ofpbuf_init(&packet, 0);
-
- ovs_rwlock_rdlock(&dp->port_rwlock);
- HMAP_FOR_EACH (port, node, &dp->ports) {
- int buf_size;
- int error;
- int mtu;
-
- error = netdev_get_mtu(port->netdev, &mtu);
- if (error) {
- mtu = ETH_PAYLOAD_MAX;
- }
- buf_size = DP_NETDEV_HEADROOM + VLAN_ETH_HEADER_LEN + mtu;
-
- ofpbuf_clear(&packet);
- ofpbuf_reserve_with_tailroom(&packet, DP_NETDEV_HEADROOM, buf_size);
-
- error = port->rx ? netdev_rx_recv(port->rx, &packet) : EOPNOTSUPP;
- if (!error) {
- struct pkt_metadata md = PKT_METADATA_INITIALIZER(port->port_no);
- dp_netdev_port_input(dp, &packet, &md);
- } else if (error != EAGAIN && error != EOPNOTSUPP) {
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
-
- VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
- netdev_get_name(port->netdev), ovs_strerror(error));
- }
- }
- ovs_rwlock_unlock(&dp->port_rwlock);
-
- ofpbuf_uninit(&packet);
-}
-
-static void
-dpif_netdev_wait(struct dpif *dpif)
-{
- struct dp_netdev_port *port;
-
- /* There is a race here, if thread A calls dpif_netdev_wait(dpif) and
- * thread B calls dpif_port_add(dpif) or dpif_port_remove(dpif) before
- * A makes it to poll_block().
- *
- * But I think it doesn't matter:
- *
- * - In the dpif_port_add() case, A will not wake up when a packet
- * arrives on the new port, but this would also happen if the
- * ordering were reversed.
- *
- * - In the dpif_port_remove() case, A might wake up spuriously, but
- * that is harmless. */
-
- ovs_mutex_lock(&dp_netdev_mutex);
- HMAP_FOR_EACH (port, node, &get_dp_netdev(dpif)->ports) {
- if (port->rx) {
- netdev_rx_wait(port->rx);
- }
- }
- ovs_mutex_unlock(&dp_netdev_mutex);
-}
-
static int
dp_netdev_output_userspace(struct dp_netdev *dp, struct ofpbuf *packet,
int queue_no, const struct flow *flow,
dpif_netdev_open,
dpif_netdev_close,
dpif_netdev_destroy,
- dpif_netdev_run,
- dpif_netdev_wait,
+ NULL, /* run */
+ NULL, /* wait */
dpif_netdev_get_stats,
dpif_netdev_port_add,
dpif_netdev_port_del,