pthread_t thread;
int id;
atomic_uint change_seq;
- char *name;
};
/* Interface to netdev-based datapath. */
bool create, struct dpif **);
static int dp_netdev_output_userspace(struct dp_netdev *dp, struct ofpbuf *,
int queue_no, int type,
- const struct flow *,
+ const struct miniflow *,
const struct nlattr *userdata);
static void dp_netdev_execute_actions(struct dp_netdev *dp,
- const struct flow *, struct ofpbuf *, bool may_steal,
+ const struct miniflow *,
+ struct ofpbuf *, bool may_steal,
struct pkt_metadata *,
const struct nlattr *actions,
size_t actions_len);
return class != &dpif_netdev_class;
}
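+/* Returns true if 'class' is the PlanetLab variant of the netdev datapath. */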
+static bool
+dpif_netdev_class_is_planetlab(const struct dpif_class *class)
+{
+    return class == &dpif_planetlab_class;
+}
+
static const char *
dpif_netdev_port_open_type(const struct dpif_class *class, const char *type)
{
return strcmp(type, "internal") ? type
+ : dpif_netdev_class_is_planetlab(class) ? "pltap"
: dpif_netdev_class_is_dummy(class) ? "dummy"
: "tap";
}
{
uint32_t port_no;
- if (dp->class != &dpif_netdev_class) {
+ if (dp->class != &dpif_netdev_class &&
+ dp->class != &dpif_planetlab_class) {
const char *p;
int start_no = 0;
}
static struct dp_netdev_flow *
- dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct flow *flow)
+ dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct miniflow *key)
OVS_EXCLUDED(dp->cls.rwlock)
{
struct dp_netdev_flow *netdev_flow;
+ struct cls_rule *rule;
fat_rwlock_rdlock(&dp->cls.rwlock);
- netdev_flow = dp_netdev_flow_cast(classifier_lookup(&dp->cls, flow, NULL));
+ rule = classifier_lookup_miniflow_first(&dp->cls, key);
+ netdev_flow = dp_netdev_flow_cast(rule);
fat_rwlock_unlock(&dp->cls.rwlock);
return netdev_flow;
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_flow *netdev_flow;
struct flow flow;
+ struct miniflow miniflow;
struct flow_wildcards wc;
int error;
if (error) {
return error;
}
+ miniflow_init(&miniflow, &flow);
ovs_mutex_lock(&dp->flow_mutex);
- netdev_flow = dp_netdev_lookup_flow(dp, &flow);
+ netdev_flow = dp_netdev_lookup_flow(dp, &miniflow);
if (!netdev_flow) {
if (put->flags & DPIF_FP_CREATE) {
if (hmap_count(&dp->flow_table) < MAX_FLOWS) {
struct dp_netdev_flow_state *state = state_;
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_flow *netdev_flow;
+ struct flow_wildcards wc;
int error;
ovs_mutex_lock(&iter->mutex);
return error;
}
+ minimask_expand(&netdev_flow->cr.match.mask, &wc);
+
if (key) {
struct ofpbuf buf;
ofpbuf_use_stack(&buf, &state->keybuf, sizeof state->keybuf);
- odp_flow_key_from_flow(&buf, &netdev_flow->flow,
+ odp_flow_key_from_flow(&buf, &netdev_flow->flow, &wc.masks,
netdev_flow->flow.in_port.odp_port);
*key = ofpbuf_data(&buf);
if (key && mask) {
struct ofpbuf buf;
- struct flow_wildcards wc;
ofpbuf_use_stack(&buf, &state->maskbuf, sizeof state->maskbuf);
- minimask_expand(&netdev_flow->cr.match.mask, &wc);
odp_flow_key_from_mask(&buf, &wc.masks, &netdev_flow->flow,
odp_to_u32(wc.masks.in_port.odp_port),
SIZE_MAX);
{
struct dp_netdev *dp = get_dp_netdev(dpif);
struct pkt_metadata *md = &execute->md;
- struct flow key;
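+    /* 'buf' provides the storage for the miniflow data; miniflow_initialize()
+     * below points 'flow' at it. */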
+    struct {
+        struct miniflow flow;
+        uint32_t buf[FLOW_U32S];
+    } key;
if (ofpbuf_size(execute->packet) < ETH_HEADER_LEN ||
ofpbuf_size(execute->packet) > UINT16_MAX) {
}
/* Extract flow key. */
- flow_extract(execute->packet, md, &key);
+ miniflow_initialize(&key.flow, key.buf);
+ miniflow_extract(execute->packet, md, &key.flow);
ovs_rwlock_rdlock(&dp->port_rwlock);
- dp_netdev_execute_actions(dp, &key, execute->packet, false, md,
+ dp_netdev_execute_actions(dp, &key.flow, execute->packet, false, md,
execute->actions, execute->actions_len);
ovs_rwlock_unlock(&dp->port_rwlock);
int poll_cnt;
int i;
- f->name = xasprintf("pmd_%u", ovsthread_id_self());
- set_subprogram_name("%s", f->name);
poll_cnt = 0;
poll_list = NULL;
}
free(poll_list);
- free(f->name);
return NULL;
}
/* The pmd threads distribute all the devices' rx-queues among
 * themselves. */
- xpthread_create(&f->thread, NULL, pmd_thread_main, f);
+ f->thread = ovs_thread_create("pmd", pmd_thread_main, f);
}
}
static void
dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow,
const struct ofpbuf *packet,
- const struct flow *key)
+ const struct miniflow *key)
{
- uint16_t tcp_flags = ntohs(key->tcp_flags);
+ uint16_t tcp_flags = miniflow_get_tcp_flags(key);
long long int now = time_msec();
struct dp_netdev_flow_stats *bucket;
OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_flow *netdev_flow;
- struct flow key;
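+    /* On-stack miniflow key; 'buf' backs the data filled in by
+     * miniflow_extract(). */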
+    struct {
+        struct miniflow flow;
+        uint32_t buf[FLOW_U32S];
+    } key;
if (ofpbuf_size(packet) < ETH_HEADER_LEN) {
ofpbuf_delete(packet);
return;
}
- flow_extract(packet, md, &key);
- netdev_flow = dp_netdev_lookup_flow(dp, &key);
+ miniflow_initialize(&key.flow, key.buf);
+ miniflow_extract(packet, md, &key.flow);
+
+ netdev_flow = dp_netdev_lookup_flow(dp, &key.flow);
if (netdev_flow) {
struct dp_netdev_actions *actions;
- dp_netdev_flow_used(netdev_flow, packet, &key);
+ dp_netdev_flow_used(netdev_flow, packet, &key.flow);
actions = dp_netdev_flow_get_actions(netdev_flow);
- dp_netdev_execute_actions(dp, &key, packet, true, md,
+ dp_netdev_execute_actions(dp, &key.flow, packet, true, md,
actions->actions, actions->size);
dp_netdev_count_packet(dp, DP_STAT_HIT);
} else if (dp->handler_queues) {
dp_netdev_count_packet(dp, DP_STAT_MISS);
dp_netdev_output_userspace(dp, packet,
- flow_hash_5tuple(&key, 0) % dp->n_handlers,
- DPIF_UC_MISS, &key, NULL);
+ miniflow_hash_5tuple(&key.flow, 0)
+ % dp->n_handlers,
+ DPIF_UC_MISS, &key.flow, NULL);
ofpbuf_delete(packet);
}
}
static int
dp_netdev_output_userspace(struct dp_netdev *dp, struct ofpbuf *packet,
- int queue_no, int type, const struct flow *flow,
+ int queue_no, int type, const struct miniflow *key,
const struct nlattr *userdata)
{
struct dp_netdev_queue *q;
struct dpif_upcall *upcall = &u->upcall;
struct ofpbuf *buf = &u->buf;
size_t buf_size;
+ struct flow flow;
upcall->type = type;
ofpbuf_init(buf, buf_size);
/* Put ODP flow. */
- odp_flow_key_from_flow(buf, flow, flow->in_port.odp_port);
+ miniflow_expand(key, &flow);
+ odp_flow_key_from_flow(buf, &flow, NULL, flow.in_port.odp_port);
upcall->key = ofpbuf_data(buf);
upcall->key_len = ofpbuf_size(buf);
struct dp_netdev_execute_aux {
struct dp_netdev *dp;
- const struct flow *key;
+ const struct miniflow *key;
};
static void
userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
dp_netdev_output_userspace(aux->dp, packet,
- flow_hash_5tuple(aux->key, 0)
+ miniflow_hash_5tuple(aux->key, 0)
% aux->dp->n_handlers,
DPIF_UC_ACTION, aux->key,
userdata);
break;
}
+    case OVS_ACTION_ATTR_HASH: {
+        const struct ovs_action_hash *hash_act;
+        uint32_t hash;
+
+        hash_act = nl_attr_get(a);
+        if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
+            /* Hash need not be symmetric, nor does it need to include
+             * L2 fields. */
+            hash = miniflow_hash_5tuple(aux->key, hash_act->hash_basis);
+            if (!hash) {
+                hash = 1; /* 0 is not valid */
+            }
+
+        } else {
+            VLOG_WARN("Unknown hash algorithm specified "
+                      "for the hash action.");
+            hash = 2;
+        }
+
+        md->dp_hash = hash;
+        break;
+    }
+
case OVS_ACTION_ATTR_RECIRC:
if (*depth < MAX_RECIRC_DEPTH) {
struct pkt_metadata recirc_md = *md;
struct ofpbuf *recirc_packet;
- const struct ovs_action_recirc *act;
recirc_packet = may_steal ? packet : ofpbuf_clone(packet);
-
- act = nl_attr_get(a);
- recirc_md.recirc_id = act->recirc_id;
- recirc_md.dp_hash = 0;
-
- if (act->hash_alg == OVS_RECIRC_HASH_ALG_L4) {
- recirc_md.dp_hash = flow_hash_symmetric_l4(aux->key,
- act->hash_bias);
- if (!recirc_md.dp_hash) {
- recirc_md.dp_hash = 1; /* 0 is not valid */
- }
- }
+ recirc_md.recirc_id = nl_attr_get_u32(a);
(*depth)++;
dp_netdev_input(aux->dp, recirc_packet, &recirc_md);
}
static void
- dp_netdev_execute_actions(struct dp_netdev *dp, const struct flow *key,
+ dp_netdev_execute_actions(struct dp_netdev *dp, const struct miniflow *key,
struct ofpbuf *packet, bool may_steal,
struct pkt_metadata *md,
const struct nlattr *actions, size_t actions_len)
actions, actions_len, dp_execute_cb);
}
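+/* The "netdev" and "planetlab" dpif classes share the same set of functions,
+ * so the common function pointers are collected in a single macro. */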
+#define DPIF_NETDEV_CLASS_FUNCTIONS \
+    dpif_netdev_enumerate, \
+    dpif_netdev_port_open_type, \
+    dpif_netdev_open, \
+    dpif_netdev_close, \
+    dpif_netdev_destroy, \
+    dpif_netdev_run, \
+    dpif_netdev_wait, \
+    dpif_netdev_get_stats, \
+    dpif_netdev_port_add, \
+    dpif_netdev_port_del, \
+    dpif_netdev_port_query_by_number, \
+    dpif_netdev_port_query_by_name, \
+    NULL, /* port_get_pid */ \
+    dpif_netdev_port_dump_start, \
+    dpif_netdev_port_dump_next, \
+    dpif_netdev_port_dump_done, \
+    dpif_netdev_port_poll, \
+    dpif_netdev_port_poll_wait, \
+    dpif_netdev_flow_get, \
+    dpif_netdev_flow_put, \
+    dpif_netdev_flow_del, \
+    dpif_netdev_flow_flush, \
+    dpif_netdev_flow_dump_state_init, \
+    dpif_netdev_flow_dump_start, \
+    dpif_netdev_flow_dump_next, \
+    NULL, \
+    dpif_netdev_flow_dump_done, \
+    dpif_netdev_flow_dump_state_uninit, \
+    dpif_netdev_execute, \
+    NULL, /* operate */ \
+    dpif_netdev_recv_set, \
+    dpif_netdev_handlers_set, \
+    dpif_netdev_queue_to_priority, \
+    dpif_netdev_recv, \
+    dpif_netdev_recv_wait, \
+    dpif_netdev_recv_purge,
+
const struct dpif_class dpif_netdev_class = {
"netdev",
- dpif_netdev_enumerate,
- dpif_netdev_port_open_type,
- dpif_netdev_open,
- dpif_netdev_close,
- dpif_netdev_destroy,
- dpif_netdev_run,
- dpif_netdev_wait,
- dpif_netdev_get_stats,
- dpif_netdev_port_add,
- dpif_netdev_port_del,
- dpif_netdev_port_query_by_number,
- dpif_netdev_port_query_by_name,
- NULL, /* port_get_pid */
- dpif_netdev_port_dump_start,
- dpif_netdev_port_dump_next,
- dpif_netdev_port_dump_done,
- dpif_netdev_port_poll,
- dpif_netdev_port_poll_wait,
- dpif_netdev_flow_get,
- dpif_netdev_flow_put,
- dpif_netdev_flow_del,
- dpif_netdev_flow_flush,
- dpif_netdev_flow_dump_state_init,
- dpif_netdev_flow_dump_start,
- dpif_netdev_flow_dump_next,
- NULL,
- dpif_netdev_flow_dump_done,
- dpif_netdev_flow_dump_state_uninit,
- dpif_netdev_execute,
- NULL, /* operate */
- dpif_netdev_recv_set,
- dpif_netdev_handlers_set,
- dpif_netdev_queue_to_priority,
- dpif_netdev_recv,
- dpif_netdev_recv_wait,
- dpif_netdev_recv_purge,
+    DPIF_NETDEV_CLASS_FUNCTIONS
+};
+
+const struct dpif_class dpif_planetlab_class = {
+    "planetlab",
+    DPIF_NETDEV_CLASS_FUNCTIONS
};
static void
"DP PORT NEW-NUMBER",
3, 3, dpif_dummy_change_port_number, NULL);
}
+
#include <string.h>
#include <unistd.h>
- #include "connectivity.h"
#include "coverage.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "openflow/openflow.h"
#include "packets.h"
#include "poll-loop.h"
- #include "seq.h"
#include "shash.h"
#include "smap.h"
#include "sset.h"
}
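+/* Initializes 'netdev_class_mutex' exactly once.  Factored out of
+ * netdev_initialize() so that netdev_register_provider() can take the mutex
+ * before the rest of netdev initialization has run. */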
static void
- netdev_initialize(void)
+ netdev_class_mutex_initialize(void)
OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
if (ovsthread_once_start(&once)) {
ovs_mutex_init_recursive(&netdev_class_mutex);
+        ovsthread_once_done(&once);
+    }
+}
+
+static void
+netdev_initialize(void)
+    OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
+{
+    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
+
+    if (ovsthread_once_start(&once)) {
+        netdev_class_mutex_initialize();
fatal_signal_add_hook(restore_all_flags, NULL, NULL, true);
netdev_vport_patch_register();
netdev_register_provider(&netdev_tap_class);
netdev_register_provider(&netdev_bsd_class);
#endif
+ netdev_register_provider(&netdev_tunnel_class);
+ netdev_register_provider(&netdev_pltap_class);
netdev_dpdk_register();
ovsthread_once_done(&once);
{
struct netdev_registered_class *rc;
+ netdev_initialize();
ovs_mutex_lock(&netdev_class_mutex);
HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
if (rc->class->run) {
{
int error;
+ netdev_class_mutex_initialize();
ovs_mutex_lock(&netdev_class_mutex);
if (netdev_lookup_class(new_class->type)) {
VLOG_WARN("attempted to register duplicate netdev provider: %s",
memset(netdev, 0, sizeof *netdev);
netdev->netdev_class = rc->class;
netdev->name = xstrdup(name);
+ netdev->change_seq = 1;
netdev->node = shash_add(&netdev_shash, name, netdev);
/* By default enable one rx queue per netdev. */
int old_ref_cnt;
atomic_add(&rc->ref_cnt, 1, &old_ref_cnt);
- seq_change(connectivity_seq_get());
+ netdev_change_seq_changed(netdev);
} else {
free(netdev->name);
ovs_assert(list_is_empty(&netdev->saved_flags_list));
ovs_mutex_unlock(&netdev_mutex);
}
+/* Returns a malloc()'d array containing a pointer to every netdev-vport
+ * currently open, and stores the number of entries in '*size'.
+ *
+ * The caller must close each netdev in the array with netdev_close() and
+ * then free() the array itself. */
+struct netdev **
+netdev_get_vports(size_t *size)
+    OVS_EXCLUDED(netdev_mutex)
+{
+    struct netdev **vports;
+    struct shash_node *node;
+    size_t n = 0;
+
+    if (!size) {
+        return NULL;
+    }
+
+    /* Allocate enough room for every netdev, although only the vports are
+     * stored. */
+    vports = xmalloc(shash_count(&netdev_shash) * sizeof *vports);
+    ovs_mutex_lock(&netdev_mutex);
+    SHASH_FOR_EACH (node, &netdev_shash) {
+        struct netdev *dev = node->data;
+
+        if (netdev_vport_is_vport_class(dev->netdev_class)) {
+            dev->ref_cnt++;
+            vports[n] = dev;
+            n++;
+        }
+    }
+    ovs_mutex_unlock(&netdev_mutex);
+    *size = n;
+
+    return vports;
+}
+
const char *
netdev_get_type_from_name(const char *name)
{
}
}
}
+
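+/* Returns 'netdev''s current change sequence number; see
+ * netdev_change_seq_changed(). */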
+uint64_t
+netdev_get_change_seq(const struct netdev *netdev)
+{
+    return netdev->change_seq;
+}