#include "dpif.h"
#include "dpif-provider.h"
#include "dummy.h"
+#include "dynamic-string.h"
#include "flow.h"
#include "hmap.h"
#include "list.h"
VLOG_DEFINE_THIS_MODULE(dpif_netdev);
/* Configuration parameters. */
-enum { N_QUEUES = 2 }; /* Number of queues for dpif_recv(). */
-enum { MAX_QUEUE_LEN = 100 }; /* Maximum number of packets per queue. */
enum { MAX_PORTS = 256 }; /* Maximum number of ports. */
enum { MAX_FLOWS = 65536 }; /* Maximum number of flows in flow table. */
* headers to be aligned on a 4-byte boundary. */
enum { DP_NETDEV_HEADROOM = 2 + VLAN_HEADER_LEN };
+/* Queues. */
+enum { N_QUEUES = 2 }; /* Number of queues for dpif_recv(). */
+enum { MAX_QUEUE_LEN = 128 }; /* Maximum number of packets per queue. */
+enum { QUEUE_MASK = MAX_QUEUE_LEN - 1 };
+BUILD_ASSERT_DECL(IS_POW2(MAX_QUEUE_LEN));
+
+/* A fixed-size ring buffer of upcalls waiting for dpif_recv().
+ *
+ * 'head' and 'tail' are free-running counters; a slot index is obtained as
+ * (counter & QUEUE_MASK).  The queue is empty when head == tail and full
+ * when head - tail == MAX_QUEUE_LEN (MAX_QUEUE_LEN must be a power of 2,
+ * asserted by BUILD_ASSERT_DECL above). */
+struct dp_netdev_queue {
+ struct dpif_upcall *upcalls[MAX_QUEUE_LEN];
+ unsigned int head, tail;
+};
+
/* Datapath based on the network device interface from netdev.h. */
struct dp_netdev {
const struct dpif_class *class;
bool destroyed;
bool drop_frags; /* Drop all IP fragments, if true. */
- struct list queues[N_QUEUES]; /* Contain ofpbufs queued for dpif_recv(). */
- size_t queue_len[N_QUEUES]; /* Number of packets in each queue. */
+ struct dp_netdev_queue queues[N_QUEUES];
struct hmap flow_table; /* Flow table. */
/* Statistics. */
static int dpif_netdev_open(const struct dpif_class *, const char *name,
bool create, struct dpif **);
static int dp_netdev_output_control(struct dp_netdev *, const struct ofpbuf *,
- int queue_no, int port_no, uint64_t arg);
+ int queue_no, const struct flow *,
+ uint64_t arg);
static int dp_netdev_execute_actions(struct dp_netdev *,
struct ofpbuf *, struct flow *,
const struct nlattr *actions,
dp->open_cnt = 0;
dp->drop_frags = false;
for (i = 0; i < N_QUEUES; i++) {
- list_init(&dp->queues[i]);
+ dp->queues[i].head = dp->queues[i].tail = 0;
}
hmap_init(&dp->flow_table);
list_init(&dp->port_list);
do_del_port(dp, port->port_no);
}
for (i = 0; i < N_QUEUES; i++) {
- ofpbuf_list_delete(&dp->queues[i]);
+ struct dp_netdev_queue *q = &dp->queues[i];
+ unsigned int j;
+
+ /* Drain every queued upcall in [tail, head): each queued upcall owns
+ * its packet buffer, so free both the packet and the container. */
+ for (j = q->tail; j != q->head; j++) {
+ struct dpif_upcall *upcall = q->upcalls[j & QUEUE_MASK];
+
+ ofpbuf_delete(upcall->packet);
+ free(upcall);
+ }
}
hmap_destroy(&dp->flow_table);
free(dp->name);
{
struct dp_netdev *dp = get_dp_netdev(dpif);
memset(stats, 0, sizeof *stats);
- stats->n_flows = hmap_count(&dp->flow_table);
- stats->cur_capacity = hmap_capacity(&dp->flow_table);
- stats->max_capacity = MAX_FLOWS;
stats->n_ports = dp->n_ports;
stats->max_ports = MAX_PORTS;
stats->n_frags = dp->n_frags;
}
static void
-answer_port_query(const struct dp_netdev_port *port, struct odp_port *odp_port)
+answer_port_query(const struct dp_netdev_port *port,
+ struct dpif_port *dpif_port)
{
- memset(odp_port, 0, sizeof *odp_port);
- ovs_strlcpy(odp_port->devname, netdev_get_name(port->netdev),
- sizeof odp_port->devname);
- odp_port->port = port->port_no;
- strcpy(odp_port->type, port->internal ? "internal" : "system");
+ dpif_port->name = xstrdup(netdev_get_name(port->netdev));
+ dpif_port->type = xstrdup(port->internal ? "internal" : "system");
+ dpif_port->port_no = port->port_no;
}
static int
dpif_netdev_port_query_by_number(const struct dpif *dpif, uint16_t port_no,
- struct odp_port *odp_port)
+ struct dpif_port *dpif_port)
{
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_port *port;
error = get_port_by_number(dp, port_no, &port);
if (!error) {
- answer_port_query(port, odp_port);
+ answer_port_query(port, dpif_port);
}
return error;
}
static int
dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
- struct odp_port *odp_port)
+ struct dpif_port *dpif_port)
{
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_port *port;
error = get_port_by_name(dp, devname, &port);
if (!error) {
- answer_port_query(port, odp_port);
+ answer_port_query(port, dpif_port);
}
return error;
}
return 0;
}
+struct dp_netdev_port_state {
+ uint32_t port_no;
+ char *name;
+};
+
static int
-dpif_netdev_port_list(const struct dpif *dpif, struct odp_port *ports, int n)
+dpif_netdev_port_dump_start(const struct dpif *dpif OVS_UNUSED, void **statep)
{
- struct dp_netdev *dp = get_dp_netdev(dpif);
- struct dp_netdev_port *port;
- int i;
+ *statep = xzalloc(sizeof(struct dp_netdev_port_state));
+ return 0;
+}
- i = 0;
- LIST_FOR_EACH (port, node, &dp->port_list) {
- struct odp_port *odp_port = &ports[i];
- if (i >= n) {
- break;
+static int
+dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
+ struct dpif_port *dpif_port)
+{
+ struct dp_netdev_port_state *state = state_;
+ struct dp_netdev *dp = get_dp_netdev(dpif);
+ uint32_t port_no;
+
+ for (port_no = state->port_no; port_no < MAX_PORTS; port_no++) {
+ struct dp_netdev_port *port = dp->ports[port_no];
+ if (port) {
+ free(state->name);
+ state->name = xstrdup(netdev_get_name(port->netdev));
+ dpif_port->name = state->name;
+ dpif_port->type = port->internal ? "internal" : "system";
+ dpif_port->port_no = port->port_no;
+ state->port_no = port_no + 1;
+ return 0;
}
- answer_port_query(port, odp_port);
- i++;
}
- return dp->n_ports;
+ return EOF;
+}
+
+static int
+dpif_netdev_port_dump_done(const struct dpif *dpif OVS_UNUSED, void *state_)
+{
+ struct dp_netdev_port_state *state = state_;
+ free(state->name);
+ free(state);
+ return 0;
}
static int
}
}
+static int
+dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len,
+ struct flow *flow)
+{
+ if (odp_flow_key_to_flow(key, key_len, flow)) {
+ /* This should not happen: it indicates that odp_flow_key_from_flow()
+ * and odp_flow_key_to_flow() disagree on the acceptable form of a
+ * flow. Log the problem as an error, with enough details to enable
+ * debugging. */
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+
+ if (!VLOG_DROP_ERR(&rl)) {
+ struct ds s;
+
+ ds_init(&s);
+ odp_flow_key_format(key, key_len, &s);
+ VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));
+ ds_destroy(&s);
+ }
+
+ return EINVAL;
+ }
+
+ return 0;
+}
+
static int
dpif_netdev_flow_get(const struct dpif *dpif, struct odp_flow flows[], int n)
{
for (i = 0; i < n; i++) {
struct odp_flow *odp_flow = &flows[i];
struct flow key;
+ int error;
+
+ error = dpif_netdev_flow_from_nlattrs(odp_flow->key, odp_flow->key_len,
+ &key);
+ if (error) {
+ return error;
+ }
- odp_flow_key_to_flow(&odp_flow->key, &key);
answer_flow_query(dp_netdev_lookup_flow(dp, &key),
odp_flow->flags, odp_flow);
}
}
static int
-add_flow(struct dpif *dpif, struct odp_flow *odp_flow)
+add_flow(struct dpif *dpif, const struct flow *key, struct odp_flow *odp_flow)
{
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_flow *flow;
int error;
flow = xzalloc(sizeof *flow);
- odp_flow_key_to_flow(&odp_flow->key, &flow->key);
+ flow->key = *key;
error = set_flow_actions(flow, odp_flow);
if (error) {
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_flow *flow;
struct flow key;
+ int error;
+
+ error = dpif_netdev_flow_from_nlattrs(put->flow.key, put->flow.key_len,
+ &key);
+ if (error) {
+ return error;
+ }
- odp_flow_key_to_flow(&put->flow.key, &key);
flow = dp_netdev_lookup_flow(dp, &key);
if (!flow) {
if (put->flags & ODPPF_CREATE) {
if (hmap_count(&dp->flow_table) < MAX_FLOWS) {
- return add_flow(dpif, &put->flow);
+ return add_flow(dpif, &key, &put->flow);
} else {
return EFBIG;
}
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_flow *flow;
struct flow key;
+ int error;
+
+ error = dpif_netdev_flow_from_nlattrs(odp_flow->key, odp_flow->key_len,
+ &key);
+ if (error) {
+ return error;
+ }
- odp_flow_key_to_flow(&odp_flow->key, &key);
flow = dp_netdev_lookup_flow(dp, &key);
if (flow) {
answer_flow_query(flow, 0, odp_flow);
}
}
+struct dp_netdev_flow_state {
+ uint32_t bucket;
+ uint32_t offset;
+};
+
+static int
+dpif_netdev_flow_dump_start(const struct dpif *dpif OVS_UNUSED, void **statep)
+{
+ *statep = xzalloc(sizeof(struct dp_netdev_flow_state));
+ return 0;
+}
+
static int
-dpif_netdev_flow_list(const struct dpif *dpif, struct odp_flow flows[], int n)
+dpif_netdev_flow_dump_next(const struct dpif *dpif, void *state_,
+ struct odp_flow *odp_flow)
{
+ struct dp_netdev_flow_state *state = state_;
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_flow *flow;
- int i;
+ struct hmap_node *node;
+ struct ofpbuf key;
- i = 0;
- HMAP_FOR_EACH (flow, node, &dp->flow_table) {
- if (i >= n) {
- break;
- }
-
- odp_flow_key_from_flow(&flows[i].key, &flow->key);
- answer_flow_query(flow, 0, &flows[i]);
- i++;
+ node = hmap_at_position(&dp->flow_table, &state->bucket, &state->offset);
+ if (!node) {
+ return EOF;
}
- return hmap_count(&dp->flow_table);
+
+ flow = CONTAINER_OF(node, struct dp_netdev_flow, node);
+
+ ofpbuf_use_stack(&key, odp_flow->key, odp_flow->key_len);
+ odp_flow_key_from_flow(&key, &flow->key);
+ odp_flow->key_len = key.size;
+ ofpbuf_uninit(&key);
+
+ answer_flow_query(flow, 0, odp_flow);
+
+ return 0;
+}
+
+static int
+dpif_netdev_flow_dump_done(const struct dpif *dpif OVS_UNUSED, void *state)
+{
+ free(state);
+ return 0;
}
static int
}
}
-static int
+static struct dp_netdev_queue *
find_nonempty_queue(struct dpif *dpif)
{
struct dpif_netdev *dpif_netdev = dpif_netdev_cast(dpif);
int i;
for (i = 0; i < N_QUEUES; i++) {
- struct list *queue = &dp->queues[i];
- if (!list_is_empty(queue) && mask & (1u << i)) {
- return i;
+ struct dp_netdev_queue *q = &dp->queues[i];
+ if (q->head != q->tail && mask & (1u << i)) {
+ return q;
}
}
- return -1;
+ return NULL;
}
static int
-dpif_netdev_recv(struct dpif *dpif, struct ofpbuf **bufp)
+dpif_netdev_recv(struct dpif *dpif, struct dpif_upcall *upcall)
{
- int queue_idx = find_nonempty_queue(dpif);
- if (queue_idx >= 0) {
- struct dp_netdev *dp = get_dp_netdev(dpif);
-
- *bufp = ofpbuf_from_list(list_pop_front(&dp->queues[queue_idx]));
- dp->queue_len[queue_idx]--;
+ struct dp_netdev_queue *q = find_nonempty_queue(dpif);
+ if (q) {
+ /* Dequeue from the tail slot and advance 'tail'.  The struct copy
+ * transfers ownership of u->packet to the caller, so only the heap
+ * container is freed here. */
+ struct dpif_upcall *u = q->upcalls[q->tail++ & QUEUE_MASK];
+ *upcall = *u;
+ free(u);
return 0;
} else {
static void
dpif_netdev_recv_wait(struct dpif *dpif)
{
- if (find_nonempty_queue(dpif) >= 0) {
+ if (find_nonempty_queue(dpif)) {
poll_immediate_wake();
} else {
/* No messages ready to be received, and dp_wait() will ensure that we
dp->n_hit++;
} else {
dp->n_missed++;
- dp_netdev_output_control(dp, packet, _ODPL_MISS_NR, port->port_no, 0);
+ dp_netdev_output_control(dp, packet, _ODPL_MISS_NR, &key, 0);
}
}
struct shash_node *node;
struct ofpbuf packet;
- ofpbuf_init(&packet, DP_NETDEV_HEADROOM + max_mtu);
+ ofpbuf_init(&packet, DP_NETDEV_HEADROOM + VLAN_ETH_HEADER_LEN + max_mtu);
SHASH_FOR_EACH (node, &dp_netdevs) {
struct dp_netdev *dp = node->data;
struct dp_netdev_port *port;
static int
dp_netdev_output_control(struct dp_netdev *dp, const struct ofpbuf *packet,
- int queue_no, int port_no, uint64_t arg)
+ int queue_no, const struct flow *flow, uint64_t arg)
{
- struct odp_msg *header;
- struct ofpbuf *msg;
- size_t msg_size;
+ struct dp_netdev_queue *q = &dp->queues[queue_no];
+ struct dpif_upcall *upcall;
+ struct ofpbuf *buf;
+ size_t key_len;
- if (dp->queue_len[queue_no] >= MAX_QUEUE_LEN) {
+ if (q->head - q->tail >= MAX_QUEUE_LEN) {
dp->n_lost++;
return ENOBUFS;
}
- msg_size = sizeof *header + packet->size;
- msg = ofpbuf_new_with_headroom(msg_size, DPIF_RECV_MSG_PADDING);
- header = ofpbuf_put_uninit(msg, sizeof *header);
- header->type = queue_no;
- header->length = msg_size;
- header->port = port_no;
- header->arg = arg;
- ofpbuf_put(msg, packet->data, packet->size);
- list_push_back(&dp->queues[queue_no], &msg->list_node);
- dp->queue_len[queue_no]++;
+ buf = ofpbuf_new(ODPUTIL_FLOW_KEY_BYTES + 2 + packet->size);
+ odp_flow_key_from_flow(buf, flow);
+ key_len = buf->size;
+ ofpbuf_pull(buf, key_len);
+ ofpbuf_reserve(buf, 2);
+ ofpbuf_put(buf, packet->data, packet->size);
+
+ upcall = xzalloc(sizeof *upcall);
+ upcall->type = queue_no;
+ upcall->packet = buf;
+ upcall->key = buf->base;
+ upcall->key_len = key_len;
+ upcall->userdata = arg;
+
+ /* Enqueue with post-increment: the consumer (dpif_netdev_recv) reads
+ * q->upcalls[q->tail++ & QUEUE_MASK], so the producer must store into
+ * the slot 'head' currently points at before advancing it.
+ * Pre-incrementing ('++q->head') would leave the first slot unwritten
+ * and hand the consumer an uninitialized pointer, with head and tail
+ * permanently off by one thereafter. */
+ q->upcalls[q->head++ & QUEUE_MASK] = upcall;
return 0;
}
case ODPAT_CONTROLLER:
dp_netdev_output_control(dp, packet, _ODPL_ACTION_NR,
- key->in_port, nl_attr_get_u64(a));
+ key, nl_attr_get_u64(a));
break;
case ODPAT_SET_DL_TCI:
dpif_netdev_port_del,
dpif_netdev_port_query_by_number,
dpif_netdev_port_query_by_name,
- dpif_netdev_port_list,
+ dpif_netdev_port_dump_start,
+ dpif_netdev_port_dump_next,
+ dpif_netdev_port_dump_done,
dpif_netdev_port_poll,
dpif_netdev_port_poll_wait,
dpif_netdev_flow_get,
dpif_netdev_flow_put,
dpif_netdev_flow_del,
dpif_netdev_flow_flush,
- dpif_netdev_flow_list,
+ dpif_netdev_flow_dump_start,
+ dpif_netdev_flow_dump_next,
+ dpif_netdev_flow_dump_done,
dpif_netdev_execute,
dpif_netdev_recv_get_mask,
dpif_netdev_recv_set_mask,