#define NETDEV_RULE_PRIORITY 0x8000
#define NR_THREADS 1
+/* Use a per-thread recirc_depth to prevent recirculation loops. */
+#define MAX_RECIRC_DEPTH 5
+DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0)
/* Configuration parameters. */
enum { MAX_FLOWS = 65536 }; /* Maximum number of flows in flow table. */
bool create, struct dpif **);
static int dp_netdev_output_userspace(struct dp_netdev *dp, struct ofpbuf *,
int queue_no, int type,
- const struct flow *,
+ const struct miniflow *,
const struct nlattr *userdata);
static void dp_netdev_execute_actions(struct dp_netdev *dp,
- const struct flow *, struct ofpbuf *, bool may_steal,
+ const struct miniflow *,
+ struct ofpbuf *, bool may_steal,
struct pkt_metadata *,
const struct nlattr *actions,
size_t actions_len);
}
static struct dp_netdev_flow *
-dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct flow *flow)
+dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct miniflow *key)
OVS_EXCLUDED(dp->cls.rwlock)
{
struct dp_netdev_flow *netdev_flow;
+ struct cls_rule *rule;
fat_rwlock_rdlock(&dp->cls.rwlock);
- netdev_flow = dp_netdev_flow_cast(classifier_lookup(&dp->cls, flow, NULL));
+ rule = classifier_lookup_miniflow_first(&dp->cls, key);
+ netdev_flow = dp_netdev_flow_cast(rule);
fat_rwlock_unlock(&dp->cls.rwlock);
return netdev_flow;
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_flow *netdev_flow;
struct flow flow;
+ struct miniflow miniflow;
struct flow_wildcards wc;
int error;
if (error) {
return error;
}
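+ /* Compress the expanded flow into a miniflow for the classifier lookup. */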
+ miniflow_init(&miniflow, &flow);
ovs_mutex_lock(&dp->flow_mutex);
- netdev_flow = dp_netdev_lookup_flow(dp, &flow);
+ netdev_flow = dp_netdev_lookup_flow(dp, &miniflow);
if (!netdev_flow) {
if (put->flags & DPIF_FP_CREATE) {
if (hmap_count(&dp->flow_table) < MAX_FLOWS) {
struct dp_netdev_flow_state *state = state_;
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_flow *netdev_flow;
+ struct flow_wildcards wc;
int error;
ovs_mutex_lock(&iter->mutex);
return error;
}
+ minimask_expand(&netdev_flow->cr.match.mask, &wc);
+
if (key) {
struct ofpbuf buf;
ofpbuf_use_stack(&buf, &state->keybuf, sizeof state->keybuf);
- odp_flow_key_from_flow(&buf, &netdev_flow->flow,
+ odp_flow_key_from_flow(&buf, &netdev_flow->flow, &wc.masks,
netdev_flow->flow.in_port.odp_port);
*key = ofpbuf_data(&buf);
if (key && mask) {
struct ofpbuf buf;
- struct flow_wildcards wc;
ofpbuf_use_stack(&buf, &state->maskbuf, sizeof state->maskbuf);
- minimask_expand(&netdev_flow->cr.match.mask, &wc);
odp_flow_key_from_mask(&buf, &wc.masks, &netdev_flow->flow,
odp_to_u32(wc.masks.in_port.odp_port),
SIZE_MAX);
{
struct dp_netdev *dp = get_dp_netdev(dpif);
struct pkt_metadata *md = &execute->md;
- struct flow key;
+ struct miniflow key;
+ uint32_t buf[FLOW_U32S];
if (ofpbuf_size(execute->packet) < ETH_HEADER_LEN ||
ofpbuf_size(execute->packet) > UINT16_MAX) {
}
/* Extract flow key. */
- flow_extract(execute->packet, md, &key);
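+ /* The miniflow keeps its packed data in the on-stack 'buf', so extracting
+ * the key does not allocate. */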
+ miniflow_initialize(&key, buf);
+ miniflow_extract(execute->packet, md, &key);
ovs_rwlock_rdlock(&dp->port_rwlock);
dp_netdev_execute_actions(dp, &key, execute->packet, false, md,
static void
dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow,
const struct ofpbuf *packet,
- const struct flow *key)
+ const struct miniflow *key)
{
- uint16_t tcp_flags = ntohs(key->tcp_flags);
+ uint16_t tcp_flags = miniflow_get_tcp_flags(key);
long long int now = time_msec();
struct dp_netdev_flow_stats *bucket;
}
static void
-dp_netdev_port_input(struct dp_netdev *dp, struct ofpbuf *packet,
- struct pkt_metadata *md)
+dp_netdev_input(struct dp_netdev *dp, struct ofpbuf *packet,
+ struct pkt_metadata *md)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_flow *netdev_flow;
- struct flow key;
+ struct miniflow key;
+ uint32_t buf[FLOW_U32S];
if (ofpbuf_size(packet) < ETH_HEADER_LEN) {
ofpbuf_delete(packet);
return;
}
- flow_extract(packet, md, &key);
+ miniflow_initialize(&key, buf);
+ miniflow_extract(packet, md, &key);
+
netdev_flow = dp_netdev_lookup_flow(dp, &key);
if (netdev_flow) {
struct dp_netdev_actions *actions;
} else if (dp->handler_queues) {
dp_netdev_count_packet(dp, DP_STAT_MISS);
dp_netdev_output_userspace(dp, packet,
- flow_hash_5tuple(&key, 0) % dp->n_handlers,
+ miniflow_hash_5tuple(&key, 0)
+ % dp->n_handlers,
DPIF_UC_MISS, &key, NULL);
ofpbuf_delete(packet);
}
}
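+/* Entry point for packets received on a port: reset the per-thread
+ * recirculation depth before handing the packet to dp_netdev_input(). */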
+static void
+dp_netdev_port_input(struct dp_netdev *dp, struct ofpbuf *packet,
+ struct pkt_metadata *md)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
+{
+ uint32_t *recirc_depth = recirc_depth_get();
+
+ *recirc_depth = 0;
+ dp_netdev_input(dp, packet, md);
+}
+
static int
dp_netdev_output_userspace(struct dp_netdev *dp, struct ofpbuf *packet,
- int queue_no, int type, const struct flow *flow,
+ int queue_no, int type, const struct miniflow *key,
const struct nlattr *userdata)
{
struct dp_netdev_queue *q;
struct dpif_upcall *upcall = &u->upcall;
struct ofpbuf *buf = &u->buf;
size_t buf_size;
+ struct flow flow;
upcall->type = type;
ofpbuf_init(buf, buf_size);
/* Put ODP flow. */
- odp_flow_key_from_flow(buf, flow, flow->in_port.odp_port);
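+ /* Expand the miniflow back into a full flow to build the ODP key. */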
+ miniflow_expand(key, &flow);
+ odp_flow_key_from_flow(buf, &flow, NULL, flow.in_port.odp_port);
upcall->key = ofpbuf_data(buf);
upcall->key_len = ofpbuf_size(buf);
struct dp_netdev_execute_aux {
struct dp_netdev *dp;
- const struct flow *key;
+ const struct miniflow *key;
};
static void
struct dp_netdev_execute_aux *aux = aux_;
int type = nl_attr_type(a);
struct dp_netdev_port *p;
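+ /* Per-thread recirculation depth, bounded by MAX_RECIRC_DEPTH below. */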
+ uint32_t *depth = recirc_depth_get();
switch ((enum ovs_action_attr)type) {
case OVS_ACTION_ATTR_OUTPUT:
userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
dp_netdev_output_userspace(aux->dp, packet,
- flow_hash_5tuple(aux->key, 0)
+ miniflow_hash_5tuple(aux->key, 0)
% aux->dp->n_handlers,
DPIF_UC_ACTION, aux->key,
userdata);
break;
}
- case OVS_ACTION_ATTR_RECIRC: {
- const struct ovs_action_recirc *act;
+ case OVS_ACTION_ATTR_HASH: {
+ const struct ovs_action_hash *hash_act;
+ uint32_t hash;
- act = nl_attr_get(a);
- md->recirc_id = act->recirc_id;
- md->dp_hash = 0;
-
- if (act->hash_alg == OVS_RECIRC_HASH_ALG_L4) {
- struct flow flow;
+ hash_act = nl_attr_get(a);
+ if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
+ /* Hash need not be symmetric, nor does it need to include
+ * L2 fields. */
+ hash = miniflow_hash_5tuple(aux->key, hash_act->hash_basis);
+ if (!hash) {
+ hash = 1; /* 0 is not valid */
+ }
- flow_extract(packet, md, &flow);
- md->dp_hash = flow_hash_symmetric_l4(&flow, act->hash_bias);
+ } else {
+ VLOG_WARN("Unknown hash algorithm specified for the hash action.");
+ hash = 2;
}
- dp_netdev_port_input(aux->dp, packet, md);
+ md->dp_hash = hash;
break;
}
+ case OVS_ACTION_ATTR_RECIRC:
+ if (*depth < MAX_RECIRC_DEPTH) {
+ struct pkt_metadata recirc_md = *md;
+ struct ofpbuf *recirc_packet;
+
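+ /* Work on a copy when the packet cannot be stolen, so the caller's
+ * buffer is left intact. */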
+ recirc_packet = may_steal ? packet : ofpbuf_clone(packet);
+ recirc_md.recirc_id = nl_attr_get_u32(a);
+
+ (*depth)++;
+ dp_netdev_input(aux->dp, recirc_packet, &recirc_md);
+ (*depth)--;
+
+ break;
+ } else {
+ VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
+ }
+ break;
+
case OVS_ACTION_ATTR_PUSH_VLAN:
case OVS_ACTION_ATTR_POP_VLAN:
case OVS_ACTION_ATTR_PUSH_MPLS:
case __OVS_ACTION_ATTR_MAX:
OVS_NOT_REACHED();
}
-
}
static void
-dp_netdev_execute_actions(struct dp_netdev *dp, const struct flow *key,
+dp_netdev_execute_actions(struct dp_netdev *dp, const struct miniflow *key,
struct ofpbuf *packet, bool may_steal,
struct pkt_metadata *md,
const struct nlattr *actions, size_t actions_len)