/* Maximum depth of flow table recursion (due to resubmit actions) in a
* flow translation. */
#define MAX_RESUBMIT_RECURSION 64
+#define MAX_INTERNAL_RESUBMITS 1 /* Max resubmits allowed using rules in
+                                     internal table. */
/* Maximum number of resubmit actions in a flow translation, whether they are
* recursive or not. */
bool has_in_band; /* Bridge has in band control? */
bool forward_bpdu; /* Bridge forwards STP BPDUs? */
+ /* True if the datapath supports recirculation. */
+ bool enable_recirc;
+
/* True if the datapath supports variable-length
* OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions.
* False if the datapath supports only 8-byte (or shorter) userdata. */
struct xlate_ctx *);
static void xlate_actions__(struct xlate_in *, struct xlate_out *)
OVS_REQ_RDLOCK(xlate_rwlock);
- static void xlate_normal(struct xlate_ctx *);
- static void xlate_report(struct xlate_ctx *, const char *);
+static void xlate_normal(struct xlate_ctx *);
+static void xlate_report(struct xlate_ctx *, const char *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
uint8_t table_id, bool may_packet_in,
bool honor_table_miss);
const struct dpif_ipfix *ipfix,
const struct netflow *netflow, enum ofp_config_flags frag,
bool forward_bpdu, bool has_in_band,
+ bool enable_recirc,
bool variable_length_userdata,
size_t max_mpls_depth)
{
xbridge->frag = frag;
xbridge->miss_rule = miss_rule;
xbridge->no_packet_in_rule = no_packet_in_rule;
+ xbridge->enable_recirc = enable_recirc;
xbridge->variable_length_userdata = variable_length_userdata;
xbridge->max_mpls_depth = max_mpls_depth;
}
{
struct stp_port *sp = xport_get_stp_port(xport);
struct ofpbuf payload = *packet;
- struct eth_header *eth = payload.data;
+ struct eth_header *eth = ofpbuf_data(&payload);
/* Sink packets on ports that have STP disabled when the bridge has
* STP enabled. */
}
/* Trim off padding on payload. */
- if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
- payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
+ if (ofpbuf_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
+ ofpbuf_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
}
if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
- stp_received_bpdu(sp, payload.data, payload.size);
+ stp_received_bpdu(sp, ofpbuf_data(&payload), ofpbuf_size(&payload));
}
}
/* Partially configured bundle with no slaves. Drop the packet. */
return;
} else if (!out_xbundle->bond) {
+ ctx->xout->use_recirc = false;
xport = CONTAINER_OF(list_front(&out_xbundle->xports), struct xport,
bundle_node);
} else {
struct ofport_dpif *ofport;
+ struct xlate_recirc *xr = &ctx->xout->recirc;
+ struct flow_wildcards *wc = &ctx->xout->wc;
+
+ if (ctx->xbridge->enable_recirc) {
+ ctx->xout->use_recirc = bond_may_recirc(
+ out_xbundle->bond, &xr->recirc_id, &xr->hash_bias);
+
+ if (ctx->xout->use_recirc) {
+ /* Only TCP mode uses recirculation. */
+ xr->hash_alg = OVS_RECIRC_HASH_ALG_L4;
+ bond_update_post_recirc_rules(out_xbundle->bond, false);
+
+ /* Recirculation does not require unmasking hash fields. */
+ wc = NULL;
+ }
+ }
- ofport = bond_choose_output_slave(out_xbundle->bond, &ctx->xin->flow,
- &ctx->xout->wc, vid);
+ ofport = bond_choose_output_slave(out_xbundle->bond,
+ &ctx->xin->flow, wc, vid);
xport = xport_lookup(ofport);
if (!xport) {
/* If 'struct flow' gets additional metadata, we'll need to zero it out
* before traversing a patch port. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 24);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 25);
if (!xport) {
xlate_report(ctx, "Nonexistent output port");
/* Forwarding is disabled by STP. Let OFPP_NORMAL and the
* learning action look at the packet, then drop it. */
struct flow old_base_flow = ctx->base_flow;
- size_t old_size = ctx->xout->odp_actions.size;
+ size_t old_size = ofpbuf_size(&ctx->xout->odp_actions);
mirror_mask_t old_mirrors = ctx->xout->mirrors;
xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
ctx->xout->mirrors = old_mirrors;
ctx->base_flow = old_base_flow;
- ctx->xout->odp_actions.size = old_size;
+ ofpbuf_set_size(&ctx->xout->odp_actions, old_size);
}
}
flow_nw_tos = flow->nw_tos;
if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
- wc->masks.nw_tos |= IP_ECN_MASK;
+ wc->masks.nw_tos |= IP_DSCP_MASK;
flow->nw_tos &= ~IP_DSCP_MASK;
flow->nw_tos |= dscp;
}
ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
&ctx->xout->odp_actions,
&ctx->xout->wc);
- nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
- out_port);
+
+ if (ctx->xout->use_recirc) {
+ struct ovs_action_recirc *act_recirc;
+ struct xlate_recirc *xr = &ctx->xout->recirc;
+
+ act_recirc = nl_msg_put_unspec_uninit(&ctx->xout->odp_actions,
+ OVS_ACTION_ATTR_RECIRC, sizeof *act_recirc);
+ act_recirc->recirc_id = xr->recirc_id;
+ act_recirc->hash_alg = xr->hash_alg;
+ act_recirc->hash_bias = xr->hash_bias;
+ } else {
+ nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
+ out_port);
+ }
ctx->sflow_odp_port = odp_port;
ctx->sflow_n_outputs++;
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
- if (ctx->recurse >= MAX_RESUBMIT_RECURSION) {
+ if (ctx->recurse >= MAX_RESUBMIT_RECURSION + MAX_INTERNAL_RESUBMITS) {
VLOG_ERR_RL(&rl, "resubmit actions recursed over %d times",
MAX_RESUBMIT_RECURSION);
- } else if (ctx->resubmits >= MAX_RESUBMITS) {
+ } else if (ctx->resubmits >= MAX_RESUBMITS + MAX_INTERNAL_RESUBMITS) {
VLOG_ERR_RL(&rl, "over %d resubmit actions", MAX_RESUBMITS);
- } else if (ctx->xout->odp_actions.size > UINT16_MAX) {
+ } else if (ofpbuf_size(&ctx->xout->odp_actions) > UINT16_MAX) {
VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of actions");
- } else if (ctx->stack.size >= 65536) {
+ } else if (ofpbuf_size(&ctx->stack) >= 65536) {
VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of stack");
} else {
return true;
case RULE_DPIF_LOOKUP_VERDICT_DROP:
config = OFPUTIL_PC_NO_PACKET_IN;
break;
+ case RULE_DPIF_LOOKUP_VERDICT_DEFAULT:
+ if (!ofproto_dpif_wants_packet_in_on_miss(ctx->xbridge->ofproto)) {
+ config = OFPUTIL_PC_NO_PACKET_IN;
+ }
+ break;
default:
OVS_NOT_REACHED();
}
ofpacts_execute_action_set(&action_list, &action_set);
ctx->recurse++;
- do_xlate_actions(action_list.data, action_list.size, ctx);
+ do_xlate_actions(ofpbuf_data(&action_list), ofpbuf_size(&action_list), ctx);
ctx->recurse--;
ofpbuf_uninit(&action_set);
const struct ofputil_bucket *bucket;
uint32_t basis;
- basis = hash_bytes(ctx->xin->flow.dl_dst, sizeof ctx->xin->flow.dl_dst, 0);
+ basis = hash_mac(ctx->xin->flow.dl_dst, 0, 0);
bucket = group_best_live_bucket(ctx, group, basis);
if (bucket) {
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
{
ofp_port_t in_port;
uint8_t table_id;
+ bool may_packet_in = false;
+ bool honor_table_miss = false;
+
+ if (ctx->rule && rule_dpif_is_internal(ctx->rule)) {
+ /* Still allow missed packets to be sent to the controller
+ * if resubmitting from an internal table. */
+ may_packet_in = true;
+ honor_table_miss = true;
+ }
in_port = resubmit->in_port;
if (in_port == OFPP_IN_PORT) {
table_id = ctx->table_id;
}
- xlate_table_action(ctx, in_port, table_id, false, false);
+ xlate_table_action(ctx, in_port, table_id, may_packet_in,
+ honor_table_miss);
}
static void
&ctx->xout->odp_actions,
&ctx->xout->wc);
- odp_execute_actions(NULL, packet, false, &md, ctx->xout->odp_actions.data,
- ctx->xout->odp_actions.size, NULL);
+ odp_execute_actions(NULL, packet, false, &md,
+ ofpbuf_data(&ctx->xout->odp_actions),
+ ofpbuf_size(&ctx->xout->odp_actions), NULL);
pin = xmalloc(sizeof *pin);
- pin->up.packet_len = packet->size;
+ pin->up.packet_len = ofpbuf_size(packet);
pin->up.packet = ofpbuf_steal_data(packet);
pin->up.reason = reason;
pin->up.table_id = ctx->table_id;
ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);
ofpacts_execute_action_set(&action_list, &ctx->action_set);
- do_xlate_actions(action_list.data, action_list.size, ctx);
+ do_xlate_actions(ofpbuf_data(&action_list), ofpbuf_size(&action_list), ctx);
ofpbuf_uninit(&action_list);
}
case OFPACT_SET_FIELD:
set_field = ofpact_get_SET_FIELD(a);
mf = set_field->field;
- mf_mask_field_and_prereqs(mf, &wc->masks);
/* Set field action only ever overwrites packet's outermost
* applicable header fields. Do nothing if no header exists. */
- if ((mf->id != MFF_VLAN_VID || flow->vlan_tci & htons(VLAN_CFI))
- && ((mf->id != MFF_MPLS_LABEL && mf->id != MFF_MPLS_TC)
- || eth_type_mpls(flow->dl_type))) {
- mf_set_flow_value(mf, &set_field->value, flow);
+ if (mf->id == MFF_VLAN_VID) {
+ wc->masks.vlan_tci |= htons(VLAN_CFI);
+ if (!(flow->vlan_tci & htons(VLAN_CFI))) {
+ break;
+ }
+ } else if ((mf->id == MFF_MPLS_LABEL || mf->id == MFF_MPLS_TC)
+ /* 'dl_type' is already unwildcarded. */
+ && !eth_type_mpls(flow->dl_type)) {
+ break;
}
+
+ mf_mask_field_and_prereqs(mf, &wc->masks);
+ mf_set_flow_value(mf, &set_field->value, flow);
break;
case OFPACT_STACK_PUSH:
ofpbuf_use_stub(&dst->odp_actions, dst->odp_actions_stub,
sizeof dst->odp_actions_stub);
- ofpbuf_put(&dst->odp_actions, src->odp_actions.data,
- src->odp_actions.size);
+ ofpbuf_put(&dst->odp_actions, ofpbuf_data(&src->odp_actions),
+ ofpbuf_size(&src->odp_actions));
}
\f
static struct skb_priority_to_dscp *
const struct nlattr *a;
unsigned int left;
- NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->xout->odp_actions.data,
- ctx->xout->odp_actions.size) {
+ NL_ATTR_FOR_EACH_UNSAFE (a, left, ofpbuf_data(&ctx->xout->odp_actions),
+ ofpbuf_size(&ctx->xout->odp_actions)) {
if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
&& nl_attr_get_odp_port(a) == local_odp_port) {
return true;
ctx.rule = rule;
}
xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);
+ xout->use_recirc = false;
if (xin->ofpacts) {
ofpacts = xin->ofpacts;
add_sflow_action(&ctx);
add_ipfix_action(&ctx);
- sample_actions_len = ctx.xout->odp_actions.size;
+ sample_actions_len = ofpbuf_size(&ctx.xout->odp_actions);
if (tnl_may_send && (!in_port || may_receive(in_port, &ctx))) {
do_xlate_actions(ofpacts, ofpacts_len, &ctx);
/* We've let OFPP_NORMAL and the learning action look at the
* packet, so drop it now if forwarding is disabled. */
if (in_port && !xport_stp_forward_state(in_port)) {
- ctx.xout->odp_actions.size = sample_actions_len;
+ ofpbuf_set_size(&ctx.xout->odp_actions, sample_actions_len);
}
}
- if (ctx.action_set.size) {
+ if (ofpbuf_size(&ctx.action_set)) {
xlate_action_set(&ctx);
}
}
}
- if (nl_attr_oversized(ctx.xout->odp_actions.size)) {
+ if (nl_attr_oversized(ofpbuf_size(&ctx.xout->odp_actions))) {
/* These datapath actions are too big for a Netlink attribute, so we
* can't hand them to the kernel directly. dpif_execute() can execute
* them one by one with help, so just mark the result as SLOW_ACTION to