* flow translation. */
#define MAX_RESUBMIT_RECURSION 64
+/* Maximum number of resubmit actions in a flow translation, whether they are
+ * recursive or not. */
+#define MAX_RESUBMITS (MAX_RESUBMIT_RECURSION * MAX_RESUBMIT_RECURSION)
+
struct ovs_rwlock xlate_rwlock = OVS_RWLOCK_INITIALIZER;
struct xbridge {
* prior to an mpls_push so that it may be
* used for a subsequent mpls_pop. */
- int recurse; /* Recursion level, via xlate_table_action. */
+ /* Resubmit statistics, via xlate_table_action(). */
+ int recurse; /* Current resubmit nesting depth. */
+ int resubmits; /* Total number of resubmits. */
+
uint32_t orig_skb_priority; /* Priority when packet arrived. */
uint8_t table_id; /* OpenFlow table ID where flow was found. */
uint32_t sflow_n_outputs; /* Number of output ports. */
static bool may_receive(const struct xport *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
struct xlate_ctx *);
+static void xlate_actions__(struct xlate_in *, struct xlate_out *)
+ OVS_REQ_RDLOCK(xlate_rwlock);
static void xlate_normal(struct xlate_ctx *);
static void xlate_report(struct xlate_ctx *, const char *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
special = process_special(ctx, &ctx->xin->flow, peer,
ctx->xin->packet);
if (special) {
- ctx->xout->slow = special;
+ ctx->xout->slow |= special;
} else if (may_receive(peer, ctx)) {
if (xport_stp_forward_state(peer)) {
xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
}
+ ctx->resubmits++;
ctx->recurse++;
ctx->rule = rule;
actions = rule_dpif_get_actions(rule);
xlate_table_action(struct xlate_ctx *ctx,
ofp_port_t in_port, uint8_t table_id, bool may_packet_in)
{
- if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+ if (ctx->recurse >= MAX_RESUBMIT_RECURSION) {
+ VLOG_ERR_RL(&rl, "resubmit actions recursed over %d times",
+ MAX_RESUBMIT_RECURSION);
+ } else if (ctx->resubmits >= MAX_RESUBMITS) {
+ VLOG_ERR_RL(&rl, "over %d resubmit actions", MAX_RESUBMITS);
+ } else if (ctx->xout->odp_actions.size > UINT16_MAX) {
+ VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of actions");
+ } else if (ctx->stack.size >= 65536) {
+ VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of stack");
+ } else {
struct rule_dpif *rule;
ofp_port_t old_in_port = ctx->xin->flow.in_port.ofp_port;
uint8_t old_table_id = ctx->table_id;
}
ctx->table_id = old_table_id;
- } else {
- static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);
-
- VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
- MAX_RESUBMIT_RECURSION);
+ return;
}
+
+ ctx->exit = true;
}
static void
struct ofpbuf *packet;
struct flow key;
- ovs_assert(!ctx->xout->slow || ctx->xout->slow == SLOW_CONTROLLER);
- ctx->xout->slow = SLOW_CONTROLLER;
+ ctx->xout->slow |= SLOW_CONTROLLER;
if (!ctx->xin->packet) {
return;
}
ofpbuf_put(&dst->odp_actions, src->odp_actions.data,
src->odp_actions.size);
}
+
+/* Returns a reference to the sflow handle associated with 'ofproto', or NULL
+ * if there is none.  The caller is responsible for decrementing the result's
+ * ref count with dpif_sflow_unref(). */
+struct dpif_sflow *
+xlate_get_sflow(const struct ofproto_dpif *ofproto)
+{
+    struct dpif_sflow *sflow = NULL;
+    struct xbridge *xbridge;
+
+    /* Take the rdlock so the xbridge cannot be destroyed while we copy the
+     * reference out of it. */
+    ovs_rwlock_rdlock(&xlate_rwlock);
+    xbridge = xbridge_lookup(ofproto);
+    if (xbridge) {
+        sflow = dpif_sflow_ref(xbridge->sflow);
+    }
+    ovs_rwlock_unlock(&xlate_rwlock);
+
+    return sflow;
+}
+
+/* Returns a reference to the ipfix handle associated with 'ofproto', or NULL
+ * if there is none.  The caller is responsible for decrementing the result's
+ * ref count with dpif_ipfix_unref(). */
+struct dpif_ipfix *
+xlate_get_ipfix(const struct ofproto_dpif *ofproto)
+{
+    struct xbridge *xbridge;
+    struct dpif_ipfix *ipfix;
+
+    /* Hold the rdlock while peeking at the xbridge so it cannot go away
+     * underneath us. */
+    ovs_rwlock_rdlock(&xlate_rwlock);
+    xbridge = xbridge_lookup(ofproto);
+    ipfix = xbridge ? dpif_ipfix_ref(xbridge->ipfix) : NULL;
+    ovs_rwlock_unlock(&xlate_rwlock);
+
+    return ipfix;
+}
\f
static struct skb_priority_to_dscp *
get_skb_priority(const struct xport *xport, uint32_t skb_priority)
return false;
}
+/* Thread-safe wrapper for xlate_actions__(): takes 'xlate_rwlock' for
+ * reading around the translation.  Callers that already hold the rdlock
+ * must call xlate_actions__() directly instead. */
+void
+xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
+{
+    ovs_rwlock_rdlock(&xlate_rwlock);
+    xlate_actions__(xin, xout);
+    ovs_rwlock_unlock(&xlate_rwlock);
+}
+
/* Translates the 'ofpacts_len' bytes of "struct ofpacts" starting at 'ofpacts'
* into datapath actions in 'odp_actions', using 'ctx'.
*
* The caller must take responsibility for eventually freeing 'xout', with
* xlate_out_uninit(). */
-void
-xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
+static void
+xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
+ OVS_REQ_RDLOCK(xlate_rwlock)
{
struct flow_wildcards *wc = &xout->wc;
struct flow *flow = &xin->flow;
+ struct rule_dpif *rule = NULL;
struct rule_actions *actions = NULL;
enum slow_path_reason special;
COVERAGE_INC(xlate_actions);
- ovs_rwlock_rdlock(&xlate_rwlock);
-
/* Flow initialization rules:
* - 'base_flow' must match the kernel's view of the packet at the
* time that action processing starts. 'flow' represents any
}
ctx.recurse = 0;
+ ctx.resubmits = 0;
ctx.orig_skb_priority = flow->skb_priority;
ctx.table_id = 0;
ctx.exit = false;
ctx.mpls_depth_delta = 0;
+ if (!xin->ofpacts && !ctx.rule) {
+ rule_dpif_lookup(ctx.xbridge->ofproto, flow, wc, &rule);
+ if (ctx.xin->resubmit_stats) {
+ rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
+ }
+ ctx.rule = rule;
+ }
+ xout->fail_open = ctx.rule && rule_dpif_fail_open(ctx.rule);
+
if (xin->ofpacts) {
ofpacts = xin->ofpacts;
ofpacts_len = xin->ofpacts_len;
- } else if (xin->rule) {
- actions = rule_dpif_get_actions(xin->rule);
+ } else if (ctx.rule) {
+ actions = rule_dpif_get_actions(ctx.rule);
ofpacts = actions->ofpacts;
ofpacts_len = actions->ofpacts_len;
} else {
in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
special = process_special(&ctx, flow, in_port, ctx.xin->packet);
if (special) {
- ctx.xout->slow = special;
+ ctx.xout->slow |= special;
} else {
size_t sample_actions_len;
}
}
+ if (nl_attr_oversized(ctx.xout->odp_actions.size)) {
+ /* These datapath actions are too big for a Netlink attribute, so we
+ * can't execute them. */
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+ VLOG_ERR_RL(&rl, "discarding oversize datapath actions");
+ ofpbuf_clear(&ctx.xout->odp_actions);
+ }
+
ofpbuf_uninit(&ctx.stack);
/* Clear the metadata and register wildcard masks, because we won't
memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
out:
+ rule_actions_unref(actions);
+ rule_dpif_unref(rule);
+}
+
+/* Sends 'packet' out 'ofport'.
+ * May modify 'packet'.
+ * Returns 0 if successful, otherwise a positive errno value. */
+int
+xlate_send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
+{
+    struct xport *xport;
+    struct ofpbuf key;
+    struct dpif_flow_stats stats;
+    struct odputil_keybuf keybuf;
+    struct ofpact_output output;
+    struct xlate_out xout;
+    struct xlate_in xin;
+    struct flow flow;
+    union flow_in_port in_port_;
+    int error;
+
+    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
+    ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
+    /* Use OFPP_NONE as the in_port to avoid special packet processing. */
+    in_port_.ofp_port = OFPP_NONE;
+    flow_extract(packet, 0, 0, NULL, &in_port_, &flow);
+
+    ovs_rwlock_rdlock(&xlate_rwlock);
+    xport = xport_lookup(ofport);
+    if (!xport) {
+        /* 'xout' has not been initialized yet, so return directly instead
+         * of falling through to the cleanup path, which would call
+         * xlate_out_uninit() on uninitialized memory. */
+        ovs_rwlock_unlock(&xlate_rwlock);
+        return EINVAL;
+    }
+
+    /* NOTE(review): the flow key is built against OFPP_LOCAL's datapath
+     * port rather than the packet's in_port -- confirm this is intended. */
+    odp_flow_key_from_flow(&key, &flow,
+                           ofp_port_to_odp_port(xport->xbridge, OFPP_LOCAL));
+    dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);
+    output.port = xport->ofp_port;
+    output.max_len = 0;
+    xlate_in_init(&xin, xport->xbridge->ofproto, &flow, NULL, 0, packet);
+    xin.ofpacts_len = sizeof output;
+    xin.ofpacts = &output.ofpact;
+    xin.resubmit_stats = &stats;
+    /* Calls xlate_actions__() directly, since the rdlock is already held. */
+    xlate_actions__(&xin, &xout);
+    error = dpif_execute(xport->xbridge->dpif,
+                         key.data, key.size,
+                         xout.odp_actions.data, xout.odp_actions.size,
+                         packet);
     ovs_rwlock_unlock(&xlate_rwlock);
-    rule_actions_unref(actions);
+    xlate_out_uninit(&xout);
+    return error;
 }