#include "ofproto/ofproto-dpif-mirror.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif.h"
+#include "ofproto/ofproto-provider.h"
#include "tunnel.h"
#include "vlog.h"
* flow translation. */
#define MAX_RESUBMIT_RECURSION 64
+/* Maximum number of resubmit actions in a flow translation, whether they are
+ * recursive or not. */
+#define MAX_RESUBMITS (MAX_RESUBMIT_RECURSION * MAX_RESUBMIT_RECURSION)
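+/* (For example, a translation that performs 64 resubmits at each of 64
+ * levels of recursion reaches this limit.) */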
+
struct ovs_rwlock xlate_rwlock = OVS_RWLOCK_INITIALIZER;
struct xbridge {
struct xport *peer; /* Patch port peer or null. */
enum ofputil_port_config config; /* OpenFlow port configuration. */
- int stp_port_no; /* STP port number or 0 if not in use. */
+ int stp_port_no; /* STP port number or -1 if not in use. */
struct hmap skb_priorities; /* Map of 'skb_priority_to_dscp's. */
/* The rule that we are currently translating, or NULL. */
struct rule_dpif *rule;
- int recurse; /* Recursion level, via xlate_table_action. */
+ int mpls_depth_delta; /* Delta of the MPLS stack depth since
+ * actions were last committed.
+ * Must be between -1 and 1 inclusive. */
+ ovs_be32 pre_push_mpls_lse; /* Used to record the top-most MPLS LSE
+ * prior to an mpls_push so that it may be
+ * used for a subsequent mpls_pop. */
+
+ /* Resubmit statistics, via xlate_table_action(). */
+ int recurse; /* Current resubmit nesting depth. */
+ int resubmits; /* Total number of resubmits. */
+
uint32_t orig_skb_priority; /* Priority when packet arrived. */
uint8_t table_id; /* OpenFlow table ID where flow was found. */
uint32_t sflow_n_outputs; /* Number of output ports. */
static bool may_receive(const struct xport *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
struct xlate_ctx *);
+static void xlate_actions__(struct xlate_in *, struct xlate_out *)
+ OVS_REQ_RDLOCK(xlate_rwlock);
static void xlate_normal(struct xlate_ctx *);
static void xlate_report(struct xlate_ctx *, const char *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
}
hmap_remove(&xbridges, &xbridge->hmap_node);
+ mac_learning_unref(xbridge->ml);
+ mbridge_unref(xbridge->mbridge);
+ dpif_sflow_unref(xbridge->sflow);
+ dpif_ipfix_unref(xbridge->ipfix);
+ stp_unref(xbridge->stp);
+ hmap_destroy(&xbridge->xports);
free(xbridge->name);
free(xbridge);
}
static struct stp_port *
xport_get_stp_port(const struct xport *xport)
{
- return xport->xbridge->stp && xport->stp_port_no
+ return xport->xbridge->stp && xport->stp_port_no != -1
? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
: NULL;
}
struct xbundle *in_xbundle;
struct xport *in_port;
struct mac_entry *mac;
+ void *mac_port;
uint16_t vlan;
uint16_t vid;
/* Determine output bundle. */
ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
- if (mac) {
- struct xbundle *mac_xbundle = xbundle_lookup(mac->port.p);
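+ /* Take only the bundle pointer from the mac entry while ml->rwlock is
+ * held; the lock is dropped before any output actions are composed. */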
+ mac_port = mac ? mac->port.p : NULL;
+ ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);
+
+ if (mac_port) {
+ struct xbundle *mac_xbundle = xbundle_lookup(mac_port);
if (mac_xbundle && mac_xbundle != in_xbundle) {
xlate_report(ctx, "forwarding to learned port");
output_normal(ctx, mac_xbundle, vlan);
}
ctx->xout->nf_output_iface = NF_OUT_FLOOD;
}
- ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);
}
/* Compose SAMPLE action for sFlow or IPFIX. The given probability is
/* If 'struct flow' gets additional metadata, we'll need to zero it out
* before traversing a patch port. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 20);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 21);
if (!xport) {
xlate_report(ctx, "Nonexistent output port");
special = process_special(ctx, &ctx->xin->flow, peer,
ctx->xin->packet);
if (special) {
- ctx->xout->slow = special;
+ ctx->xout->slow |= special;
} else if (may_receive(peer, ctx)) {
if (xport_stp_forward_state(peer)) {
xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
}
if (out_port != ODPP_NONE) {
- commit_odp_actions(flow, &ctx->base_flow,
- &ctx->xout->odp_actions, &ctx->xout->wc);
+ ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
+ &ctx->xout->odp_actions,
+ &ctx->xout->wc,
+ &ctx->mpls_depth_delta);
nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
out_port);
compose_output_action__(ctx, ofp_port, true);
}
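+/* Translates the actions of 'rule' in the context of 'ctx', crediting
+ * 'ctx->xin->resubmit_stats' (if any) to 'rule' and tracking the resubmit
+ * depth and total resubmit count while doing so. */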
+static void
+xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule)
+{
+ struct rule_dpif *old_rule = ctx->rule;
+ struct rule_actions *actions;
+
+ if (ctx->xin->resubmit_stats) {
+ rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
+ }
+
+ ctx->resubmits++;
+ ctx->recurse++;
+ ctx->rule = rule;
+ actions = rule_dpif_get_actions(rule);
+ do_xlate_actions(actions->ofpacts, actions->ofpacts_len, ctx);
+ rule_actions_unref(actions);
+ ctx->rule = old_rule;
+ ctx->recurse--;
+}
+
static void
xlate_table_action(struct xlate_ctx *ctx,
ofp_port_t in_port, uint8_t table_id, bool may_packet_in)
{
- if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+ if (ctx->recurse >= MAX_RESUBMIT_RECURSION) {
+ VLOG_ERR_RL(&rl, "resubmit actions recursed over %d times",
+ MAX_RESUBMIT_RECURSION);
+ } else if (ctx->resubmits >= MAX_RESUBMITS) {
+ VLOG_ERR_RL(&rl, "over %d resubmit actions", MAX_RESUBMITS);
+ } else if (ctx->xout->odp_actions.size > UINT16_MAX) {
+ VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of actions");
+ } else if (ctx->stack.size >= 65536) {
+ VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of stack");
+ } else {
struct rule_dpif *rule;
ofp_port_t old_in_port = ctx->xin->flow.in_port.ofp_port;
uint8_t old_table_id = ctx->table_id;
ctx->table_id = table_id;
- /* Look up a flow with 'in_port' as the input port. */
+ /* Look up a flow with 'in_port' as the input port. Then restore the
+ * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
+ * have surprising behavior). */
ctx->xin->flow.in_port.ofp_port = in_port;
- rule_dpif_lookup_in_table(ctx->xbridge->ofproto, &ctx->xin->flow,
- &ctx->xout->wc, table_id, &rule);
-
- /* Restore the original input port. Otherwise OFPP_NORMAL and
- * OFPP_IN_PORT will have surprising behavior. */
+ rule_dpif_lookup_in_table(ctx->xbridge->ofproto,
+ &ctx->xin->flow, &ctx->xout->wc,
+ table_id, &rule);
ctx->xin->flow.in_port.ofp_port = old_in_port;
if (ctx->xin->resubmit_hook) {
ctx->xin->resubmit_hook(ctx->xin, rule, ctx->recurse);
}
- if (rule == NULL && may_packet_in) {
+ if (!rule && may_packet_in) {
struct xport *xport;
- /* Makes clang's thread safety analysis happy. */
- rule_release(rule);
-
/* XXX
* check if table configuration flags
* OFPTC_TABLE_MISS_CONTROLLER, default.
* OFPTC_TABLE_MISS_DROP
* When OF1.0, OFPTC_TABLE_MISS_CONTINUE is used. What to do? */
xport = get_ofp_port(ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
- rule = choose_miss_rule(xport ? xport->config : 0,
- ctx->xbridge->miss_rule,
- ctx->xbridge->no_packet_in_rule);
- ovs_rwlock_rdlock(&rule->up.evict);
- }
-
- if (rule && ctx->xin->resubmit_stats) {
- rule_credit_stats(rule, ctx->xin->resubmit_stats);
+ choose_miss_rule(xport ? xport->config : 0,
+ ctx->xbridge->miss_rule,
+ ctx->xbridge->no_packet_in_rule, &rule);
}
-
if (rule) {
- struct rule_dpif *old_rule = ctx->rule;
-
- ctx->recurse++;
- ctx->rule = rule;
- do_xlate_actions(rule->up.ofpacts, rule->up.ofpacts_len, ctx);
- ctx->rule = old_rule;
- ctx->recurse--;
+ xlate_recursively(ctx, rule);
+ rule_dpif_unref(rule);
}
- rule_release(rule);
ctx->table_id = old_table_id;
- } else {
- static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);
-
- VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
- MAX_RESUBMIT_RECURSION);
+ return;
}
+
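+ /* One of the limits above was exceeded; stop translating any further
+ * actions for this flow. */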
+ ctx->exit = true;
}
static void
struct ofpbuf *packet;
struct flow key;
- ovs_assert(!ctx->xout->slow || ctx->xout->slow == SLOW_CONTROLLER);
- ctx->xout->slow = SLOW_CONTROLLER;
+ ctx->xout->slow |= SLOW_CONTROLLER;
if (!ctx->xin->packet) {
return;
}
key.pkt_mark = 0;
memset(&key.tunnel, 0, sizeof key.tunnel);
- commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
- &ctx->xout->odp_actions, &ctx->xout->wc);
+ ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
+ &ctx->xout->odp_actions,
+ &ctx->xout->wc,
+ &ctx->mpls_depth_delta);
odp_execute_actions(NULL, packet, &key, ctx->xout->odp_actions.data,
ctx->xout->odp_actions.size, NULL, NULL);
pin->reason = reason;
pin->controller_id = controller_id;
pin->table_id = ctx->table_id;
- pin->cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;
+ pin->cookie = ctx->rule ? rule_dpif_get_flow_cookie(ctx->rule) : 0;
pin->send_len = len;
flow_get_metadata(&ctx->xin->flow, &pin->fmd);
ofpbuf_delete(packet);
}
-static void
+static bool
compose_mpls_push_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
struct flow_wildcards *wc = &ctx->xout->wc;
ovs_assert(eth_type_mpls(eth_type));
+ /* If mpls_depth_delta is negative then an MPLS POP action has been
+ * composed and the resulting MPLS label stack is unknown. This means
+ * an MPLS PUSH action can't be composed as it needs to know either the
+ * top-most MPLS LSE to use as a template for the new MPLS LSE, or that
+ * there is no MPLS label stack present. Thus, stop processing.
+ *
+ * If mpls_depth_delta is positive then an MPLS PUSH action has been
+ * composed and no further MPLS PUSH action may be performed without
+ * losing MPLS LSE and ether type information held in ctx->xin->flow.
+ * Thus, stop processing.
+ *
+ * If the MPLS LSE of the flow and base_flow differ then the MPLS LSE
+ * has been updated. Performing an MPLS PUSH action would result in
+ * losing MPLS LSE and ether type information held in ctx->xin->flow.
+ * Thus, stop processing.
+ *
+ * It is planned that in the future this case will be handled
+ * by recirculation. */
+ if (ctx->mpls_depth_delta ||
+ ctx->xin->flow.mpls_lse != ctx->base_flow.mpls_lse) {
+ return true;
+ }
+
memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
- memset(&wc->masks.mpls_depth, 0xff, sizeof wc->masks.mpls_depth);
- if (flow->mpls_depth) {
+ ctx->pre_push_mpls_lse = ctx->xin->flow.mpls_lse;
+
+ if (eth_type_mpls(ctx->xin->flow.dl_type)) {
flow->mpls_lse &= ~htonl(MPLS_BOS_MASK);
- flow->mpls_depth++;
} else {
ovs_be32 label;
uint8_t tc, ttl;
tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
ttl = flow->nw_ttl ? flow->nw_ttl : 0x40;
flow->mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
- flow->mpls_depth = 1;
}
flow->dl_type = eth_type;
+ ctx->mpls_depth_delta++;
+
+ return false;
}
-static void
+static bool
compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
struct flow_wildcards *wc = &ctx->xout->wc;
- struct flow *flow = &ctx->xin->flow;
- ovs_assert(eth_type_mpls(ctx->xin->flow.dl_type));
- ovs_assert(!eth_type_mpls(eth_type));
+ if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
+ return true;
+ }
+
+ /* If mpls_depth_delta is negative then an MPLS POP action has been
+ * composed. Performing another MPLS POP action would result in losing
+ * the ether type that resulted from the already composed MPLS POP.
+ * Thus, stop processing.
+ *
+ * It is planned that in the future this case will be handled
+ * by recirculation. */
+ if (ctx->mpls_depth_delta < 0) {
+ return true;
+ }
memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
- memset(&wc->masks.mpls_depth, 0xff, sizeof wc->masks.mpls_depth);
- if (flow->mpls_depth) {
- flow->mpls_depth--;
- flow->mpls_lse = htonl(0);
- if (!flow->mpls_depth) {
- flow->dl_type = eth_type;
- }
+ /* If mpls_depth_delta is positive then an MPLS PUSH action has been
+ * executed and the previous MPLS LSE saved in ctx->pre_push_mpls_lse.
+ * The flow's MPLS LSE should be restored to that value to allow any
+ * subsequent actions that update the LSE to be executed correctly. */
+ if (ctx->mpls_depth_delta > 0) {
+ ctx->xin->flow.mpls_lse = ctx->pre_push_mpls_lse;
}
+
+ ctx->xin->flow.dl_type = eth_type;
+ ctx->mpls_depth_delta--;
+
+ return false;
}
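+
+/* Net effect of the MPLS bookkeeping above: a push moves mpls_depth_delta
+ * from 0 to +1 and saves the prior top LSE in pre_push_mpls_lse; a
+ * following pop restores that LSE and returns the delta to 0, while a pop
+ * without a preceding push moves the delta to -1. commit_odp_actions()
+ * turns the accumulated delta into datapath actions and resets it. */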
static bool
return true;
}
+ /* If mpls_depth_delta is negative then an MPLS POP action has been
+ * executed and the resulting MPLS label stack is unknown. This means
+ * an MPLS SET TTL action can't be executed as it needs to manipulate
+ * the top-most MPLS LSE. Thus, stop processing.
+ *
+ * It is planned that in the future this case will be handled
+ * by recirculation. */
+ if (ctx->mpls_depth_delta < 0) {
+ return true;
+ }
+
ctx->xout->wc.masks.mpls_lse |= htonl(MPLS_TTL_MASK);
set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl);
return false;
xlate_learn_action(struct xlate_ctx *ctx,
const struct ofpact_learn *learn)
{
- struct ofputil_flow_mod *fm;
+ uint64_t ofpacts_stub[1024 / 8];
+ struct ofputil_flow_mod fm;
struct ofpbuf ofpacts;
ctx->xout->has_learn = true;
return;
}
- fm = xmalloc(sizeof *fm);
- ofpbuf_init(&ofpacts, 0);
- learn_execute(learn, &ctx->xin->flow, fm, &ofpacts);
-
- ofproto_dpif_flow_mod(ctx->xbridge->ofproto, fm);
-}
-
-/* Reduces '*timeout' to no more than 'max'. A value of zero in either case
- * means "infinite". */
-static void
-reduce_timeout(uint16_t max, uint16_t *timeout)
-{
- if (max && (!*timeout || *timeout > max)) {
- *timeout = max;
- }
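+ /* The on-stack stub keeps typical learn actions off the heap; ofpbuf
+ * falls back to malloc only if the actions outgrow the stub. */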
+ ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
+ learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
+ ofproto_dpif_flow_mod(ctx->xbridge->ofproto, &fm);
+ ofpbuf_uninit(&ofpacts);
}
static void
const struct ofpact_fin_timeout *oft)
{
if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
- struct rule_dpif *rule = ctx->rule;
-
- ovs_mutex_lock(&rule->up.ofproto->expirable_mutex);
- if (list_is_empty(&rule->up.expirable)) {
- list_insert(&rule->up.ofproto->expirable, &rule->up.expirable);
- }
- ovs_mutex_unlock(&rule->up.ofproto->expirable_mutex);
-
- ovs_mutex_lock(&rule->up.timeout_mutex);
- reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
- reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout);
- ovs_mutex_unlock(&rule->up.timeout_mutex);
+ rule_dpif_reduce_timeouts(ctx->rule, oft->fin_idle_timeout,
+ oft->fin_hard_timeout);
}
}
* the same percentage. */
uint32_t probability = (os->probability << 16) | os->probability;
- commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
- &ctx->xout->odp_actions, &ctx->xout->wc);
+ ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
+ &ctx->xout->odp_actions,
+ &ctx->xout->wc,
+ &ctx->mpls_depth_delta);
compose_flow_sample_cookie(os->probability, os->collector_set_id,
os->obs_domain_id, os->obs_point_id, &cookie);
ofpact_get_OUTPUT(a)->max_len, true);
break;
+ case OFPACT_GROUP:
+ /* XXX not yet implemented */
+ break;
+
case OFPACT_CONTROLLER:
controller = ofpact_get_CONTROLLER(a);
execute_controller_action(ctx, controller->max_len,
break;
case OFPACT_PUSH_MPLS:
- compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);
+ if (compose_mpls_push_action(ctx,
+ ofpact_get_PUSH_MPLS(a)->ethertype)) {
+ return;
+ }
break;
case OFPACT_POP_MPLS:
- compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
+ if (compose_mpls_pop_action(ctx,
+ ofpact_get_POP_MPLS(a)->ethertype)) {
+ return;
+ }
break;
case OFPACT_SET_MPLS_TTL:
ofpbuf_put(&dst->odp_actions, src->odp_actions.data,
src->odp_actions.size);
}
+
+/* Returns a reference to the sFlow handle associated with 'ofproto', or NULL
+ * if there is none. The caller is responsible for decrementing the result's
+ * ref count with dpif_sflow_unref(). */
+struct dpif_sflow *
+xlate_get_sflow(const struct ofproto_dpif *ofproto)
+{
+ struct dpif_sflow *sflow = NULL;
+ struct xbridge *xbridge;
+
+ ovs_rwlock_rdlock(&xlate_rwlock);
+ xbridge = xbridge_lookup(ofproto);
+ if (xbridge) {
+ sflow = dpif_sflow_ref(xbridge->sflow);
+ }
+ ovs_rwlock_unlock(&xlate_rwlock);
+
+ return sflow;
+}
+
+/* Returns a reference to the IPFIX handle associated with 'ofproto', or NULL
+ * if there is none. The caller is responsible for decrementing the result's
+ * ref count with dpif_ipfix_unref(). */
+struct dpif_ipfix *
+xlate_get_ipfix(const struct ofproto_dpif *ofproto)
+{
+ struct dpif_ipfix *ipfix = NULL;
+ struct xbridge *xbridge;
+
+ ovs_rwlock_rdlock(&xlate_rwlock);
+ xbridge = xbridge_lookup(ofproto);
+ if (xbridge) {
+ ipfix = dpif_ipfix_ref(xbridge->ipfix);
+ }
+ ovs_rwlock_unlock(&xlate_rwlock);
+
+ return ipfix;
+}
\f
static struct skb_priority_to_dscp *
get_skb_priority(const struct xport *xport, uint32_t skb_priority)
return false;
}
-/* Translates the 'ofpacts_len' bytes of "struct ofpacts" starting at 'ofpacts'
- * into datapath actions in 'odp_actions', using 'ctx'. */
+/* Thread-safe call to xlate_actions__(). */
void
xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
+{
+ ovs_rwlock_rdlock(&xlate_rwlock);
+ xlate_actions__(xin, xout);
+ ovs_rwlock_unlock(&xlate_rwlock);
+}
+
+/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at
+ * 'ofpacts' into datapath actions in 'odp_actions', using 'ctx'.
+ *
+ * The caller must take responsibility for eventually freeing 'xout', with
+ * xlate_out_uninit(). */
+static void
+xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
+ OVS_REQ_RDLOCK(xlate_rwlock)
{
struct flow_wildcards *wc = &xout->wc;
struct flow *flow = &xin->flow;
+ struct rule_dpif *rule = NULL;
+ struct rule_actions *actions = NULL;
enum slow_path_reason special;
const struct ofpact *ofpacts;
struct xport *in_port;
COVERAGE_INC(xlate_actions);
- ovs_rwlock_rdlock(&xlate_rwlock);
-
/* Flow initialization rules:
* - 'base_flow' must match the kernel's view of the packet at the
* time that action processing starts. 'flow' represents any
}
ctx.recurse = 0;
+ ctx.resubmits = 0;
ctx.orig_skb_priority = flow->skb_priority;
ctx.table_id = 0;
ctx.exit = false;
+ ctx.mpls_depth_delta = 0;
+
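+ /* If the caller supplied neither explicit actions nor a rule, look the
+ * flow up now and credit resubmit stats to the rule that matches. */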
+ if (!xin->ofpacts && !ctx.rule) {
+ rule_dpif_lookup(ctx.xbridge->ofproto, flow, wc, &rule);
+ if (ctx.xin->resubmit_stats) {
+ rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
+ }
+ ctx.rule = rule;
+ }
+ xout->fail_open = ctx.rule && rule_dpif_fail_open(ctx.rule);
if (xin->ofpacts) {
ofpacts = xin->ofpacts;
ofpacts_len = xin->ofpacts_len;
- } else if (xin->rule) {
- ofpacts = xin->rule->up.ofpacts;
- ofpacts_len = xin->rule->up.ofpacts_len;
+ } else if (ctx.rule) {
+ actions = rule_dpif_get_actions(ctx.rule);
+ ofpacts = actions->ofpacts;
+ ofpacts_len = actions->ofpacts_len;
} else {
NOT_REACHED();
}
in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
special = process_special(&ctx, flow, in_port, ctx.xin->packet);
if (special) {
- ctx.xout->slow = special;
+ ctx.xout->slow |= special;
} else {
size_t sample_actions_len;
}
}
+ if (nl_attr_oversized(ctx.xout->odp_actions.size)) {
+ /* These datapath actions are too big for a Netlink attribute, so we
+ * can't execute them. */
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+ VLOG_ERR_RL(&rl, "discarding oversize datapath actions");
+ ofpbuf_clear(&ctx.xout->odp_actions);
+ }
+
ofpbuf_uninit(&ctx.stack);
/* Clear the metadata and register wildcard masks, because we won't
memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
out:
+ rule_actions_unref(actions);
+ rule_dpif_unref(rule);
+}
+
+/* Sends 'packet' out 'ofport'.
+ * May modify 'packet'.
+ * Returns 0 if successful, otherwise a positive errno value. */
+int
+xlate_send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
+{
+ uint64_t odp_actions_stub[1024 / 8];
+ struct xport *xport;
+ struct ofpbuf key, odp_actions;
+ struct dpif_flow_stats stats;
+ struct odputil_keybuf keybuf;
+ struct ofpact_output output;
+ struct xlate_out xout;
+ struct xlate_in xin;
+ struct flow flow;
+ union flow_in_port in_port_;
+ int error;
+
+ ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
+ ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
+ ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
+ /* Use OFPP_NONE as the in_port to avoid special packet processing. */
+ in_port_.ofp_port = OFPP_NONE;
+ flow_extract(packet, 0, 0, NULL, &in_port_, &flow);
+
+ ovs_rwlock_rdlock(&xlate_rwlock);
+ xport = xport_lookup(ofport);
+ if (!xport) {
+ ovs_rwlock_unlock(&xlate_rwlock);
+ return EINVAL;
+ }
+
+ odp_flow_key_from_flow(&key, &flow,
+ ofp_port_to_odp_port(xport->xbridge, OFPP_LOCAL));
+ dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);
+ output.port = xport->ofp_port;
+ output.max_len = 0;
+ xlate_in_init(&xin, xport->xbridge->ofproto, &flow, NULL, 0, packet);
+ xin.ofpacts_len = sizeof output;
+ xin.ofpacts = &output.ofpact;
+ xin.resubmit_stats = &stats;
+ /* Calls xlate_actions__() directly, since the rdlock is already held. */
+ xlate_actions__(&xin, &xout);
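+ /* The final argument asks the dpif layer to help execute actions that
+ * the datapath cannot carry out by itself (SLOW_ACTION). */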
+ error = dpif_execute(xport->xbridge->dpif,
+ key.data, key.size,
+ xout.odp_actions.data, xout.odp_actions.size,
+ packet, (xout.slow & SLOW_ACTION) != 0);
ovs_rwlock_unlock(&xlate_rwlock);
+ xlate_out_uninit(&xout);
+ return error;
}