#include "ofproto/ofproto-dpif-xlate.h"
+#include "bfd.h"
#include "bitmap.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
+#include "cfm.h"
#include "connmgr.h"
#include "coverage.h"
#include "dpif.h"
#include "dynamic-string.h"
+#include "lacp.h"
#include "learn.h"
#include "mac-learning.h"
#include "meta-flow.h"
VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);
+/* Maximum depth of flow table recursion (due to resubmit actions) in a
+ * flow translation. */
+#define MAX_RESUBMIT_RECURSION 64
+
struct xlate_ctx {
struct xlate_in *xin;
struct xlate_out *xout;
ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
}
+/* Checks whether 'flow', received on 'ofport', is a frame belonging to a
+ * link-local control protocol (CFM, BFD, LACP, or STP) that must be
+ * handled in the slow path rather than by ordinary flow translation.
+ *
+ * Returns the matching SLOW_* slow-path reason, or 0 when no special
+ * handling applies (including when 'ofport' is null).  When 'packet' is
+ * nonnull, the packet is also handed to the matching protocol module
+ * (cfm/bfd/lacp/stp) for processing.
+ *
+ * The CFM, BFD, and STP classification helpers take 'wc' so that the
+ * fields they examine are unwildcarded in 'ctx->xout->wc'; the LACP
+ * branch tests flow->dl_type directly -- NOTE(review): presumably
+ * dl_type is already unwildcarded by the caller, TODO confirm. */
+static enum slow_path_reason
+process_special(struct xlate_ctx *ctx, const struct flow *flow,
+ const struct ofport_dpif *ofport, const struct ofpbuf *packet)
+{
+ struct ofproto_dpif *ofproto = ctx->ofproto;
+ struct flow_wildcards *wc = &ctx->xout->wc;
+
+ if (!ofport) {
+ return 0;
+ } else if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow, wc)) {
+ if (packet) {
+ cfm_process_heartbeat(ofport->cfm, packet);
+ }
+ return SLOW_CFM;
+ } else if (ofport->bfd && bfd_should_process_flow(flow, wc)) {
+ if (packet) {
+ bfd_process_packet(ofport->bfd, flow, packet);
+ }
+ return SLOW_BFD;
+ } else if (ofport->bundle && ofport->bundle->lacp
+ && flow->dl_type == htons(ETH_TYPE_LACP)) {
+ if (packet) {
+ lacp_process_packet(ofport->bundle->lacp, ofport, packet);
+ }
+ return SLOW_LACP;
+ } else if (ofproto->stp && stp_should_process_flow(flow, wc)) {
+ if (packet) {
+ stp_process_packet(ofport, packet);
+ }
+ return SLOW_STP;
+ } else {
+ /* Ordinary traffic: no slow-path reason. */
+ return 0;
+ }
+}
+
static void
compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port,
bool check_stp)
ovs_be16 flow_vlan_tci;
uint32_t flow_skb_mark;
uint8_t flow_nw_tos;
- struct priority_to_dscp *pdscp;
uint32_t out_port, odp_port;
+ uint8_t dscp;
/* If 'struct flow' gets additional metadata, we'll need to zero it out
* before traversing a patch port. */
struct ofport_dpif *peer = ofport_get_peer(ofport);
struct flow old_flow = ctx->xin->flow;
enum slow_path_reason special;
- struct ofport_dpif *in_port;
if (!peer) {
xlate_report(ctx, "Nonexistent patch port peer");
memset(&flow->tunnel, 0, sizeof flow->tunnel);
memset(flow->regs, 0, sizeof flow->regs);
- in_port = get_ofp_port(ctx->ofproto, flow->in_port);
- special = process_special(ctx->ofproto, &ctx->xin->flow, in_port,
+ special = process_special(ctx, &ctx->xin->flow, peer,
ctx->xin->packet);
if (special) {
ctx->xout->slow = special;
- } else if (!in_port || may_receive(in_port, ctx)) {
- if (!in_port || stp_forward_in_state(in_port->stp_state)) {
+ } else if (may_receive(peer, ctx)) {
+ if (stp_forward_in_state(peer->stp_state)) {
xlate_table_action(ctx, flow->in_port, 0, true);
} else {
/* Forwarding is disabled by STP. Let OFPP_NORMAL and the
flow_skb_mark = flow->skb_mark;
flow_nw_tos = flow->nw_tos;
- pdscp = get_priority(ofport, flow->skb_priority);
- if (pdscp) {
+ if (ofproto_dpif_dscp_from_priority(ofport, flow->skb_priority, &dscp)) {
flow->nw_tos &= ~IP_DSCP_MASK;
- flow->nw_tos |= pdscp->dscp;
+ flow->nw_tos |= dscp;
}
if (ofport->tnl_port) {
* matches, while explicit set actions on tunnel metadata are.
*/
struct flow_tnl flow_tnl = flow->tunnel;
- odp_port = tnl_port_send(ofport->tnl_port, flow);
+ odp_port = tnl_port_send(ofport->tnl_port, flow, &ctx->xout->wc);
if (odp_port == OVSP_NONE) {
xlate_report(ctx, "Tunneling decided against output");
goto out; /* restore flow_nw_tos */
}
in_port = get_ofp_port(ctx.ofproto, flow->in_port);
- special = process_special(ctx.ofproto, flow, in_port, ctx.xin->packet);
+ special = process_special(&ctx, flow, in_port, ctx.xin->packet);
if (special) {
ctx.xout->slow = special;
} else {