#include "coverage.h"
#include "dpif.h"
#include "dynamic-string.h"
+#include "in-band.h"
#include "lacp.h"
#include "learn.h"
#include "mac-learning.h"
#include "odp-execute.h"
#include "ofp-actions.h"
#include "ofproto/ofproto-dpif-ipfix.h"
+#include "ofproto/ofproto-dpif-mirror.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif.h"
#include "tunnel.h"
return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);
}
-static bool
-vlan_is_mirrored(const struct ofmirror *m, int vlan)
-{
- return !m->vlans || bitmap_is_set(m->vlans, vlan);
-}
-
static struct ofbundle *
lookup_input_bundle(const struct ofproto_dpif *ofproto, ofp_port_t in_port,
bool warn, struct ofport_dpif **in_ofportp)
struct ofbundle *in_bundle;
uint16_t vlan;
uint16_t vid;
- const struct nlattr *a;
- size_t left;
+
+ mirrors = ctx->xout->mirrors;
+ ctx->xout->mirrors = 0;
in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port.ofp_port,
ctx->xin->packet != NULL, NULL);
if (!in_bundle) {
return;
}
- mirrors = in_bundle->src_mirrors;
+ mirrors |= mirror_bundle_src(ctx->ofproto->mbridge, in_bundle);
/* Drop frames on bundles reserved for mirroring. */
- if (in_bundle->mirror_out) {
+ if (mirror_bundle_out(ctx->ofproto->mbridge, in_bundle)) {
if (ctx->xin->packet != NULL) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
}
vlan = input_vid_to_vlan(in_bundle, vid);
- /* Look at the output ports to check for destination selections. */
-
- NL_ATTR_FOR_EACH (a, left, ctx->xout->odp_actions.data,
- ctx->xout->odp_actions.size) {
- enum ovs_action_attr type = nl_attr_type(a);
- struct ofport_dpif *ofport;
-
- if (type != OVS_ACTION_ATTR_OUTPUT) {
- continue;
- }
-
- ofport = get_odp_port(ofproto, nl_attr_get_odp_port(a));
- if (ofport && ofport->bundle) {
- mirrors |= ofport->bundle->dst_mirrors;
- }
- }
-
if (!mirrors) {
return;
}
ctx->xin->flow = *orig_flow;
while (mirrors) {
- struct ofmirror *m;
-
- m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
-
- if (m->vlans) {
+ mirror_mask_t dup_mirrors;
+ struct ofbundle *out;
+ unsigned long *vlans;
+ bool vlan_mirrored;
+ bool has_mirror;
+ int out_vlan;
+
+ has_mirror = mirror_get(ofproto->mbridge, mirror_mask_ffs(mirrors) - 1,
+ &vlans, &dup_mirrors, &out, &out_vlan);
+ ovs_assert(has_mirror);
+
+ if (vlans) {
ctx->xout->wc.masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
}
+ vlan_mirrored = !vlans || bitmap_is_set(vlans, vlan);
+ free(vlans);
- if (!vlan_is_mirrored(m, vlan)) {
+ if (!vlan_mirrored) {
mirrors = zero_rightmost_1bit(mirrors);
continue;
}
- mirrors &= ~m->dup_mirrors;
- ctx->xout->mirrors |= m->dup_mirrors;
- if (m->out) {
- output_normal(ctx, m->out, vlan);
- } else if (vlan != m->out_vlan
+ mirrors &= ~dup_mirrors;
+ ctx->xout->mirrors |= dup_mirrors;
+ if (out) {
+ output_normal(ctx, out, vlan);
+ } else if (vlan != out_vlan
&& !eth_addr_is_reserved(orig_flow->dl_dst)) {
struct ofbundle *bundle;
HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
- if (ofbundle_includes_vlan(bundle, m->out_vlan)
- && !bundle->mirror_out) {
- output_normal(ctx, bundle, m->out_vlan);
+ if (ofbundle_includes_vlan(bundle, out_vlan)
+ && !mirror_bundle_out(bundle->ofproto->mbridge, bundle)) {
+ output_normal(ctx, bundle, out_vlan);
}
}
}
ctx->xout->has_normal = true;
- /* Check the dl_type, since we may check for gratuituous ARP. */
- memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
}
/* Drop frames on bundles reserved for mirroring. */
- if (in_bundle->mirror_out) {
+ if (mirror_bundle_out(ctx->ofproto->mbridge, in_bundle)) {
if (ctx->xin->packet != NULL) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
if (bundle != in_bundle
&& ofbundle_includes_vlan(bundle, vlan)
&& bundle->floodable
- && !bundle->mirror_out) {
+ && !mirror_bundle_out(bundle->ofproto->mbridge, bundle)) {
output_normal(ctx, bundle, vlan);
}
}
return SLOW_BFD;
} else if (ofport->bundle && ofport->bundle->lacp
&& flow->dl_type == htons(ETH_TYPE_LACP)) {
- memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
if (packet) {
lacp_process_packet(ofport->bundle->lacp, ofport, packet);
}
return;
}
+ if (mbridge_has_mirrors(ctx->ofproto->mbridge) && ofport->bundle) {
+ ctx->xout->mirrors |=
+ mirror_bundle_dst(ofport->bundle->ofproto->mbridge,
+ ofport->bundle);
+ }
+
if (ofport->peer) {
struct ofport_dpif *peer = ofport->peer;
struct flow old_flow = ctx->xin->flow;
* learning action look at the packet, then drop it. */
struct flow old_base_flow = ctx->base_flow;
size_t old_size = ctx->xout->odp_actions.size;
+ mirror_mask_t old_mirrors = ctx->xout->mirrors;
xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
+ ctx->xout->mirrors = old_mirrors;
ctx->base_flow = old_base_flow;
ctx->xout->odp_actions.size = old_size;
}
flow->nw_tos |= dscp;
}
- if (ofport->tnl_port) {
+ if (ofport->is_tunnel) {
/* Save tunnel metadata so that changes made due to
* the Logical (tunnel) Port are not visible for any further
* matches, while explicit set actions on tunnel metadata are.
*/
struct flow_tnl flow_tnl = flow->tunnel;
- odp_port = tnl_port_send(ofport->tnl_port, flow, &ctx->xout->wc);
+ odp_port = tnl_port_send(ofport, flow, &ctx->xout->wc);
if (odp_port == ODPP_NONE) {
xlate_report(ctx, "Tunneling decided against output");
goto out; /* restore flow_nw_tos */
compose_output_action__(ctx, ofp_port, true);
}
-static void
-tag_the_flow(struct xlate_ctx *ctx, struct rule_dpif *rule)
-{
- struct ofproto_dpif *ofproto = ctx->ofproto;
- uint8_t table_id = ctx->table_id;
-
- if (table_id > 0 && table_id < N_TABLES) {
- struct table_dpif *table = &ofproto->tables[table_id];
- if (table->other_table) {
- ctx->xout->tags |= (rule && rule->tag
- ? rule->tag
- : rule_calculate_tag(&ctx->xin->flow,
- &table->other_table->mask,
- table->basis));
- }
- }
-}
-
/* Common rule processing in one place to avoid duplicating code. */
static struct rule_dpif *
ctx_rule_hooks(struct xlate_ctx *ctx, struct rule_dpif *rule,
rule = rule_dpif_lookup_in_table(ctx->ofproto, &ctx->xin->flow,
&ctx->xout->wc, table_id);
- tag_the_flow(ctx, rule);
+ ctx->xout->tags |= calculate_flow_tag(ctx->ofproto, &ctx->xin->flow,
+ ctx->table_id, rule);
/* Restore the original input port. Otherwise OFPP_NORMAL and
* OFPP_IN_PORT will have surprising behavior. */
ovs_assert(eth_type_mpls(eth_type));
- memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
memset(&wc->masks.mpls_depth, 0xff, sizeof wc->masks.mpls_depth);
ovs_assert(eth_type_mpls(ctx->xin->flow.dl_type));
ovs_assert(!eth_type_mpls(eth_type));
- memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
memset(&wc->masks.mpls_depth, 0xff, sizeof wc->masks.mpls_depth);
uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse);
struct flow_wildcards *wc = &ctx->xout->wc;
- memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
if (!eth_type_mpls(flow->dl_type)) {
break;
case OFPACT_SET_IPV4_SRC:
- memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
if (flow->dl_type == htons(ETH_TYPE_IP)) {
flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
}
break;
case OFPACT_SET_IPV4_DST:
- memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
if (flow->dl_type == htons(ETH_TYPE_IP)) {
flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
}
case OFPACT_SET_IPV4_DSCP:
/* OpenFlow 1.0 only supports IPv4. */
- memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
if (flow->dl_type == htons(ETH_TYPE_IP)) {
flow->nw_tos &= ~IP_DSCP_MASK;
flow->nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
break;
case OFPACT_SET_L4_SRC_PORT:
- memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
if (is_ip_any(flow)) {
flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
break;
case OFPACT_SET_L4_DST_PORT:
- memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
if (is_ip_any(flow)) {
flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
break;
case OFPACT_DEC_TTL:
- memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
goto out;
}
break;
case OFPACT_BUNDLE:
- ctx->ofproto->has_bundle_action = true;
xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
break;
break;
case OFPACT_FIN_TIMEOUT:
- memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
ctx->xout->has_fin_timeout = true;
xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
rule = rule_dpif_lookup_in_table(ctx->ofproto, flow, wc,
ctx->table_id);
- tag_the_flow(ctx, rule);
+ ctx->xout->tags = calculate_flow_tag(ctx->ofproto, &ctx->xin->flow,
+ ctx->table_id, rule);
rule = ctx_rule_hooks(ctx, rule, true);
src->odp_actions.size);
}
\f
+/* Returns true if the datapath actions accumulated so far in 'ctx' already
+ * include an OVS_ACTION_ATTR_OUTPUT to the bridge's local port (OFPP_LOCAL).
+ *
+ * Caller (see the in-band check near the end of translation) uses this to
+ * decide whether an extra output to OFPP_LOCAL must be appended so that
+ * in-band control traffic still reaches the local port. */
+static bool
+actions_output_to_local_port(const struct xlate_ctx *ctx)
+{
+    odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->ofproto, OFPP_LOCAL);
+    const struct nlattr *a;
+    unsigned int left;
+
+    /* Scan the Netlink-formatted action buffer for an OUTPUT action whose
+     * datapath port matches the local port.  NOTE(review): the _UNSAFE
+     * iterator presumably performs no per-attribute bounds validation;
+     * that looks acceptable here because 'odp_actions' was built by this
+     * translation itself -- confirm against the macro's contract. */
+    NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->xout->odp_actions.data,
+                             ctx->xout->odp_actions.size) {
+        if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
+            && nl_attr_get_odp_port(a) == local_odp_port) {
+            return true;
+        }
+    }
+    return false;
+}
/* Translates the 'ofpacts_len' bytes of "struct ofpacts" starting at 'ofpacts'
* into datapath actions in 'odp_actions', using 'ctx'. */
flow_wildcards_init_catchall(wc);
memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port);
memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
+ memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
if (tnl_port_should_receive(&ctx.xin->flow)) {
memset(&wc->masks.tunnel, 0xff, sizeof wc->masks.tunnel);
}
if (xin->ofproto->netflow) {
- netflow_mask_wc(wc);
+ netflow_mask_wc(flow, wc);
}
ctx.xout->tags = 0;
ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);
- if (ctx.ofproto->has_mirrors || hit_resubmit_limit) {
+ if (mbridge_has_mirrors(ctx.ofproto->mbridge) || hit_resubmit_limit) {
/* Do this conditionally because the copy is expensive enough that it
* shows up in profiles. */
orig_flow = *flow;
} else {
static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
size_t sample_actions_len;
- odp_port_t local_odp_port;
if (flow->in_port.ofp_port
!= vsp_realdev_to_vlandev(ctx.ofproto, flow->in_port.ofp_port,
}
}
- local_odp_port = ofp_port_to_odp_port(ctx.ofproto, OFPP_LOCAL);
- if (!connmgr_must_output_local(ctx.ofproto->up.connmgr, flow,
- local_odp_port,
- ctx.xout->odp_actions.data,
- ctx.xout->odp_actions.size)) {
+ if (connmgr_has_in_band(ctx.ofproto->up.connmgr)
+ && in_band_must_output_to_local_port(flow)
+ && !actions_output_to_local_port(&ctx)) {
compose_output_action(&ctx, OFPP_LOCAL);
}
- if (ctx.ofproto->has_mirrors) {
+ if (mbridge_has_mirrors(ctx.ofproto->mbridge)) {
add_mirror_actions(&ctx, &orig_flow);
}
fix_sflow_action(&ctx);