X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=ofproto%2Fofproto-dpif-xlate.c;h=1ff80d9464acbdde4167a9f011844acf5fca5a97;hb=e0eecb1ca122bd8a22058e42161bd93c8474a74f;hp=a880376fbb33ae0643156e73a86dfd4cca3ea9e3;hpb=269dc90a0a090f2b40e55b4572ac5edeb1684b7d;p=sliver-openvswitch.git

diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c
index a880376fb..1ff80d946 100644
--- a/ofproto/ofproto-dpif-xlate.c
+++ b/ofproto/ofproto-dpif-xlate.c
@@ -178,6 +178,7 @@ struct xlate_ctx {
     /* Resubmit statistics, via xlate_table_action(). */
     int recurse;                /* Current resubmit nesting depth. */
     int resubmits;              /* Total number of resubmits. */
+    bool in_group;              /* Currently translating ofgroup, if true. */
 
     uint32_t orig_skb_priority; /* Priority when packet arrived. */
     uint8_t table_id;           /* OpenFlow table ID where flow was found. */
@@ -664,7 +665,7 @@ xport_get_stp_port(const struct xport *xport)
            : NULL;
 }
 
-static enum stp_state
+static bool
 xport_stp_learn_state(const struct xport *xport)
 {
     struct stp_port *sp = xport_get_stp_port(xport);
@@ -678,6 +679,13 @@ xport_stp_forward_state(const struct xport *xport)
     return stp_forward_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
 }
 
+static bool
+xport_stp_listen_state(const struct xport *xport)
+{
+    struct stp_port *sp = xport_get_stp_port(xport);
+    return stp_listen_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
+}
+
 /* Returns true if STP should process 'flow'.  Sets fields in 'wc' that
  * were used to make the determination.*/
 static bool
@@ -1692,9 +1700,18 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
     } else if (xport->config & OFPUTIL_PC_NO_FWD) {
         xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
         return;
-    } else if (check_stp && !xport_stp_forward_state(xport)) {
-        xlate_report(ctx, "STP not in forwarding state, skipping output");
-        return;
+    } else if (check_stp) {
+        if (eth_addr_equals(ctx->base_flow.dl_dst, eth_addr_stp)) {
+            if (!xport_stp_listen_state(xport)) {
+                xlate_report(ctx, "STP not in listening state, "
+                             "skipping bpdu output");
+                return;
+            }
+        } else if (!xport_stp_forward_state(xport)) {
+            xlate_report(ctx, "STP not in forwarding state, "
+                         "skipping output");
+            return;
+        }
     }
 
     if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
@@ -1780,19 +1797,18 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                             &ctx->xout->odp_actions);
         flow->tunnel = flow_tnl; /* Restore tunnel metadata */
     } else {
-        ofp_port_t vlandev_port;
-
         odp_port = xport->odp_port;
+        out_port = odp_port;
         if (ofproto_has_vlan_splinters(ctx->xbridge->ofproto)) {
+            ofp_port_t vlandev_port;
+
             wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
-        }
-        vlandev_port = vsp_realdev_to_vlandev(ctx->xbridge->ofproto, ofp_port,
-                                              flow->vlan_tci);
-        if (vlandev_port == ofp_port) {
-            out_port = odp_port;
-        } else {
-            out_port = ofp_port_to_odp_port(ctx->xbridge, vlandev_port);
-            flow->vlan_tci = htons(0);
+            vlandev_port = vsp_realdev_to_vlandev(ctx->xbridge->ofproto,
+                                                  ofp_port, flow->vlan_tci);
+            if (vlandev_port != ofp_port) {
+                out_port = ofp_port_to_odp_port(ctx->xbridge, vlandev_port);
+                flow->vlan_tci = htons(0);
+            }
         }
     }
 
@@ -1836,7 +1852,6 @@ xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule)
     ctx->rule = rule;
     actions = rule_dpif_get_actions(rule);
     do_xlate_actions(actions->ofpacts, actions->ofpacts_len, ctx);
-    rule_actions_unref(actions);
     ctx->rule = old_rule;
     ctx->recurse--;
 }
@@ -1981,6 +1996,8 @@ xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
 static void
 xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group)
 {
+    ctx->in_group = true;
+
     switch (group_dpif_get_type(group)) {
     case OFPGT11_ALL:
     case OFPGT11_INDIRECT:
@@ -1996,12 +2013,38 @@ xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group)
         OVS_NOT_REACHED();
     }
     group_dpif_release(group);
+
+    ctx->in_group = false;
+}
+
+static bool
+xlate_group_resource_check(struct xlate_ctx *ctx)
+{
+    if (!xlate_resubmit_resource_check(ctx)) {
+        return false;
+    } else if (ctx->in_group) {
+        /* Prevent nested translation of OpenFlow groups.
+         *
+         * OpenFlow allows this restriction.  We enforce this restriction only
+         * because, with the current architecture, we would otherwise have to
+         * take a possibly recursive read lock on the ofgroup rwlock, which is
+         * unsafe given that POSIX allows taking a read lock to block if there
+         * is a thread blocked on taking the write lock.  Other solutions
+         * without this restriction are also possible, but seem unwarranted
+         * given the current limited use of groups. */
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+        VLOG_ERR_RL(&rl, "cannot recursively translate OpenFlow group");
+        return false;
+    } else {
+        return true;
+    }
 }
 
 static bool
 xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id)
 {
-    if (xlate_resubmit_resource_check(ctx)) {
+    if (xlate_group_resource_check(ctx)) {
         struct group_dpif *group;
         bool got_group;
 
@@ -2911,6 +2954,7 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
     struct xlate_ctx ctx;
     size_t ofpacts_len;
     bool tnl_may_send;
+    bool is_icmp;
 
     COVERAGE_INC(xlate_actions);
 
@@ -2965,6 +3009,7 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
     if (is_ip_any(flow)) {
         wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
     }
+    is_icmp = is_icmpv4(flow) || is_icmpv6(flow);
 
     tnl_may_send = tnl_xlate_init(&ctx.base_flow, flow, wc);
     if (ctx.xbridge->netflow) {
@@ -2973,6 +3018,7 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
 
     ctx.recurse = 0;
     ctx.resubmits = 0;
+    ctx.in_group = false;
     ctx.orig_skb_priority = flow->skb_priority;
     ctx.table_id = 0;
     ctx.exit = false;
@@ -3124,8 +3170,22 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
      * use non-header fields as part of the cache. */
     flow_wildcards_clear_non_packet_fields(wc);
 
+    /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields.  struct flow uses
+     * the low 8 bits of the 16-bit tp_src and tp_dst members to represent
+     * these fields.  The datapath interface, on the other hand, represents
+     * them with just 8 bits each.  This means that if the high 8 bits of the
+     * masks for these fields somehow become set, then they will get chopped
+     * off by a round trip through the datapath, and revalidation will spot
+     * that as an inconsistency and delete the flow.  Avoid the problem here by
+     * making sure that only the low 8 bits of either field can be unwildcarded
+     * for ICMP.
+     */
+    if (is_icmp) {
+        wc->masks.tp_src &= htons(UINT8_MAX);
+        wc->masks.tp_dst &= htons(UINT8_MAX);
+    }
+
 out:
-    rule_actions_unref(actions);
     rule_dpif_unref(rule);
 }
 
@@ -3138,12 +3198,11 @@ xlate_send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
     struct xport *xport;
     struct ofpact_output output;
     struct flow flow;
-    union flow_in_port in_port_;
 
     ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
     /* Use OFPP_NONE as the in_port to avoid special packet processing. */
-    in_port_.ofp_port = OFPP_NONE;
-    flow_extract(packet, NULL, &flow);
+    flow_extract(packet, NULL, &flow);
+    flow.in_port.ofp_port = OFPP_NONE;
 
     ovs_rwlock_rdlock(&xlate_rwlock);
     xport = xport_lookup(ofport);
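
The compose_output_action__() change splits the old single STP check in two: frames addressed to the bridge group address (eth_addr_stp) may be output whenever the port is at least in listening state, while ordinary traffic still requires forwarding state, so BPDUs keep flowing on ports that STP has not yet opened for data. Below is a minimal standalone sketch of that decision; may_output() and is_bpdu are hypothetical names, and the state predicates are modeled from memory on the stp_listen_in_state()/stp_forward_in_state() helpers in lib/stp.h, where STP_DISABLED passes every check because a port not running STP must not be filtered by it.

    #include <stdbool.h>
    #include <stdio.h>

    /* Port states, modeled on enum stp_state in lib/stp.h. */
    enum stp_state {
        STP_DISABLED   = 1 << 0,
        STP_LISTENING  = 1 << 1,
        STP_LEARNING   = 1 << 2,
        STP_FORWARDING = 1 << 3,
        STP_BLOCKING   = 1 << 4
    };

    /* BPDUs are processed in listening state and beyond. */
    static bool
    listen_in_state(enum stp_state s)
    {
        return (s & (STP_DISABLED | STP_LISTENING | STP_LEARNING
                     | STP_FORWARDING)) != 0;
    }

    /* Data traffic is relayed only in forwarding state. */
    static bool
    forward_in_state(enum stp_state s)
    {
        return (s & (STP_DISABLED | STP_FORWARDING)) != 0;
    }

    /* The per-packet decision made by the patched compose_output_action__(). */
    static bool
    may_output(bool is_bpdu, enum stp_state s)
    {
        return is_bpdu ? listen_in_state(s) : forward_in_state(s);
    }

    int
    main(void)
    {
        /* A listening port may emit BPDUs (prints 1) but not data (prints 0). */
        printf("BPDU on listening port: %d\n", may_output(true, STP_LISTENING));
        printf("data on listening port: %d\n", may_output(false, STP_LISTENING));
        return 0;
    }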
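
The new xlate_group_resource_check() forbids nested group translation with a plain flag rather than a lock: ctx->in_group is set for the duration of xlate_group_action__() and consulted before another group may be entered, which sidesteps the recursive-read-lock hazard described in the patch's comment. The sketch below reproduces only that guard pattern; struct ctx, group_enter() and group_leave() are hypothetical names, not Open vSwitch APIs.

    #include <stdbool.h>
    #include <stdio.h>

    struct ctx {
        bool in_group;          /* True while a group is being translated. */
    };

    /* Analogous to xlate_group_resource_check() plus the flag-setting in
     * xlate_group_action__(): refuse to start a group translation while one
     * is already in progress on this context. */
    static bool
    group_enter(struct ctx *ctx)
    {
        if (ctx->in_group) {
            fprintf(stderr, "cannot recursively translate OpenFlow group\n");
            return false;
        }
        ctx->in_group = true;
        return true;
    }

    static void
    group_leave(struct ctx *ctx)
    {
        ctx->in_group = false;
    }

    int
    main(void)
    {
        struct ctx ctx = { .in_group = false };

        if (group_enter(&ctx)) {
            /* A nested group action hits the guard and is rejected. */
            if (!group_enter(&ctx)) {
                printf("nested group translation rejected\n");
            }
            group_leave(&ctx);
        }
        return 0;
    }

A boolean suffices because each struct xlate_ctx is used by a single thread for the duration of one translation, so no atomic operations are needed.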
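
The is_icmp hunk exists because struct flow carries the 8-bit ICMP type and code in the low bytes of its 16-bit tp_src/tp_dst members, while the datapath encodes them in 8 bits; a mask with high bits set would come back changed from a datapath round trip and cause revalidation to delete the flow. The self-contained snippet below only demonstrates the arithmetic of the htons(UINT8_MAX) narrowing used in the patch:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* A fully unwildcarded 16-bit transport-port mask, network order. */
        uint16_t mask = htons(0xffff);

        /* The patch's narrowing: keep only the 8 bits the datapath can
         * actually represent for ICMP type/code. */
        mask &= htons(UINT8_MAX);

        printf("narrowed mask: 0x%04x\n", ntohs(mask));    /* 0x00ff */
        return 0;
    }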