X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=ofproto%2Fofproto-dpif-xlate.c;h=0e7b9a0768583a9d3a074ae35f416f4e4321ee79;hb=5caf4376cc3710145ea9f57c577f3c3ecde0cad5;hp=68666d7e342b55935a393c47ee441877ab0678ac;hpb=9cfef3d04d86f8ce465cf89c0eeae543a46c47ab;p=sliver-openvswitch.git diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c index 68666d7e3..0e7b9a076 100644 --- a/ofproto/ofproto-dpif-xlate.c +++ b/ofproto/ofproto-dpif-xlate.c @@ -16,14 +16,18 @@ #include "ofproto/ofproto-dpif-xlate.h" +#include "bfd.h" #include "bitmap.h" #include "bond.h" #include "bundle.h" #include "byte-order.h" +#include "cfm.h" #include "connmgr.h" #include "coverage.h" #include "dpif.h" #include "dynamic-string.h" +#include "in-band.h" +#include "lacp.h" #include "learn.h" #include "mac-learning.h" #include "meta-flow.h" @@ -43,6 +47,10 @@ COVERAGE_DEFINE(ofproto_dpif_xlate); VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate); +/* Maximum depth of flow table recursion (due to resubmit actions) in a + * flow translation. */ +#define MAX_RESUBMIT_RECURSION 64 + struct xlate_ctx { struct xlate_in *xin; struct xlate_out *xout; @@ -73,7 +81,7 @@ struct xlate_ctx { uint32_t orig_skb_priority; /* Priority when packet arrived. */ uint8_t table_id; /* OpenFlow table ID where flow was found. */ uint32_t sflow_n_outputs; /* Number of output ports. */ - uint32_t sflow_odp_port; /* Output port for composing sFlow action. */ + odp_port_t sflow_odp_port; /* Output port for composing sFlow action. */ uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */ bool exit; /* No further actions should be processed. */ }; @@ -93,13 +101,13 @@ static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len, struct xlate_ctx *); static void xlate_normal(struct xlate_ctx *); static void xlate_report(struct xlate_ctx *, const char *); -static void xlate_table_action(struct xlate_ctx *, uint16_t in_port, +static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port, uint8_t table_id, bool may_packet_in); static bool input_vid_is_valid(uint16_t vid, struct ofbundle *, bool warn); static uint16_t input_vid_to_vlan(const struct ofbundle *, uint16_t vid); static void output_normal(struct xlate_ctx *, const struct ofbundle *, uint16_t vlan); -static void compose_output_action(struct xlate_ctx *, uint16_t ofp_port); +static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port); static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); @@ -123,7 +131,7 @@ vlan_is_mirrored(const struct ofmirror *m, int vlan) } static struct ofbundle * -lookup_input_bundle(const struct ofproto_dpif *ofproto, uint16_t in_port, +lookup_input_bundle(const struct ofproto_dpif *ofproto, ofp_port_t in_port, bool warn, struct ofport_dpif **in_ofportp) { struct ofport_dpif *ofport; @@ -176,7 +184,7 @@ add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow) const struct nlattr *a; size_t left; - in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port, + in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port.ofp_port, ctx->xin->packet != NULL, NULL); if (!in_bundle) { return; @@ -212,7 +220,7 @@ add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow) continue; } - ofport = get_odp_port(ofproto, nl_attr_get_u32(a)); + ofport = get_odp_port(ofproto, nl_attr_get_odp_port(a)); if (ofport && ofport->bundle) { mirrors |= ofport->bundle->dst_mirrors; } @@ -370,6 +378,7 @@ static void output_normal(struct xlate_ctx *ctx, const struct ofbundle *out_bundle, uint16_t 
vlan) { + ovs_be16 *flow_tci = &ctx->xin->flow.vlan_tci; struct ofport_dpif *port; uint16_t vid; ovs_be16 tci, old_tci; @@ -386,18 +395,18 @@ output_normal(struct xlate_ctx *ctx, const struct ofbundle *out_bundle, } } - old_tci = ctx->xin->flow.vlan_tci; + old_tci = *flow_tci; tci = htons(vid); if (tci || out_bundle->use_priority_tags) { - tci |= ctx->xin->flow.vlan_tci & htons(VLAN_PCP_MASK); + tci |= *flow_tci & htons(VLAN_PCP_MASK); if (tci) { tci |= htons(VLAN_CFI); } } - ctx->xin->flow.vlan_tci = tci; + *flow_tci = tci; compose_output_action(ctx, port->up.ofp_port); - ctx->xin->flow.vlan_tci = old_tci; + *flow_tci = old_tci; } /* A VM broadcasts a gratuitous ARP to indicate that it has resumed after @@ -465,8 +474,7 @@ update_learning_table(struct ofproto_dpif *ofproto, in_bundle->name, vlan); mac->port.p = in_bundle; - tag_set_add(&ofproto->backer->revalidate_set, - mac_learning_changed(ofproto->ml, mac)); + mac_learning_changed(ofproto->ml, mac); } } @@ -530,6 +538,8 @@ is_admissible(struct xlate_ctx *ctx, struct ofport_dpif *in_port, static void xlate_normal(struct xlate_ctx *ctx) { + struct flow_wildcards *wc = &ctx->xout->wc; + struct flow *flow = &ctx->xin->flow; struct ofport_dpif *in_port; struct ofbundle *in_bundle; struct mac_entry *mac; @@ -538,18 +548,11 @@ xlate_normal(struct xlate_ctx *ctx) ctx->xout->has_normal = true; - /* Check the dl_type, since we may check for gratuituous ARP. */ - memset(&ctx->xout->wc.masks.dl_type, 0xff, - sizeof ctx->xout->wc.masks.dl_type); - - memset(&ctx->xout->wc.masks.dl_src, 0xff, - sizeof ctx->xout->wc.masks.dl_src); - memset(&ctx->xout->wc.masks.dl_dst, 0xff, - sizeof ctx->xout->wc.masks.dl_dst); - memset(&ctx->xout->wc.masks.vlan_tci, 0xff, - sizeof ctx->xout->wc.masks.vlan_tci); + memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src); + memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst); + wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI); - in_bundle = lookup_input_bundle(ctx->ofproto, ctx->xin->flow.in_port, + in_bundle = lookup_input_bundle(ctx->ofproto, flow->in_port.ofp_port, ctx->xin->packet != NULL, &in_port); if (!in_bundle) { xlate_report(ctx, "no input bundle, dropping"); @@ -557,8 +560,8 @@ xlate_normal(struct xlate_ctx *ctx) } /* Drop malformed frames. */ - if (ctx->xin->flow.dl_type == htons(ETH_TYPE_VLAN) && - !(ctx->xin->flow.vlan_tci & htons(VLAN_CFI))) { + if (flow->dl_type == htons(ETH_TYPE_VLAN) && + !(flow->vlan_tci & htons(VLAN_CFI))) { if (ctx->xin->packet != NULL) { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial " @@ -582,7 +585,7 @@ xlate_normal(struct xlate_ctx *ctx) } /* Check VLAN. */ - vid = vlan_tci_to_vid(ctx->xin->flow.vlan_tci); + vid = vlan_tci_to_vid(flow->vlan_tci); if (!input_vid_is_valid(vid, in_bundle, ctx->xin->packet != NULL)) { xlate_report(ctx, "disallowed VLAN VID for this input port, dropping"); return; @@ -596,12 +599,11 @@ xlate_normal(struct xlate_ctx *ctx) /* Learn source MAC. */ if (ctx->xin->may_learn) { - update_learning_table(ctx->ofproto, &ctx->xin->flow, &ctx->xout->wc, - vlan, in_bundle); + update_learning_table(ctx->ofproto, flow, wc, vlan, in_bundle); } /* Determine output bundle. 
*/ - mac = mac_learning_lookup(ctx->ofproto->ml, ctx->xin->flow.dl_dst, vlan, + mac = mac_learning_lookup(ctx->ofproto->ml, flow->dl_dst, vlan, &ctx->xout->tags); if (mac) { if (mac->port.p != in_bundle) { @@ -656,7 +658,7 @@ compose_sample_action(const struct ofproto_dpif *ofproto, static void compose_sflow_cookie(const struct ofproto_dpif *ofproto, - ovs_be16 vlan_tci, uint32_t odp_port, + ovs_be16 vlan_tci, odp_port_t odp_port, unsigned int n_outputs, union user_action_cookie *cookie) { int ifindex; @@ -691,18 +693,18 @@ static size_t compose_sflow_action(const struct ofproto_dpif *ofproto, struct ofpbuf *odp_actions, const struct flow *flow, - uint32_t odp_port) + odp_port_t odp_port) { uint32_t probability; union user_action_cookie cookie; - if (!ofproto->sflow || flow->in_port == OFPP_NONE) { + if (!ofproto->sflow || flow->in_port.ofp_port == OFPP_NONE) { return 0; } probability = dpif_sflow_get_probability(ofproto->sflow); compose_sflow_cookie(ofproto, htons(0), odp_port, - odp_port == OVSP_NONE ? 0 : 1, &cookie); + odp_port == ODPP_NONE ? 0 : 1, &cookie); return compose_sample_action(ofproto, odp_actions, flow, probability, &cookie, sizeof cookie.sflow); @@ -735,7 +737,7 @@ compose_ipfix_action(const struct ofproto_dpif *ofproto, uint32_t probability; union user_action_cookie cookie; - if (!ofproto->ipfix || flow->in_port == OFPP_NONE) { + if (!ofproto->ipfix || flow->in_port.ofp_port == OFPP_NONE) { return; } @@ -754,7 +756,7 @@ add_sflow_action(struct xlate_ctx *ctx) { ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto, &ctx->xout->odp_actions, - &ctx->xin->flow, OVSP_NONE); + &ctx->xin->flow, ODPP_NONE); ctx->sflow_odp_port = 0; ctx->sflow_n_outputs = 0; } @@ -789,16 +791,53 @@ fix_sflow_action(struct xlate_ctx *ctx) ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie); } +static enum slow_path_reason +process_special(struct xlate_ctx *ctx, const struct flow *flow, + const struct ofport_dpif *ofport, const struct ofpbuf *packet) +{ + struct ofproto_dpif *ofproto = ctx->ofproto; + struct flow_wildcards *wc = &ctx->xout->wc; + + if (!ofport) { + return 0; + } else if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow, wc)) { + if (packet) { + cfm_process_heartbeat(ofport->cfm, packet); + } + return SLOW_CFM; + } else if (ofport->bfd && bfd_should_process_flow(flow, wc)) { + if (packet) { + bfd_process_packet(ofport->bfd, flow, packet); + } + return SLOW_BFD; + } else if (ofport->bundle && ofport->bundle->lacp + && flow->dl_type == htons(ETH_TYPE_LACP)) { + if (packet) { + lacp_process_packet(ofport->bundle->lacp, ofport, packet); + } + return SLOW_LACP; + } else if (ofproto->stp && stp_should_process_flow(flow, wc)) { + if (packet) { + stp_process_packet(ofport, packet); + } + return SLOW_STP; + } else { + return 0; + } +} + static void -compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port, +compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, bool check_stp) { const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port); + struct flow_wildcards *wc = &ctx->xout->wc; + struct flow *flow = &ctx->xin->flow; ovs_be16 flow_vlan_tci; uint32_t flow_skb_mark; uint8_t flow_nw_tos; - struct priority_to_dscp *pdscp; - uint32_t out_port, odp_port; + odp_port_t out_port, odp_port; + uint8_t dscp; /* If 'struct flow' gets additional metadata, we'll need to zero it out * before traversing a patch port. 
*/ @@ -815,44 +854,30 @@ compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port, return; } - if (netdev_vport_is_patch(ofport->up.netdev)) { - struct ofport_dpif *peer = ofport_get_peer(ofport); + if (ofport->peer) { + struct ofport_dpif *peer = ofport->peer; struct flow old_flow = ctx->xin->flow; - const struct ofproto_dpif *peer_ofproto; enum slow_path_reason special; - struct ofport_dpif *in_port; - - if (!peer) { - xlate_report(ctx, "Nonexistent patch port peer"); - return; - } - - peer_ofproto = ofproto_dpif_cast(peer->up.ofproto); - if (peer_ofproto->backer != ctx->ofproto->backer) { - xlate_report(ctx, "Patch port peer on a different datapath"); - return; - } ctx->ofproto = ofproto_dpif_cast(peer->up.ofproto); - ctx->xin->flow.in_port = peer->up.ofp_port; - ctx->xin->flow.metadata = htonll(0); - memset(&ctx->xin->flow.tunnel, 0, sizeof ctx->xin->flow.tunnel); - memset(ctx->xin->flow.regs, 0, sizeof ctx->xin->flow.regs); + flow->in_port.ofp_port = peer->up.ofp_port; + flow->metadata = htonll(0); + memset(&flow->tunnel, 0, sizeof flow->tunnel); + memset(flow->regs, 0, sizeof flow->regs); - in_port = get_ofp_port(ctx->ofproto, ctx->xin->flow.in_port); - special = process_special(ctx->ofproto, &ctx->xin->flow, in_port, + special = process_special(ctx, &ctx->xin->flow, peer, ctx->xin->packet); if (special) { ctx->xout->slow = special; - } else if (!in_port || may_receive(in_port, ctx)) { - if (!in_port || stp_forward_in_state(in_port->stp_state)) { - xlate_table_action(ctx, ctx->xin->flow.in_port, 0, true); + } else if (may_receive(peer, ctx)) { + if (stp_forward_in_state(peer->stp_state)) { + xlate_table_action(ctx, flow->in_port.ofp_port, 0, true); } else { /* Forwarding is disabled by STP. Let OFPP_NORMAL and the * learning action look at the packet, then drop it. */ struct flow old_base_flow = ctx->base_flow; size_t old_size = ctx->xout->odp_actions.size; - xlate_table_action(ctx, ctx->xin->flow.in_port, 0, true); + xlate_table_action(ctx, flow->in_port.ofp_port, 0, true); ctx->base_flow = old_base_flow; ctx->xout->odp_actions.size = old_size; } @@ -869,28 +894,28 @@ compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port, return; } - flow_vlan_tci = ctx->xin->flow.vlan_tci; - flow_skb_mark = ctx->xin->flow.skb_mark; - flow_nw_tos = ctx->xin->flow.nw_tos; + flow_vlan_tci = flow->vlan_tci; + flow_skb_mark = flow->skb_mark; + flow_nw_tos = flow->nw_tos; - pdscp = get_priority(ofport, ctx->xin->flow.skb_priority); - if (pdscp) { - ctx->xin->flow.nw_tos &= ~IP_DSCP_MASK; - ctx->xin->flow.nw_tos |= pdscp->dscp; + if (ofproto_dpif_dscp_from_priority(ofport, flow->skb_priority, &dscp)) { + wc->masks.nw_tos |= IP_ECN_MASK; + flow->nw_tos &= ~IP_DSCP_MASK; + flow->nw_tos |= dscp; } - if (ofport->tnl_port) { + if (ofport->is_tunnel) { /* Save tunnel metadata so that changes made due to * the Logical (tunnel) Port are not visible for any further * matches, while explicit set actions on tunnel metadata are. 
*/ - struct flow_tnl flow_tnl = ctx->xin->flow.tunnel; - odp_port = tnl_port_send(ofport->tnl_port, &ctx->xin->flow); - if (odp_port == OVSP_NONE) { + struct flow_tnl flow_tnl = flow->tunnel; + odp_port = tnl_port_send(ofport, flow, &ctx->xout->wc); + if (odp_port == ODPP_NONE) { xlate_report(ctx, "Tunneling decided against output"); goto out; /* restore flow_nw_tos */ } - if (ctx->xin->flow.tunnel.ip_dst == ctx->orig_tunnel_ip_dst) { + if (flow->tunnel.ip_dst == ctx->orig_tunnel_ip_dst) { xlate_report(ctx, "Not tunneling to our own address"); goto out; /* restore flow_nw_tos */ } @@ -898,61 +923,51 @@ compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port, netdev_vport_inc_tx(ofport->up.netdev, ctx->xin->resubmit_stats); } out_port = odp_port; - commit_odp_tunnel_action(&ctx->xin->flow, &ctx->base_flow, + commit_odp_tunnel_action(flow, &ctx->base_flow, &ctx->xout->odp_actions); - ctx->xin->flow.tunnel = flow_tnl; /* Restore tunnel metadata */ + flow->tunnel = flow_tnl; /* Restore tunnel metadata */ } else { - uint16_t vlandev_port; + ofp_port_t vlandev_port; + odp_port = ofport->odp_port; + if (!hmap_is_empty(&ctx->ofproto->realdev_vid_map)) { + wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI); + } vlandev_port = vsp_realdev_to_vlandev(ctx->ofproto, ofp_port, - ctx->xin->flow.vlan_tci); + flow->vlan_tci); if (vlandev_port == ofp_port) { out_port = odp_port; } else { out_port = ofp_port_to_odp_port(ctx->ofproto, vlandev_port); - ctx->xin->flow.vlan_tci = htons(0); + flow->vlan_tci = htons(0); } - ctx->xin->flow.skb_mark &= ~IPSEC_MARK; + flow->skb_mark &= ~IPSEC_MARK; } - commit_odp_actions(&ctx->xin->flow, &ctx->base_flow, - &ctx->xout->odp_actions); - nl_msg_put_u32(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port); - ctx->sflow_odp_port = odp_port; - ctx->sflow_n_outputs++; - ctx->xout->nf_output_iface = ofp_port; + if (out_port != ODPP_NONE) { + commit_odp_actions(flow, &ctx->base_flow, + &ctx->xout->odp_actions, &ctx->xout->wc); + nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT, + out_port); + + ctx->sflow_odp_port = odp_port; + ctx->sflow_n_outputs++; + ctx->xout->nf_output_iface = ofp_port; + } - /* Restore flow */ - ctx->xin->flow.vlan_tci = flow_vlan_tci; - ctx->xin->flow.skb_mark = flow_skb_mark; out: - ctx->xin->flow.nw_tos = flow_nw_tos; + /* Restore flow */ + flow->vlan_tci = flow_vlan_tci; + flow->skb_mark = flow_skb_mark; + flow->nw_tos = flow_nw_tos; } static void -compose_output_action(struct xlate_ctx *ctx, uint16_t ofp_port) +compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port) { compose_output_action__(ctx, ofp_port, true); } -static void -tag_the_flow(struct xlate_ctx *ctx, struct rule_dpif *rule) -{ - struct ofproto_dpif *ofproto = ctx->ofproto; - uint8_t table_id = ctx->table_id; - - if (table_id > 0 && table_id < N_TABLES) { - struct table_dpif *table = &ofproto->tables[table_id]; - if (table->other_table) { - ctx->xout->tags |= (rule && rule->tag - ? rule->tag - : rule_calculate_tag(&ctx->xin->flow, - &table->other_table->mask, - table->basis)); - } - } -} - /* Common rule processing in one place to avoid duplicating code. 
*/ static struct rule_dpif * ctx_rule_hooks(struct xlate_ctx *ctx, struct rule_dpif *rule, @@ -979,25 +994,26 @@ ctx_rule_hooks(struct xlate_ctx *ctx, struct rule_dpif *rule, static void xlate_table_action(struct xlate_ctx *ctx, - uint16_t in_port, uint8_t table_id, bool may_packet_in) + ofp_port_t in_port, uint8_t table_id, bool may_packet_in) { if (ctx->recurse < MAX_RESUBMIT_RECURSION) { struct rule_dpif *rule; - uint16_t old_in_port = ctx->xin->flow.in_port; + ofp_port_t old_in_port = ctx->xin->flow.in_port.ofp_port; uint8_t old_table_id = ctx->table_id; ctx->table_id = table_id; /* Look up a flow with 'in_port' as the input port. */ - ctx->xin->flow.in_port = in_port; + ctx->xin->flow.in_port.ofp_port = in_port; rule = rule_dpif_lookup_in_table(ctx->ofproto, &ctx->xin->flow, &ctx->xout->wc, table_id); - tag_the_flow(ctx, rule); + ctx->xout->tags |= calculate_flow_tag(ctx->ofproto, &ctx->xin->flow, + ctx->table_id, rule); /* Restore the original input port. Otherwise OFPP_NORMAL and * OFPP_IN_PORT will have surprising behavior. */ - ctx->xin->flow.in_port = old_in_port; + ctx->xin->flow.in_port.ofp_port = old_in_port; rule = ctx_rule_hooks(ctx, rule, may_packet_in); @@ -1025,12 +1041,12 @@ static void xlate_ofpact_resubmit(struct xlate_ctx *ctx, const struct ofpact_resubmit *resubmit) { - uint16_t in_port; + ofp_port_t in_port; uint8_t table_id; in_port = resubmit->in_port; if (in_port == OFPP_IN_PORT) { - in_port = ctx->xin->flow.in_port; + in_port = ctx->xin->flow.in_port.ofp_port; } table_id = resubmit->table_id; @@ -1047,9 +1063,9 @@ flood_packets(struct xlate_ctx *ctx, bool all) struct ofport_dpif *ofport; HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) { - uint16_t ofp_port = ofport->up.ofp_port; + ofp_port_t ofp_port = ofport->up.ofp_port; - if (ofp_port == ctx->xin->flow.in_port) { + if (ofp_port == ctx->xin->flow.in_port.ofp_port) { continue; } @@ -1085,7 +1101,7 @@ execute_controller_action(struct xlate_ctx *ctx, int len, memset(&key.tunnel, 0, sizeof key.tunnel); commit_odp_actions(&ctx->xin->flow, &ctx->base_flow, - &ctx->xout->odp_actions); + &ctx->xout->odp_actions, &ctx->xout->wc); odp_execute_actions(NULL, packet, &key, ctx->xout->odp_actions.data, ctx->xout->odp_actions.size, NULL, NULL); @@ -1107,53 +1123,53 @@ execute_controller_action(struct xlate_ctx *ctx, int len, static void compose_mpls_push_action(struct xlate_ctx *ctx, ovs_be16 eth_type) { + struct flow_wildcards *wc = &ctx->xout->wc; + struct flow *flow = &ctx->xin->flow; + ovs_assert(eth_type_mpls(eth_type)); - memset(&ctx->xout->wc.masks.dl_type, 0xff, - sizeof ctx->xout->wc.masks.dl_type); - memset(&ctx->xout->wc.masks.mpls_lse, 0xff, - sizeof ctx->xout->wc.masks.mpls_lse); - memset(&ctx->xout->wc.masks.mpls_depth, 0xff, - sizeof ctx->xout->wc.masks.mpls_depth); + memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse); + memset(&wc->masks.mpls_depth, 0xff, sizeof wc->masks.mpls_depth); - if (ctx->xin->flow.mpls_depth) { - ctx->xin->flow.mpls_lse &= ~htonl(MPLS_BOS_MASK); - ctx->xin->flow.mpls_depth++; + if (flow->mpls_depth) { + flow->mpls_lse &= ~htonl(MPLS_BOS_MASK); + flow->mpls_depth++; } else { ovs_be32 label; uint8_t tc, ttl; - if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IPV6)) { + if (flow->dl_type == htons(ETH_TYPE_IPV6)) { label = htonl(0x2); /* IPV6 Explicit Null. */ } else { label = htonl(0x0); /* IPV4 Explicit Null. */ } - tc = (ctx->xin->flow.nw_tos & IP_DSCP_MASK) >> 2; - ttl = ctx->xin->flow.nw_ttl ? 
ctx->xin->flow.nw_ttl : 0x40; - ctx->xin->flow.mpls_lse = set_mpls_lse_values(ttl, tc, 1, label); - ctx->xin->flow.mpls_depth = 1; - } - ctx->xin->flow.dl_type = eth_type; + wc->masks.nw_tos |= IP_DSCP_MASK; + wc->masks.nw_ttl = 0xff; + tc = (flow->nw_tos & IP_DSCP_MASK) >> 2; + ttl = flow->nw_ttl ? flow->nw_ttl : 0x40; + flow->mpls_lse = set_mpls_lse_values(ttl, tc, 1, label); + flow->mpls_depth = 1; + } + flow->dl_type = eth_type; } static void compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type) { + struct flow_wildcards *wc = &ctx->xout->wc; + struct flow *flow = &ctx->xin->flow; + ovs_assert(eth_type_mpls(ctx->xin->flow.dl_type)); ovs_assert(!eth_type_mpls(eth_type)); - memset(&ctx->xout->wc.masks.dl_type, 0xff, - sizeof ctx->xout->wc.masks.dl_type); - memset(&ctx->xout->wc.masks.mpls_lse, 0xff, - sizeof ctx->xout->wc.masks.mpls_lse); - memset(&ctx->xout->wc.masks.mpls_depth, 0xff, - sizeof ctx->xout->wc.masks.mpls_depth); - - if (ctx->xin->flow.mpls_depth) { - ctx->xin->flow.mpls_depth--; - ctx->xin->flow.mpls_lse = htonl(0); - if (!ctx->xin->flow.mpls_depth) { - ctx->xin->flow.dl_type = eth_type; + memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse); + memset(&wc->masks.mpls_depth, 0xff, sizeof wc->masks.mpls_depth); + + if (flow->mpls_depth) { + flow->mpls_depth--; + flow->mpls_lse = htonl(0); + if (!flow->mpls_depth) { + flow->dl_type = eth_type; } } } @@ -1161,13 +1177,15 @@ compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type) static bool compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids) { - if (ctx->xin->flow.dl_type != htons(ETH_TYPE_IP) && - ctx->xin->flow.dl_type != htons(ETH_TYPE_IPV6)) { + struct flow *flow = &ctx->xin->flow; + + if (!is_ip_any(flow)) { return false; } - if (ctx->xin->flow.nw_ttl > 1) { - ctx->xin->flow.nw_ttl--; + ctx->xout->wc.masks.nw_ttl = 0xff; + if (flow->nw_ttl > 1) { + flow->nw_ttl--; return false; } else { size_t i; @@ -1196,15 +1214,19 @@ compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl) static bool compose_dec_mpls_ttl_action(struct xlate_ctx *ctx) { - uint8_t ttl = mpls_lse_to_ttl(ctx->xin->flow.mpls_lse); + struct flow *flow = &ctx->xin->flow; + uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse); + struct flow_wildcards *wc = &ctx->xout->wc; - if (!eth_type_mpls(ctx->xin->flow.dl_type)) { + memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse); + + if (!eth_type_mpls(flow->dl_type)) { return false; } if (ttl > 1) { ttl--; - set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl); + set_mpls_lse_ttl(&flow->mpls_lse, ttl); return false; } else { execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0); @@ -1216,18 +1238,19 @@ compose_dec_mpls_ttl_action(struct xlate_ctx *ctx) static void xlate_output_action(struct xlate_ctx *ctx, - uint16_t port, uint16_t max_len, bool may_packet_in) + ofp_port_t port, uint16_t max_len, bool may_packet_in) { - uint16_t prev_nf_output_iface = ctx->xout->nf_output_iface; + ofp_port_t prev_nf_output_iface = ctx->xout->nf_output_iface; ctx->xout->nf_output_iface = NF_OUT_DROP; switch (port) { case OFPP_IN_PORT: - compose_output_action(ctx, ctx->xin->flow.in_port); + compose_output_action(ctx, ctx->xin->flow.in_port.ofp_port); break; case OFPP_TABLE: - xlate_table_action(ctx, ctx->xin->flow.in_port, 0, may_packet_in); + xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port, + 0, may_packet_in); break; case OFPP_NORMAL: xlate_normal(ctx); @@ -1245,7 +1268,7 @@ xlate_output_action(struct xlate_ctx *ctx, break; case OFPP_LOCAL: default: - if (port != 
ctx->xin->flow.in_port) { + if (port != ctx->xin->flow.in_port.ofp_port) { compose_output_action(ctx, port); } else { xlate_report(ctx, "skipping output to input port"); @@ -1273,7 +1296,8 @@ xlate_output_reg_action(struct xlate_ctx *ctx, memset(&value, 0xff, sizeof value); mf_write_subfield_flow(&or->src, &value, &ctx->xout->wc.masks); - xlate_output_action(ctx, port, or->max_len, false); + xlate_output_action(ctx, u16_to_ofp(port), + or->max_len, false); } } @@ -1281,14 +1305,13 @@ static void xlate_enqueue_action(struct xlate_ctx *ctx, const struct ofpact_enqueue *enqueue) { - uint16_t ofp_port = enqueue->port; + ofp_port_t ofp_port = enqueue->port; uint32_t queue_id = enqueue->queue; uint32_t flow_priority, priority; int error; /* Translate queue to priority. */ - error = dpif_queue_to_priority(ctx->ofproto->backer->dpif, - queue_id, &priority); + error = ofproto_dpif_queue_to_priority(ctx->ofproto, queue_id, &priority); if (error) { /* Fall back to ordinary output action. */ xlate_output_action(ctx, enqueue->port, 0, false); @@ -1297,8 +1320,8 @@ xlate_enqueue_action(struct xlate_ctx *ctx, /* Check output port. */ if (ofp_port == OFPP_IN_PORT) { - ofp_port = ctx->xin->flow.in_port; - } else if (ofp_port == ctx->xin->flow.in_port) { + ofp_port = ctx->xin->flow.in_port.ofp_port; + } else if (ofp_port == ctx->xin->flow.in_port.ofp_port) { return; } @@ -1321,8 +1344,8 @@ xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id) { uint32_t skb_priority; - if (!dpif_queue_to_priority(ctx->ofproto->backer->dpif, - queue_id, &skb_priority)) { + if (!ofproto_dpif_queue_to_priority(ctx->ofproto, queue_id, + &skb_priority)) { ctx->xin->flow.skb_priority = skb_priority; } else { /* Couldn't translate queue to a priority. Nothing to do. A warning @@ -1331,7 +1354,7 @@ xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id) } static bool -slave_enabled_cb(uint16_t ofp_port, void *ofproto_) +slave_enabled_cb(ofp_port_t ofp_port, void *ofproto_) { struct ofproto_dpif *ofproto = ofproto_; struct ofport_dpif *port; @@ -1356,12 +1379,12 @@ static void xlate_bundle_action(struct xlate_ctx *ctx, const struct ofpact_bundle *bundle) { - uint16_t port; + ofp_port_t port; port = bundle_execute(bundle, &ctx->xin->flow, &ctx->xout->wc, slave_enabled_cb, ctx->ofproto); if (bundle->dst.field) { - nxm_reg_load(&bundle->dst, port, &ctx->xin->flow); + nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow); } else { xlate_output_action(ctx, port, 0, false); } @@ -1429,7 +1452,7 @@ xlate_sample_action(struct xlate_ctx *ctx, uint32_t probability = (os->probability << 16) | os->probability; commit_odp_actions(&ctx->xin->flow, &ctx->base_flow, - &ctx->xout->odp_actions); + &ctx->xout->odp_actions, &ctx->xout->wc); compose_flow_sample_cookie(os->probability, os->collector_set_id, os->obs_domain_id, os->obs_point_id, &cookie); @@ -1481,6 +1504,8 @@ static void do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, struct xlate_ctx *ctx) { + struct flow_wildcards *wc = &ctx->xout->wc; + struct flow *flow = &ctx->xin->flow; bool was_evictable = true; const struct ofpact *a; @@ -1517,83 +1542,66 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, break; case OFPACT_SET_VLAN_VID: - ctx->xin->flow.vlan_tci &= ~htons(VLAN_VID_MASK); - ctx->xin->flow.vlan_tci |= - (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid) - | htons(VLAN_CFI)); + flow->vlan_tci &= ~htons(VLAN_VID_MASK); + flow->vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid) + | htons(VLAN_CFI)); break; case 
OFPACT_SET_VLAN_PCP: - ctx->xin->flow.vlan_tci &= ~htons(VLAN_PCP_MASK); - ctx->xin->flow.vlan_tci |= + flow->vlan_tci &= ~htons(VLAN_PCP_MASK); + flow->vlan_tci |= htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp << VLAN_PCP_SHIFT) | VLAN_CFI); break; case OFPACT_STRIP_VLAN: - ctx->xin->flow.vlan_tci = htons(0); + flow->vlan_tci = htons(0); break; case OFPACT_PUSH_VLAN: /* XXX 802.1AD(QinQ) */ - ctx->xin->flow.vlan_tci = htons(VLAN_CFI); + flow->vlan_tci = htons(VLAN_CFI); break; case OFPACT_SET_ETH_SRC: - memcpy(ctx->xin->flow.dl_src, ofpact_get_SET_ETH_SRC(a)->mac, - ETH_ADDR_LEN); + memcpy(flow->dl_src, ofpact_get_SET_ETH_SRC(a)->mac, ETH_ADDR_LEN); break; case OFPACT_SET_ETH_DST: - memcpy(ctx->xin->flow.dl_dst, ofpact_get_SET_ETH_DST(a)->mac, - ETH_ADDR_LEN); + memcpy(flow->dl_dst, ofpact_get_SET_ETH_DST(a)->mac, ETH_ADDR_LEN); break; case OFPACT_SET_IPV4_SRC: - memset(&ctx->xout->wc.masks.dl_type, 0xff, - sizeof ctx->xout->wc.masks.dl_type); - if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) { - ctx->xin->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4; + if (flow->dl_type == htons(ETH_TYPE_IP)) { + flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4; } break; case OFPACT_SET_IPV4_DST: - memset(&ctx->xout->wc.masks.dl_type, 0xff, - sizeof ctx->xout->wc.masks.dl_type); - if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) { - ctx->xin->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4; + if (flow->dl_type == htons(ETH_TYPE_IP)) { + flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4; } break; case OFPACT_SET_IPV4_DSCP: /* OpenFlow 1.0 only supports IPv4. */ - memset(&ctx->xout->wc.masks.dl_type, 0xff, - sizeof ctx->xout->wc.masks.dl_type); - if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) { - ctx->xin->flow.nw_tos &= ~IP_DSCP_MASK; - ctx->xin->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp; + if (flow->dl_type == htons(ETH_TYPE_IP)) { + flow->nw_tos &= ~IP_DSCP_MASK; + flow->nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp; } break; case OFPACT_SET_L4_SRC_PORT: - memset(&ctx->xout->wc.masks.dl_type, 0xff, - sizeof ctx->xout->wc.masks.dl_type); - memset(&ctx->xout->wc.masks.nw_proto, 0xff, - sizeof ctx->xout->wc.masks.nw_proto); - if (is_ip_any(&ctx->xin->flow)) { - ctx->xin->flow.tp_src = - htons(ofpact_get_SET_L4_SRC_PORT(a)->port); + memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto); + if (is_ip_any(flow)) { + flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port); } break; case OFPACT_SET_L4_DST_PORT: - memset(&ctx->xout->wc.masks.dl_type, 0xff, - sizeof ctx->xout->wc.masks.dl_type); - memset(&ctx->xout->wc.masks.nw_proto, 0xff, - sizeof ctx->xout->wc.masks.nw_proto); - if (is_ip_any(&ctx->xin->flow)) { - ctx->xin->flow.tp_dst = - htons(ofpact_get_SET_L4_DST_PORT(a)->port); + memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto); + if (is_ip_any(flow)) { + flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port); } break; @@ -1602,8 +1610,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, break; case OFPACT_SET_TUNNEL: - ctx->xin->flow.tunnel.tun_id = - htonll(ofpact_get_SET_TUNNEL(a)->tun_id); + flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id); break; case OFPACT_SET_QUEUE: @@ -1611,29 +1618,24 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, break; case OFPACT_POP_QUEUE: - memset(&ctx->xout->wc.masks.skb_priority, 0xff, - sizeof ctx->xout->wc.masks.skb_priority); - - ctx->xin->flow.skb_priority = ctx->orig_skb_priority; + flow->skb_priority = ctx->orig_skb_priority; break; case OFPACT_REG_MOVE: - 
nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->xin->flow, - &ctx->xout->wc); + nxm_execute_reg_move(ofpact_get_REG_MOVE(a), flow, wc); break; case OFPACT_REG_LOAD: - nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->xin->flow); + nxm_execute_reg_load(ofpact_get_REG_LOAD(a), flow); break; case OFPACT_STACK_PUSH: - nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), &ctx->xin->flow, - &ctx->xout->wc, &ctx->stack); + nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc, + &ctx->stack); break; case OFPACT_STACK_POP: - nxm_execute_stack_pop(ofpact_get_STACK_POP(a), &ctx->xin->flow, - &ctx->stack); + nxm_execute_stack_pop(ofpact_get_STACK_POP(a), flow, &ctx->stack); break; case OFPACT_PUSH_MPLS: @@ -1658,8 +1660,6 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, break; case OFPACT_DEC_TTL: - memset(&ctx->xout->wc.masks.dl_type, 0xff, - sizeof ctx->xout->wc.masks.dl_type); if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) { goto out; } @@ -1670,12 +1670,10 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, break; case OFPACT_MULTIPATH: - multipath_execute(ofpact_get_MULTIPATH(a), &ctx->xin->flow, - &ctx->xout->wc); + multipath_execute(ofpact_get_MULTIPATH(a), flow, wc); break; case OFPACT_BUNDLE: - ctx->ofproto->has_bundle_action = true; xlate_bundle_action(ctx, ofpact_get_BUNDLE(a)); break; @@ -1692,10 +1690,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, break; case OFPACT_FIN_TIMEOUT: - memset(&ctx->xout->wc.masks.dl_type, 0xff, - sizeof ctx->xout->wc.masks.dl_type); - memset(&ctx->xout->wc.masks.nw_proto, 0xff, - sizeof ctx->xout->wc.masks.nw_proto); + memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto); ctx->xout->has_fin_timeout = true; xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a)); break; @@ -1710,8 +1705,12 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, case OFPACT_WRITE_METADATA: metadata = ofpact_get_WRITE_METADATA(a); - ctx->xin->flow.metadata &= ~metadata->mask; - ctx->xin->flow.metadata |= metadata->metadata & metadata->mask; + flow->metadata &= ~metadata->mask; + flow->metadata |= metadata->metadata & metadata->mask; + break; + + case OFPACT_METER: + /* Not implemented yet. */ break; case OFPACT_GOTO_TABLE: { @@ -1724,10 +1723,11 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, ctx->table_id = ogt->table_id; /* Look up a flow from the new table. */ - rule = rule_dpif_lookup_in_table(ctx->ofproto, &ctx->xin->flow, - &ctx->xout->wc, ctx->table_id); + rule = rule_dpif_lookup_in_table(ctx->ofproto, flow, wc, + ctx->table_id); - tag_the_flow(ctx, rule); + ctx->xout->tags = calculate_flow_tag(ctx->ofproto, &ctx->xin->flow, + ctx->table_id, rule); rule = ctx_rule_hooks(ctx, rule, true); @@ -1822,6 +1822,22 @@ xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src) src->odp_actions.size); } +static bool +actions_output_to_local_port(const struct xlate_ctx *ctx) +{ + odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->ofproto, OFPP_LOCAL); + const struct nlattr *a; + unsigned int left; + + NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->xout->odp_actions.data, + ctx->xout->odp_actions.size) { + if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT + && nl_attr_get_odp_port(a) == local_odp_port) { + return true; + } + } + return false; +} /* Translates the 'ofpacts_len' bytes of "struct ofpacts" starting at 'ofpacts' * into datapath actions in 'odp_actions', using 'ctx'. 
*/ @@ -1833,6 +1849,9 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) * tracing purposes. */ static bool hit_resubmit_limit; + struct flow_wildcards *wc = &xout->wc; + struct flow *flow = &xin->flow; + enum slow_path_reason special; const struct ofpact *ofpacts; struct ofport_dpif *in_port; @@ -1869,39 +1888,21 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) ctx.ofproto = xin->ofproto; ctx.rule = xin->rule; - ctx.base_flow = ctx.xin->flow; + ctx.base_flow = *flow; memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel); - ctx.orig_tunnel_ip_dst = ctx.xin->flow.tunnel.ip_dst; + ctx.orig_tunnel_ip_dst = flow->tunnel.ip_dst; - flow_wildcards_init_catchall(&ctx.xout->wc); - memset(&ctx.xout->wc.masks.in_port, 0xff, - sizeof ctx.xout->wc.masks.in_port); + flow_wildcards_init_catchall(wc); + memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port); + memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority); + memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type); + wc->masks.nw_frag |= FLOW_NW_FRAG_MASK; if (tnl_port_should_receive(&ctx.xin->flow)) { - memset(&ctx.xout->wc.masks.tunnel, 0xff, - sizeof ctx.xout->wc.masks.tunnel); + memset(&wc->masks.tunnel, 0xff, sizeof wc->masks.tunnel); } - - /* Disable most wildcarding for NetFlow. */ if (xin->ofproto->netflow) { - memset(&ctx.xout->wc.masks.dl_src, 0xff, - sizeof ctx.xout->wc.masks.dl_src); - memset(&ctx.xout->wc.masks.dl_dst, 0xff, - sizeof ctx.xout->wc.masks.dl_dst); - memset(&ctx.xout->wc.masks.dl_type, 0xff, - sizeof ctx.xout->wc.masks.dl_type); - memset(&ctx.xout->wc.masks.vlan_tci, 0xff, - sizeof ctx.xout->wc.masks.vlan_tci); - memset(&ctx.xout->wc.masks.nw_proto, 0xff, - sizeof ctx.xout->wc.masks.nw_proto); - memset(&ctx.xout->wc.masks.nw_src, 0xff, - sizeof ctx.xout->wc.masks.nw_src); - memset(&ctx.xout->wc.masks.nw_dst, 0xff, - sizeof ctx.xout->wc.masks.nw_dst); - memset(&ctx.xout->wc.masks.tp_src, 0xff, - sizeof ctx.xout->wc.masks.tp_src); - memset(&ctx.xout->wc.masks.tp_dst, 0xff, - sizeof ctx.xout->wc.masks.tp_dst); + netflow_mask_wc(flow, wc); } ctx.xout->tags = 0; @@ -1918,7 +1919,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) ctx.recurse = 0; ctx.max_resubmit_trigger = false; - ctx.orig_skb_priority = ctx.xin->flow.skb_priority; + ctx.orig_skb_priority = flow->skb_priority; ctx.table_id = 0; ctx.exit = false; @@ -1937,15 +1938,15 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) if (ctx.ofproto->has_mirrors || hit_resubmit_limit) { /* Do this conditionally because the copy is expensive enough that it * shows up in profiles. */ - orig_flow = ctx.xin->flow; + orig_flow = *flow; } - if (ctx.xin->flow.nw_frag & FLOW_NW_FRAG_ANY) { + if (flow->nw_frag & FLOW_NW_FRAG_ANY) { switch (ctx.ofproto->up.frag_handling) { case OFPC_FRAG_NORMAL: /* We must pretend that transport ports are unavailable. 
*/ - ctx.xin->flow.tp_src = ctx.base_flow.tp_src = htons(0); - ctx.xin->flow.tp_dst = ctx.base_flow.tp_dst = htons(0); + flow->tp_src = ctx.base_flow.tp_src = htons(0); + flow->tp_dst = ctx.base_flow.tp_dst = htons(0); break; case OFPC_FRAG_DROP: @@ -1963,19 +1964,17 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) } } - in_port = get_ofp_port(ctx.ofproto, ctx.xin->flow.in_port); - special = process_special(ctx.ofproto, &ctx.xin->flow, in_port, - ctx.xin->packet); + in_port = get_ofp_port(ctx.ofproto, flow->in_port.ofp_port); + special = process_special(&ctx, flow, in_port, ctx.xin->packet); if (special) { ctx.xout->slow = special; } else { static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1); size_t sample_actions_len; - uint32_t local_odp_port; - if (ctx.xin->flow.in_port - != vsp_realdev_to_vlandev(ctx.ofproto, ctx.xin->flow.in_port, - ctx.xin->flow.vlan_tci)) { + if (flow->in_port.ofp_port + != vsp_realdev_to_vlandev(ctx.ofproto, flow->in_port.ofp_port, + flow->vlan_tci)) { ctx.base_flow.vlan_tci = 0; } @@ -2008,11 +2007,9 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) } } - local_odp_port = ofp_port_to_odp_port(ctx.ofproto, OFPP_LOCAL); - if (!connmgr_must_output_local(ctx.ofproto->up.connmgr, &ctx.xin->flow, - local_odp_port, - ctx.xout->odp_actions.data, - ctx.xout->odp_actions.size)) { + if (connmgr_has_in_band(ctx.ofproto->up.connmgr) + && in_band_must_output_to_local_port(flow) + && !actions_output_to_local_port(&ctx)) { compose_output_action(&ctx, OFPP_LOCAL); } if (ctx.ofproto->has_mirrors) { @@ -2025,7 +2022,6 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) /* Clear the metadata and register wildcard masks, because we won't * use non-header fields as part of the cache. */ - memset(&ctx.xout->wc.masks.metadata, 0, - sizeof ctx.xout->wc.masks.metadata); - memset(&ctx.xout->wc.masks.regs, 0, sizeof ctx.xout->wc.masks.regs); + memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata); + memset(&wc->masks.regs, 0, sizeof wc->masks.regs); }
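
Most of this patch is mechanical: raw uint16_t/uint32_t port numbers become the ofp_port_t and odp_port_t typedefs, with explicit conversions (u16_to_ofp(), ofp_to_u16(), ofp_port_to_odp_port()) at the few places where the two namespaces meet. A minimal sketch of the idea, using hypothetical struct wrappers rather than OVS's real typedef machinery:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical struct-wrapped port types; OVS's actual definitions
     * differ, but the goal is the same: make OpenFlow and datapath port
     * numbers incompatible at compile time so they cannot be mixed up. */
    typedef struct { uint16_t v; } ofp_port_t;   /* OpenFlow port number. */
    typedef struct { uint32_t v; } odp_port_t;   /* Datapath port number. */

    static inline ofp_port_t u16_to_ofp(uint16_t v) { return (ofp_port_t){ v }; }
    static inline uint16_t ofp_to_u16(ofp_port_t p) { return p.v; }

    /* Accepts only OpenFlow ports; passing an odp_port_t will not compile. */
    static void compose_output(ofp_port_t ofp_port)
    {
        printf("output to OpenFlow port %" PRIu16 "\n", ofp_to_u16(ofp_port));
    }

    int main(void)
    {
        odp_port_t odp = { 5 };
        (void) odp;                     /* compose_output(odp): type error. */
        compose_output(u16_to_ofp(5));  /* Conversions must be spelled out. */
        return 0;
    }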
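
The patch also moves MAX_RESUBMIT_RECURSION (64) into this file; xlate_table_action() only recurses while ctx->recurse is below that bound, so chained resubmit and goto-table actions cannot make translation loop forever. A self-contained sketch of the guard, with hypothetical table contents in which every rule resubmits onward:

    #include <stdio.h>

    #define MAX_RESUBMIT_RECURSION 64   /* Same bound as the patch. */

    struct ctx {
        int recurse;                    /* Current resubmit depth. */
    };

    static void xlate_table_action(struct ctx *ctx, int table_id)
    {
        if (ctx->recurse >= MAX_RESUBMIT_RECURSION) {
            fprintf(stderr, "resubmit limit reached at table %d\n", table_id);
            return;                     /* Translation stops here. */
        }
        ctx->recurse++;
        xlate_table_action(ctx, table_id + 1);   /* "Rule actions". */
        ctx->recurse--;
    }

    int main(void)
    {
        struct ctx ctx = { 0 };
        xlate_table_action(&ctx, 0);    /* Bails out after 64 levels. */
        return 0;
    }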
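
A recurring change to the flow wildcards: rather than memset()ing whole field masks to 0xff, the translation now ORs in only the bits it actually reads. For example, xlate_normal() un-wildcards just VLAN_VID_MASK|VLAN_CFI and leaves the PCP bits wildcarded. A toy illustration (made-up TCI values) of why the narrower mask lets one cached flow cover more packets:

    #include <stdint.h>
    #include <stdio.h>

    #define VLAN_VID_MASK 0x0fff
    #define VLAN_CFI      0x1000

    int main(void)
    {
        /* Two TCIs differing only in the PCP (priority) bits. */
        uint16_t tci_a = 0x3064;                    /* PCP 1, CFI, VID 100. */
        uint16_t tci_b = 0x5064;                    /* PCP 2, CFI, VID 100. */
        uint16_t mask  = VLAN_VID_MASK | VLAN_CFI;  /* Bits actually read. */

        /* Under the narrow mask both packets hit one cached flow; a full
         * 0xffff mask would needlessly split them into two entries. */
        printf("same cached flow: %d\n", (tci_a & mask) == (tci_b & mask));
        return 0;
    }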
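
output_normal() and compose_output_action__() both follow a save/mutate/emit/restore pattern: per-port rewrites (VLAN tag, DSCP, skb_mark) apply to the flow only while composing that one port's output and are then undone, as the consolidated "out:" label above makes explicit. A stripped-down sketch of the pattern:

    #include <stdint.h>
    #include <stdio.h>

    struct flow {
        uint16_t vlan_tci;      /* skb_mark and nw_tos get the same care. */
    };

    /* Tag the packet for one output port, emit, then restore the flow so
     * later outputs see the original headers. */
    static void output_with_vlan(struct flow *flow, uint16_t tci, int port)
    {
        uint16_t old_tci = flow->vlan_tci;          /* Save. */
        flow->vlan_tci = tci;
        printf("port %d sees tci=%#x\n", port, flow->vlan_tci);
        flow->vlan_tci = old_tci;                   /* Restore. */
    }

    int main(void)
    {
        struct flow flow = { .vlan_tci = 0 };
        output_with_vlan(&flow, 0x1064, 1);   /* VLAN 100 on port 1... */
        output_with_vlan(&flow, 0x10c8, 2);   /* ...VLAN 200 on port 2. */
        return 0;
    }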
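
compose_mpls_push_action() builds the first label stack entry with set_mpls_lse_values(ttl, tc, 1, label), choosing the IPv4 or IPv6 Explicit Null label and defaulting the TTL to 0x40. The 32-bit MPLS LSE packs label, traffic class, bottom-of-stack flag, and TTL (RFC 3032); a sketch of that packing (the real helper lives elsewhere in OVS, and the stored value is additionally byte-swapped to network order):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Pack an MPLS label stack entry: label in bits 31..12, traffic class
     * in bits 11..9, bottom-of-stack flag in bit 8, TTL in bits 7..0. */
    static uint32_t mpls_lse(uint32_t label, uint8_t tc, int bos, uint8_t ttl)
    {
        return (label & 0xfffff) << 12 | (uint32_t) (tc & 7) << 9
               | (uint32_t) (bos & 1) << 8 | ttl;
    }

    int main(void)
    {
        /* Fresh push onto an IPv4 packet, as in the hunk above: IPv4
         * Explicit Null label (0), TC from DSCP, BOS set, default TTL. */
        printf("lse=0x%08" PRIx32 "\n", mpls_lse(0, 0, 1, 0x40));
        return 0;
    }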
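
The new actions_output_to_local_port() replaces connmgr_must_output_local() by scanning the already-composed datapath actions for an OVS_ACTION_ATTR_OUTPUT naming the local port. A simplified, self-contained version of that walk, assuming 4-byte-aligned netlink attributes and that OFPP_LOCAL translates to datapath port 0:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified netlink attribute header: total length, then type. */
    struct nlattr {
        uint16_t nla_len;
        uint16_t nla_type;
    };

    #define NLA_ALIGN(len) (((len) + 3) & ~3u)
    #define OVS_ACTION_ATTR_OUTPUT 1        /* Illustrative value. */

    /* Walk a buffer of actions for an OUTPUT attribute naming 'port'. */
    static bool
    actions_output_to(const void *acts, size_t size, uint32_t port)
    {
        const uint8_t *p = acts;

        while (size >= sizeof(struct nlattr)) {
            struct nlattr a;

            memcpy(&a, p, sizeof a);
            if (a.nla_len < sizeof a || NLA_ALIGN(a.nla_len) > size) {
                break;                      /* Malformed buffer; stop. */
            }
            if (a.nla_type == OVS_ACTION_ATTR_OUTPUT
                && a.nla_len >= sizeof a + sizeof port) {
                uint32_t out;

                memcpy(&out, p + sizeof a, sizeof out);
                if (out == port) {
                    return true;
                }
            }
            p += NLA_ALIGN(a.nla_len);
            size -= NLA_ALIGN(a.nla_len);
        }
        return false;
    }

    int main(void)
    {
        uint8_t buf[8];
        struct nlattr a = { sizeof a + sizeof(uint32_t),
                            OVS_ACTION_ATTR_OUTPUT };
        uint32_t local = 0;                 /* Assume local port is 0. */

        memcpy(buf, &a, sizeof a);
        memcpy(buf + sizeof a, &local, sizeof local);
        printf("outputs to local: %d\n", actions_output_to(buf, 8, 0));
        return 0;
    }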