/* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */
#include "ofproto/ofproto-dpif-xlate.h"

#include "byte-order.h"
#include "dynamic-string.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "odp-execute.h"
#include "ofp-actions.h"
#include "ofproto/ofproto-dpif-ipfix.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif.h"

COVERAGE_DEFINE(ofproto_dpif_xlate);

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);
/* A controller may use OFPP_NONE as the ingress port to indicate that
 * it did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto', nor does it
 * have any 'port' structs, so care must be taken when dealing with it. */
static struct ofbundle ofpp_none_bundle = {
    .vlan_mode = PORT_VLAN_TRUNK
static bool may_receive(const struct ofport_dpif *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                             struct xlate_ctx *);
static void xlate_normal(struct xlate_ctx *);
static void xlate_report(struct xlate_ctx *, const char *);
static void xlate_table_action(struct xlate_ctx *, uint16_t in_port,
                               uint8_t table_id, bool may_packet_in);
static bool input_vid_is_valid(uint16_t vid, struct ofbundle *, bool warn);
static uint16_t input_vid_to_vlan(const struct ofbundle *, uint16_t vid);
static void output_normal(struct xlate_ctx *, const struct ofbundle *,
                          uint16_t vlan);
static void compose_output_action(struct xlate_ctx *, uint16_t ofp_port);

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
    return (bundle->vlan_mode != PORT_VLAN_ACCESS
            && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));

ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan)
    return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);

vlan_is_mirrored(const struct ofmirror *m, int vlan)
    return !m->vlans || bitmap_is_set(m->vlans, vlan);
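
    /* Note on the three helpers above (an explanatory comment, not original
     * code): a null 'trunks' or 'vlans' bitmap is shorthand for "all VLANs",
     * so e.g. a trunk bundle with bundle->trunks == NULL carries every VID
     * 0...4095; only an explicitly configured bitmap narrows the set that
     * bitmap_is_set() consults. */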
static struct ofbundle *
lookup_input_bundle(const struct ofproto_dpif *ofproto, uint16_t in_port,
                    bool warn, struct ofport_dpif **in_ofportp)
    struct ofport_dpif *ofport;

    /* Find the port and bundle for the received packet. */
    ofport = get_ofp_port(ofproto, in_port);

    if (ofport && ofport->bundle) {
        return ofport->bundle;

    /* Special-case OFPP_NONE, which a controller may use as the ingress
     * port for traffic that it is sourcing. */
    if (in_port == OFPP_NONE) {
        return &ofpp_none_bundle;

    /* Odd.  A few possible reasons here:
     *
     * - We deleted a port but there are still a few packets queued up
     *
     * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
     *   we don't know about.
     *
     * - The ofproto client didn't configure the port as part of a bundle.
     *   This is particularly likely to happen if a packet was received on the
     *   port after it was created, but before the client had a chance to
     *   configure its bundle. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
                     "port %"PRIu16, ofproto->up.name, in_port);
add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
    struct ofproto_dpif *ofproto = ctx->ofproto;
    mirror_mask_t mirrors;
    struct ofbundle *in_bundle;
    const struct nlattr *a;

    in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port,
                                    ctx->xin->packet != NULL, NULL);

    mirrors = in_bundle->src_mirrors;

    /* Drop frames on bundles reserved for mirroring. */
    if (in_bundle->mirror_out) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->ofproto->up.name, in_bundle->name);

    vid = vlan_tci_to_vid(orig_flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_bundle, ctx->xin->packet != NULL)) {

    vlan = input_vid_to_vlan(in_bundle, vid);

    /* Look at the output ports to check for destination mirror selections. */
    NL_ATTR_FOR_EACH (a, left, ctx->xout->odp_actions.data,
                      ctx->xout->odp_actions.size) {
        enum ovs_action_attr type = nl_attr_type(a);
        struct ofport_dpif *ofport;

        if (type != OVS_ACTION_ATTR_OUTPUT) {

        ofport = get_odp_port(ofproto, nl_attr_get_u32(a));
        if (ofport && ofport->bundle) {
            mirrors |= ofport->bundle->dst_mirrors;
    /* Restore the original packet before adding the mirror actions. */
    ctx->xin->flow = *orig_flow;

        m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];

        ctx->xout->wc.masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);

        if (!vlan_is_mirrored(m, vlan)) {
            mirrors = zero_rightmost_1bit(mirrors);

        mirrors &= ~m->dup_mirrors;
        ctx->xout->mirrors |= m->dup_mirrors;
            output_normal(ctx, m->out, vlan);
        } else if (vlan != m->out_vlan
                   && !eth_addr_is_reserved(orig_flow->dl_dst)) {
            struct ofbundle *bundle;

            HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                if (ofbundle_includes_vlan(bundle, m->out_vlan)
                    && !bundle->mirror_out) {
                    output_normal(ctx, bundle, m->out_vlan);
/* Given 'vid', the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle',
 * the bundle on which the packet was received, returns the VLAN to which the
 * packet belongs.
 *
 * Both 'vid' and the return value are in the range 0...4095. */
input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
    switch (in_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        return in_bundle->vlan;

    case PORT_VLAN_TRUNK:

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        return vid ? vid : in_bundle->vlan;
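
    /* Illustrative mappings (hypothetical bundle configurations), assuming
     * the VID was already accepted by input_vid_is_valid():
     *
     *     access port, VLAN 10:              vid 0 -> VLAN 10
     *     trunk port:                        vid 7 -> VLAN 7
     *     native-tagged/untagged, native 5:  vid 0 -> VLAN 5
     *                                        vid 7 -> VLAN 7
     */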
/* Checks whether a packet with the given 'vid' may ingress on 'in_bundle'.
 * If so, returns true.  Otherwise, returns false and, if 'warn' is true, logs
 * a warning.
 *
 * 'vid' should be the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), in the range
 * 0...4095. */
input_vid_is_valid(uint16_t vid, struct ofbundle *in_bundle, bool warn)
    /* Allow any VID on the OFPP_NONE port. */
    if (in_bundle == &ofpp_none_bundle) {

    switch (in_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" tagged "
                         "packet received on port %s configured as VLAN "
                         "%"PRIu16" access port",
                         in_bundle->ofproto->up.name, vid,
                         in_bundle->name, in_bundle->vlan);

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
            /* Port must always carry its native VLAN. */

    case PORT_VLAN_TRUNK:
        if (!ofbundle_includes_vlan(in_bundle, vid)) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" packet "
                         "received on port %s not configured for trunking "
                         "VLAN %"PRIu16,
                         in_bundle->ofproto->up.name, vid,
                         in_bundle->name, vid);
/* Given 'vlan', the VLAN that a packet belongs to, and
 * 'out_bundle', a bundle on which the packet is to be output, returns the VID
 * that should be included in the 802.1Q header.  (If the return value is 0,
 * then the 802.1Q header should only be included in the packet if there is a
 * nonzero PCP.)
 *
 * Both 'vlan' and the return value are in the range 0...4095. */
output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
    switch (out_bundle->vlan_mode) {
    case PORT_VLAN_ACCESS:

    case PORT_VLAN_TRUNK:
    case PORT_VLAN_NATIVE_TAGGED:

    case PORT_VLAN_NATIVE_UNTAGGED:
        return vlan == out_bundle->vlan ? 0 : vlan;
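
    /* Illustrative round trip (hypothetical configuration): a frame entering
     * an access port with VLAN 10 is assigned VLAN 10 by input_vid_to_vlan().
     * Output of that VLAN to a native-untagged bundle whose native VLAN is
     * also 10 yields 0 here, i.e. the frame leaves untagged, whereas output
     * to a plain trunk keeps VID 10. */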
output_normal(struct xlate_ctx *ctx, const struct ofbundle *out_bundle,
              uint16_t vlan)
    struct ofport_dpif *port;
    ovs_be16 tci, old_tci;

    vid = output_vlan_to_vid(out_bundle, vlan);
    if (!out_bundle->bond) {
        port = ofbundle_get_a_port(out_bundle);
        port = bond_choose_output_slave(out_bundle->bond, &ctx->xin->flow,
                                        &ctx->xout->wc, vid, &ctx->xout->tags);
            /* No slaves enabled, so drop packet. */

    old_tci = ctx->xin->flow.vlan_tci;

    if (tci || out_bundle->use_priority_tags) {
        tci |= ctx->xin->flow.vlan_tci & htons(VLAN_PCP_MASK);
            tci |= htons(VLAN_CFI);

    ctx->xin->flow.vlan_tci = tci;

    compose_output_action(ctx, port->up.ofp_port);
    ctx->xin->flow.vlan_tci = old_tci;
/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
 * migration.  Older Citrix-patched Linux DomU used gratuitous ARP replies to
 * indicate this; newer upstream kernels use gratuitous ARP requests. */
is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
    if (flow->dl_type != htons(ETH_TYPE_ARP)) {

    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    if (!eth_addr_is_broadcast(flow->dl_dst)) {

    memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
    if (flow->nw_proto == ARP_OP_REPLY) {
    } else if (flow->nw_proto == ARP_OP_REQUEST) {
        memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
        memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
        return flow->nw_src == flow->nw_dst;
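
        /* In ARP terms (for ARP flows, nw_src/nw_dst hold the SPA/TPA): a
         * gratuitous request announces the sender's own binding, e.g.
         * SPA == TPA == 192.0.2.1 (an illustrative address), which is
         * exactly the comparison above. */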
update_learning_table(struct ofproto_dpif *ofproto,
                      const struct flow *flow, struct flow_wildcards *wc,
                      int vlan, struct ofbundle *in_bundle)
    struct mac_entry *mac;

    /* Don't learn the OFPP_NONE port. */
    if (in_bundle == &ofpp_none_bundle) {

    if (!mac_learning_may_learn(ofproto->ml, flow->dl_src, vlan)) {

    mac = mac_learning_insert(ofproto->ml, flow->dl_src, vlan);
    if (is_gratuitous_arp(flow, wc)) {
        /* We don't want to learn from gratuitous ARP packets that are
         * reflected back over bond slaves, so we lock the learning table. */
        if (!in_bundle->bond) {
            mac_entry_set_grat_arp_lock(mac);
        } else if (mac_entry_is_grat_arp_locked(mac)) {

    if (mac_entry_is_new(mac) || mac->port.p != in_bundle) {
        /* The log messages here could actually be useful in debugging,
         * so keep the rate limit relatively high. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
        VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
                    "on port %s in VLAN %d",
                    ofproto->up.name, ETH_ADDR_ARGS(flow->dl_src),
                    in_bundle->name, vlan);

        mac->port.p = in_bundle;
        tag_set_add(&ofproto->backer->revalidate_set,
                    mac_learning_changed(ofproto->ml, mac));
/* Determines whether packets in 'flow' within 'ofproto' should be forwarded or
 * dropped.  Returns true if they may be forwarded, false if they should be
 * dropped.
 *
 * 'in_port' must be the ofport_dpif that corresponds to flow->in_port.
 * 'in_port' must be part of a bundle (i.e. in_port->bundle must be nonnull).
 *
 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
 * returned by input_vid_to_vlan().  It must be a valid VLAN for 'in_port', as
 * checked by input_vid_is_valid().
 *
 * May also add tags to '*tags', although the current implementation only does
 * so in one special case. */
is_admissible(struct xlate_ctx *ctx, struct ofport_dpif *in_port,
              uint16_t vlan)
    struct ofproto_dpif *ofproto = ctx->ofproto;
    struct flow *flow = &ctx->xin->flow;
    struct ofbundle *in_bundle = in_port->bundle;

    /* Drop frames for reserved multicast addresses only if the
     * 'forward_bpdu' option is disabled. */
    if (!ofproto->up.forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
        xlate_report(ctx, "packet has reserved destination MAC, dropping");

    if (in_bundle->bond) {
        struct mac_entry *mac;

        switch (bond_check_admissibility(in_bundle->bond, in_port,
                                         flow->dl_dst, &ctx->xout->tags)) {
            xlate_report(ctx, "bonding refused admissibility, dropping");

        case BV_DROP_IF_MOVED:
            mac = mac_learning_lookup(ofproto->ml, flow->dl_src, vlan, NULL);
            if (mac && mac->port.p != in_bundle &&
                (!is_gratuitous_arp(flow, &ctx->xout->wc)
                 || mac_entry_is_grat_arp_locked(mac))) {
                xlate_report(ctx, "SLB bond thinks this packet looped back, "
xlate_normal(struct xlate_ctx *ctx)
    struct ofport_dpif *in_port;
    struct ofbundle *in_bundle;
    struct mac_entry *mac;

    ctx->xout->has_normal = true;

    /* Check the dl_type, since we may check for gratuitous ARP. */
    memset(&ctx->xout->wc.masks.dl_type, 0xff,
           sizeof ctx->xout->wc.masks.dl_type);
    memset(&ctx->xout->wc.masks.dl_src, 0xff,
           sizeof ctx->xout->wc.masks.dl_src);
    memset(&ctx->xout->wc.masks.dl_dst, 0xff,
           sizeof ctx->xout->wc.masks.dl_dst);
    memset(&ctx->xout->wc.masks.vlan_tci, 0xff,
           sizeof ctx->xout->wc.masks.vlan_tci);

    in_bundle = lookup_input_bundle(ctx->ofproto, ctx->xin->flow.in_port,
                                    ctx->xin->packet != NULL, &in_port);
        xlate_report(ctx, "no input bundle, dropping");

    /* Drop malformed frames. */
    if (ctx->xin->flow.dl_type == htons(ETH_TYPE_VLAN) &&
        !(ctx->xin->flow.vlan_tci & htons(VLAN_CFI))) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
                         "VLAN tag received on port %s",
                         ctx->ofproto->up.name, in_bundle->name);
        xlate_report(ctx, "partial VLAN tag, dropping");

    /* Drop frames on bundles reserved for mirroring. */
    if (in_bundle->mirror_out) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->ofproto->up.name, in_bundle->name);
        xlate_report(ctx, "input port is mirror output port, dropping");

    vid = vlan_tci_to_vid(ctx->xin->flow.vlan_tci);
    if (!input_vid_is_valid(vid, in_bundle, ctx->xin->packet != NULL)) {
        xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");

    vlan = input_vid_to_vlan(in_bundle, vid);

    /* Check other admissibility requirements. */
    if (in_port && !is_admissible(ctx, in_port, vlan)) {

    /* Learn source MAC. */
    if (ctx->xin->may_learn) {
        update_learning_table(ctx->ofproto, &ctx->xin->flow, &ctx->xout->wc,
                              vlan, in_bundle);

    /* Determine output bundle. */
    mac = mac_learning_lookup(ctx->ofproto->ml, ctx->xin->flow.dl_dst, vlan,
                              &ctx->xout->tags);
        if (mac->port.p != in_bundle) {
            xlate_report(ctx, "forwarding to learned port");
            output_normal(ctx, mac->port.p, vlan);
            xlate_report(ctx, "learned port is input port, dropping");
        struct ofbundle *bundle;

        xlate_report(ctx, "no learned MAC for destination, flooding");
        HMAP_FOR_EACH (bundle, hmap_node, &ctx->ofproto->bundles) {
            if (bundle != in_bundle
                && ofbundle_includes_vlan(bundle, vlan)
                && !bundle->mirror_out) {
                output_normal(ctx, bundle, vlan);
        ctx->xout->nf_output_iface = NF_OUT_FLOOD;
/* Compose SAMPLE action for sFlow or IPFIX.  The given probability is
 * the number of packets out of UINT32_MAX to sample.  The given
 * cookie is passed back in the callback for each sampled packet. */
compose_sample_action(const struct ofproto_dpif *ofproto,
                      struct ofpbuf *odp_actions,
                      const struct flow *flow,
                      const uint32_t probability,
                      const union user_action_cookie *cookie,
                      const size_t cookie_size)
    size_t sample_offset, actions_offset;

    sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);

    nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);

    actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
    cookie_offset = put_userspace_action(ofproto, odp_actions, flow, cookie,
                                         cookie_size);

    nl_msg_end_nested(odp_actions, actions_offset);
    nl_msg_end_nested(odp_actions, sample_offset);
    return cookie_offset;
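
    /* The nesting built above looks roughly like this (a sketch in the
     * spirit of ovs-dpctl formatting, not verbatim tool output):
     *
     *     sample(sample=N%,
     *            actions(userspace(pid=...,userdata(<cookie>))))
     *
     * OVS_SAMPLE_ATTR_PROBABILITY is N out of UINT32_MAX, and the USERSPACE
     * action is the sole nested action, so each sampled packet is sent up
     * with 'cookie' attached. */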
compose_sflow_cookie(const struct ofproto_dpif *ofproto,
                     ovs_be16 vlan_tci, uint32_t odp_port,
                     unsigned int n_outputs, union user_action_cookie *cookie)
    cookie->type = USER_ACTION_COOKIE_SFLOW;
    cookie->sflow.vlan_tci = vlan_tci;

    /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
     * port information") for the interpretation of cookie->output. */
        /* 0x40000000 | 256 means "packet dropped for unknown reason". */
        cookie->sflow.output = 0x40000000 | 256;
        ifindex = dpif_sflow_odp_port_to_ifindex(ofproto->sflow, odp_port);
            cookie->sflow.output = ifindex;
        /* 0x80000000 means "multiple output ports". */
        cookie->sflow.output = 0x80000000 | n_outputs;
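
    /* Worked encodings of cookie->sflow.output from the branches above
     * (illustrative values):
     *
     *     dropped, no output:        0x40000000 | 256 == 0x40000100
     *     one output, ifindex 7:     7
     *     three output ports:        0x80000000 | 3   == 0x80000003
     */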
/* Compose SAMPLE action for sFlow bridge sampling. */
compose_sflow_action(const struct ofproto_dpif *ofproto,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     uint32_t odp_port)
    uint32_t probability;
    union user_action_cookie cookie;

    if (!ofproto->sflow || flow->in_port == OFPP_NONE) {

    probability = dpif_sflow_get_probability(ofproto->sflow);
    compose_sflow_cookie(ofproto, htons(0), odp_port,
                         odp_port == OVSP_NONE ? 0 : 1, &cookie);

    return compose_sample_action(ofproto, odp_actions, flow, probability,
                                 &cookie, sizeof cookie.sflow);

compose_flow_sample_cookie(uint16_t probability, uint32_t collector_set_id,
                           uint32_t obs_domain_id, uint32_t obs_point_id,
                           union user_action_cookie *cookie)
    cookie->type = USER_ACTION_COOKIE_FLOW_SAMPLE;
    cookie->flow_sample.probability = probability;
    cookie->flow_sample.collector_set_id = collector_set_id;
    cookie->flow_sample.obs_domain_id = obs_domain_id;
    cookie->flow_sample.obs_point_id = obs_point_id;
compose_ipfix_cookie(union user_action_cookie *cookie)
    cookie->type = USER_ACTION_COOKIE_IPFIX;

/* Compose SAMPLE action for IPFIX bridge sampling. */
compose_ipfix_action(const struct ofproto_dpif *ofproto,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow)
    uint32_t probability;
    union user_action_cookie cookie;

    if (!ofproto->ipfix || flow->in_port == OFPP_NONE) {

    probability = dpif_ipfix_get_bridge_exporter_probability(ofproto->ipfix);
    compose_ipfix_cookie(&cookie);

    compose_sample_action(ofproto, odp_actions, flow, probability,
                          &cookie, sizeof cookie.ipfix);
/* The SAMPLE action for sFlow must be the first action in any given list of
 * actions.  At this point we do not yet have all the information required to
 * build it, so build the sample action as completely as possible. */
add_sflow_action(struct xlate_ctx *ctx)
    ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto,
                                                   &ctx->xout->odp_actions,
                                                   &ctx->xin->flow, OVSP_NONE);
    ctx->sflow_odp_port = 0;
    ctx->sflow_n_outputs = 0;

/* The SAMPLE action for IPFIX must be the first or second action in any
 * given list of actions, possibly after the SAMPLE action for sFlow. */
add_ipfix_action(struct xlate_ctx *ctx)
    compose_ipfix_action(ctx->ofproto, &ctx->xout->odp_actions,
                         &ctx->xin->flow);

/* Fix the SAMPLE action according to data collected while composing ODP
 * actions, i.e. fill in the SAMPLE action's OVS_SAMPLE_ATTR_ACTIONS
 * attribute, the nested USERSPACE action's user cookie, which sFlow
 * requires. */
fix_sflow_action(struct xlate_ctx *ctx)
    const struct flow *base = &ctx->base_flow;
    union user_action_cookie *cookie;

    if (!ctx->user_cookie_offset) {

    cookie = ofpbuf_at(&ctx->xout->odp_actions, ctx->user_cookie_offset,
                       sizeof cookie->sflow);
    ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);

    compose_sflow_cookie(ctx->ofproto, base->vlan_tci,
                         ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port,
                        bool check_stp)
    const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
    ovs_be16 flow_vlan_tci;
    uint32_t flow_skb_mark;
    struct priority_to_dscp *pdscp;
    uint32_t out_port, odp_port;

    /* If 'struct flow' gets additional metadata, we'll need to zero it out
     * before traversing a patch port. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 20);

        xlate_report(ctx, "Nonexistent output port");
    } else if (ofport->up.pp.config & OFPUTIL_PC_NO_FWD) {
        xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
    } else if (check_stp && !stp_forward_in_state(ofport->stp_state)) {
        xlate_report(ctx, "STP not in forwarding state, skipping output");

    if (netdev_vport_is_patch(ofport->up.netdev)) {
        struct ofport_dpif *peer = ofport_get_peer(ofport);
        struct flow old_flow = ctx->xin->flow;
        const struct ofproto_dpif *peer_ofproto;
        enum slow_path_reason special;
        struct ofport_dpif *in_port;

            xlate_report(ctx, "Nonexistent patch port peer");

        peer_ofproto = ofproto_dpif_cast(peer->up.ofproto);
        if (peer_ofproto->backer != ctx->ofproto->backer) {
            xlate_report(ctx, "Patch port peer on a different datapath");

        ctx->ofproto = ofproto_dpif_cast(peer->up.ofproto);
        ctx->xin->flow.in_port = peer->up.ofp_port;
        ctx->xin->flow.metadata = htonll(0);
        memset(&ctx->xin->flow.tunnel, 0, sizeof ctx->xin->flow.tunnel);
        memset(ctx->xin->flow.regs, 0, sizeof ctx->xin->flow.regs);

        in_port = get_ofp_port(ctx->ofproto, ctx->xin->flow.in_port);
        special = process_special(ctx->ofproto, &ctx->xin->flow, in_port,
                                  ctx->xin->packet);
            ctx->xout->slow = special;
        } else if (!in_port || may_receive(in_port, ctx)) {
            if (!in_port || stp_forward_in_state(in_port->stp_state)) {
                xlate_table_action(ctx, ctx->xin->flow.in_port, 0, true);
                /* Forwarding is disabled by STP.  Let OFPP_NORMAL and the
                 * learning action look at the packet, then drop it. */
                struct flow old_base_flow = ctx->base_flow;
                size_t old_size = ctx->xout->odp_actions.size;
                xlate_table_action(ctx, ctx->xin->flow.in_port, 0, true);
                ctx->base_flow = old_base_flow;
                ctx->xout->odp_actions.size = old_size;

        ctx->xin->flow = old_flow;
        ctx->ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        if (ctx->xin->resubmit_stats) {
            netdev_vport_inc_tx(ofport->up.netdev, ctx->xin->resubmit_stats);
            netdev_vport_inc_rx(peer->up.netdev, ctx->xin->resubmit_stats);

    flow_vlan_tci = ctx->xin->flow.vlan_tci;
    flow_skb_mark = ctx->xin->flow.skb_mark;
    flow_nw_tos = ctx->xin->flow.nw_tos;

    pdscp = get_priority(ofport, ctx->xin->flow.skb_priority);
        ctx->xin->flow.nw_tos &= ~IP_DSCP_MASK;
        ctx->xin->flow.nw_tos |= pdscp->dscp;

    if (ofport->tnl_port) {
        /* Save tunnel metadata so that changes made due to
         * the Logical (tunnel) Port are not visible for any further
         * matches, while explicit set actions on tunnel metadata are. */
        struct flow_tnl flow_tnl = ctx->xin->flow.tunnel;
        odp_port = tnl_port_send(ofport->tnl_port, &ctx->xin->flow);
        if (odp_port == OVSP_NONE) {
            xlate_report(ctx, "Tunneling decided against output");
            goto out; /* restore flow_nw_tos */

        if (ctx->xin->flow.tunnel.ip_dst == ctx->orig_tunnel_ip_dst) {
            xlate_report(ctx, "Not tunneling to our own address");
            goto out; /* restore flow_nw_tos */

        if (ctx->xin->resubmit_stats) {
            netdev_vport_inc_tx(ofport->up.netdev, ctx->xin->resubmit_stats);

        commit_odp_tunnel_action(&ctx->xin->flow, &ctx->base_flow,
                                 &ctx->xout->odp_actions);
        ctx->xin->flow.tunnel = flow_tnl; /* Restore tunnel metadata */
        uint16_t vlandev_port;
        odp_port = ofport->odp_port;
        vlandev_port = vsp_realdev_to_vlandev(ctx->ofproto, ofp_port,
                                              ctx->xin->flow.vlan_tci);
        if (vlandev_port == ofp_port) {
            out_port = ofp_port_to_odp_port(ctx->ofproto, vlandev_port);
            ctx->xin->flow.vlan_tci = htons(0);

        ctx->xin->flow.skb_mark &= ~IPSEC_MARK;

    commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                       &ctx->xout->odp_actions);
    nl_msg_put_u32(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);

    ctx->sflow_odp_port = odp_port;
    ctx->sflow_n_outputs++;
    ctx->xout->nf_output_iface = ofp_port;

out:
    ctx->xin->flow.vlan_tci = flow_vlan_tci;
    ctx->xin->flow.skb_mark = flow_skb_mark;
    ctx->xin->flow.nw_tos = flow_nw_tos;
compose_output_action(struct xlate_ctx *ctx, uint16_t ofp_port)
    compose_output_action__(ctx, ofp_port, true);

tag_the_flow(struct xlate_ctx *ctx, struct rule_dpif *rule)
    struct ofproto_dpif *ofproto = ctx->ofproto;
    uint8_t table_id = ctx->table_id;

    if (table_id > 0 && table_id < N_TABLES) {
        struct table_dpif *table = &ofproto->tables[table_id];
        if (table->other_table) {
            ctx->xout->tags |= (rule && rule->tag
                                ? rule->tag
                                : rule_calculate_tag(&ctx->xin->flow,
                                                     &table->other_table->mask,

/* Common rule processing in one place to avoid duplicating code. */
static struct rule_dpif *
ctx_rule_hooks(struct xlate_ctx *ctx, struct rule_dpif *rule,
               bool may_packet_in)
    if (ctx->xin->resubmit_hook) {
        ctx->xin->resubmit_hook(ctx, rule);
    if (rule == NULL && may_packet_in) {
        /* XXX
         * Check the table configuration flags:
         * OFPTC_TABLE_MISS_CONTROLLER (the default),
         * OFPTC_TABLE_MISS_CONTINUE, or
         * OFPTC_TABLE_MISS_DROP.
         * With OpenFlow 1.0, OFPTC_TABLE_MISS_CONTINUE is used.  What to do? */
        rule = rule_dpif_miss_rule(ctx->ofproto, &ctx->xin->flow);
    if (rule && ctx->xin->resubmit_stats) {
        rule_credit_stats(rule, ctx->xin->resubmit_stats);
xlate_table_action(struct xlate_ctx *ctx,
                   uint16_t in_port, uint8_t table_id, bool may_packet_in)
    if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
        struct rule_dpif *rule;
        uint16_t old_in_port = ctx->xin->flow.in_port;
        uint8_t old_table_id = ctx->table_id;

        ctx->table_id = table_id;

        /* Look up a flow with 'in_port' as the input port. */
        ctx->xin->flow.in_port = in_port;
        rule = rule_dpif_lookup_in_table(ctx->ofproto, &ctx->xin->flow,
                                         &ctx->xout->wc, table_id);

        tag_the_flow(ctx, rule);

        /* Restore the original input port.  Otherwise OFPP_NORMAL and
         * OFPP_IN_PORT will have surprising behavior. */
        ctx->xin->flow.in_port = old_in_port;

        rule = ctx_rule_hooks(ctx, rule, may_packet_in);

            struct rule_dpif *old_rule = ctx->rule;

            do_xlate_actions(rule->up.ofpacts, rule->up.ofpacts_len, ctx);
            ctx->rule = old_rule;

        ctx->table_id = old_table_id;
        static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);

        VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
                    MAX_RESUBMIT_RECURSION);
        ctx->max_resubmit_trigger = true;

xlate_ofpact_resubmit(struct xlate_ctx *ctx,
                      const struct ofpact_resubmit *resubmit)
    in_port = resubmit->in_port;
    if (in_port == OFPP_IN_PORT) {
        in_port = ctx->xin->flow.in_port;

    table_id = resubmit->table_id;
    if (table_id == 255) {
        table_id = ctx->table_id;

    xlate_table_action(ctx, in_port, table_id, false);
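
    /* For example, the OpenFlow action "resubmit(,1)" (ovs-ofctl syntax)
     * leaves the in_port untouched, which reaches this code as OFPP_IN_PORT
     * and maps to the flow's current in_port, then re-runs the lookup in
     * table 1.  A table_id of 255 means "current table", handled above. */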
flood_packets(struct xlate_ctx *ctx, bool all)
    struct ofport_dpif *ofport;

    HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
        uint16_t ofp_port = ofport->up.ofp_port;

        if (ofp_port == ctx->xin->flow.in_port) {

            compose_output_action__(ctx, ofp_port, false);
        } else if (!(ofport->up.pp.config & OFPUTIL_PC_NO_FLOOD)) {
            compose_output_action(ctx, ofp_port);

    ctx->xout->nf_output_iface = NF_OUT_FLOOD;

execute_controller_action(struct xlate_ctx *ctx, int len,
                          enum ofp_packet_in_reason reason,
                          uint16_t controller_id)
    struct ofputil_packet_in pin;
    struct ofpbuf *packet;

    ovs_assert(!ctx->xout->slow || ctx->xout->slow == SLOW_CONTROLLER);
    ctx->xout->slow = SLOW_CONTROLLER;
    if (!ctx->xin->packet) {

    packet = ofpbuf_clone(ctx->xin->packet);

    key.skb_priority = 0;
    memset(&key.tunnel, 0, sizeof key.tunnel);

    commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                       &ctx->xout->odp_actions);

    odp_execute_actions(NULL, packet, &key, ctx->xout->odp_actions.data,
                        ctx->xout->odp_actions.size, NULL, NULL);

    pin.packet = packet->data;
    pin.packet_len = packet->size;
    pin.reason = reason;
    pin.controller_id = controller_id;
    pin.table_id = ctx->table_id;
    pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;

    flow_get_metadata(&ctx->xin->flow, &pin.fmd);

    connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin);
    ofpbuf_delete(packet);
execute_mpls_push_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
    ovs_assert(eth_type_mpls(eth_type));

    memset(&ctx->xout->wc.masks.dl_type, 0xff,
           sizeof ctx->xout->wc.masks.dl_type);
    memset(&ctx->xout->wc.masks.mpls_lse, 0xff,
           sizeof ctx->xout->wc.masks.mpls_lse);
    memset(&ctx->xout->wc.masks.mpls_depth, 0xff,
           sizeof ctx->xout->wc.masks.mpls_depth);

    if (ctx->base_flow.mpls_depth) {
        ctx->xin->flow.mpls_lse &= ~htonl(MPLS_BOS_MASK);
        ctx->xin->flow.mpls_depth++;

        if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IPV6)) {
            label = htonl(0x2); /* IPv6 Explicit Null. */
            label = htonl(0x0); /* IPv4 Explicit Null. */
        tc = (ctx->xin->flow.nw_tos & IP_DSCP_MASK) >> 2;
        ttl = ctx->xin->flow.nw_ttl ? ctx->xin->flow.nw_ttl : 0x40;
        ctx->xin->flow.mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
        ctx->xin->flow.mpls_depth = 1;

    ctx->xin->flow.dl_type = eth_type;

execute_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
    ovs_assert(eth_type_mpls(ctx->xin->flow.dl_type));
    ovs_assert(!eth_type_mpls(eth_type));

    memset(&ctx->xout->wc.masks.dl_type, 0xff,
           sizeof ctx->xout->wc.masks.dl_type);
    memset(&ctx->xout->wc.masks.mpls_lse, 0xff,
           sizeof ctx->xout->wc.masks.mpls_lse);
    memset(&ctx->xout->wc.masks.mpls_depth, 0xff,
           sizeof ctx->xout->wc.masks.mpls_depth);

    if (ctx->xin->flow.mpls_depth) {
        ctx->xin->flow.mpls_depth--;
        ctx->xin->flow.mpls_lse = htonl(0);
        if (!ctx->xin->flow.mpls_depth) {
            ctx->xin->flow.dl_type = eth_type;
compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
    if (ctx->xin->flow.dl_type != htons(ETH_TYPE_IP) &&
        ctx->xin->flow.dl_type != htons(ETH_TYPE_IPV6)) {

    if (ctx->xin->flow.nw_ttl > 1) {
        ctx->xin->flow.nw_ttl--;

        for (i = 0; i < ids->n_controllers; i++) {
            execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
                                      ids->cnt_ids[i]);

        /* Stop processing for current table. */

execute_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
    if (!eth_type_mpls(ctx->xin->flow.dl_type)) {

    set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl);

execute_dec_mpls_ttl_action(struct xlate_ctx *ctx)
    uint8_t ttl = mpls_lse_to_ttl(ctx->xin->flow.mpls_lse);

    if (!eth_type_mpls(ctx->xin->flow.dl_type)) {

        set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl);

        execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);

        /* Stop processing for current table. */
xlate_output_action(struct xlate_ctx *ctx,
                    uint16_t port, uint16_t max_len, bool may_packet_in)
    uint16_t prev_nf_output_iface = ctx->xout->nf_output_iface;

    ctx->xout->nf_output_iface = NF_OUT_DROP;

        compose_output_action(ctx, ctx->xin->flow.in_port);

        xlate_table_action(ctx, ctx->xin->flow.in_port, 0, may_packet_in);

        flood_packets(ctx, false);

        flood_packets(ctx, true);

    case OFPP_CONTROLLER:
        execute_controller_action(ctx, max_len, OFPR_ACTION, 0);

        if (port != ctx->xin->flow.in_port) {
            compose_output_action(ctx, port);
            xlate_report(ctx, "skipping output to input port");

    if (prev_nf_output_iface == NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_FLOOD;
    } else if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
        ctx->xout->nf_output_iface = prev_nf_output_iface;
    } else if (prev_nf_output_iface != NF_OUT_DROP &&
               ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_MULTI;
xlate_output_reg_action(struct xlate_ctx *ctx,
                        const struct ofpact_output_reg *or)
    uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
    if (port <= UINT16_MAX) {
        union mf_subvalue value;

        memset(&value, 0xff, sizeof value);
        mf_write_subfield_flow(&or->src, &value, &ctx->xout->wc.masks);
        xlate_output_action(ctx, port, or->max_len, false);

xlate_enqueue_action(struct xlate_ctx *ctx,
                     const struct ofpact_enqueue *enqueue)
    uint16_t ofp_port = enqueue->port;
    uint32_t queue_id = enqueue->queue;
    uint32_t flow_priority, priority;

    /* Translate queue to priority. */
    error = dpif_queue_to_priority(ctx->ofproto->backer->dpif,
                                   queue_id, &priority);
        /* Fall back to ordinary output action. */
        xlate_output_action(ctx, enqueue->port, 0, false);

    /* Check output port. */
    if (ofp_port == OFPP_IN_PORT) {
        ofp_port = ctx->xin->flow.in_port;
    } else if (ofp_port == ctx->xin->flow.in_port) {

    /* Add datapath actions. */
    flow_priority = ctx->xin->flow.skb_priority;
    ctx->xin->flow.skb_priority = priority;
    compose_output_action(ctx, ofp_port);
    ctx->xin->flow.skb_priority = flow_priority;

    /* Update NetFlow output port. */
    if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
        ctx->xout->nf_output_iface = ofp_port;
    } else if (ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_MULTI;
xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
    uint32_t skb_priority;

    if (!dpif_queue_to_priority(ctx->ofproto->backer->dpif,
                                queue_id, &skb_priority)) {
        ctx->xin->flow.skb_priority = skb_priority;
        /* Couldn't translate queue to a priority.  Nothing to do.  A warning
         * has already been logged. */

slave_enabled_cb(uint16_t ofp_port, void *ofproto_)
    struct ofproto_dpif *ofproto = ofproto_;
    struct ofport_dpif *port;

    case OFPP_CONTROLLER: /* Not supported by the bundle action. */

        port = get_ofp_port(ofproto, ofp_port);
        return port ? port->may_enable : false;

xlate_bundle_action(struct xlate_ctx *ctx,
                    const struct ofpact_bundle *bundle)
    port = bundle_execute(bundle, &ctx->xin->flow, &ctx->xout->wc,
                          slave_enabled_cb, ctx->ofproto);
    if (bundle->dst.field) {
        nxm_reg_load(&bundle->dst, port, &ctx->xin->flow);
        xlate_output_action(ctx, port, 0, false);

xlate_learn_action(struct xlate_ctx *ctx,
                   const struct ofpact_learn *learn)
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
    struct ofputil_flow_mod fm;
    uint64_t ofpacts_stub[1024 / 8];
    struct ofpbuf ofpacts;

    ctx->xout->has_learn = true;

    learn_mask(learn, &ctx->xout->wc);

    if (!ctx->xin->may_learn) {

    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);

    error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
    if (error && !VLOG_DROP_WARN(&rl)) {
        VLOG_WARN("learning action failed to modify flow table (%s)",
                  ofperr_get_name(error));

    ofpbuf_uninit(&ofpacts);

/* Reduces '*timeout' to no more than 'max'.  A value of zero in either case
 * means "infinite". */
reduce_timeout(uint16_t max, uint16_t *timeout)
    if (max && (!*timeout || *timeout > max)) {
xlate_fin_timeout(struct xlate_ctx *ctx,
                  const struct ofpact_fin_timeout *oft)
    if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
        struct rule_dpif *rule = ctx->rule;

        reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
        reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout);
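
        /* Worked example for reduce_timeout() (hypothetical values): with
         * fin_idle_timeout == 10, a rule idle_timeout of 0 ("infinite") or
         * 30 becomes 10, while an idle_timeout of 5 stays 5; a max of 0
         * leaves any timeout unchanged. */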
xlate_sample_action(struct xlate_ctx *ctx,
                    const struct ofpact_sample *os)
    union user_action_cookie cookie;
    /* Scale the probability from 16-bit to 32-bit while representing
     * the same percentage. */
    uint32_t probability = (os->probability << 16) | os->probability;
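    /* E.g. (illustrative): 0x8000 -> 0x80008000 and 0xffff -> 0xffffffff.
     * The scaling is exact at both ends because (p << 16) | p == p * 65537,
     * and 65535 * 65537 == 2^32 - 1. */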
    commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                       &ctx->xout->odp_actions);

    compose_flow_sample_cookie(os->probability, os->collector_set_id,
                               os->obs_domain_id, os->obs_point_id, &cookie);
    compose_sample_action(ctx->ofproto, &ctx->xout->odp_actions,
                          &ctx->xin->flow,
                          probability, &cookie, sizeof cookie.flow_sample);
may_receive(const struct ofport_dpif *port, struct xlate_ctx *ctx)
    if (port->up.pp.config & (eth_addr_equals(ctx->xin->flow.dl_dst,
                                              eth_addr_stp)
                              ? OFPUTIL_PC_NO_RECV_STP
                              : OFPUTIL_PC_NO_RECV)) {

    /* Only drop packets here if both forwarding and learning are
     * disabled.  If just learning is enabled, we need to let OFPP_NORMAL
     * and the learning action have a look at the packet before we can
     * drop it. */
    if (!stp_forward_in_state(port->stp_state)
        && !stp_learn_in_state(port->stp_state)) {

tunnel_ecn_ok(struct xlate_ctx *ctx)
    if (is_ip_any(&ctx->base_flow)
        && (ctx->xin->flow.tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
        if ((ctx->base_flow.nw_tos & IP_ECN_MASK) == IP_ECN_NOT_ECT) {
            VLOG_WARN_RL(&rl, "dropping tunnel packet marked ECN CE"
                         " but the inner packet is not ECN capable");
            /* Set the ECN CE value in the tunneled packet. */
            ctx->xin->flow.nw_tos |= IP_ECN_CE;
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
                 struct xlate_ctx *ctx)
    bool was_evictable = true;
    const struct ofpact *a;

    /* Don't let the rule we're working on get evicted underneath us. */
        was_evictable = ctx->rule->up.evictable;
        ctx->rule->up.evictable = false;

do_xlate_actions_again:
    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        struct ofpact_controller *controller;
        const struct ofpact_metadata *metadata;

            xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
                                ofpact_get_OUTPUT(a)->max_len, true);

        case OFPACT_CONTROLLER:
            controller = ofpact_get_CONTROLLER(a);
            execute_controller_action(ctx, controller->max_len,
                                      controller->reason,
                                      controller->controller_id);

        case OFPACT_ENQUEUE:
            xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));

        case OFPACT_SET_VLAN_VID:
            ctx->xin->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
            ctx->xin->flow.vlan_tci |=
                (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)

        case OFPACT_SET_VLAN_PCP:
            ctx->xin->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
            ctx->xin->flow.vlan_tci |=
                htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp << VLAN_PCP_SHIFT)

        case OFPACT_STRIP_VLAN:
            ctx->xin->flow.vlan_tci = htons(0);

        case OFPACT_PUSH_VLAN:
            /* XXX 802.1ad (QinQ) */
            ctx->xin->flow.vlan_tci = htons(VLAN_CFI);

        case OFPACT_SET_ETH_SRC:
            memcpy(ctx->xin->flow.dl_src, ofpact_get_SET_ETH_SRC(a)->mac,
                   ETH_ADDR_LEN);

        case OFPACT_SET_ETH_DST:
            memcpy(ctx->xin->flow.dl_dst, ofpact_get_SET_ETH_DST(a)->mac,
                   ETH_ADDR_LEN);

        case OFPACT_SET_IPV4_SRC:
            memset(&ctx->xout->wc.masks.dl_type, 0xff,
                   sizeof ctx->xout->wc.masks.dl_type);
            if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
                ctx->xin->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;

        case OFPACT_SET_IPV4_DST:
            memset(&ctx->xout->wc.masks.dl_type, 0xff,
                   sizeof ctx->xout->wc.masks.dl_type);
            if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
                ctx->xin->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
        case OFPACT_SET_IPV4_DSCP:
            /* OpenFlow 1.0 only supports IPv4. */
            memset(&ctx->xout->wc.masks.dl_type, 0xff,
                   sizeof ctx->xout->wc.masks.dl_type);
            if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
                ctx->xin->flow.nw_tos &= ~IP_DSCP_MASK;
                ctx->xin->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;

        case OFPACT_SET_L4_SRC_PORT:
            memset(&ctx->xout->wc.masks.dl_type, 0xff,
                   sizeof ctx->xout->wc.masks.dl_type);
            memset(&ctx->xout->wc.masks.nw_proto, 0xff,
                   sizeof ctx->xout->wc.masks.nw_proto);
            if (is_ip_any(&ctx->xin->flow)) {
                ctx->xin->flow.tp_src =
                    htons(ofpact_get_SET_L4_SRC_PORT(a)->port);

        case OFPACT_SET_L4_DST_PORT:
            memset(&ctx->xout->wc.masks.dl_type, 0xff,
                   sizeof ctx->xout->wc.masks.dl_type);
            memset(&ctx->xout->wc.masks.nw_proto, 0xff,
                   sizeof ctx->xout->wc.masks.nw_proto);
            if (is_ip_any(&ctx->xin->flow)) {
                ctx->xin->flow.tp_dst =
                    htons(ofpact_get_SET_L4_DST_PORT(a)->port);

        case OFPACT_RESUBMIT:
            xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));

        case OFPACT_SET_TUNNEL:
            ctx->xin->flow.tunnel.tun_id =
                htonll(ofpact_get_SET_TUNNEL(a)->tun_id);

        case OFPACT_SET_QUEUE:
            xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);

        case OFPACT_POP_QUEUE:
            memset(&ctx->xout->wc.masks.skb_priority, 0xff,
                   sizeof ctx->xout->wc.masks.skb_priority);
            ctx->xin->flow.skb_priority = ctx->orig_skb_priority;

        case OFPACT_REG_MOVE:
            nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->xin->flow,
                                 &ctx->xout->wc);

        case OFPACT_REG_LOAD:
            nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->xin->flow);

        case OFPACT_STACK_PUSH:
            nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), &ctx->xin->flow,
                                   &ctx->xout->wc, &ctx->stack);

        case OFPACT_STACK_POP:
            nxm_execute_stack_pop(ofpact_get_STACK_POP(a), &ctx->xin->flow,
                                  &ctx->stack);

        case OFPACT_PUSH_MPLS:
            execute_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);

        case OFPACT_POP_MPLS:
            execute_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);

        case OFPACT_SET_MPLS_TTL:
            if (execute_set_mpls_ttl_action(ctx,
                                            ofpact_get_SET_MPLS_TTL(a)->ttl)) {

        case OFPACT_DEC_MPLS_TTL:
            if (execute_dec_mpls_ttl_action(ctx)) {

        case OFPACT_DEC_TTL:
            memset(&ctx->xout->wc.masks.dl_type, 0xff,
                   sizeof ctx->xout->wc.masks.dl_type);
            if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {

            /* Nothing to do. */

        case OFPACT_MULTIPATH:
            multipath_execute(ofpact_get_MULTIPATH(a), &ctx->xin->flow,
                              &ctx->xout->wc);

            ctx->ofproto->has_bundle_action = true;
            xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));

        case OFPACT_OUTPUT_REG:
            xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));

            xlate_learn_action(ctx, ofpact_get_LEARN(a));
        case OFPACT_FIN_TIMEOUT:
            memset(&ctx->xout->wc.masks.dl_type, 0xff,
                   sizeof ctx->xout->wc.masks.dl_type);
            memset(&ctx->xout->wc.masks.nw_proto, 0xff,
                   sizeof ctx->xout->wc.masks.nw_proto);
            ctx->xout->has_fin_timeout = true;
            xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));

        case OFPACT_CLEAR_ACTIONS:
            /* XXX
             * Nothing to do because write-actions is not supported for now.
             * When write-actions is supported, clear-actions must also be
             * supported at the same time. */

        case OFPACT_WRITE_METADATA:
            metadata = ofpact_get_WRITE_METADATA(a);
            ctx->xin->flow.metadata &= ~metadata->mask;
            ctx->xin->flow.metadata |= metadata->metadata & metadata->mask;

        case OFPACT_GOTO_TABLE: {
            /* It is assumed that goto-table is the last action. */
            struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
            struct rule_dpif *rule;

            ovs_assert(ctx->table_id < ogt->table_id);

            ctx->table_id = ogt->table_id;

            /* Look up a flow from the new table. */
            rule = rule_dpif_lookup_in_table(ctx->ofproto, &ctx->xin->flow,
                                             &ctx->xout->wc, ctx->table_id);

            tag_the_flow(ctx, rule);

            rule = ctx_rule_hooks(ctx, rule, true);

                ctx->rule->up.evictable = was_evictable;

            was_evictable = rule->up.evictable;
            rule->up.evictable = false;

            /* Tail recursion removal. */
            ofpacts = rule->up.ofpacts;
            ofpacts_len = rule->up.ofpacts_len;
            goto do_xlate_actions_again;
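
            /* Note: instead of recursing into do_xlate_actions() for the
             * new rule, the goto above swaps in that rule's action list and
             * restarts the loop, so an arbitrarily long goto-table chain
             * uses constant C stack. */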
            xlate_sample_action(ctx, ofpact_get_SAMPLE(a));

        ctx->rule->up.evictable = was_evictable;

xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
              const struct flow *flow, struct rule_dpif *rule,
              uint8_t tcp_flags, const struct ofpbuf *packet)
    xin->ofproto = ofproto;
    xin->packet = packet;
    xin->may_learn = packet != NULL;
    xin->ofpacts = NULL;
    xin->ofpacts_len = 0;
    xin->tcp_flags = tcp_flags;
    xin->resubmit_hook = NULL;
    xin->report_hook = NULL;
    xin->resubmit_stats = NULL;

xlate_out_uninit(struct xlate_out *xout)
        ofpbuf_uninit(&xout->odp_actions);

/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
 * into datapath actions, using 'ctx', and discards the datapath actions. */
xlate_actions_for_side_effects(struct xlate_in *xin)
    struct xlate_out xout;

    xlate_actions(xin, &xout);
    xlate_out_uninit(&xout);

xlate_report(struct xlate_ctx *ctx, const char *s)
    if (ctx->xin->report_hook) {
        ctx->xin->report_hook(ctx, s);

xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
    dst->tags = src->tags;
    dst->slow = src->slow;
    dst->has_learn = src->has_learn;
    dst->has_normal = src->has_normal;
    dst->has_fin_timeout = src->has_fin_timeout;
    dst->nf_output_iface = src->nf_output_iface;
    dst->mirrors = src->mirrors;

    ofpbuf_use_stub(&dst->odp_actions, dst->odp_actions_stub,
                    sizeof dst->odp_actions_stub);
    ofpbuf_put(&dst->odp_actions, src->odp_actions.data,
               src->odp_actions.size);
/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
 * into datapath actions in 'odp_actions', using 'ctx'. */
xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
    /* Normally false.  Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
     * that in the future we always keep a copy of the original flow for
     * tracing purposes. */
    static bool hit_resubmit_limit;

    enum slow_path_reason special;
    const struct ofpact *ofpacts;
    struct ofport_dpif *in_port;
    struct flow orig_flow;
    struct xlate_ctx ctx;

    COVERAGE_INC(ofproto_dpif_xlate);

    /* Flow initialization rules:
     * - 'base_flow' must match the kernel's view of the packet at the
     *   time that action processing starts.  'flow' represents any
     *   transformations we wish to make through actions.
     * - By default 'base_flow' and 'flow' are the same since the input
     *   packet matches the output before any actions are applied.
     * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
     *   of the received packet as seen by the kernel.  If we later output
     *   to another device without any modifications this will cause us to
     *   insert a new tag since the original one was stripped off by the
     *   VLAN device.
     * - Tunnel metadata as received is retained in 'flow'.  This allows
     *   tunnel metadata matching also in later tables.
     *   Since a kernel action for setting the tunnel metadata will only be
     *   generated with actual tunnel output, changing the tunnel metadata
     *   values in 'flow' (such as tun_id) will only have effect with a later
     *   tunnel output action.
     * - Tunnel 'base_flow' is completely cleared since that is what the
     *   kernel does.  If we wish to maintain the original values an action
     *   needs to be generated. */
    ctx.ofproto = xin->ofproto;
    ctx.rule = xin->rule;

    ctx.base_flow = ctx.xin->flow;
    memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
    ctx.orig_tunnel_ip_dst = ctx.xin->flow.tunnel.ip_dst;

    flow_wildcards_init_catchall(&ctx.xout->wc);
    memset(&ctx.xout->wc.masks.in_port, 0xff,
           sizeof ctx.xout->wc.masks.in_port);

    if (tnl_port_should_receive(&ctx.xin->flow)) {
        memset(&ctx.xout->wc.masks.tunnel, 0xff,
               sizeof ctx.xout->wc.masks.tunnel);

    /* Disable most wildcarding for NetFlow. */
    if (xin->ofproto->netflow) {
        memset(&ctx.xout->wc.masks.dl_src, 0xff,
               sizeof ctx.xout->wc.masks.dl_src);
        memset(&ctx.xout->wc.masks.dl_dst, 0xff,
               sizeof ctx.xout->wc.masks.dl_dst);
        memset(&ctx.xout->wc.masks.dl_type, 0xff,
               sizeof ctx.xout->wc.masks.dl_type);
        memset(&ctx.xout->wc.masks.vlan_tci, 0xff,
               sizeof ctx.xout->wc.masks.vlan_tci);
        memset(&ctx.xout->wc.masks.nw_proto, 0xff,
               sizeof ctx.xout->wc.masks.nw_proto);
        memset(&ctx.xout->wc.masks.nw_src, 0xff,
               sizeof ctx.xout->wc.masks.nw_src);
        memset(&ctx.xout->wc.masks.nw_dst, 0xff,
               sizeof ctx.xout->wc.masks.nw_dst);
        memset(&ctx.xout->wc.masks.tp_src, 0xff,
               sizeof ctx.xout->wc.masks.tp_src);
        memset(&ctx.xout->wc.masks.tp_dst, 0xff,
               sizeof ctx.xout->wc.masks.tp_dst);

    ctx.xout->has_learn = false;
    ctx.xout->has_normal = false;
    ctx.xout->has_fin_timeout = false;
    ctx.xout->nf_output_iface = NF_OUT_DROP;
    ctx.xout->mirrors = 0;

    ofpbuf_use_stub(&ctx.xout->odp_actions, ctx.xout->odp_actions_stub,
                    sizeof ctx.xout->odp_actions_stub);
    ofpbuf_reserve(&ctx.xout->odp_actions, NL_A_U32_SIZE);

    ctx.max_resubmit_trigger = false;
    ctx.orig_skb_priority = ctx.xin->flow.skb_priority;

        ofpacts = xin->ofpacts;
        ofpacts_len = xin->ofpacts_len;
    } else if (xin->rule) {
        ofpacts = xin->rule->up.ofpacts;
        ofpacts_len = xin->rule->up.ofpacts_len;

    ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);

    if (ctx.ofproto->has_mirrors || hit_resubmit_limit) {
        /* Do this conditionally because the copy is expensive enough that it
         * shows up in profiles. */
        orig_flow = ctx.xin->flow;

    if (ctx.xin->flow.nw_frag & FLOW_NW_FRAG_ANY) {
        switch (ctx.ofproto->up.frag_handling) {
        case OFPC_FRAG_NORMAL:
            /* We must pretend that transport ports are unavailable. */
            ctx.xin->flow.tp_src = ctx.base_flow.tp_src = htons(0);
            ctx.xin->flow.tp_dst = ctx.base_flow.tp_dst = htons(0);

        case OFPC_FRAG_DROP:

        case OFPC_FRAG_REASM:

        case OFPC_FRAG_NX_MATCH:
            /* Nothing to do. */

        case OFPC_INVALID_TTL_TO_CONTROLLER:
    in_port = get_ofp_port(ctx.ofproto, ctx.xin->flow.in_port);
    special = process_special(ctx.ofproto, &ctx.xin->flow, in_port,
                              ctx.xin->packet);
        ctx.xout->slow = special;
        static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
        size_t sample_actions_len;
        uint32_t local_odp_port;

        if (ctx.xin->flow.in_port
            != vsp_realdev_to_vlandev(ctx.ofproto, ctx.xin->flow.in_port,
                                      ctx.xin->flow.vlan_tci)) {
            ctx.base_flow.vlan_tci = 0;

        add_sflow_action(&ctx);
        add_ipfix_action(&ctx);
        sample_actions_len = ctx.xout->odp_actions.size;

        if (tunnel_ecn_ok(&ctx) && (!in_port || may_receive(in_port, &ctx))) {
            do_xlate_actions(ofpacts, ofpacts_len, &ctx);

            /* We've let OFPP_NORMAL and the learning action look at the
             * packet, so drop it now if forwarding is disabled. */
            if (in_port && !stp_forward_in_state(in_port->stp_state)) {
                ctx.xout->odp_actions.size = sample_actions_len;

        if (ctx.max_resubmit_trigger && !ctx.xin->resubmit_hook) {
            if (!hit_resubmit_limit) {
                /* We didn't record the original flow.  Make sure we do from
                 * now on. */
                hit_resubmit_limit = true;
            } else if (!VLOG_DROP_ERR(&trace_rl)) {
                struct ds ds = DS_EMPTY_INITIALIZER;

                ofproto_trace(ctx.ofproto, &orig_flow, ctx.xin->packet, &ds);
                VLOG_ERR("Trace triggered by excessive resubmit "
                         "recursion:\n%s", ds_cstr(&ds));
        local_odp_port = ofp_port_to_odp_port(ctx.ofproto, OFPP_LOCAL);
        if (!connmgr_must_output_local(ctx.ofproto->up.connmgr, &ctx.xin->flow,
                                       local_odp_port,
                                       ctx.xout->odp_actions.data,
                                       ctx.xout->odp_actions.size)) {
            compose_output_action(&ctx, OFPP_LOCAL);

        if (ctx.ofproto->has_mirrors) {
            add_mirror_actions(&ctx, &orig_flow);

        fix_sflow_action(&ctx);

    ofpbuf_uninit(&ctx.stack);

    /* Clear the metadata and register wildcard masks, because we won't
     * use non-header fields as part of the cache. */
    memset(&ctx.xout->wc.masks.metadata, 0,
           sizeof ctx.xout->wc.masks.metadata);
    memset(&ctx.xout->wc.masks.regs, 0, sizeof ctx.xout->wc.masks.regs);