ofproto: Inline actions in struct rule_actions.
[sliver-openvswitch.git] / ofproto / ofproto-dpif-xlate.c
index eb6a1f9..e2ac9ad 100644
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -42,6 +42,7 @@
 #include "ofp-actions.h"
 #include "ofproto/ofproto-dpif-ipfix.h"
 #include "ofproto/ofproto-dpif-mirror.h"
+#include "ofproto/ofproto-dpif-monitor.h"
 #include "ofproto/ofproto-dpif-sflow.h"
 #include "ofproto/ofproto-dpif.h"
 #include "ofproto/ofproto-provider.h"
 #include "vlog.h"
 
 COVERAGE_DEFINE(xlate_actions);
+COVERAGE_DEFINE(xlate_actions_oversize);
+COVERAGE_DEFINE(xlate_actions_mpls_overflow);
 
 VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);
 
 /* Maximum depth of flow table recursion (due to resubmit actions) in a
  * flow translation. */
 #define MAX_RESUBMIT_RECURSION 64
+#define MAX_INTERNAL_RESUBMITS 1   /* Max resubmits allowed using rules in
+                                      internal table. */
+
+/* Maximum number of resubmit actions in a flow translation, whether they are
+ * recursive or not. */
+#define MAX_RESUBMITS (MAX_RESUBMIT_RECURSION * MAX_RESUBMIT_RECURSION)
 
 struct ovs_rwlock xlate_rwlock = OVS_RWLOCK_INITIALIZER;
 
@@ -71,6 +80,7 @@ struct xbridge {
     struct mbridge *mbridge;      /* Mirroring. */
     struct dpif_sflow *sflow;     /* SFlow handle, or null. */
     struct dpif_ipfix *ipfix;     /* Ipfix handle, or null. */
+    struct netflow *netflow;      /* Netflow handle, or null. */
     struct stp *stp;              /* STP or null if disabled. */
 
     /* Special rules installed by ofproto-dpif. */
@@ -78,9 +88,20 @@ struct xbridge {
     struct rule_dpif *no_packet_in_rule;
 
     enum ofp_config_flags frag;   /* Fragmentation handling. */
-    bool has_netflow;             /* Bridge runs netflow? */
     bool has_in_band;             /* Bridge has in band control? */
     bool forward_bpdu;            /* Bridge forwards STP BPDUs? */
+
+    /* True if the datapath supports recirculation. */
+    bool enable_recirc;
+
+    /* True if the datapath supports variable-length
+     * OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions.
+     * False if the datapath supports only 8-byte (or shorter) userdata. */
+    bool variable_length_userdata;
+
+    /* Number of MPLS label stack entries that the datapath supports
+     * in matches. */
+    size_t max_mpls_depth;
 };
 
 struct xbundle {
@@ -122,6 +143,7 @@ struct xport {
     struct xport *peer;              /* Patch port peer or null. */
 
     enum ofputil_port_config config; /* OpenFlow port configuration. */
+    enum ofputil_port_state state;   /* OpenFlow port state. */
     int stp_port_no;                 /* STP port number or -1 if not in use. */
 
     struct hmap skb_priorities;      /* Map of 'skb_priority_to_dscp's. */
@@ -158,22 +180,37 @@ struct xlate_ctx {
     /* The rule that we are currently translating, or NULL. */
     struct rule_dpif *rule;
 
-    int recurse;                /* Recursion level, via xlate_table_action. */
+    /* Resubmit statistics, via xlate_table_action(). */
+    int recurse;                /* Current resubmit nesting depth. */
+    int resubmits;              /* Total number of resubmits. */
+    bool in_group;              /* Currently translating ofgroup, if true. */
+
     uint32_t orig_skb_priority; /* Priority when packet arrived. */
     uint8_t table_id;           /* OpenFlow table ID where flow was found. */
     uint32_t sflow_n_outputs;   /* Number of output ports. */
     odp_port_t sflow_odp_port;  /* Output port for composing sFlow action. */
     uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
     bool exit;                  /* No further actions should be processed. */
+
+    /* OpenFlow 1.1+ action set.
+     *
+     * 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
+     * When translation is otherwise complete, ofpacts_execute_action_set()
+     * converts it to a set of "struct ofpact"s that can be translated into
+     * datapath actions.   */
+    struct ofpbuf action_set;   /* Action set. */
+    uint64_t action_set_stub[1024 / 8];
 };
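A minimal sketch of how the new 'action_set' buffer is prepared from its stub at the start of a translation; the actual initialization lives in xlate_actions(), outside the hunks shown here:

    /* Start with an empty, stack-backed action set; it spills to the heap
     * only if WRITE_ACTIONS accumulates more than the stub can hold. */
    ofpbuf_use_stub(&ctx->action_set, ctx->action_set_stub,
                    sizeof ctx->action_set_stub);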
 
 /* A controller may use OFPP_NONE as the ingress port to indicate that
  * it did not arrive on a "real" port.  'ofpp_none_bundle' exists for
  * when an input bundle is needed for validation (e.g., mirroring or
  * OFPP_NORMAL processing).  It is not connected to an 'ofproto' or have
- * any 'port' structs, so care must be taken when dealing with it.
- * The bundle's name and vlan mode are initialized in lookup_input_bundle() */
-static struct xbundle ofpp_none_bundle;
+ * any 'port' structs, so care must be taken when dealing with it. */
+static struct xbundle ofpp_none_bundle = {
+    .name      = "OFPP_NONE",
+    .vlan_mode = PORT_VLAN_TRUNK
+};
 
 /* Node in 'xport''s 'skb_priorities' map.  Used to maintain a map from
  * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
@@ -185,6 +222,72 @@ struct skb_priority_to_dscp {
     uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
 };
 
+enum xc_type {
+    XC_RULE,
+    XC_BOND,
+    XC_NETDEV,
+    XC_NETFLOW,
+    XC_MIRROR,
+    XC_LEARN,
+    XC_NORMAL,
+    XC_FIN_TIMEOUT,
+};
+
+/* xlate_cache entries hold enough information to perform the side effects of
+ * xlate_actions() for a rule, without needing to perform rule translation
+ * from scratch. The primary usage of these is to submit statistics to objects
+ * that a flow relates to, although they may be used for other effects as well
+ * (for instance, refreshing hard timeouts for learned flows). */
+struct xc_entry {
+    enum xc_type type;
+    union {
+        struct rule_dpif *rule;
+        struct {
+            struct netdev *tx;
+            struct netdev *rx;
+            struct bfd *bfd;
+        } dev;
+        struct {
+            struct netflow *netflow;
+            struct flow *flow;
+            ofp_port_t iface;
+        } nf;
+        struct {
+            struct mbridge *mbridge;
+            mirror_mask_t mirrors;
+        } mirror;
+        struct {
+            struct bond *bond;
+            struct flow *flow;
+            uint16_t vid;
+        } bond;
+        struct {
+            struct ofproto_dpif *ofproto;
+            struct rule_dpif *rule;
+        } learn;
+        struct {
+            struct ofproto_dpif *ofproto;
+            struct flow *flow;
+            int vlan;
+        } normal;
+        struct {
+            struct rule_dpif *rule;
+            uint16_t idle;
+            uint16_t hard;
+        } fin;
+    } u;
+};
+
+#define XC_ENTRY_FOR_EACH(entry, entries, xcache)               \
+    entries = xcache->entries;                                  \
+    for (entry = ofpbuf_try_pull(&entries, sizeof *entry);      \
+         entry;                                                 \
+         entry = ofpbuf_try_pull(&entries, sizeof *entry))
+
+struct xlate_cache {
+    struct ofpbuf entries;
+};
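A minimal sketch of a consumer walking an xlate_cache with XC_ENTRY_FOR_EACH, assuming it only credits rule statistics; the real consumers handle every xc_type and appear later in this file:

    static void
    example_push_rule_stats(struct xlate_cache *xcache,
                            const struct dpif_flow_stats *stats)
    {
        struct xc_entry *entry;
        struct ofpbuf entries;

        XC_ENTRY_FOR_EACH (entry, entries, xcache) {
            if (entry->type == XC_RULE) {
                /* Attribute the flow's packet/byte counts to the cached rule. */
                rule_dpif_credit_stats(entry->u.rule, stats);
            }
        }
    }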
+
 static struct hmap xbridges = HMAP_INITIALIZER(&xbridges);
 static struct hmap xbundles = HMAP_INITIALIZER(&xbundles);
 static struct hmap xports = HMAP_INITIALIZER(&xports);
@@ -192,10 +295,13 @@ static struct hmap xports = HMAP_INITIALIZER(&xports);
 static bool may_receive(const struct xport *, struct xlate_ctx *);
 static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                              struct xlate_ctx *);
+static void xlate_actions__(struct xlate_in *, struct xlate_out *)
+    OVS_REQ_RDLOCK(xlate_rwlock);
 static void xlate_normal(struct xlate_ctx *);
 static void xlate_report(struct xlate_ctx *, const char *);
 static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
-                               uint8_t table_id, bool may_packet_in);
+                               uint8_t table_id, bool may_packet_in,
+                               bool honor_table_miss);
 static bool input_vid_is_valid(uint16_t vid, struct xbundle *, bool warn);
 static uint16_t input_vid_to_vlan(const struct xbundle *, uint16_t vid);
 static void output_normal(struct xlate_ctx *, const struct xbundle *,
@@ -212,6 +318,9 @@ static void clear_skb_priorities(struct xport *);
 static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
                                    uint8_t *dscp);
 
+static struct xc_entry *xlate_cache_add_entry(struct xlate_cache *xc,
+                                              enum xc_type type);
+
 void
 xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
                   struct dpif *dpif, struct rule_dpif *miss_rule,
@@ -219,8 +328,12 @@ xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
                   const struct mac_learning *ml, struct stp *stp,
                   const struct mbridge *mbridge,
                   const struct dpif_sflow *sflow,
-                  const struct dpif_ipfix *ipfix, enum ofp_config_flags frag,
-                  bool forward_bpdu, bool has_in_band, bool has_netflow)
+                  const struct dpif_ipfix *ipfix,
+                  const struct netflow *netflow, enum ofp_config_flags frag,
+                  bool forward_bpdu, bool has_in_band,
+                  bool enable_recirc,
+                  bool variable_length_userdata,
+                  size_t max_mpls_depth)
 {
     struct xbridge *xbridge = xbridge_lookup(ofproto);
 
@@ -258,16 +371,23 @@ xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
         xbridge->stp = stp_ref(stp);
     }
 
+    if (xbridge->netflow != netflow) {
+        netflow_unref(xbridge->netflow);
+        xbridge->netflow = netflow_ref(netflow);
+    }
+
     free(xbridge->name);
     xbridge->name = xstrdup(name);
 
     xbridge->dpif = dpif;
     xbridge->forward_bpdu = forward_bpdu;
     xbridge->has_in_band = has_in_band;
-    xbridge->has_netflow = has_netflow;
     xbridge->frag = frag;
     xbridge->miss_rule = miss_rule;
     xbridge->no_packet_in_rule = no_packet_in_rule;
+    xbridge->enable_recirc = enable_recirc;
+    xbridge->variable_length_userdata = variable_length_userdata;
+    xbridge->max_mpls_depth = max_mpls_depth;
 }
 
 void
@@ -371,7 +491,8 @@ xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                  const struct cfm *cfm, const struct bfd *bfd,
                  struct ofport_dpif *peer, int stp_port_no,
                  const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
-                 enum ofputil_port_config config, bool is_tunnel,
+                 enum ofputil_port_config config,
+                 enum ofputil_port_state state, bool is_tunnel,
                  bool may_enable)
 {
     struct xport *xport = xport_lookup(ofport);
@@ -392,6 +513,7 @@ xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
     ovs_assert(xport->ofp_port == ofp_port);
 
     xport->config = config;
+    xport->state = state;
     xport->stp_port_no = stp_port_no;
     xport->is_tunnel = is_tunnel;
     xport->may_enable = may_enable;
@@ -478,10 +600,10 @@ xlate_ofport_remove(struct ofport_dpif *ofport)
 
 /* Given a datapath, packet, and flow metadata ('backer', 'packet', and 'key'
  * respectively), populates 'flow' with the result of odp_flow_key_to_flow().
- * Optionally, if nonnull, populates 'fitnessp' with the fitness of 'flow' as
- * returned by odp_flow_key_to_flow().  Also, optionally populates 'ofproto'
- * with the ofproto_dpif, and 'odp_in_port' with the datapath in_port, that
- * 'packet' ingressed.
+ * Optionally populates 'ofproto' with the ofproto_dpif, 'odp_in_port' with
+ * the datapath in_port, that 'packet' ingressed, and 'ipfix', 'sflow', and
+ * 'netflow' with the appropriate handles for those protocols if they're
+ * enabled.  Caller is responsible for unrefing them.
  *
  * If 'ofproto' is nonnull, requires 'flow''s in_port to exist.  Otherwise sets
  * 'flow''s in_port to OFPP_NONE.
@@ -501,17 +623,16 @@ xlate_ofport_remove(struct ofport_dpif *ofport)
  * or some other positive errno if there are other problems. */
 int
 xlate_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
-              const struct nlattr *key, size_t key_len,
-              struct flow *flow, enum odp_key_fitness *fitnessp,
-              struct ofproto_dpif **ofproto, odp_port_t *odp_in_port)
+              const struct nlattr *key, size_t key_len, struct flow *flow,
+              struct ofproto_dpif **ofproto, struct dpif_ipfix **ipfix,
+              struct dpif_sflow **sflow, struct netflow **netflow,
+              odp_port_t *odp_in_port)
 {
-    enum odp_key_fitness fitness;
     const struct xport *xport;
     int error = ENODEV;
 
     ovs_rwlock_rdlock(&xlate_rwlock);
-    fitness = odp_flow_key_to_flow(key, key_len, flow);
-    if (fitness == ODP_FIT_ERROR) {
+    if (odp_flow_key_to_flow(key, key_len, flow) == ODP_FIT_ERROR) {
         error = EINVAL;
         goto exit;
     }
@@ -521,8 +642,8 @@ xlate_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
     }
 
     xport = xport_lookup(tnl_port_should_receive(flow)
-            ? tnl_port_receive(flow)
-            : odp_port_to_ofport(backer, flow->in_port.odp_port));
+                         ? tnl_port_receive(flow)
+                         : odp_port_to_ofport(backer, flow->in_port.odp_port));
 
     flow->in_port.ofp_port = xport ? xport->ofp_port : OFPP_NONE;
     if (!xport) {
@@ -534,22 +655,9 @@ xlate_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
             /* Make the packet resemble the flow, so that it gets sent to
              * an OpenFlow controller properly, so that it looks correct
              * for sFlow, and so that flow_extract() will get the correct
-             * vlan_tci if it is called on 'packet'.
-             *
-             * The allocated space inside 'packet' probably also contains
-             * 'key', that is, both 'packet' and 'key' are probably part of
-             * a struct dpif_upcall (see the large comment on that
-             * structure definition), so pushing data on 'packet' is in
-             * general not a good idea since it could overwrite 'key' or
-             * free it as a side effect.  However, it's OK in this special
-             * case because we know that 'packet' is inside a Netlink
-             * attribute: pushing 4 bytes will just overwrite the 4-byte
-             * "struct nlattr", which is fine since we don't need that
-             * header anymore. */
-            eth_push_vlan(packet, flow->vlan_tci);
-        }
-        /* We can't reproduce 'key' from 'flow'. */
-        fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
+             * vlan_tci if it is called on 'packet'. */
+            eth_push_vlan(packet, htons(ETH_TYPE_VLAN), flow->vlan_tci);
+        }
     }
     error = 0;
 
@@ -557,10 +665,19 @@ xlate_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
         *ofproto = xport->xbridge->ofproto;
     }
 
-exit:
-    if (fitnessp) {
-        *fitnessp = fitness;
+    if (ipfix) {
+        *ipfix = dpif_ipfix_ref(xport->xbridge->ipfix);
     }
+
+    if (sflow) {
+        *sflow = dpif_sflow_ref(xport->xbridge->sflow);
+    }
+
+    if (netflow) {
+        *netflow = netflow_ref(xport->xbridge->netflow);
+    }
+
+exit:
     ovs_rwlock_unlock(&xlate_rwlock);
     return error;
 }
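A minimal caller sketch for the revised xlate_receive() signature, assuming 'backer', 'packet', 'key', and 'key_len' come from an upcall; the ipfix, sflow, and netflow out-parameters may be passed as NULL when not wanted, and any handles returned through them must be unreffed by the caller:

    struct ofproto_dpif *ofproto;
    odp_port_t odp_in_port;
    struct flow flow;
    int error;

    error = xlate_receive(backer, packet, key, key_len, &flow,
                          &ofproto, NULL, NULL, NULL, &odp_in_port);
    if (!error) {
        /* 'flow', 'ofproto', and 'odp_in_port' now describe the packet. */
    }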
@@ -627,7 +744,7 @@ xport_get_stp_port(const struct xport *xport)
         : NULL;
 }
 
-static enum stp_state
+static bool
 xport_stp_learn_state(const struct xport *xport)
 {
     struct stp_port *sp = xport_get_stp_port(xport);
@@ -641,13 +758,21 @@ xport_stp_forward_state(const struct xport *xport)
     return stp_forward_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
 }
 
+static bool
+xport_stp_listen_state(const struct xport *xport)
+{
+    struct stp_port *sp = xport_get_stp_port(xport);
+    return stp_listen_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
+}
+
 /* Returns true if STP should process 'flow'.  Sets fields in 'wc' that
  * were used to make the determination. */
 static bool
 stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
 {
+    /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
     memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
-    return eth_addr_equals(flow->dl_dst, eth_addr_stp);
+    return is_stp(flow);
 }
 
 static void
@@ -655,7 +780,7 @@ stp_process_packet(const struct xport *xport, const struct ofpbuf *packet)
 {
     struct stp_port *sp = xport_get_stp_port(xport);
     struct ofpbuf payload = *packet;
-    struct eth_header *eth = payload.data;
+    struct eth_header *eth = ofpbuf_data(&payload);
 
     /* Sink packets on ports that have STP disabled when the bridge has
      * STP enabled. */
@@ -664,12 +789,12 @@ stp_process_packet(const struct xport *xport, const struct ofpbuf *packet)
     }
 
     /* Trim off padding on payload. */
-    if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
-        payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
+    if (ofpbuf_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
+        ofpbuf_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
     }
 
     if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
-        stp_received_bpdu(sp, payload.data, payload.size);
+        stp_received_bpdu(sp, ofpbuf_data(&payload), ofpbuf_size(&payload));
     }
 }
 
@@ -694,6 +819,106 @@ ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
     return xport ? xport->odp_port : ODPP_NONE;
 }
 
+static bool
+odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
+{
+    struct xport *xport;
+
+    xport = get_ofp_port(ctx->xbridge, ofp_port);
+    if (!xport || xport->config & OFPUTIL_PC_PORT_DOWN ||
+        xport->state & OFPUTIL_PS_LINK_DOWN) {
+        return false;
+    }
+
+    return true;
+}
+
+static const struct ofputil_bucket *
+group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
+                        int depth);
+
+static bool
+group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
+{
+    struct group_dpif *group;
+    bool hit;
+
+    hit = group_dpif_lookup(ctx->xbridge->ofproto, group_id, &group);
+    if (!hit) {
+        return false;
+    }
+
+    hit = group_first_live_bucket(ctx, group, depth) != NULL;
+
+    group_dpif_release(group);
+    return hit;
+}
+
+#define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */
+
+static bool
+bucket_is_alive(const struct xlate_ctx *ctx,
+                const struct ofputil_bucket *bucket, int depth)
+{
+    if (depth >= MAX_LIVENESS_RECURSION) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+        VLOG_WARN_RL(&rl, "bucket chaining exceeded %d links",
+                     MAX_LIVENESS_RECURSION);
+        return false;
+    }
+
+    return !ofputil_bucket_has_liveness(bucket) ||
+        (bucket->watch_port != OFPP_ANY &&
+         odp_port_is_alive(ctx, bucket->watch_port)) ||
+        (bucket->watch_group != OFPG_ANY &&
+         group_is_alive(ctx, bucket->watch_group, depth + 1));
+}
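As a concrete example of the liveness rule above: a bucket with watch_port = OFPP_ANY and watch_group = OFPG_ANY carries no liveness constraint and is always considered live, while a fast-failover bucket watching port 3 and group 5 is live if port 3 is administratively up with its link up, or if group 5 still has at least one live bucket.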
+
+static const struct ofputil_bucket *
+group_first_live_bucket(const struct xlate_ctx *ctx,
+                        const struct group_dpif *group, int depth)
+{
+    struct ofputil_bucket *bucket;
+    const struct list *buckets;
+
+    group_dpif_get_buckets(group, &buckets);
+    LIST_FOR_EACH (bucket, list_node, buckets) {
+        if (bucket_is_alive(ctx, bucket, depth)) {
+            return bucket;
+        }
+    }
+
+    return NULL;
+}
+
+static const struct ofputil_bucket *
+group_best_live_bucket(const struct xlate_ctx *ctx,
+                       const struct group_dpif *group,
+                       uint32_t basis)
+{
+    const struct ofputil_bucket *best_bucket = NULL;
+    uint32_t best_score = 0;
+    int i = 0;
+
+    const struct ofputil_bucket *bucket;
+    const struct list *buckets;
+
+    group_dpif_get_buckets(group, &buckets);
+    LIST_FOR_EACH (bucket, list_node, buckets) {
+        if (bucket_is_alive(ctx, bucket, 0)) {
+            uint32_t score = (hash_int(i, basis) & 0xffff) * bucket->weight;
+            if (score >= best_score) {
+                best_bucket = bucket;
+                best_score = score;
+            }
+        }
+        i++;
+    }
+
+    return best_bucket;
+}
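As a worked example of the scoring above, with assumed values: if bucket 0 hashes to a 16-bit slice of 0x7000 and has weight 1 (score 0x7000) while bucket 1 hashes to 0x4000 with weight 2 (score 0x8000), bucket 1 is chosen; and because the comparison is '>=', a later bucket wins ties against an earlier one.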
+
 static bool
 xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
 {
@@ -749,8 +974,6 @@ lookup_input_bundle(const struct xbridge *xbridge, ofp_port_t in_port,
     /* Special-case OFPP_NONE, which a controller may use as the ingress
      * port for traffic that it is sourcing. */
     if (in_port == OFPP_NONE) {
-        ofpp_none_bundle.name = "OFPP_NONE";
-        ofpp_none_bundle.vlan_mode = PORT_VLAN_TRUNK;
         return &ofpp_none_bundle;
     }
 
@@ -829,7 +1052,7 @@ add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
         bool has_mirror;
         int out_vlan;
 
-        has_mirror = mirror_get(xbridge->mbridge, mirror_mask_ffs(mirrors) - 1,
+        has_mirror = mirror_get(xbridge->mbridge, raw_ctz(mirrors),
                                 &vlans, &dup_mirrors, &out, &out_vlan);
         ovs_assert(has_mirror);
 
@@ -887,7 +1110,7 @@ input_vid_to_vlan(const struct xbundle *in_xbundle, uint16_t vid)
         return vid ? vid : in_xbundle->vlan;
 
     default:
-        NOT_REACHED();
+        OVS_NOT_REACHED();
     }
 }
 
@@ -940,7 +1163,7 @@ input_vid_is_valid(uint16_t vid, struct xbundle *in_xbundle, bool warn)
         return true;
 
     default:
-        NOT_REACHED();
+        OVS_NOT_REACHED();
     }
 
 }
@@ -967,7 +1190,7 @@ output_vlan_to_vid(const struct xbundle *out_xbundle, uint16_t vlan)
         return vlan == out_xbundle->vlan ? 0 : vlan;
 
     default:
-        NOT_REACHED();
+        OVS_NOT_REACHED();
     }
 }
 
@@ -985,19 +1208,55 @@ output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
         /* Partially configured bundle with no slaves.  Drop the packet. */
         return;
     } else if (!out_xbundle->bond) {
+        ctx->xout->use_recirc = false;
         xport = CONTAINER_OF(list_front(&out_xbundle->xports), struct xport,
                              bundle_node);
     } else {
         struct ofport_dpif *ofport;
+        struct xlate_recirc *xr = &ctx->xout->recirc;
+        struct flow_wildcards *wc = &ctx->xout->wc;
+
+        if (ctx->xbridge->enable_recirc) {
+            ctx->xout->use_recirc = bond_may_recirc(
+                out_xbundle->bond, &xr->recirc_id, &xr->hash_basis);
+
+            if (ctx->xout->use_recirc) {
+                /* Only TCP mode uses recirculation. */
+                xr->hash_alg = OVS_HASH_ALG_L4;
+                bond_update_post_recirc_rules(out_xbundle->bond, false);
 
-        ofport = bond_choose_output_slave(out_xbundle->bond, &ctx->xin->flow,
-                                          &ctx->xout->wc, vid);
+                /* Recirculation does not require unmasking hash fields. */
+                wc = NULL;
+            }
+        }
+
+        ofport = bond_choose_output_slave(out_xbundle->bond,
+                                          &ctx->xin->flow, wc, vid);
         xport = xport_lookup(ofport);
 
         if (!xport) {
             /* No slaves enabled, so drop packet. */
             return;
         }
+
+        /* If ctx->xout->use_recirc is set, the main thread will handle stats
+         * accounting for this bond. */
+        if (!ctx->xout->use_recirc) {
+            if (ctx->xin->resubmit_stats) {
+                bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
+                             ctx->xin->resubmit_stats->n_bytes);
+            }
+            if (ctx->xin->xcache) {
+                struct xc_entry *entry;
+                struct flow *flow;
+
+                flow = &ctx->xin->flow;
+                entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
+                entry->u.bond.bond = bond_ref(out_xbundle->bond);
+                entry->u.bond.flow = xmemdup(flow, sizeof *flow);
+                entry->u.bond.vid = vid;
+            }
+        }
     }
 
     old_tci = *flow_tci;
@@ -1057,7 +1316,7 @@ is_mac_learning_update_needed(const struct mac_learning *ml,
                               const struct flow *flow,
                               struct flow_wildcards *wc,
                               int vlan, struct xbundle *in_xbundle)
-    OVS_REQ_RDLOCK(ml->rwlock)
+OVS_REQ_RDLOCK(ml->rwlock)
 {
     struct mac_entry *mac;
 
@@ -1097,7 +1356,7 @@ static void
 update_learning_table__(const struct xbridge *xbridge,
                         const struct flow *flow, struct flow_wildcards *wc,
                         int vlan, struct xbundle *in_xbundle)
-    OVS_REQ_WRLOCK(xbridge->ml->rwlock)
+OVS_REQ_WRLOCK(xbridge->ml->rwlock)
 {
     struct mac_entry *mac;
 
@@ -1206,7 +1465,7 @@ is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
                  || mac_entry_is_grat_arp_locked(mac))) {
                 ovs_rwlock_unlock(&xbridge->ml->rwlock);
                 xlate_report(ctx, "SLB bond thinks this packet looped back, "
-                            "dropping");
+                             "dropping");
                 return false;
             }
             ovs_rwlock_unlock(&xbridge->ml->rwlock);
@@ -1284,6 +1543,15 @@ xlate_normal(struct xlate_ctx *ctx)
     if (ctx->xin->may_learn) {
         update_learning_table(ctx->xbridge, flow, wc, vlan, in_xbundle);
     }
+    if (ctx->xin->xcache) {
+        struct xc_entry *entry;
+
+        /* Save enough info to update mac learning table later. */
+        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
+        entry->u.normal.ofproto = ctx->xin->ofproto;
+        entry->u.normal.flow = xmemdup(flow, sizeof *flow);
+        entry->u.normal.vlan = vlan;
+    }
 
     /* Determine output bundle. */
     ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
@@ -1341,8 +1609,10 @@ compose_sample_action(const struct xbridge *xbridge,
     actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
 
     odp_port = ofp_port_to_odp_port(xbridge, flow->in_port.ofp_port);
-    pid = dpif_port_get_pid(xbridge->dpif, odp_port);
-    cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size, odp_actions);
+    pid = dpif_port_get_pid(xbridge->dpif, odp_port,
+                            flow_hash_5tuple(flow, 0));
+    cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size,
+                                             odp_actions);
 
     nl_msg_end_nested(odp_actions, actions_offset);
     nl_msg_end_nested(odp_actions, sample_offset);
@@ -1501,6 +1771,14 @@ process_special(struct xlate_ctx *ctx, const struct flow *flow,
     } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
         if (packet) {
             bfd_process_packet(xport->bfd, flow, packet);
+            /* If POLL received, immediately sends FINAL back. */
+            if (bfd_should_send_packet(xport->bfd)) {
+                if (xport->peer) {
+                    ofproto_dpif_monitor_port_send_soon(xport->ofport);
+                } else {
+                    ofproto_dpif_monitor_port_send_soon_safe(xport->ofport);
+                }
+            }
         }
         return SLOW_BFD;
     } else if (xport->xbundle && xport->xbundle->lacp
@@ -1534,7 +1812,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
 
     /* If 'struct flow' gets additional metadata, we'll need to zero it out
      * before traversing a patch port. */
-    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 20);
+    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 26);
 
     if (!xport) {
         xlate_report(ctx, "Nonexistent output port");
@@ -1542,9 +1820,18 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
     } else if (xport->config & OFPUTIL_PC_NO_FWD) {
         xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
         return;
-    } else if (check_stp && !xport_stp_forward_state(xport)) {
-        xlate_report(ctx, "STP not in forwarding state, skipping output");
-        return;
+    } else if (check_stp) {
+        if (is_stp(&ctx->base_flow)) {
+            if (!xport_stp_listen_state(xport)) {
+                xlate_report(ctx, "STP not in listening state, "
+                             "skipping bpdu output");
+                return;
+            }
+        } else if (!xport_stp_forward_state(xport)) {
+            xlate_report(ctx, "STP not in forwarding state, "
+                         "skipping output");
+            return;
+        }
     }
 
     if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
@@ -1566,20 +1853,20 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
         special = process_special(ctx, &ctx->xin->flow, peer,
                                   ctx->xin->packet);
         if (special) {
-            ctx->xout->slow = special;
+            ctx->xout->slow |= special;
         } else if (may_receive(peer, ctx)) {
             if (xport_stp_forward_state(peer)) {
-                xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
+                xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
             } else {
                 /* Forwarding is disabled by STP.  Let OFPP_NORMAL and the
                  * learning action look at the packet, then drop it. */
                 struct flow old_base_flow = ctx->base_flow;
-                size_t old_size = ctx->xout->odp_actions.size;
+                size_t old_size = ofpbuf_size(&ctx->xout->odp_actions);
                 mirror_mask_t old_mirrors = ctx->xout->mirrors;
-                xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
+                xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
                 ctx->xout->mirrors = old_mirrors;
                 ctx->base_flow = old_base_flow;
-                ctx->xout->odp_actions.size = old_size;
+                ofpbuf_set_size(&ctx->xout->odp_actions, old_size);
             }
         }
 
@@ -1589,6 +1876,17 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
         if (ctx->xin->resubmit_stats) {
             netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
             netdev_vport_inc_rx(peer->netdev, ctx->xin->resubmit_stats);
+            if (peer->bfd) {
+                bfd_account_rx(peer->bfd, ctx->xin->resubmit_stats);
+            }
+        }
+        if (ctx->xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
+            entry->u.dev.tx = netdev_ref(xport->netdev);
+            entry->u.dev.rx = netdev_ref(peer->netdev);
+            entry->u.dev.bfd = bfd_ref(peer->bfd);
         }
 
         return;
@@ -1599,7 +1897,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
     flow_nw_tos = flow->nw_tos;
 
     if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
-        wc->masks.nw_tos |= IP_ECN_MASK;
+        wc->masks.nw_tos |= IP_DSCP_MASK;
         flow->nw_tos &= ~IP_DSCP_MASK;
         flow->nw_tos |= dscp;
     }
@@ -1622,32 +1920,55 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
         if (ctx->xin->resubmit_stats) {
             netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
         }
+        if (ctx->xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
+            entry->u.dev.tx = netdev_ref(xport->netdev);
+        }
         out_port = odp_port;
         commit_odp_tunnel_action(flow, &ctx->base_flow,
                                  &ctx->xout->odp_actions);
         flow->tunnel = flow_tnl; /* Restore tunnel metadata */
     } else {
-        ofp_port_t vlandev_port;
-
         odp_port = xport->odp_port;
+        out_port = odp_port;
         if (ofproto_has_vlan_splinters(ctx->xbridge->ofproto)) {
+            ofp_port_t vlandev_port;
+
             wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
-        }
-        vlandev_port = vsp_realdev_to_vlandev(ctx->xbridge->ofproto, ofp_port,
-                                              flow->vlan_tci);
-        if (vlandev_port == ofp_port) {
-            out_port = odp_port;
-        } else {
-            out_port = ofp_port_to_odp_port(ctx->xbridge, vlandev_port);
-            flow->vlan_tci = htons(0);
+            vlandev_port = vsp_realdev_to_vlandev(ctx->xbridge->ofproto,
+                                                  ofp_port, flow->vlan_tci);
+            if (vlandev_port != ofp_port) {
+                out_port = ofp_port_to_odp_port(ctx->xbridge, vlandev_port);
+                flow->vlan_tci = htons(0);
+            }
         }
     }
 
     if (out_port != ODPP_NONE) {
-        commit_odp_actions(flow, &ctx->base_flow,
-                           &ctx->xout->odp_actions, &ctx->xout->wc);
-        nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
-                            out_port);
+        ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
+                                              &ctx->xout->odp_actions,
+                                              &ctx->xout->wc);
+
+        if (ctx->xout->use_recirc) {
+            struct ovs_action_hash *act_hash;
+            struct xlate_recirc *xr = &ctx->xout->recirc;
+
+            /* Hash action. */
+            act_hash = nl_msg_put_unspec_uninit(&ctx->xout->odp_actions,
+                                                OVS_ACTION_ATTR_HASH,
+                                                sizeof *act_hash);
+            act_hash->hash_alg = xr->hash_alg;
+            act_hash->hash_basis = xr->hash_basis;
+
+            /* Recirc action. */
+            nl_msg_put_u32(&ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC,
+                           xr->recirc_id);
+        } else {
+            nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
+                                out_port);
+        }
 
         ctx->sflow_odp_port = odp_port;
         ctx->sflow_n_outputs++;
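When use_recirc is set, the pair of actions composed above appears in the datapath action list as a hash action followed by a recirculation action, roughly hash(hash_l4(0)),recirc(<id>) in odp syntax (illustrative rendering); otherwise a plain OVS_ACTION_ATTR_OUTPUT to out_port is emitted as before.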
@@ -1669,35 +1990,55 @@ compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port)
 
 static void
 xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule)
-    OVS_RELEASES(rule)
 {
     struct rule_dpif *old_rule = ctx->rule;
-    struct rule_actions *actions;
+    const struct rule_actions *actions;
 
     if (ctx->xin->resubmit_stats) {
         rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
     }
 
+    ctx->resubmits++;
     ctx->recurse++;
     ctx->rule = rule;
     actions = rule_dpif_get_actions(rule);
     do_xlate_actions(actions->ofpacts, actions->ofpacts_len, ctx);
-    rule_actions_unref(actions);
     ctx->rule = old_rule;
     ctx->recurse--;
+}
+
+static bool
+xlate_resubmit_resource_check(struct xlate_ctx *ctx)
+{
+    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+    if (ctx->recurse >= MAX_RESUBMIT_RECURSION + MAX_INTERNAL_RESUBMITS) {
+        VLOG_ERR_RL(&rl, "resubmit actions recursed over %d times",
+                    MAX_RESUBMIT_RECURSION);
+    } else if (ctx->resubmits >= MAX_RESUBMITS + MAX_INTERNAL_RESUBMITS) {
+        VLOG_ERR_RL(&rl, "over %d resubmit actions", MAX_RESUBMITS);
+    } else if (ofpbuf_size(&ctx->xout->odp_actions) > UINT16_MAX) {
+        VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of actions");
+    } else if (ofpbuf_size(&ctx->stack) >= 65536) {
+        VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of stack");
+    } else {
+        return true;
+    }
 
-    rule_dpif_release(rule);
+    return false;
 }
 
 static void
-xlate_table_action(struct xlate_ctx *ctx,
-                   ofp_port_t in_port, uint8_t table_id, bool may_packet_in)
+xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
+                   bool may_packet_in, bool honor_table_miss)
 {
-    if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
-        struct rule_dpif *rule;
+    if (xlate_resubmit_resource_check(ctx)) {
         ofp_port_t old_in_port = ctx->xin->flow.in_port.ofp_port;
+        bool skip_wildcards = ctx->xin->skip_wildcards;
         uint8_t old_table_id = ctx->table_id;
-        bool got_rule;
+        struct rule_dpif *rule;
+        enum rule_dpif_lookup_verdict verdict;
+        enum ofputil_port_config config = 0;
 
         ctx->table_id = table_id;
 
@@ -1705,40 +2046,199 @@ xlate_table_action(struct xlate_ctx *ctx,
          * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
          * have surprising behavior). */
         ctx->xin->flow.in_port.ofp_port = in_port;
-        got_rule = rule_dpif_lookup_in_table(ctx->xbridge->ofproto,
-                                             &ctx->xin->flow, &ctx->xout->wc,
-                                             table_id, &rule);
+        verdict = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
+                                              &ctx->xin->flow,
+                                              !skip_wildcards
+                                              ? &ctx->xout->wc : NULL,
+                                              honor_table_miss,
+                                              &ctx->table_id, &rule,
+                                              ctx->xin->xcache != NULL);
         ctx->xin->flow.in_port.ofp_port = old_in_port;
 
         if (ctx->xin->resubmit_hook) {
             ctx->xin->resubmit_hook(ctx->xin, rule, ctx->recurse);
         }
 
-        if (got_rule) {
-            xlate_recursively(ctx, rule);
-        } else if (may_packet_in) {
-            struct xport *xport;
-
-            /* XXX
-             * check if table configuration flags
-             * OFPTC_TABLE_MISS_CONTROLLER, default.
-             * OFPTC_TABLE_MISS_CONTINUE,
-             * OFPTC_TABLE_MISS_DROP
-             * When OF1.0, OFPTC_TABLE_MISS_CONTINUE is used. What to do? */
-            xport = get_ofp_port(ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
-            choose_miss_rule(xport ? xport->config : 0,
-                             ctx->xbridge->miss_rule,
-                             ctx->xbridge->no_packet_in_rule, &rule);
+        switch (verdict) {
+        case RULE_DPIF_LOOKUP_VERDICT_MATCH:
+           goto match;
+        case RULE_DPIF_LOOKUP_VERDICT_CONTROLLER:
+            if (may_packet_in) {
+                struct xport *xport;
+
+                xport = get_ofp_port(ctx->xbridge,
+                                     ctx->xin->flow.in_port.ofp_port);
+                config = xport ? xport->config : 0;
+                break;
+            }
+            /* Fall through to drop */
+        case RULE_DPIF_LOOKUP_VERDICT_DROP:
+            config = OFPUTIL_PC_NO_PACKET_IN;
+            break;
+        case RULE_DPIF_LOOKUP_VERDICT_DEFAULT:
+            if (!ofproto_dpif_wants_packet_in_on_miss(ctx->xbridge->ofproto)) {
+                config = OFPUTIL_PC_NO_PACKET_IN;
+            }
+            break;
+        default:
+            OVS_NOT_REACHED();
+        }
+
+        choose_miss_rule(config, ctx->xbridge->miss_rule,
+                         ctx->xbridge->no_packet_in_rule, &rule,
+                         ctx->xin->xcache != NULL);
+
+match:
+        if (rule) {
+            /* Fill in the cache entry here instead of xlate_recursively
+             * to make the reference counting more explicit.  We take a
+             * reference in the lookups above if we are going to cache the
+             * rule. */
+            if (ctx->xin->xcache) {
+                struct xc_entry *entry;
+
+                entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
+                entry->u.rule = rule;
+            }
             xlate_recursively(ctx, rule);
         }
 
         ctx->table_id = old_table_id;
+        return;
+    }
+
+    ctx->exit = true;
+}
+
+static void
+xlate_group_bucket(struct xlate_ctx *ctx, const struct ofputil_bucket *bucket)
+{
+    uint64_t action_list_stub[1024 / 8];
+    struct ofpbuf action_list, action_set;
+
+    ofpbuf_use_const(&action_set, bucket->ofpacts, bucket->ofpacts_len);
+    ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);
+
+    ofpacts_execute_action_set(&action_list, &action_set);
+    ctx->recurse++;
+    do_xlate_actions(ofpbuf_data(&action_list), ofpbuf_size(&action_list), ctx);
+    ctx->recurse--;
+
+    ofpbuf_uninit(&action_set);
+    ofpbuf_uninit(&action_list);
+}
+
+static void
+xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
+{
+    const struct ofputil_bucket *bucket;
+    const struct list *buckets;
+    struct flow old_flow = ctx->xin->flow;
+
+    group_dpif_get_buckets(group, &buckets);
+
+    LIST_FOR_EACH (bucket, list_node, buckets) {
+        xlate_group_bucket(ctx, bucket);
+        /* Roll back flow to previous state.
+         * This is equivalent to cloning the packet for each bucket.
+         *
+         * As a side effect any subsequently applied actions will
+         * also effectively be applied to a clone of the packet taken
+         * just before applying the all or indirect group. */
+        ctx->xin->flow = old_flow;
+    }
+}
+
+static void
+xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
+{
+    const struct ofputil_bucket *bucket;
+
+    bucket = group_first_live_bucket(ctx, group, 0);
+    if (bucket) {
+        xlate_group_bucket(ctx, bucket);
+    }
+}
+
+static void
+xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
+{
+    struct flow_wildcards *wc = &ctx->xout->wc;
+    const struct ofputil_bucket *bucket;
+    uint32_t basis;
+
+    basis = hash_mac(ctx->xin->flow.dl_dst, 0, 0);
+    bucket = group_best_live_bucket(ctx, group, basis);
+    if (bucket) {
+        memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
+        xlate_group_bucket(ctx, bucket);
+    }
+}
+
+static void
+xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group)
+{
+    ctx->in_group = true;
+
+    switch (group_dpif_get_type(group)) {
+    case OFPGT11_ALL:
+    case OFPGT11_INDIRECT:
+        xlate_all_group(ctx, group);
+        break;
+    case OFPGT11_SELECT:
+        xlate_select_group(ctx, group);
+        break;
+    case OFPGT11_FF:
+        xlate_ff_group(ctx, group);
+        break;
+    default:
+        OVS_NOT_REACHED();
+    }
+    group_dpif_release(group);
+
+    ctx->in_group = false;
+}
+
+static bool
+xlate_group_resource_check(struct xlate_ctx *ctx)
+{
+    if (!xlate_resubmit_resource_check(ctx)) {
+        return false;
+    } else if (ctx->in_group) {
+        /* Prevent nested translation of OpenFlow groups.
+         *
+         * OpenFlow allows this restriction.  We enforce this restriction only
+         * because, with the current architecture, we would otherwise have to
+         * take a possibly recursive read lock on the ofgroup rwlock, which is
+         * unsafe given that POSIX allows taking a read lock to block if there
+         * is a thread blocked on taking the write lock.  Other solutions
+         * without this restriction are also possible, but seem unwarranted
+         * given the current limited use of groups. */
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+        VLOG_ERR_RL(&rl, "cannot recursively translate OpenFlow group");
+        return false;
     } else {
-        static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);
+        return true;
+    }
+}
 
-        VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
-                    MAX_RESUBMIT_RECURSION);
+static bool
+xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id)
+{
+    if (xlate_group_resource_check(ctx)) {
+        struct group_dpif *group;
+        bool got_group;
+
+        got_group = group_dpif_lookup(ctx->xbridge->ofproto, group_id, &group);
+        if (got_group) {
+            xlate_group_action__(ctx, group);
+        } else {
+            return true;
+        }
     }
+
+    return false;
 }
 
 static void
@@ -1747,6 +2247,15 @@ xlate_ofpact_resubmit(struct xlate_ctx *ctx,
 {
     ofp_port_t in_port;
     uint8_t table_id;
+    bool may_packet_in = false;
+    bool honor_table_miss = false;
+
+    if (ctx->rule && rule_dpif_is_internal(ctx->rule)) {
+        /* Still allow missed packets to be sent to the controller
+         * if resubmitting from an internal table. */
+        may_packet_in = true;
+        honor_table_miss = true;
+    }
 
     in_port = resubmit->in_port;
     if (in_port == OFPP_IN_PORT) {
@@ -1758,7 +2267,8 @@ xlate_ofpact_resubmit(struct xlate_ctx *ctx,
         table_id = ctx->table_id;
     }
 
-    xlate_table_action(ctx, in_port, table_id, false);
+    xlate_table_action(ctx, in_port, table_id, may_packet_in,
+                       honor_table_miss);
 }
 
 static void
@@ -1786,74 +2296,88 @@ execute_controller_action(struct xlate_ctx *ctx, int len,
                           enum ofp_packet_in_reason reason,
                           uint16_t controller_id)
 {
-    struct ofputil_packet_in *pin;
+    struct ofproto_packet_in *pin;
     struct ofpbuf *packet;
-    struct flow key;
+    struct pkt_metadata md = PKT_METADATA_INITIALIZER(0);
 
-    ovs_assert(!ctx->xout->slow || ctx->xout->slow == SLOW_CONTROLLER);
-    ctx->xout->slow = SLOW_CONTROLLER;
+    ctx->xout->slow |= SLOW_CONTROLLER;
     if (!ctx->xin->packet) {
         return;
     }
 
     packet = ofpbuf_clone(ctx->xin->packet);
 
-    key.skb_priority = 0;
-    key.pkt_mark = 0;
-    memset(&key.tunnel, 0, sizeof key.tunnel);
-
-    commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
-                       &ctx->xout->odp_actions, &ctx->xout->wc);
+    ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
+                                          &ctx->xout->odp_actions,
+                                          &ctx->xout->wc);
 
-    odp_execute_actions(NULL, packet, &key, ctx->xout->odp_actions.data,
-                        ctx->xout->odp_actions.size, NULL, NULL);
+    odp_execute_actions(NULL, packet, false, &md,
+                        ofpbuf_data(&ctx->xout->odp_actions),
+                        ofpbuf_size(&ctx->xout->odp_actions), NULL);
 
     pin = xmalloc(sizeof *pin);
-    pin->packet_len = packet->size;
-    pin->packet = ofpbuf_steal_data(packet);
-    pin->reason = reason;
-    pin->controller_id = controller_id;
-    pin->table_id = ctx->table_id;
-    pin->cookie = ctx->rule ? rule_dpif_get_flow_cookie(ctx->rule) : 0;
+    pin->up.packet_len = ofpbuf_size(packet);
+    pin->up.packet = ofpbuf_steal_data(packet);
+    pin->up.reason = reason;
+    pin->up.table_id = ctx->table_id;
+    pin->up.cookie = (ctx->rule
+                      ? rule_dpif_get_flow_cookie(ctx->rule)
+                      : OVS_BE64_MAX);
 
-    pin->send_len = len;
-    flow_get_metadata(&ctx->xin->flow, &pin->fmd);
+    flow_get_metadata(&ctx->xin->flow, &pin->up.fmd);
 
+    pin->controller_id = controller_id;
+    pin->send_len = len;
+    /* If a rule is a table-miss rule then this is
+     * a table-miss handled by a table-miss rule.
+     *
+     * Else, if rule is internal and has a controller action,
+     * the latter being implied by the rule being processed here,
+     * then this is a table-miss handled without a table-miss rule.
+     *
+     * Otherwise this is not a table-miss. */
+    pin->miss_type = OFPROTO_PACKET_IN_NO_MISS;
+    if (ctx->rule) {
+        if (rule_dpif_is_table_miss(ctx->rule)) {
+            pin->miss_type = OFPROTO_PACKET_IN_MISS_FLOW;
+        } else if (rule_dpif_is_internal(ctx->rule)) {
+            pin->miss_type = OFPROTO_PACKET_IN_MISS_WITHOUT_FLOW;
+        }
+    }
     ofproto_dpif_send_packet_in(ctx->xbridge->ofproto, pin);
     ofpbuf_delete(packet);
 }
 
 static void
-compose_mpls_push_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
+compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
 {
     struct flow_wildcards *wc = &ctx->xout->wc;
     struct flow *flow = &ctx->xin->flow;
+    int n;
 
-    ovs_assert(eth_type_mpls(eth_type));
-
-    memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
-    memset(&wc->masks.mpls_depth, 0xff, sizeof wc->masks.mpls_depth);
+    ovs_assert(eth_type_mpls(mpls->ethertype));
 
-    if (flow->mpls_depth) {
-        flow->mpls_lse &= ~htonl(MPLS_BOS_MASK);
-        flow->mpls_depth++;
-    } else {
-        ovs_be32 label;
-        uint8_t tc, ttl;
-
-        if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
-            label = htonl(0x2); /* IPV6 Explicit Null. */
-        } else {
-            label = htonl(0x0); /* IPV4 Explicit Null. */
+    n = flow_count_mpls_labels(flow, wc);
+    if (!n) {
+        ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
+                                              &ctx->xout->odp_actions,
+                                              &ctx->xout->wc);
+    } else if (n >= FLOW_MAX_MPLS_LABELS) {
+        if (ctx->xin->packet != NULL) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+            VLOG_WARN_RL(&rl, "bridge %s: dropping packet on which an "
+                         "MPLS push action can't be performed as it would "
+                         "have more MPLS LSEs than the %d supported.",
+                         ctx->xbridge->name, FLOW_MAX_MPLS_LABELS);
         }
-        wc->masks.nw_tos |= IP_DSCP_MASK;
-        wc->masks.nw_ttl = 0xff;
-        tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
-        ttl = flow->nw_ttl ? flow->nw_ttl : 0x40;
-        flow->mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
-        flow->mpls_depth = 1;
+        ctx->exit = true;
+        return;
+    } else if (n >= ctx->xbridge->max_mpls_depth) {
+        COVERAGE_INC(xlate_actions_mpls_overflow);
+        ctx->xout->slow |= SLOW_ACTION;
     }
-    flow->dl_type = eth_type;
+
+    flow_push_mpls(flow, n, mpls->ethertype, wc);
 }
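
As an example of the depth handling above, with assumed numbers: if the datapath can match at most max_mpls_depth = 1 label and the flow already carries one, the push still succeeds (n is below FLOW_MAX_MPLS_LABELS) but the flow is marked SLOW_ACTION so the deeper label stack is handled in userspace; only once n reaches FLOW_MAX_MPLS_LABELS is the packet dropped and translation stopped.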
 
 static void
@@ -1861,19 +2385,18 @@ compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
 {
     struct flow_wildcards *wc = &ctx->xout->wc;
     struct flow *flow = &ctx->xin->flow;
+    int n = flow_count_mpls_labels(flow, wc);
 
-    ovs_assert(eth_type_mpls(ctx->xin->flow.dl_type));
-    ovs_assert(!eth_type_mpls(eth_type));
-
-    memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
-    memset(&wc->masks.mpls_depth, 0xff, sizeof wc->masks.mpls_depth);
-
-    if (flow->mpls_depth) {
-        flow->mpls_depth--;
-        flow->mpls_lse = htonl(0);
-        if (!flow->mpls_depth) {
-            flow->dl_type = eth_type;
+    if (!flow_pop_mpls(flow, n, eth_type, wc) && n >= FLOW_MAX_MPLS_LABELS) {
+        if (ctx->xin->packet != NULL) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+            VLOG_WARN_RL(&rl, "bridge %s: dropping packet on which an "
+                         "MPLS pop action can't be performed as it has "
+                         "more MPLS LSEs than the %d supported.",
+                         ctx->xbridge->name, FLOW_MAX_MPLS_LABELS);
         }
+        ctx->exit = true;
+        ofpbuf_clear(&ctx->xout->odp_actions);
     }
 }
 
@@ -1903,39 +2426,53 @@ compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
     }
 }
 
-static bool
-compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
+static void
+compose_set_mpls_label_action(struct xlate_ctx *ctx, ovs_be32 label)
 {
-    if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
-        return true;
+    if (eth_type_mpls(ctx->xin->flow.dl_type)) {
+        ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
+        set_mpls_lse_label(&ctx->xin->flow.mpls_lse[0], label);
     }
+}
 
-    ctx->xout->wc.masks.mpls_lse |= htonl(MPLS_TTL_MASK);
-    set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl);
-    return false;
+static void
+compose_set_mpls_tc_action(struct xlate_ctx *ctx, uint8_t tc)
+{
+    if (eth_type_mpls(ctx->xin->flow.dl_type)) {
+        ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
+        set_mpls_lse_tc(&ctx->xin->flow.mpls_lse[0], tc);
+    }
+}
+
+static void
+compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
+{
+    if (eth_type_mpls(ctx->xin->flow.dl_type)) {
+        ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
+        set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse[0], ttl);
+    }
 }
 
 static bool
 compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
 {
     struct flow *flow = &ctx->xin->flow;
-    uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse);
+    uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);
     struct flow_wildcards *wc = &ctx->xout->wc;
 
     memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
+    if (eth_type_mpls(flow->dl_type)) {
+        if (ttl > 1) {
+            ttl--;
+            set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
+            return false;
+        } else {
+            execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
 
-    if (!eth_type_mpls(flow->dl_type)) {
-        return false;
-    }
-
-    if (ttl > 1) {
-        ttl--;
-        set_mpls_lse_ttl(&flow->mpls_lse, ttl);
-        return false;
+            /* Stop processing for current table. */
+            return true;
+        }
     } else {
-        execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
-
-        /* Stop processing for current table. */
         return true;
     }
 }
@@ -1954,7 +2491,7 @@ xlate_output_action(struct xlate_ctx *ctx,
         break;
     case OFPP_TABLE:
         xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
-                           0, may_packet_in);
+                           0, may_packet_in, true);
         break;
     case OFPP_NORMAL:
         xlate_normal(ctx);
@@ -2099,7 +2636,8 @@ static void
 xlate_learn_action(struct xlate_ctx *ctx,
                    const struct ofpact_learn *learn)
 {
-    struct ofputil_flow_mod *fm;
+    uint64_t ofpacts_stub[1024 / 8];
+    struct ofputil_flow_mod fm;
     struct ofpbuf ofpacts;
 
     ctx->xout->has_learn = true;
@@ -2110,20 +2648,49 @@ xlate_learn_action(struct xlate_ctx *ctx,
         return;
     }
 
-    fm = xmalloc(sizeof *fm);
-    ofpbuf_init(&ofpacts, 0);
-    learn_execute(learn, &ctx->xin->flow, fm, &ofpacts);
+    ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
+    learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
+    ofproto_dpif_flow_mod(ctx->xbridge->ofproto, &fm);
+    ofpbuf_uninit(&ofpacts);
 
-    ofproto_dpif_flow_mod(ctx->xbridge->ofproto, fm);
+    if (ctx->xin->xcache) {
+        struct xc_entry *entry;
+
+        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
+        entry->u.learn.ofproto = ctx->xin->ofproto;
+        /* Lookup the learned rule, taking a reference on it.  The reference
+         * is released when this cache entry is deleted. */
+        rule_dpif_lookup(ctx->xbridge->ofproto, &ctx->xin->flow, NULL,
+                         &entry->u.learn.rule, true);
+    }
+}
+
+static void
+xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
+                    uint16_t idle_timeout, uint16_t hard_timeout)
+{
+    if (tcp_flags & (TCP_FIN | TCP_RST)) {
+        rule_dpif_reduce_timeouts(rule, idle_timeout, hard_timeout);
+    }
 }
 
 static void
 xlate_fin_timeout(struct xlate_ctx *ctx,
                   const struct ofpact_fin_timeout *oft)
 {
-    if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
-        rule_dpif_reduce_timeouts(ctx->rule, oft->fin_idle_timeout,
-                                  oft->fin_hard_timeout);
+    if (ctx->rule) {
+        xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
+                            oft->fin_idle_timeout, oft->fin_hard_timeout);
+        if (ctx->xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
+            /* XC_RULE already holds a reference on the rule, none is taken
+             * here. */
+            entry->u.fin.rule = ctx->rule;
+            entry->u.fin.idle = oft->fin_idle_timeout;
+            entry->u.fin.hard = oft->fin_hard_timeout;
+        }
     }
 }
 
@@ -2136,8 +2703,18 @@ xlate_sample_action(struct xlate_ctx *ctx,
    * the same percentage. */
   uint32_t probability = (os->probability << 16) | os->probability;
 
-  commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
-                     &ctx->xout->odp_actions, &ctx->xout->wc);
+  if (!ctx->xbridge->variable_length_userdata) {
+      static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+      VLOG_ERR_RL(&rl, "ignoring NXAST_SAMPLE action because datapath "
+                  "lacks support (needs Linux 3.10+ or kernel module from "
+                  "OVS 1.11+)");
+      return;
+  }
+
+  ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
+                                        &ctx->xout->odp_actions,
+                                        &ctx->xout->wc);
 
   compose_flow_sample_cookie(os->probability, os->collector_set_id,
                              os->obs_domain_id, os->obs_point_id, &cookie);
@@ -2148,7 +2725,7 @@ xlate_sample_action(struct xlate_ctx *ctx,
 static bool
 may_receive(const struct xport *xport, struct xlate_ctx *ctx)
 {
-    if (xport->config & (eth_addr_equals(ctx->xin->flow.dl_dst, eth_addr_stp)
+    if (xport->config & (is_stp(&ctx->xin->flow)
                          ? OFPUTIL_PC_NO_RECV_STP
                          : OFPUTIL_PC_NO_RECV)) {
         return false;
@@ -2165,6 +2742,26 @@ may_receive(const struct xport *xport, struct xlate_ctx *ctx)
     return true;
 }
 
+static void
+xlate_write_actions(struct xlate_ctx *ctx, const struct ofpact *a)
+{
+    struct ofpact_nest *on = ofpact_get_WRITE_ACTIONS(a);
+    ofpbuf_put(&ctx->action_set, on->actions, ofpact_nest_get_action_len(on));
+    ofpact_pad(&ctx->action_set);
+}
+
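+/* Converts the OpenFlow action set accumulated in 'ctx->action_set' into an
+ * ordered action list and translates it with do_xlate_actions().  In this
+ * translation it runs once, after the main pipeline (see xlate_actions__()). */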
+static void
+xlate_action_set(struct xlate_ctx *ctx)
+{
+    uint64_t action_list_stub[1024 / 64];
+    struct ofpbuf action_list;
+
+    ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);
+    ofpacts_execute_action_set(&action_list, &ctx->action_set);
+    do_xlate_actions(ofpbuf_data(&action_list), ofpbuf_size(&action_list), ctx);
+    ofpbuf_uninit(&action_list);
+}
+
 static void
 do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
                  struct xlate_ctx *ctx)
@@ -2173,9 +2770,13 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
     struct flow *flow = &ctx->xin->flow;
     const struct ofpact *a;
 
+    /* dl_type is already in the mask, so it is not set again below. */
+
     OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
         struct ofpact_controller *controller;
         const struct ofpact_metadata *metadata;
+        const struct ofpact_set_field *set_field;
+        const struct mf_field *mf;
 
         if (ctx->exit) {
             break;
@@ -2188,7 +2789,9 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
             break;
 
         case OFPACT_GROUP:
-            /* XXX not yet implemented */
+            if (xlate_group_action(ctx, ofpact_get_GROUP(a)->group_id)) {
+                return;
+            }
             break;
 
         case OFPACT_CONTROLLER:
@@ -2204,17 +2807,22 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
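        /* For both VLAN cases below, VLAN_CFI set in 'vlan_tci' means the
         * packet already carries an 802.1Q tag.  'push_vlan_if_needed'
         * (OpenFlow 1.0 semantics) makes the action apply to untagged packets
         * too; without it (OpenFlow 1.1+) the action only modifies an
         * existing tag. */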
 
         case OFPACT_SET_VLAN_VID:
             wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
-            flow->vlan_tci &= ~htons(VLAN_VID_MASK);
-            flow->vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
-                               | htons(VLAN_CFI));
+            if (flow->vlan_tci & htons(VLAN_CFI) ||
+                ofpact_get_SET_VLAN_VID(a)->push_vlan_if_needed) {
+                flow->vlan_tci &= ~htons(VLAN_VID_MASK);
+                flow->vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
+                                   | htons(VLAN_CFI));
+            }
             break;
 
         case OFPACT_SET_VLAN_PCP:
             wc->masks.vlan_tci |= htons(VLAN_PCP_MASK | VLAN_CFI);
-            flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
-            flow->vlan_tci |=
-                htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp << VLAN_PCP_SHIFT)
-                      | VLAN_CFI);
+            if (flow->vlan_tci & htons(VLAN_CFI) ||
+                ofpact_get_SET_VLAN_PCP(a)->push_vlan_if_needed) {
+                flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
+                flow->vlan_tci |= htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
+                                         << VLAN_PCP_SHIFT) | VLAN_CFI);
+            }
             break;
 
         case OFPACT_STRIP_VLAN:
@@ -2239,40 +2847,54 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
             break;
 
         case OFPACT_SET_IPV4_SRC:
-            memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
             if (flow->dl_type == htons(ETH_TYPE_IP)) {
+                memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
                 flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
             }
             break;
 
         case OFPACT_SET_IPV4_DST:
-            memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
             if (flow->dl_type == htons(ETH_TYPE_IP)) {
+                memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
                 flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
             }
             break;
 
-        case OFPACT_SET_IPV4_DSCP:
-            wc->masks.nw_tos |= IP_DSCP_MASK;
-            /* OpenFlow 1.0 only supports IPv4. */
-            if (flow->dl_type == htons(ETH_TYPE_IP)) {
+        case OFPACT_SET_IP_DSCP:
+            if (is_ip_any(flow)) {
+                wc->masks.nw_tos |= IP_DSCP_MASK;
                 flow->nw_tos &= ~IP_DSCP_MASK;
-                flow->nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
+                flow->nw_tos |= ofpact_get_SET_IP_DSCP(a)->dscp;
+            }
+            break;
+
+        case OFPACT_SET_IP_ECN:
+            if (is_ip_any(flow)) {
+                wc->masks.nw_tos |= IP_ECN_MASK;
+                flow->nw_tos &= ~IP_ECN_MASK;
+                flow->nw_tos |= ofpact_get_SET_IP_ECN(a)->ecn;
+            }
+            break;
+
+        case OFPACT_SET_IP_TTL:
+            if (is_ip_any(flow)) {
+                wc->masks.nw_ttl = 0xff;
+                flow->nw_ttl = ofpact_get_SET_IP_TTL(a)->ttl;
             }
             break;
 
         case OFPACT_SET_L4_SRC_PORT:
-            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
-            memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
             if (is_ip_any(flow)) {
+                memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
+                memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
                 flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
             }
             break;
 
         case OFPACT_SET_L4_DST_PORT:
-            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
-            memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
             if (is_ip_any(flow)) {
+                memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
+                memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
                 flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
             }
             break;
@@ -2298,7 +2920,28 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
             break;
 
         case OFPACT_REG_LOAD:
-            nxm_execute_reg_load(ofpact_get_REG_LOAD(a), flow);
+            nxm_execute_reg_load(ofpact_get_REG_LOAD(a), flow, wc);
+            break;
+
+        case OFPACT_SET_FIELD:
+            set_field = ofpact_get_SET_FIELD(a);
+            mf = set_field->field;
+
+            /* The set-field action only ever overwrites the packet's
+             * outermost applicable header fields.  Do nothing if no such
+             * header exists. */
+            if (mf->id == MFF_VLAN_VID) {
+                wc->masks.vlan_tci |= htons(VLAN_CFI);
+                if (!(flow->vlan_tci & htons(VLAN_CFI))) {
+                    break;
+                }
+            } else if ((mf->id == MFF_MPLS_LABEL || mf->id == MFF_MPLS_TC)
+                       /* 'dl_type' is already unwildcarded. */
+                       && !eth_type_mpls(flow->dl_type)) {
+                break;
+            }
+
+            mf_mask_field_and_prereqs(mf, &wc->masks);
+            mf_set_flow_value(mf, &set_field->value, flow);
             break;
 
         case OFPACT_STACK_PUSH:
@@ -2312,18 +2955,24 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
             break;
 
         case OFPACT_PUSH_MPLS:
-            compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);
+            compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a));
             break;
 
         case OFPACT_POP_MPLS:
             compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
             break;
 
+        case OFPACT_SET_MPLS_LABEL:
+            compose_set_mpls_label_action(
+                ctx, ofpact_get_SET_MPLS_LABEL(a)->label);
+            break;
+
+        case OFPACT_SET_MPLS_TC:
+            compose_set_mpls_tc_action(ctx, ofpact_get_SET_MPLS_TC(a)->tc);
+            break;
+
         case OFPACT_SET_MPLS_TTL:
-            if (compose_set_mpls_ttl_action(ctx,
-                                            ofpact_get_SET_MPLS_TTL(a)->ttl)) {
-                return;
-            }
+            compose_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl);
             break;
 
         case OFPACT_DEC_MPLS_TTL:
@@ -2370,11 +3019,11 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
             break;
 
         case OFPACT_CLEAR_ACTIONS:
-            /* XXX
-             * Nothing to do because writa-actions is not supported for now.
-             * When writa-actions is supported, clear-actions also must
-             * be supported at the same time.
-             */
+            ofpbuf_clear(&ctx->action_set);
+            break;
+
+        case OFPACT_WRITE_ACTIONS:
+            xlate_write_actions(ctx, a);
             break;
 
         case OFPACT_WRITE_METADATA:
@@ -2388,12 +3037,11 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
             break;
 
         case OFPACT_GOTO_TABLE: {
-            /* It is assumed that goto-table is the last action. */
             struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
 
             ovs_assert(ctx->table_id < ogt->table_id);
             xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
-                               ogt->table_id, true);
+                               ogt->table_id, true, true);
             break;
         }
 
@@ -2407,19 +3055,21 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
 void
 xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
               const struct flow *flow, struct rule_dpif *rule,
-              uint8_t tcp_flags, const struct ofpbuf *packet)
+              uint16_t tcp_flags, const struct ofpbuf *packet)
 {
     xin->ofproto = ofproto;
     xin->flow = *flow;
     xin->packet = packet;
     xin->may_learn = packet != NULL;
     xin->rule = rule;
+    xin->xcache = NULL;
     xin->ofpacts = NULL;
     xin->ofpacts_len = 0;
     xin->tcp_flags = tcp_flags;
     xin->resubmit_hook = NULL;
     xin->report_hook = NULL;
     xin->resubmit_stats = NULL;
+    xin->skip_wildcards = false;
 }
 
 void
@@ -2462,8 +3112,8 @@ xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
 
     ofpbuf_use_stub(&dst->odp_actions, dst->odp_actions_stub,
                     sizeof dst->odp_actions_stub);
-    ofpbuf_put(&dst->odp_actions, src->odp_actions.data,
-               src->odp_actions.size);
+    ofpbuf_put(&dst->odp_actions, ofpbuf_data(&src->odp_actions),
+               ofpbuf_size(&src->odp_actions));
 }
 \f
 static struct skb_priority_to_dscp *
@@ -2508,8 +3158,8 @@ actions_output_to_local_port(const struct xlate_ctx *ctx)
     const struct nlattr *a;
     unsigned int left;
 
-    NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->xout->odp_actions.data,
-                             ctx->xout->odp_actions.size) {
+    NL_ATTR_FOR_EACH_UNSAFE (a, left, ofpbuf_data(&ctx->xout->odp_actions),
+                             ofpbuf_size(&ctx->xout->odp_actions)) {
         if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
             && nl_attr_get_odp_port(a) == local_odp_port) {
             return true;
@@ -2518,18 +3168,30 @@ actions_output_to_local_port(const struct xlate_ctx *ctx)
     return false;
 }
 
+/* Thread-safe wrapper around xlate_actions__(). */
+void
+xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
+    OVS_EXCLUDED(xlate_rwlock)
+{
+    ovs_rwlock_rdlock(&xlate_rwlock);
+    xlate_actions__(xin, xout);
+    ovs_rwlock_unlock(&xlate_rwlock);
+}
+
 /* Translates the 'ofpacts_len' bytes of "struct ofpacts" starting at 'ofpacts'
  * into datapath actions in 'odp_actions', using 'ctx'.
  *
  * The caller must take responsibility for eventually freeing 'xout', with
  * xlate_out_uninit(). */
-void
-xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
+static void
+xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
+    OVS_REQ_RDLOCK(xlate_rwlock)
 {
     struct flow_wildcards *wc = &xout->wc;
     struct flow *flow = &xin->flow;
+    struct rule_dpif *rule = NULL;
 
-    struct rule_actions *actions = NULL;
+    const struct rule_actions *actions = NULL;
     enum slow_path_reason special;
     const struct ofpact *ofpacts;
     struct xport *in_port;
@@ -2537,11 +3199,10 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
     struct xlate_ctx ctx;
     size_t ofpacts_len;
     bool tnl_may_send;
+    bool is_icmp;
 
     COVERAGE_INC(xlate_actions);
 
-    ovs_rwlock_rdlock(&xlate_rwlock);
-
     /* Flow initialization rules:
      * - 'base_flow' must match the kernel's view of the packet at the
      *   time that action processing starts.  'flow' represents any
@@ -2577,7 +3238,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
 
     ctx.xbridge = xbridge_lookup(xin->ofproto);
     if (!ctx.xbridge) {
-        goto out;
+        return;
     }
 
     ctx.rule = xin->rule;
@@ -2590,30 +3251,55 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
     memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port);
     memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
     memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
-    wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
+    if (is_ip_any(flow)) {
+        wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
+    }
+    is_icmp = is_icmpv4(flow) || is_icmpv6(flow);
 
     tnl_may_send = tnl_xlate_init(&ctx.base_flow, flow, wc);
-    if (ctx.xbridge->has_netflow) {
+    if (ctx.xbridge->netflow) {
         netflow_mask_wc(flow, wc);
     }
 
     ctx.recurse = 0;
+    ctx.resubmits = 0;
+    ctx.in_group = false;
     ctx.orig_skb_priority = flow->skb_priority;
     ctx.table_id = 0;
     ctx.exit = false;
 
+    if (!xin->ofpacts && !ctx.rule) {
+        ctx.table_id = rule_dpif_lookup(ctx.xbridge->ofproto, flow,
+                                        !xin->skip_wildcards ? wc : NULL,
+                                        &rule, ctx.xin->xcache != NULL);
+        if (ctx.xin->resubmit_stats) {
+            rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
+        }
+        if (ctx.xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
+            entry->u.rule = rule;
+        }
+        ctx.rule = rule;
+    }
+    xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);
+    xout->use_recirc = false;
+
     if (xin->ofpacts) {
         ofpacts = xin->ofpacts;
         ofpacts_len = xin->ofpacts_len;
-    } else if (xin->rule) {
-        actions = rule_dpif_get_actions(xin->rule);
+    } else if (ctx.rule) {
+        actions = rule_dpif_get_actions(ctx.rule);
         ofpacts = actions->ofpacts;
         ofpacts_len = actions->ofpacts_len;
     } else {
-        NOT_REACHED();
+        OVS_NOT_REACHED();
     }
 
     ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);
+    ofpbuf_use_stub(&ctx.action_set,
+                    ctx.action_set_stub, sizeof ctx.action_set_stub);
 
     if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
         /* Do this conditionally because the copy is expensive enough that it
@@ -2630,24 +3316,40 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
             break;
 
         case OFPC_FRAG_DROP:
-            goto out;
+            return;
 
         case OFPC_FRAG_REASM:
-            NOT_REACHED();
+            OVS_NOT_REACHED();
 
         case OFPC_FRAG_NX_MATCH:
             /* Nothing to do. */
             break;
 
         case OFPC_INVALID_TTL_TO_CONTROLLER:
-            NOT_REACHED();
+            OVS_NOT_REACHED();
         }
     }
 
     in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
+    if (in_port && in_port->is_tunnel) {
+        if (ctx.xin->resubmit_stats) {
+            netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
+            if (in_port->bfd) {
+                bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
+            }
+        }
+        if (ctx.xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
+            entry->u.dev.rx = netdev_ref(in_port->netdev);
+            entry->u.dev.bfd = bfd_ref(in_port->bfd);
+        }
+    }
+
     special = process_special(&ctx, flow, in_port, ctx.xin->packet);
     if (special) {
-        ctx.xout->slow = special;
+        ctx.xout->slow |= special;
     } else {
         size_t sample_actions_len;
 
@@ -2660,7 +3362,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
 
         add_sflow_action(&ctx);
         add_ipfix_action(&ctx);
-        sample_actions_len = ctx.xout->odp_actions.size;
+        sample_actions_len = ofpbuf_size(&ctx.xout->odp_actions);
 
         if (tnl_may_send && (!in_port || may_receive(in_port, &ctx))) {
             do_xlate_actions(ofpacts, ofpacts_len, &ctx);
@@ -2668,10 +3370,14 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
             /* We've let OFPP_NORMAL and the learning action look at the
              * packet, so drop it now if forwarding is disabled. */
             if (in_port && !xport_stp_forward_state(in_port)) {
-                ctx.xout->odp_actions.size = sample_actions_len;
+                ofpbuf_set_size(&ctx.xout->odp_actions, sample_actions_len);
             }
         }
 
+        if (ofpbuf_size(&ctx.action_set)) {
+            xlate_action_set(&ctx);
+        }
+
         if (ctx.xbridge->has_in_band
             && in_band_must_output_to_local_port(flow)
             && !actions_output_to_local_port(&ctx)) {
@@ -2685,15 +3391,292 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
         }
     }
 
+    if (nl_attr_oversized(ofpbuf_size(&ctx.xout->odp_actions))) {
+        /* These datapath actions are too big for a Netlink attribute, so we
+         * can't hand them to the kernel directly.  dpif_execute() can execute
+         * them one by one with help, so just mark the result as SLOW_ACTION to
+         * prevent the flow from being installed. */
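+        /* (The limit comes from the 16-bit length field in a Netlink
+         * attribute header, so roughly 64 kB of actions.) */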
+        COVERAGE_INC(xlate_actions_oversize);
+        ctx.xout->slow |= SLOW_ACTION;
+    }
+
+    if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
+        if (ctx.xin->resubmit_stats) {
+            mirror_update_stats(ctx.xbridge->mbridge, xout->mirrors,
+                                ctx.xin->resubmit_stats->n_packets,
+                                ctx.xin->resubmit_stats->n_bytes);
+        }
+        if (ctx.xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_MIRROR);
+            entry->u.mirror.mbridge = mbridge_ref(ctx.xbridge->mbridge);
+            entry->u.mirror.mirrors = xout->mirrors;
+        }
+    }
+
+    if (ctx.xbridge->netflow) {
+        const struct ofpact *ofpacts = actions->ofpacts;
+        size_t ofpacts_len = actions->ofpacts_len;
+
+        /* Only update NetFlow if we don't have a controller flow.  We don't
+         * report NetFlow expiration messages for such facets because they
+         * are just part of the control logic for the network, not real
+         * traffic. */
+        if (ofpacts_len == 0
+            || ofpacts->type != OFPACT_CONTROLLER
+            || ofpact_next(ofpacts) < ofpact_end(ofpacts, ofpacts_len)) {
+            if (ctx.xin->resubmit_stats) {
+                netflow_flow_update(ctx.xbridge->netflow, flow,
+                                    xout->nf_output_iface,
+                                    ctx.xin->resubmit_stats);
+            }
+            if (ctx.xin->xcache) {
+                struct xc_entry *entry;
+
+                entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
+                entry->u.nf.netflow = netflow_ref(ctx.xbridge->netflow);
+                entry->u.nf.flow = xmemdup(flow, sizeof *flow);
+                entry->u.nf.iface = xout->nf_output_iface;
+            }
+        }
+    }
+
     ofpbuf_uninit(&ctx.stack);
+    ofpbuf_uninit(&ctx.action_set);
 
     /* Clear the metadata and register wildcard masks, because we won't
      * use non-header fields as part of the cache. */
-    memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
-    memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
+    flow_wildcards_clear_non_packet_fields(wc);
+
+    /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields.  struct flow uses
+     * the low 8 bits of the 16-bit tp_src and tp_dst members to represent
+     * these fields.  The datapath interface, on the other hand, represents
+     * them with just 8 bits each.  This means that if the high 8 bits of the
+     * masks for these fields somehow become set, then they will get chopped
+     * off by a round trip through the datapath, and revalidation will spot
+     * that as an inconsistency and delete the flow.  Avoid the problem here by
+     * making sure that only the low 8 bits of either field can be unwildcarded
+     * for ICMP.
+     */
+    if (is_icmp) {
+        wc->masks.tp_src &= htons(UINT8_MAX);
+        wc->masks.tp_dst &= htons(UINT8_MAX);
+    }
+}
+
+/* Sends 'packet' out 'ofport'.
+ * May modify 'packet'.
+ * Returns 0 if successful, otherwise a positive errno value. */
+int
+xlate_send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
+{
+    struct xport *xport;
+    struct ofpact_output output;
+    struct flow flow;
 
-out:
+    ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
+    /* Use OFPP_NONE as the in_port to avoid special packet processing. */
+    flow_extract(packet, NULL, &flow);
+    flow.in_port.ofp_port = OFPP_NONE;
+
+    ovs_rwlock_rdlock(&xlate_rwlock);
+    xport = xport_lookup(ofport);
+    if (!xport) {
+        ovs_rwlock_unlock(&xlate_rwlock);
+        return EINVAL;
+    }
+    output.port = xport->ofp_port;
+    output.max_len = 0;
     ovs_rwlock_unlock(&xlate_rwlock);
 
-    rule_actions_unref(actions);
+    return ofproto_dpif_execute_actions(xport->xbridge->ofproto, &flow, NULL,
+                                        &output.ofpact, sizeof output,
+                                        packet);
+}
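A brief usage sketch for the helper above; the caller, frame source, and error handling here are illustrative:

static void
example_send(const struct ofport_dpif *ofport, struct ofpbuf *frame)
{
    /* 'frame' must already contain a complete Ethernet frame; translation may
     * modify it in place. */
    int error = xlate_send_packet(ofport, frame);
    if (error) {
        VLOG_WARN("failed to send packet (%s)", ovs_strerror(error));
    }
}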
+
+struct xlate_cache *
+xlate_cache_new(void)
+{
+    struct xlate_cache *xcache = xmalloc(sizeof *xcache);
+
+    ofpbuf_init(&xcache->entries, 512);
+    return xcache;
+}
+
+static struct xc_entry *
+xlate_cache_add_entry(struct xlate_cache *xcache, enum xc_type type)
+{
+    struct xc_entry *entry;
+
+    entry = ofpbuf_put_zeros(&xcache->entries, sizeof *entry);
+    entry->type = type;
+
+    return entry;
+}
+
+static void
+xlate_cache_netdev(struct xc_entry *entry, const struct dpif_flow_stats *stats)
+{
+    if (entry->u.dev.tx) {
+        netdev_vport_inc_tx(entry->u.dev.tx, stats);
+    }
+    if (entry->u.dev.rx) {
+        netdev_vport_inc_rx(entry->u.dev.rx, stats);
+    }
+    if (entry->u.dev.bfd) {
+        bfd_account_rx(entry->u.dev.bfd, stats);
+    }
+}
+
+static void
+xlate_cache_normal(struct ofproto_dpif *ofproto, struct flow *flow, int vlan)
+{
+    struct xbridge *xbridge;
+    struct xbundle *xbundle;
+    struct flow_wildcards wc;
+
+    xbridge = xbridge_lookup(ofproto);
+    if (!xbridge) {
+        return;
+    }
+
+    xbundle = lookup_input_bundle(xbridge, flow->in_port.ofp_port, false,
+                                  NULL);
+    if (!xbundle) {
+        return;
+    }
+
+    update_learning_table(xbridge, flow, &wc, vlan, xbundle);
+}
+
+/* Push stats and perform side effects of flow translation. */
+void
+xlate_push_stats(struct xlate_cache *xcache, bool may_learn,
+                 const struct dpif_flow_stats *stats)
+{
+    struct xc_entry *entry;
+    struct ofpbuf entries = xcache->entries;
+
+    XC_ENTRY_FOR_EACH (entry, entries, xcache) {
+        switch (entry->type) {
+        case XC_RULE:
+            rule_dpif_credit_stats(entry->u.rule, stats);
+            break;
+        case XC_BOND:
+            bond_account(entry->u.bond.bond, entry->u.bond.flow,
+                         entry->u.bond.vid, stats->n_bytes);
+            break;
+        case XC_NETDEV:
+            xlate_cache_netdev(entry, stats);
+            break;
+        case XC_NETFLOW:
+            netflow_flow_update(entry->u.nf.netflow, entry->u.nf.flow,
+                                entry->u.nf.iface, stats);
+            break;
+        case XC_MIRROR:
+            mirror_update_stats(entry->u.mirror.mbridge,
+                                entry->u.mirror.mirrors,
+                                stats->n_packets, stats->n_bytes);
+            break;
+        case XC_LEARN:
+            if (may_learn) {
+                struct rule_dpif *rule = entry->u.learn.rule;
+
+                /* Reset the modified time for a rule that is equivalent to
+                 * the currently cached rule.  If the rule is not the exact
+                 * rule we have cached, update the reference that we have. */
+                entry->u.learn.rule = ofproto_dpif_refresh_rule(rule);
+            }
+            break;
+        case XC_NORMAL:
+            xlate_cache_normal(entry->u.normal.ofproto, entry->u.normal.flow,
+                               entry->u.normal.vlan);
+            break;
+        case XC_FIN_TIMEOUT:
+            xlate_fin_timeout__(entry->u.fin.rule, stats->tcp_flags,
+                                entry->u.fin.idle, entry->u.fin.hard);
+            break;
+        default:
+            OVS_NOT_REACHED();
+        }
+    }
+}
+
+static void
+xlate_dev_unref(struct xc_entry *entry)
+{
+    if (entry->u.dev.tx) {
+        netdev_close(entry->u.dev.tx);
+    }
+    if (entry->u.dev.rx) {
+        netdev_close(entry->u.dev.rx);
+    }
+    if (entry->u.dev.bfd) {
+        bfd_unref(entry->u.dev.bfd);
+    }
+}
+
+static void
+xlate_cache_clear_netflow(struct netflow *netflow, struct flow *flow)
+{
+    netflow_expire(netflow, flow);
+    netflow_flow_clear(netflow, flow);
+    netflow_unref(netflow);
+    free(flow);
+}
+
+void
+xlate_cache_clear(struct xlate_cache *xcache)
+{
+    struct xc_entry *entry;
+    struct ofpbuf entries;
+
+    if (!xcache) {
+        return;
+    }
+
+    XC_ENTRY_FOR_EACH (entry, entries, xcache) {
+        switch (entry->type) {
+        case XC_RULE:
+            rule_dpif_unref(entry->u.rule);
+            break;
+        case XC_BOND:
+            free(entry->u.bond.flow);
+            bond_unref(entry->u.bond.bond);
+            break;
+        case XC_NETDEV:
+            xlate_dev_unref(entry);
+            break;
+        case XC_NETFLOW:
+            xlate_cache_clear_netflow(entry->u.nf.netflow, entry->u.nf.flow);
+            break;
+        case XC_MIRROR:
+            mbridge_unref(entry->u.mirror.mbridge);
+            break;
+        case XC_LEARN:
+            /* 'u.learn.rule' is the learned rule. */
+            rule_dpif_unref(entry->u.learn.rule);
+            break;
+        case XC_NORMAL:
+            free(entry->u.normal.flow);
+            break;
+        case XC_FIN_TIMEOUT:
+            /* 'u.fin.rule' is always already held as a XC_RULE, which
+             * has already released it's reference above. */
+            break;
+        default:
+            OVS_NOT_REACHED();
+        }
+    }
+
+    ofpbuf_clear(&xcache->entries);
+}
+
+void
+xlate_cache_delete(struct xlate_cache *xcache)
+{
+    xlate_cache_clear(xcache);
+    ofpbuf_uninit(&xcache->entries);
+    free(xcache);
 }
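Taken together, the cache functions above let a caller replay the side effects of a translation (rule references, netdev/BFD/NetFlow updates, learning) without re-translating the flow.  A rough lifecycle sketch; the wrapper function is illustrative and assumes 'xin' was prepared with xlate_in_init():

static void
example_xcache_lifecycle(struct xlate_in *xin, struct xlate_out *xout,
                         const struct dpif_flow_stats *stats)
{
    struct xlate_cache *xcache = xlate_cache_new();

    xin->xcache = xcache;           /* Entries accumulate during translation. */
    xlate_actions(xin, xout);

    /* Later, when new statistics arrive for the same flow, replay the cached
     * side effects instead of translating again. */
    xlate_push_stats(xcache, true, stats);

    /* Release all cached references once the flow is gone. */
    xlate_cache_delete(xcache);
    xlate_out_uninit(xout);
}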