ofproto: Lock hard_timeout and idle_timeout of struct rule.
[sliver-openvswitch.git] / ofproto / ofproto-dpif-xlate.c
index 852afdb..8aa2bb1 100644 (file)
@@ -16,6 +16,8 @@
 
 #include "ofproto/ofproto-dpif-xlate.h"
 
+#include <errno.h>
+
 #include "bfd.h"
 #include "bitmap.h"
 #include "bond.h"
@@ -29,6 +31,7 @@
 #include "in-band.h"
 #include "lacp.h"
 #include "learn.h"
+#include "list.h"
 #include "mac-learning.h"
 #include "meta-flow.h"
 #include "multipath.h"
 #include "odp-execute.h"
 #include "ofp-actions.h"
 #include "ofproto/ofproto-dpif-ipfix.h"
+#include "ofproto/ofproto-dpif-mirror.h"
 #include "ofproto/ofproto-dpif-sflow.h"
 #include "ofproto/ofproto-dpif.h"
 #include "tunnel.h"
 #include "vlog.h"
 
-COVERAGE_DEFINE(ofproto_dpif_xlate);
+COVERAGE_DEFINE(xlate_actions);
 
 VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);
 
@@ -51,11 +55,82 @@ VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);
  * flow translation. */
 #define MAX_RESUBMIT_RECURSION 64
 
+struct xbridge {
+    struct hmap_node hmap_node;   /* Node in global 'xbridges' map. */
+    struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */
+
+    struct list xbundles;         /* Owned xbundles. */
+    struct hmap xports;           /* Indexed by ofp_port. */
+
+    char *name;                   /* Name used in log messages. */
+    struct dpif *dpif;            /* Datapath interface. */
+    struct mac_learning *ml;      /* Mac learning handle. */
+    struct mbridge *mbridge;      /* Mirroring. */
+    struct dpif_sflow *sflow;     /* SFlow handle, or null. */
+    struct dpif_ipfix *ipfix;     /* Ipfix handle, or null. */
+    struct stp *stp;              /* STP or null if disabled. */
+
+    enum ofp_config_flags frag;   /* Fragmentation handling. */
+    bool has_netflow;             /* Bridge runs netflow? */
+    bool has_in_band;             /* Bridge has in band control? */
+    bool forward_bpdu;            /* Bridge forwards STP BPDUs? */
+};
+
+struct xbundle {
+    struct hmap_node hmap_node;    /* In global 'xbundles' map. */
+    struct ofbundle *ofbundle;     /* Key in global 'xbundles' map. */
+
+    struct list list_node;         /* In parent xbridge's 'xbundles' list. */
+    struct xbridge *xbridge;       /* Parent xbridge. */
+
+    struct list xports;            /* Contains "struct xport"s. */
+
+    char *name;                    /* Name used in log messages. */
+    struct bond *bond;             /* Nonnull iff more than one port. */
+    struct lacp *lacp;             /* LACP handle or null. */
+
+    enum port_vlan_mode vlan_mode; /* VLAN mode. */
+    int vlan;                      /* -1=trunk port, else a 12-bit VLAN ID. */
+    unsigned long *trunks;         /* Bitmap of trunked VLANs, if 'vlan' == -1.
+                                    * NULL if all VLANs are trunked. */
+    bool use_priority_tags;        /* Use 802.1p tag for frames in VLAN 0? */
+    bool floodable;                /* No port has OFPUTIL_PC_NO_FLOOD set? */
+};
+
+struct xport {
+    struct hmap_node hmap_node;      /* Node in global 'xports' map. */
+    struct ofport_dpif *ofport;      /* Key in global 'xports' map. */
+
+    struct hmap_node ofp_node;       /* Node in parent xbridge 'xports' map. */
+    ofp_port_t ofp_port;             /* Key in parent xbridge 'xports' map. */
+
+    odp_port_t odp_port;             /* Datapath port number or ODPP_NONE. */
+
+    struct list bundle_node;         /* In parent xbundle (if it exists). */
+    struct xbundle *xbundle;         /* Parent xbundle or null. */
+
+    struct netdev *netdev;           /* 'ofport''s netdev. */
+
+    struct xbridge *xbridge;         /* Parent bridge. */
+    struct xport *peer;              /* Patch port peer or null. */
+
+    enum ofputil_port_config config; /* OpenFlow port configuration. */
+    int stp_port_no;                 /* STP port number or 0 if not in use. */
+
+    struct hmap skb_priorities;      /* Map of 'skb_priority_to_dscp's. */
+
+    bool may_enable;                 /* May be enabled in bonds. */
+    bool is_tunnel;                  /* Is a tunnel port. */
+
+    struct cfm *cfm;                 /* CFM handle or null. */
+    struct bfd *bfd;                 /* BFD handle or null. */
+};
+
 struct xlate_ctx {
     struct xlate_in *xin;
     struct xlate_out *xout;
 
-    struct ofproto_dpif *ofproto;
+    const struct xbridge *xbridge;
 
     /* Flow at the last commit. */
     struct flow base_flow;
@@ -77,7 +152,6 @@ struct xlate_ctx {
     struct rule_dpif *rule;
 
     int recurse;                /* Recursion level, via xlate_table_action. */
-    bool max_resubmit_trigger;  /* Recursed too deeply during translation. */
     uint32_t orig_skb_priority; /* Priority when packet arrived. */
     uint8_t table_id;           /* OpenFlow table ID where flow was found. */
     uint32_t sflow_n_outputs;   /* Number of output ports. */
@@ -90,64 +164,576 @@ struct xlate_ctx {
  * it did not arrive on a "real" port.  'ofpp_none_bundle' exists for
  * when an input bundle is needed for validation (e.g., mirroring or
  * OFPP_NORMAL processing).  It is not connected to an 'ofproto' or have
- * any 'port' structs, so care must be taken when dealing with it. */
-static struct ofbundle ofpp_none_bundle = {
-    .name      = "OFPP_NONE",
-    .vlan_mode = PORT_VLAN_TRUNK
+ * any 'port' structs, so care must be taken when dealing with it.
+ * The bundle's name and vlan mode are initialized in lookup_input_bundle(). */
+static struct xbundle ofpp_none_bundle;
+
+/* Node in 'xport''s 'skb_priorities' map.  Used to maintain a map from
+ * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
+ * traffic egressing the 'ofport' with that priority should be marked with. */
+struct skb_priority_to_dscp {
+    struct hmap_node hmap_node; /* Node in 'xport''s 'skb_priorities' map. */
+    uint32_t skb_priority;      /* Priority of this queue (see struct flow). */
+
+    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
 };
 
-static bool may_receive(const struct ofport_dpif *, struct xlate_ctx *);
+static struct hmap xbridges = HMAP_INITIALIZER(&xbridges);
+static struct hmap xbundles = HMAP_INITIALIZER(&xbundles);
+static struct hmap xports = HMAP_INITIALIZER(&xports);
+
+static bool may_receive(const struct xport *, struct xlate_ctx *);
 static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
                              struct xlate_ctx *);
 static void xlate_normal(struct xlate_ctx *);
 static void xlate_report(struct xlate_ctx *, const char *);
 static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
                                uint8_t table_id, bool may_packet_in);
-static bool input_vid_is_valid(uint16_t vid, struct ofbundle *, bool warn);
-static uint16_t input_vid_to_vlan(const struct ofbundle *, uint16_t vid);
-static void output_normal(struct xlate_ctx *, const struct ofbundle *,
+static bool input_vid_is_valid(uint16_t vid, struct xbundle *, bool warn);
+static uint16_t input_vid_to_vlan(const struct xbundle *, uint16_t vid);
+static void output_normal(struct xlate_ctx *, const struct xbundle *,
                           uint16_t vlan);
 static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port);
 
 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
 
+static struct xbridge *xbridge_lookup(const struct ofproto_dpif *);
+static struct xbundle *xbundle_lookup(const struct ofbundle *);
+static struct xport *xport_lookup(const struct ofport_dpif *);
+static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
+static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
+                                                     uint32_t skb_priority);
+static void clear_skb_priorities(struct xport *);
+static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
+                                   uint8_t *dscp);
+
+void
+xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
+                  struct dpif *dpif, const struct mac_learning *ml,
+                  struct stp *stp, const struct mbridge *mbridge,
+                  const struct dpif_sflow *sflow,
+                  const struct dpif_ipfix *ipfix, enum ofp_config_flags frag,
+                  bool forward_bpdu, bool has_in_band, bool has_netflow)
+{
+    struct xbridge *xbridge = xbridge_lookup(ofproto);
+
+    if (!xbridge) {
+        xbridge = xzalloc(sizeof *xbridge);
+        xbridge->ofproto = ofproto;
+
+        hmap_insert(&xbridges, &xbridge->hmap_node, hash_pointer(ofproto, 0));
+        hmap_init(&xbridge->xports);
+        list_init(&xbridge->xbundles);
+    }
+
+    if (xbridge->ml != ml) {
+        mac_learning_unref(xbridge->ml);
+        xbridge->ml = mac_learning_ref(ml);
+    }
+
+    if (xbridge->mbridge != mbridge) {
+        mbridge_unref(xbridge->mbridge);
+        xbridge->mbridge = mbridge_ref(mbridge);
+    }
+
+    if (xbridge->sflow != sflow) {
+        dpif_sflow_unref(xbridge->sflow);
+        xbridge->sflow = dpif_sflow_ref(sflow);
+    }
+
+    if (xbridge->ipfix != ipfix) {
+        dpif_ipfix_unref(xbridge->ipfix);
+        xbridge->ipfix = dpif_ipfix_ref(ipfix);
+    }
+
+    if (xbridge->stp != stp) {
+        stp_unref(xbridge->stp);
+        xbridge->stp = stp_ref(stp);
+    }
+
+    free(xbridge->name);
+    xbridge->name = xstrdup(name);
+
+    xbridge->dpif = dpif;
+    xbridge->forward_bpdu = forward_bpdu;
+    xbridge->has_in_band = has_in_band;
+    xbridge->has_netflow = has_netflow;
+    xbridge->frag = frag;
+}
+
+void
+xlate_remove_ofproto(struct ofproto_dpif *ofproto)
+{
+    struct xbridge *xbridge = xbridge_lookup(ofproto);
+    struct xbundle *xbundle, *next_xbundle;
+    struct xport *xport, *next_xport;
+
+    if (!xbridge) {
+        return;
+    }
+
+    HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
+        xlate_ofport_remove(xport->ofport);
+    }
+
+    LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
+        xlate_bundle_remove(xbundle->ofbundle);
+    }
+
+    hmap_remove(&xbridges, &xbridge->hmap_node);
+    free(xbridge->name);
+    free(xbridge);
+}
+
+void
+xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
+                 const char *name, enum port_vlan_mode vlan_mode, int vlan,
+                 unsigned long *trunks, bool use_priority_tags,
+                 const struct bond *bond, const struct lacp *lacp,
+                 bool floodable)
+{
+    struct xbundle *xbundle = xbundle_lookup(ofbundle);
+
+    if (!xbundle) {
+        xbundle = xzalloc(sizeof *xbundle);
+        xbundle->ofbundle = ofbundle;
+        xbundle->xbridge = xbridge_lookup(ofproto);
+
+        hmap_insert(&xbundles, &xbundle->hmap_node, hash_pointer(ofbundle, 0));
+        list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
+        list_init(&xbundle->xports);
+    }
+
+    ovs_assert(xbundle->xbridge);
+
+    free(xbundle->name);
+    xbundle->name = xstrdup(name);
+
+    xbundle->vlan_mode = vlan_mode;
+    xbundle->vlan = vlan;
+    xbundle->trunks = trunks;
+    xbundle->use_priority_tags = use_priority_tags;
+    xbundle->floodable = floodable;
+
+    if (xbundle->bond != bond) {
+        bond_unref(xbundle->bond);
+        xbundle->bond = bond_ref(bond);
+    }
+
+    if (xbundle->lacp != lacp) {
+        lacp_unref(xbundle->lacp);
+        xbundle->lacp = lacp_ref(lacp);
+    }
+}
+
+void
+xlate_bundle_remove(struct ofbundle *ofbundle)
+{
+    struct xbundle *xbundle = xbundle_lookup(ofbundle);
+    struct xport *xport, *next;
+
+    if (!xbundle) {
+        return;
+    }
+
+    LIST_FOR_EACH_SAFE (xport, next, bundle_node, &xbundle->xports) {
+        list_remove(&xport->bundle_node);
+        xport->xbundle = NULL;
+    }
+
+    hmap_remove(&xbundles, &xbundle->hmap_node);
+    list_remove(&xbundle->list_node);
+    bond_unref(xbundle->bond);
+    lacp_unref(xbundle->lacp);
+    free(xbundle->name);
+    free(xbundle);
+}
+
+void
+xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
+                 struct ofport_dpif *ofport, ofp_port_t ofp_port,
+                 odp_port_t odp_port, const struct netdev *netdev,
+                 const struct cfm *cfm, const struct bfd *bfd,
+                 struct ofport_dpif *peer, int stp_port_no,
+                 const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
+                 enum ofputil_port_config config, bool is_tunnel,
+                 bool may_enable)
+{
+    struct xport *xport = xport_lookup(ofport);
+    size_t i;
+
+    if (!xport) {
+        xport = xzalloc(sizeof *xport);
+        xport->ofport = ofport;
+        xport->xbridge = xbridge_lookup(ofproto);
+        xport->ofp_port = ofp_port;
+
+        hmap_init(&xport->skb_priorities);
+        hmap_insert(&xports, &xport->hmap_node, hash_pointer(ofport, 0));
+        hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
+                    hash_ofp_port(xport->ofp_port));
+    }
+
+    ovs_assert(xport->ofp_port == ofp_port);
+
+    xport->config = config;
+    xport->stp_port_no = stp_port_no;
+    xport->is_tunnel = is_tunnel;
+    xport->may_enable = may_enable;
+    xport->odp_port = odp_port;
+
+    if (xport->netdev != netdev) {
+        netdev_close(xport->netdev);
+        xport->netdev = netdev_ref(netdev);
+    }
+
+    if (xport->cfm != cfm) {
+        cfm_unref(xport->cfm);
+        xport->cfm = cfm_ref(cfm);
+    }
+
+    if (xport->bfd != bfd) {
+        bfd_unref(xport->bfd);
+        xport->bfd = bfd_ref(bfd);
+    }
+
+    if (xport->peer) {
+        xport->peer->peer = NULL;
+    }
+    xport->peer = xport_lookup(peer);
+    if (xport->peer) {
+        xport->peer->peer = xport;
+    }
+
+    if (xport->xbundle) {
+        list_remove(&xport->bundle_node);
+    }
+    xport->xbundle = xbundle_lookup(ofbundle);
+    if (xport->xbundle) {
+        list_insert(&xport->xbundle->xports, &xport->bundle_node);
+    }
+
+    clear_skb_priorities(xport);
+    for (i = 0; i < n_qdscp; i++) {
+        struct skb_priority_to_dscp *pdscp;
+        uint32_t skb_priority;
+
+        if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
+                                   &skb_priority)) {
+            continue;
+        }
+
+        pdscp = xmalloc(sizeof *pdscp);
+        pdscp->skb_priority = skb_priority;
+        pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
+        hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
+                    hash_int(pdscp->skb_priority, 0));
+    }
+}
+
+void
+xlate_ofport_remove(struct ofport_dpif *ofport)
+{
+    struct xport *xport = xport_lookup(ofport);
+
+    if (!xport) {
+        return;
+    }
+
+    if (xport->peer) {
+        xport->peer->peer = NULL;
+        xport->peer = NULL;
+    }
+
+    if (xport->xbundle) {
+        list_remove(&xport->bundle_node);
+    }
+
+    clear_skb_priorities(xport);
+    hmap_destroy(&xport->skb_priorities);
+
+    hmap_remove(&xports, &xport->hmap_node);
+    hmap_remove(&xport->xbridge->xports, &xport->ofp_node);
+
+    netdev_close(xport->netdev);
+    cfm_unref(xport->cfm);
+    bfd_unref(xport->bfd);
+    free(xport);
+}
+
+/* Given a datapath, packet, and flow metadata ('backer', 'packet', and 'key'
+ * respectively), populates 'flow' with the result of odp_flow_key_to_flow().
+ * Optionally, if nonnull, populates 'fitnessp' with the fitness of 'flow' as
+ * returned by odp_flow_key_to_flow().  Also, optionally populates 'ofproto'
+ * with the ofproto_dpif, and 'odp_in_port' with the datapath in_port, that
+ * 'packet' ingressed.
+ *
+ * If 'ofproto' is nonnull, requires 'flow''s in_port to exist.  Otherwise sets
+ * 'flow''s in_port to OFPP_NONE.
+ *
+ * This function does post-processing on data returned from
+ * odp_flow_key_to_flow() to help make VLAN splinters transparent to the rest
+ * of the upcall processing logic.  In particular, if the extracted in_port is
+ * a VLAN splinter port, it replaces flow->in_port by the "real" port, sets
+ * flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes
+ * a VLAN header onto 'packet' (if it is nonnull).
+ *
+ * Similarly, this function also includes some logic to help with tunnels.  It
+ * may modify 'flow' as necessary to make the tunneling implementation
+ * transparent to the upcall processing logic.
+ *
+ * Returns 0 if successful, ENODEV if the parsed flow has no associated ofport,
+ * or some other positive errno if there are other problems. */
+int
+xlate_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
+              const struct nlattr *key, size_t key_len,
+              struct flow *flow, enum odp_key_fitness *fitnessp,
+              struct ofproto_dpif **ofproto, odp_port_t *odp_in_port)
+{
+    enum odp_key_fitness fitness;
+    const struct xport *xport;
+    int error = ENODEV;
+
+    fitness = odp_flow_key_to_flow(key, key_len, flow);
+    if (fitness == ODP_FIT_ERROR) {
+        error = EINVAL;
+        goto exit;
+    }
+
+    if (odp_in_port) {
+        *odp_in_port = flow->in_port.odp_port;
+    }
+
+    xport = xport_lookup(tnl_port_should_receive(flow)
+            ? tnl_port_receive(flow)
+            : odp_port_to_ofport(backer, flow->in_port.odp_port));
+
+    flow->in_port.ofp_port = xport ? xport->ofp_port : OFPP_NONE;
+    if (!xport) {
+        goto exit;
+    }
+
+    if (vsp_adjust_flow(xport->xbridge->ofproto, flow)) {
+        if (packet) {
+            /* Make the packet resemble the flow, so that it gets sent to
+             * an OpenFlow controller properly, so that it looks correct
+             * for sFlow, and so that flow_extract() will get the correct
+             * vlan_tci if it is called on 'packet'.
+             *
+             * The allocated space inside 'packet' probably also contains
+             * 'key', that is, both 'packet' and 'key' are probably part of
+             * a struct dpif_upcall (see the large comment on that
+             * structure definition), so pushing data on 'packet' is in
+             * general not a good idea since it could overwrite 'key' or
+             * free it as a side effect.  However, it's OK in this special
+             * case because we know that 'packet' is inside a Netlink
+             * attribute: pushing 4 bytes will just overwrite the 4-byte
+             * "struct nlattr", which is fine since we don't need that
+             * header anymore. */
+            eth_push_vlan(packet, flow->vlan_tci);
+        }
+        /* We can't reproduce 'key' from 'flow'. */
+        fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
+    }
+    error = 0;
+
+    if (ofproto) {
+        *ofproto = xport->xbridge->ofproto;
+    }
+
+exit:
+    if (fitnessp) {
+        *fitnessp = fitness;
+    }
+    return error;
+}
+
+static struct xbridge *
+xbridge_lookup(const struct ofproto_dpif *ofproto)
+{
+    struct xbridge *xbridge;
+
+    if (!ofproto) {
+        return NULL;
+    }
+
+    HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
+                             &xbridges) {
+        if (xbridge->ofproto == ofproto) {
+            return xbridge;
+        }
+    }
+    return NULL;
+}
+
+static struct xbundle *
+xbundle_lookup(const struct ofbundle *ofbundle)
+{
+    struct xbundle *xbundle;
+
+    if (!ofbundle) {
+        return NULL;
+    }
+
+    HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
+                             &xbundles) {
+        if (xbundle->ofbundle == ofbundle) {
+            return xbundle;
+        }
+    }
+    return NULL;
+}
+
+static struct xport *
+xport_lookup(const struct ofport_dpif *ofport)
+{
+    struct xport *xport;
+
+    if (!ofport) {
+        return NULL;
+    }
+
+    HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
+                             &xports) {
+        if (xport->ofport == ofport) {
+            return xport;
+        }
+    }
+    return NULL;
+}
+
+static struct stp_port *
+xport_get_stp_port(const struct xport *xport)
+{
+    return xport->xbridge->stp && xport->stp_port_no
+        ? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
+        : NULL;
+}
+
+static enum stp_state
+xport_stp_learn_state(const struct xport *xport)
+{
+    struct stp_port *sp = xport_get_stp_port(xport);
+    return stp_learn_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
+}
+
+static bool
+xport_stp_forward_state(const struct xport *xport)
+{
+    struct stp_port *sp = xport_get_stp_port(xport);
+    return stp_forward_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
+}
+
+/* Returns true if STP should process 'flow'.  Sets fields in 'wc' that
+ * were used to make the determination. */
+static bool
+stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
+{
+    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
+    return eth_addr_equals(flow->dl_dst, eth_addr_stp);
+}
+
+static void
+stp_process_packet(const struct xport *xport, const struct ofpbuf *packet)
+{
+    struct stp_port *sp = xport_get_stp_port(xport);
+    struct ofpbuf payload = *packet;
+    struct eth_header *eth = payload.data;
+
+    /* Sink packets on ports that have STP disabled when the bridge has
+     * STP enabled. */
+    if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
+        return;
+    }
+
+    /* Trim off padding on payload. */
+    if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
+        payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
+    }
+
+    if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
+        stp_received_bpdu(sp, payload.data, payload.size);
+    }
+}
+
+static struct xport *
+get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
+{
+    struct xport *xport;
+
+    HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
+                             &xbridge->xports) {
+        if (xport->ofp_port == ofp_port) {
+            return xport;
+        }
+    }
+    return NULL;
+}
+
+static odp_port_t
+ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
+{
+    const struct xport *xport = get_ofp_port(xbridge, ofp_port);
+    return xport ? xport->odp_port : ODPP_NONE;
+}
+
 static bool
-ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
+xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
 {
     return (bundle->vlan_mode != PORT_VLAN_ACCESS
             && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
 }
 
 static bool
-ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan)
+xbundle_includes_vlan(const struct xbundle *xbundle, uint16_t vlan)
 {
-    return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);
+    return vlan == xbundle->vlan || xbundle_trunks_vlan(xbundle, vlan);
 }
 
-static bool
-vlan_is_mirrored(const struct ofmirror *m, int vlan)
+static mirror_mask_t
+xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
+{
+    return xbundle != &ofpp_none_bundle
+        ? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
+        : 0;
+}
+
+static mirror_mask_t
+xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
+{
+    return xbundle != &ofpp_none_bundle
+        ? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
+        : 0;
+}
+
+static mirror_mask_t
+xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
 {
-    return !m->vlans || bitmap_is_set(m->vlans, vlan);
+    return xbundle != &ofpp_none_bundle
+        ? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
+        : 0;
 }
 
-static struct ofbundle *
-lookup_input_bundle(const struct ofproto_dpif *ofproto, ofp_port_t in_port,
-                    bool warn, struct ofport_dpif **in_ofportp)
+static struct xbundle *
+lookup_input_bundle(const struct xbridge *xbridge, ofp_port_t in_port,
+                    bool warn, struct xport **in_xportp)
 {
-    struct ofport_dpif *ofport;
+    struct xport *xport;
 
     /* Find the port and bundle for the received packet. */
-    ofport = get_ofp_port(ofproto, in_port);
-    if (in_ofportp) {
-        *in_ofportp = ofport;
+    xport = get_ofp_port(xbridge, in_port);
+    if (in_xportp) {
+        *in_xportp = xport;
     }
-    if (ofport && ofport->bundle) {
-        return ofport->bundle;
+    if (xport && xport->xbundle) {
+        return xport->xbundle;
     }
 
     /* Special-case OFPP_NONE, which a controller may use as the ingress
      * port for traffic that it is sourcing. */
     if (in_port == OFPP_NONE) {
+        ofpp_none_bundle.name = "OFPP_NONE";
+        ofpp_none_bundle.vlan_mode = PORT_VLAN_TRUNK;
         return &ofpp_none_bundle;
     }
 
@@ -168,7 +754,7 @@ lookup_input_bundle(const struct ofproto_dpif *ofproto, ofp_port_t in_port,
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
 
         VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
-                     "port %"PRIu16, ofproto->up.name, in_port);
+                     "port %"PRIu16, xbridge->name, in_port);
     }
     return NULL;
 }
@@ -176,55 +762,40 @@ lookup_input_bundle(const struct ofproto_dpif *ofproto, ofp_port_t in_port,
 static void
 add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
 {
-    struct ofproto_dpif *ofproto = ctx->ofproto;
+    const struct xbridge *xbridge = ctx->xbridge;
     mirror_mask_t mirrors;
-    struct ofbundle *in_bundle;
+    struct xbundle *in_xbundle;
     uint16_t vlan;
     uint16_t vid;
-    const struct nlattr *a;
-    size_t left;
 
-    in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port.ofp_port,
-                                    ctx->xin->packet != NULL, NULL);
-    if (!in_bundle) {
+    mirrors = ctx->xout->mirrors;
+    ctx->xout->mirrors = 0;
+
+    in_xbundle = lookup_input_bundle(xbridge, orig_flow->in_port.ofp_port,
+                                     ctx->xin->packet != NULL, NULL);
+    if (!in_xbundle) {
         return;
     }
-    mirrors = in_bundle->src_mirrors;
+    mirrors |= xbundle_mirror_src(xbridge, in_xbundle);
 
     /* Drop frames on bundles reserved for mirroring. */
-    if (in_bundle->mirror_out) {
+    if (xbundle_mirror_out(xbridge, in_xbundle)) {
         if (ctx->xin->packet != NULL) {
             static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
             VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                          "%s, which is reserved exclusively for mirroring",
-                         ctx->ofproto->up.name, in_bundle->name);
+                         ctx->xbridge->name, in_xbundle->name);
         }
+        ofpbuf_clear(&ctx->xout->odp_actions);
         return;
     }
 
     /* Check VLAN. */
     vid = vlan_tci_to_vid(orig_flow->vlan_tci);
-    if (!input_vid_is_valid(vid, in_bundle, ctx->xin->packet != NULL)) {
+    if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
         return;
     }
-    vlan = input_vid_to_vlan(in_bundle, vid);
-
-    /* Look at the output ports to check for destination selections. */
-
-    NL_ATTR_FOR_EACH (a, left, ctx->xout->odp_actions.data,
-                      ctx->xout->odp_actions.size) {
-        enum ovs_action_attr type = nl_attr_type(a);
-        struct ofport_dpif *ofport;
-
-        if (type != OVS_ACTION_ATTR_OUTPUT) {
-            continue;
-        }
-
-        ofport = get_odp_port(ofproto, nl_attr_get_odp_port(a));
-        if (ofport && ofport->bundle) {
-            mirrors |= ofport->bundle->dst_mirrors;
-        }
-    }
+    vlan = input_vid_to_vlan(in_xbundle, vid);
 
     if (!mirrors) {
         return;
@@ -234,31 +805,43 @@ add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
     ctx->xin->flow = *orig_flow;
 
     while (mirrors) {
-        struct ofmirror *m;
-
-        m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
-
-        if (m->vlans) {
+        mirror_mask_t dup_mirrors;
+        struct ofbundle *out;
+        unsigned long *vlans;
+        bool vlan_mirrored;
+        bool has_mirror;
+        int out_vlan;
+
+        has_mirror = mirror_get(xbridge->mbridge, mirror_mask_ffs(mirrors) - 1,
+                                &vlans, &dup_mirrors, &out, &out_vlan);
+        ovs_assert(has_mirror);
+
+        if (vlans) {
             ctx->xout->wc.masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
         }
+        vlan_mirrored = !vlans || bitmap_is_set(vlans, vlan);
+        free(vlans);
 
-        if (!vlan_is_mirrored(m, vlan)) {
+        if (!vlan_mirrored) {
             mirrors = zero_rightmost_1bit(mirrors);
             continue;
         }
 
-        mirrors &= ~m->dup_mirrors;
-        ctx->xout->mirrors |= m->dup_mirrors;
-        if (m->out) {
-            output_normal(ctx, m->out, vlan);
-        } else if (vlan != m->out_vlan
+        mirrors &= ~dup_mirrors;
+        ctx->xout->mirrors |= dup_mirrors;
+        if (out) {
+            struct xbundle *out_xbundle = xbundle_lookup(out);
+            if (out_xbundle) {
+                output_normal(ctx, out_xbundle, vlan);
+            }
+        } else if (vlan != out_vlan
                    && !eth_addr_is_reserved(orig_flow->dl_dst)) {
-            struct ofbundle *bundle;
+            struct xbundle *xbundle;
 
-            HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
-                if (ofbundle_includes_vlan(bundle, m->out_vlan)
-                    && !bundle->mirror_out) {
-                    output_normal(ctx, bundle, m->out_vlan);
+            LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
+                if (xbundle_includes_vlan(xbundle, out_vlan)
+                    && !xbundle_mirror_out(xbridge, xbundle)) {
+                    output_normal(ctx, xbundle, out_vlan);
                 }
             }
         }
@@ -266,17 +849,17 @@ add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
 }
 
 /* Given 'vid', the VID obtained from the 802.1Q header that was received as
- * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle',
+ * part of a packet (specify 0 if there was no 802.1Q header), and 'in_xbundle',
  * the bundle on which the packet was received, returns the VLAN to which the
  * packet belongs.
  *
  * Both 'vid' and the return value are in the range 0...4095. */
 static uint16_t
-input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
+input_vid_to_vlan(const struct xbundle *in_xbundle, uint16_t vid)
 {
-    switch (in_bundle->vlan_mode) {
+    switch (in_xbundle->vlan_mode) {
     case PORT_VLAN_ACCESS:
-        return in_bundle->vlan;
+        return in_xbundle->vlan;
         break;
 
     case PORT_VLAN_TRUNK:
@@ -284,14 +867,14 @@ input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
 
     case PORT_VLAN_NATIVE_UNTAGGED:
     case PORT_VLAN_NATIVE_TAGGED:
-        return vid ? vid : in_bundle->vlan;
+        return vid ? vid : in_xbundle->vlan;
 
     default:
         NOT_REACHED();
     }
 }
 
-/* Checks whether a packet with the given 'vid' may ingress on 'in_bundle'.
+/* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
  * If so, returns true.  Otherwise, returns false and, if 'warn' is true, logs
  * a warning.
  *
@@ -299,23 +882,22 @@ input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
  * part of a packet (specify 0 if there was no 802.1Q header), in the range
  * 0...4095. */
 static bool
-input_vid_is_valid(uint16_t vid, struct ofbundle *in_bundle, bool warn)
+input_vid_is_valid(uint16_t vid, struct xbundle *in_xbundle, bool warn)
 {
     /* Allow any VID on the OFPP_NONE port. */
-    if (in_bundle == &ofpp_none_bundle) {
+    if (in_xbundle == &ofpp_none_bundle) {
         return true;
     }
 
-    switch (in_bundle->vlan_mode) {
+    switch (in_xbundle->vlan_mode) {
     case PORT_VLAN_ACCESS:
         if (vid) {
             if (warn) {
                 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
-                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" tagged "
+                VLOG_WARN_RL(&rl, "dropping VLAN %"PRIu16" tagged "
                              "packet received on port %s configured as VLAN "
-                             "%"PRIu16" access port",
-                             in_bundle->ofproto->up.name, vid,
-                             in_bundle->name, in_bundle->vlan);
+                             "%"PRIu16" access port", vid, in_xbundle->name,
+                             in_xbundle->vlan);
             }
             return false;
         }
@@ -329,14 +911,12 @@ input_vid_is_valid(uint16_t vid, struct ofbundle *in_bundle, bool warn)
         }
         /* Fall through. */
     case PORT_VLAN_TRUNK:
-        if (!ofbundle_includes_vlan(in_bundle, vid)) {
+        if (!xbundle_includes_vlan(in_xbundle, vid)) {
             if (warn) {
                 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
-                VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" packet "
+                VLOG_WARN_RL(&rl, "dropping VLAN %"PRIu16" packet "
                              "received on port %s not configured for trunking "
-                             "VLAN %"PRIu16,
-                             in_bundle->ofproto->up.name, vid,
-                             in_bundle->name, vid);
+                             "VLAN %"PRIu16, vid, in_xbundle->name, vid);
             }
             return false;
         }
@@ -349,16 +929,16 @@ input_vid_is_valid(uint16_t vid, struct ofbundle *in_bundle, bool warn)
 }
 
 /* Given 'vlan', the VLAN that a packet belongs to, and
- * 'out_bundle', a bundle on which the packet is to be output, returns the VID
+ * 'out_xbundle', a bundle on which the packet is to be output, returns the VID
  * that should be included in the 802.1Q header.  (If the return value is 0,
  * then the 802.1Q header should only be included in the packet if there is a
  * nonzero PCP.)
  *
  * Both 'vlan' and the return value are in the range 0...4095. */
 static uint16_t
-output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
+output_vlan_to_vid(const struct xbundle *out_xbundle, uint16_t vlan)
 {
-    switch (out_bundle->vlan_mode) {
+    switch (out_xbundle->vlan_mode) {
     case PORT_VLAN_ACCESS:
         return 0;
 
@@ -367,7 +947,7 @@ output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
         return vlan;
 
     case PORT_VLAN_NATIVE_UNTAGGED:
-        return vlan == out_bundle->vlan ? 0 : vlan;
+        return vlan == out_xbundle->vlan ? 0 : vlan;
 
     default:
         NOT_REACHED();
@@ -375,21 +955,29 @@ output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
 }
 
 static void
-output_normal(struct xlate_ctx *ctx, const struct ofbundle *out_bundle,
+output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
               uint16_t vlan)
 {
     ovs_be16 *flow_tci = &ctx->xin->flow.vlan_tci;
-    struct ofport_dpif *port;
     uint16_t vid;
     ovs_be16 tci, old_tci;
+    struct xport *xport;
 
-    vid = output_vlan_to_vid(out_bundle, vlan);
-    if (!out_bundle->bond) {
-        port = ofbundle_get_a_port(out_bundle);
+    vid = output_vlan_to_vid(out_xbundle, vlan);
+    if (list_is_empty(&out_xbundle->xports)) {
+        /* Partially configured bundle with no slaves.  Drop the packet. */
+        return;
+    } else if (!out_xbundle->bond) {
+        xport = CONTAINER_OF(list_front(&out_xbundle->xports), struct xport,
+                             bundle_node);
     } else {
-        port = bond_choose_output_slave(out_bundle->bond, &ctx->xin->flow,
-                                        &ctx->xout->wc, vid, &ctx->xout->tags);
-        if (!port) {
+        struct ofport_dpif *ofport;
+
+        ofport = bond_choose_output_slave(out_xbundle->bond, &ctx->xin->flow,
+                                          &ctx->xout->wc, vid);
+        xport = xport_lookup(ofport);
+
+        if (!xport) {
             /* No slaves enabled, so drop packet. */
             return;
         }
@@ -397,7 +985,7 @@ output_normal(struct xlate_ctx *ctx, const struct ofbundle *out_bundle,
 
     old_tci = *flow_tci;
     tci = htons(vid);
-    if (tci || out_bundle->use_priority_tags) {
+    if (tci || out_xbundle->use_priority_tags) {
         tci |= *flow_tci & htons(VLAN_PCP_MASK);
         if (tci) {
             tci |= htons(VLAN_CFI);
@@ -405,7 +993,7 @@ output_normal(struct xlate_ctx *ctx, const struct ofbundle *out_bundle,
     }
     *flow_tci = tci;
 
-    compose_output_action(ctx, port->up.ofp_port);
+    compose_output_action(ctx, xport->ofp_port);
     *flow_tci = old_tci;
 }
 
@@ -438,51 +1026,54 @@ is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
 }
 
 static void
-update_learning_table(struct ofproto_dpif *ofproto,
+update_learning_table(const struct xbridge *xbridge,
                       const struct flow *flow, struct flow_wildcards *wc,
-                      int vlan, struct ofbundle *in_bundle)
+                      int vlan, struct xbundle *in_xbundle)
 {
     struct mac_entry *mac;
 
     /* Don't learn the OFPP_NONE port. */
-    if (in_bundle == &ofpp_none_bundle) {
+    if (in_xbundle == &ofpp_none_bundle) {
         return;
     }
 
-    if (!mac_learning_may_learn(ofproto->ml, flow->dl_src, vlan)) {
-        return;
+    ovs_rwlock_wrlock(&xbridge->ml->rwlock);
+    if (!mac_learning_may_learn(xbridge->ml, flow->dl_src, vlan)) {
+        goto out;
     }
 
-    mac = mac_learning_insert(ofproto->ml, flow->dl_src, vlan);
+    mac = mac_learning_insert(xbridge->ml, flow->dl_src, vlan);
     if (is_gratuitous_arp(flow, wc)) {
         /* We don't want to learn from gratuitous ARP packets that are
          * reflected back over bond slaves so we lock the learning table. */
-        if (!in_bundle->bond) {
+        if (!in_xbundle->bond) {
             mac_entry_set_grat_arp_lock(mac);
         } else if (mac_entry_is_grat_arp_locked(mac)) {
-            return;
+            goto out;
         }
     }
 
-    if (mac_entry_is_new(mac) || mac->port.p != in_bundle) {
+    if (mac->port.p != in_xbundle->ofbundle) {
         /* The log messages here could actually be useful in debugging,
          * so keep the rate limit relatively high. */
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
         VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
                     "on port %s in VLAN %d",
-                    ofproto->up.name, ETH_ADDR_ARGS(flow->dl_src),
-                    in_bundle->name, vlan);
+                    xbridge->name, ETH_ADDR_ARGS(flow->dl_src),
+                    in_xbundle->name, vlan);
 
-        mac->port.p = in_bundle;
-        mac_learning_changed(ofproto->ml, mac);
+        mac->port.p = in_xbundle->ofbundle;
+        mac_learning_changed(xbridge->ml);
     }
+out:
+    ovs_rwlock_unlock(&xbridge->ml->rwlock);
 }
 
-/* Determines whether packets in 'flow' within 'ofproto' should be forwarded or
+/* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
  * dropped.  Returns true if they may be forwarded, false if they should be
  * dropped.
  *
- * 'in_port' must be the ofport_dpif that corresponds to flow->in_port.
+ * 'in_port' must be the xport that corresponds to flow->in_port.
  * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
  *
  * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
@@ -493,25 +1084,25 @@ update_learning_table(struct ofproto_dpif *ofproto,
  * so in one special case.
  */
 static bool
-is_admissible(struct xlate_ctx *ctx, struct ofport_dpif *in_port,
+is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
               uint16_t vlan)
 {
-    struct ofproto_dpif *ofproto = ctx->ofproto;
+    struct xbundle *in_xbundle = in_port->xbundle;
+    const struct xbridge *xbridge = ctx->xbridge;
     struct flow *flow = &ctx->xin->flow;
-    struct ofbundle *in_bundle = in_port->bundle;
 
     /* Drop frames for reserved multicast addresses
      * only if forward_bpdu option is absent. */
-    if (!ofproto->up.forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
+    if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
         xlate_report(ctx, "packet has reserved destination MAC, dropping");
         return false;
     }
 
-    if (in_bundle->bond) {
+    if (in_xbundle->bond) {
         struct mac_entry *mac;
 
-        switch (bond_check_admissibility(in_bundle->bond, in_port,
-                                         flow->dl_dst, &ctx->xout->tags)) {
+        switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
+                                         flow->dl_dst)) {
         case BV_ACCEPT:
             break;
 
@@ -520,14 +1111,17 @@ is_admissible(struct xlate_ctx *ctx, struct ofport_dpif *in_port,
             return false;
 
         case BV_DROP_IF_MOVED:
-            mac = mac_learning_lookup(ofproto->ml, flow->dl_src, vlan, NULL);
-            if (mac && mac->port.p != in_bundle &&
+            ovs_rwlock_rdlock(&xbridge->ml->rwlock);
+            mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
+            if (mac && mac->port.p != in_xbundle->ofbundle &&
                 (!is_gratuitous_arp(flow, &ctx->xout->wc)
                  || mac_entry_is_grat_arp_locked(mac))) {
+                ovs_rwlock_unlock(&xbridge->ml->rwlock);
                 xlate_report(ctx, "SLB bond thinks this packet looped back, "
                             "dropping");
                 return false;
             }
+            ovs_rwlock_unlock(&xbridge->ml->rwlock);
             break;
         }
     }
@@ -540,22 +1134,21 @@ xlate_normal(struct xlate_ctx *ctx)
 {
     struct flow_wildcards *wc = &ctx->xout->wc;
     struct flow *flow = &ctx->xin->flow;
-    struct ofport_dpif *in_port;
-    struct ofbundle *in_bundle;
+    struct xbundle *in_xbundle;
+    struct xport *in_port;
     struct mac_entry *mac;
     uint16_t vlan;
     uint16_t vid;
 
     ctx->xout->has_normal = true;
 
-    /* Check the dl_type, since we may check for gratuituous ARP. */
     memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
     memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
     wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
 
-    in_bundle = lookup_input_bundle(ctx->ofproto, flow->in_port.ofp_port,
-                                    ctx->xin->packet != NULL, &in_port);
-    if (!in_bundle) {
+    in_xbundle = lookup_input_bundle(ctx->xbridge, flow->in_port.ofp_port,
+                                     ctx->xin->packet != NULL, &in_port);
+    if (!in_xbundle) {
         xlate_report(ctx, "no input bundle, dropping");
         return;
     }
@@ -567,19 +1160,19 @@ xlate_normal(struct xlate_ctx *ctx)
             static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
             VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
                          "VLAN tag received on port %s",
-                         ctx->ofproto->up.name, in_bundle->name);
+                         ctx->xbridge->name, in_xbundle->name);
         }
         xlate_report(ctx, "partial VLAN tag, dropping");
         return;
     }
 
     /* Drop frames on bundles reserved for mirroring. */
-    if (in_bundle->mirror_out) {
+    if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
         if (ctx->xin->packet != NULL) {
             static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
             VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                          "%s, which is reserved exclusively for mirroring",
-                         ctx->ofproto->up.name, in_bundle->name);
+                         ctx->xbridge->name, in_xbundle->name);
         }
         xlate_report(ctx, "input port is mirror output port, dropping");
         return;
@@ -587,11 +1180,11 @@ xlate_normal(struct xlate_ctx *ctx)
 
     /* Check VLAN. */
     vid = vlan_tci_to_vid(flow->vlan_tci);
-    if (!input_vid_is_valid(vid, in_bundle, ctx->xin->packet != NULL)) {
+    if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
         xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
         return;
     }
-    vlan = input_vid_to_vlan(in_bundle, vid);
+    vlan = input_vid_to_vlan(in_xbundle, vid);
 
     /* Check other admissibility requirements. */
     if (in_port && !is_admissible(ctx, in_port, vlan)) {
@@ -600,33 +1193,37 @@ xlate_normal(struct xlate_ctx *ctx)
 
     /* Learn source MAC. */
     if (ctx->xin->may_learn) {
-        update_learning_table(ctx->ofproto, flow, wc, vlan, in_bundle);
+        update_learning_table(ctx->xbridge, flow, wc, vlan, in_xbundle);
     }
 
     /* Determine output bundle. */
-    mac = mac_learning_lookup(ctx->ofproto->ml, flow->dl_dst, vlan,
-                              &ctx->xout->tags);
+    ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
+    mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
     if (mac) {
-        if (mac->port.p != in_bundle) {
+        struct xbundle *mac_xbundle = xbundle_lookup(mac->port.p);
+        if (mac_xbundle && mac_xbundle != in_xbundle) {
             xlate_report(ctx, "forwarding to learned port");
-            output_normal(ctx, mac->port.p, vlan);
+            output_normal(ctx, mac_xbundle, vlan);
+        } else if (!mac_xbundle) {
+            xlate_report(ctx, "learned port is unknown, dropping");
         } else {
             xlate_report(ctx, "learned port is input port, dropping");
         }
     } else {
-        struct ofbundle *bundle;
+        struct xbundle *xbundle;
 
         xlate_report(ctx, "no learned MAC for destination, flooding");
-        HMAP_FOR_EACH (bundle, hmap_node, &ctx->ofproto->bundles) {
-            if (bundle != in_bundle
-                && ofbundle_includes_vlan(bundle, vlan)
-                && bundle->floodable
-                && !bundle->mirror_out) {
-                output_normal(ctx, bundle, vlan);
+        LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
+            if (xbundle != in_xbundle
+                && xbundle_includes_vlan(xbundle, vlan)
+                && xbundle->floodable
+                && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
+                output_normal(ctx, xbundle, vlan);
             }
         }
         ctx->xout->nf_output_iface = NF_OUT_FLOOD;
     }
+    ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);
 }
 
 /* Compose SAMPLE action for sFlow or IPFIX.  The given probability is
@@ -634,7 +1231,7 @@ xlate_normal(struct xlate_ctx *ctx)
  * cookie is passed back in the callback for each sampled packet.
  */
 static size_t
-compose_sample_action(const struct ofproto_dpif *ofproto,
+compose_sample_action(const struct xbridge *xbridge,
                       struct ofpbuf *odp_actions,
                       const struct flow *flow,
                       const uint32_t probability,
@@ -642,15 +1239,19 @@ compose_sample_action(const struct ofproto_dpif *ofproto,
                       const size_t cookie_size)
 {
     size_t sample_offset, actions_offset;
+    odp_port_t odp_port;
     int cookie_offset;
+    uint32_t pid;
 
     sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);
 
     nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);
 
     actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
-    cookie_offset = put_userspace_action(ofproto, odp_actions, flow, cookie,
-                                         cookie_size);
+
+    odp_port = ofp_port_to_odp_port(xbridge, flow->in_port.ofp_port);
+    pid = dpif_port_get_pid(xbridge->dpif, odp_port);
+    cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size, odp_actions);
 
     nl_msg_end_nested(odp_actions, actions_offset);
     nl_msg_end_nested(odp_actions, sample_offset);
@@ -658,9 +1259,9 @@ compose_sample_action(const struct ofproto_dpif *ofproto,
 }
 
 static void
-compose_sflow_cookie(const struct ofproto_dpif *ofproto,
-                     ovs_be16 vlan_tci, odp_port_t odp_port,
-                     unsigned int n_outputs, union user_action_cookie *cookie)
+compose_sflow_cookie(const struct xbridge *xbridge, ovs_be16 vlan_tci,
+                     odp_port_t odp_port, unsigned int n_outputs,
+                     union user_action_cookie *cookie)
 {
     int ifindex;
 
@@ -676,7 +1277,7 @@ compose_sflow_cookie(const struct ofproto_dpif *ofproto,
         break;
 
     case 1:
-        ifindex = dpif_sflow_odp_port_to_ifindex(ofproto->sflow, odp_port);
+        ifindex = dpif_sflow_odp_port_to_ifindex(xbridge->sflow, odp_port);
         if (ifindex) {
             cookie->sflow.output = ifindex;
             break;
@@ -691,7 +1292,7 @@ compose_sflow_cookie(const struct ofproto_dpif *ofproto,
 
 /* Compose SAMPLE action for sFlow bridge sampling. */
 static size_t
-compose_sflow_action(const struct ofproto_dpif *ofproto,
+compose_sflow_action(const struct xbridge *xbridge,
                      struct ofpbuf *odp_actions,
                      const struct flow *flow,
                      odp_port_t odp_port)
@@ -699,15 +1300,15 @@ compose_sflow_action(const struct ofproto_dpif *ofproto,
     uint32_t probability;
     union user_action_cookie cookie;
 
-    if (!ofproto->sflow || flow->in_port.ofp_port == OFPP_NONE) {
+    if (!xbridge->sflow || flow->in_port.ofp_port == OFPP_NONE) {
         return 0;
     }
 
-    probability = dpif_sflow_get_probability(ofproto->sflow);
-    compose_sflow_cookie(ofproto, htons(0), odp_port,
+    probability = dpif_sflow_get_probability(xbridge->sflow);
+    compose_sflow_cookie(xbridge, htons(0), odp_port,
                          odp_port == ODPP_NONE ? 0 : 1, &cookie);
 
-    return compose_sample_action(ofproto, odp_actions, flow,  probability,
+    return compose_sample_action(xbridge, odp_actions, flow,  probability,
                                  &cookie, sizeof cookie.sflow);
 }
 
@@ -731,21 +1332,21 @@ compose_ipfix_cookie(union user_action_cookie *cookie)
 
 /* Compose SAMPLE action for IPFIX bridge sampling. */
 static void
-compose_ipfix_action(const struct ofproto_dpif *ofproto,
+compose_ipfix_action(const struct xbridge *xbridge,
                      struct ofpbuf *odp_actions,
                      const struct flow *flow)
 {
     uint32_t probability;
     union user_action_cookie cookie;
 
-    if (!ofproto->ipfix || flow->in_port.ofp_port == OFPP_NONE) {
+    if (!xbridge->ipfix || flow->in_port.ofp_port == OFPP_NONE) {
         return;
     }
 
-    probability = dpif_ipfix_get_bridge_exporter_probability(ofproto->ipfix);
+    probability = dpif_ipfix_get_bridge_exporter_probability(xbridge->ipfix);
     compose_ipfix_cookie(&cookie);
 
-    compose_sample_action(ofproto, odp_actions, flow,  probability,
+    compose_sample_action(xbridge, odp_actions, flow,  probability,
                           &cookie, sizeof cookie.ipfix);
 }
 
@@ -755,7 +1356,7 @@ compose_ipfix_action(const struct ofproto_dpif *ofproto,
 static void
 add_sflow_action(struct xlate_ctx *ctx)
 {
-    ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto,
+    ctx->user_cookie_offset = compose_sflow_action(ctx->xbridge,
                                                    &ctx->xout->odp_actions,
                                                    &ctx->xin->flow, ODPP_NONE);
     ctx->sflow_odp_port = 0;
@@ -767,7 +1368,7 @@ add_sflow_action(struct xlate_ctx *ctx)
 static void
 add_ipfix_action(struct xlate_ctx *ctx)
 {
-    compose_ipfix_action(ctx->ofproto, &ctx->xout->odp_actions,
+    compose_ipfix_action(ctx->xbridge, &ctx->xout->odp_actions,
                          &ctx->xin->flow);
 }
 
@@ -788,38 +1389,38 @@ fix_sflow_action(struct xlate_ctx *ctx)
                        sizeof cookie->sflow);
     ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
 
-    compose_sflow_cookie(ctx->ofproto, base->vlan_tci,
+    compose_sflow_cookie(ctx->xbridge, base->vlan_tci,
                          ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
 }
 
 static enum slow_path_reason
 process_special(struct xlate_ctx *ctx, const struct flow *flow,
-                const struct ofport_dpif *ofport, const struct ofpbuf *packet)
+                const struct xport *xport, const struct ofpbuf *packet)
 {
-    struct ofproto_dpif *ofproto = ctx->ofproto;
     struct flow_wildcards *wc = &ctx->xout->wc;
+    const struct xbridge *xbridge = ctx->xbridge;
 
-    if (!ofport) {
+    if (!xport) {
         return 0;
-    } else if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow, wc)) {
+    } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
         if (packet) {
-            cfm_process_heartbeat(ofport->cfm, packet);
+            cfm_process_heartbeat(xport->cfm, packet);
         }
         return SLOW_CFM;
-    } else if (ofport->bfd && bfd_should_process_flow(flow, wc)) {
+    } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
         if (packet) {
-            bfd_process_packet(ofport->bfd, flow, packet);
+            bfd_process_packet(xport->bfd, flow, packet);
         }
         return SLOW_BFD;
-    } else if (ofport->bundle && ofport->bundle->lacp
+    } else if (xport->xbundle && xport->xbundle->lacp
                && flow->dl_type == htons(ETH_TYPE_LACP)) {
         if (packet) {
-            lacp_process_packet(ofport->bundle->lacp, ofport, packet);
+            lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
         }
         return SLOW_LACP;
-    } else if (ofproto->stp && stp_should_process_flow(flow, wc)) {
+    } else if (xbridge->stp && stp_should_process_flow(flow, wc)) {
         if (packet) {
-            stp_process_packet(ofport, packet);
+            stp_process_packet(xport, packet);
         }
         return SLOW_STP;
     } else {
@@ -831,7 +1432,7 @@ static void
 compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                         bool check_stp)
 {
-    const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
+    const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
     struct flow_wildcards *wc = &ctx->xout->wc;
     struct flow *flow = &ctx->xin->flow;
     ovs_be16 flow_vlan_tci;
@@ -844,24 +1445,29 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
      * before traversing a patch port. */
     BUILD_ASSERT_DECL(FLOW_WC_SEQ == 20);
 
-    if (!ofport) {
+    if (!xport) {
         xlate_report(ctx, "Nonexistent output port");
         return;
-    } else if (ofport->up.pp.config & OFPUTIL_PC_NO_FWD) {
+    } else if (xport->config & OFPUTIL_PC_NO_FWD) {
         xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
         return;
-    } else if (check_stp && !stp_forward_in_state(ofport->stp_state)) {
+    } else if (check_stp && !xport_stp_forward_state(xport)) {
         xlate_report(ctx, "STP not in forwarding state, skipping output");
         return;
     }
 
-    if (ofport->peer) {
-        struct ofport_dpif *peer = ofport->peer;
+    if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
+        ctx->xout->mirrors |= xbundle_mirror_dst(xport->xbundle->xbridge,
+                                                 xport->xbundle);
+    }
+
+    if (xport->peer) {
+        const struct xport *peer = xport->peer;
         struct flow old_flow = ctx->xin->flow;
         enum slow_path_reason special;
 
-        ctx->ofproto = ofproto_dpif_cast(peer->up.ofproto);
-        flow->in_port.ofp_port = peer->up.ofp_port;
+        ctx->xbridge = peer->xbridge;
+        flow->in_port.ofp_port = peer->ofp_port;
         flow->metadata = htonll(0);
         memset(&flow->tunnel, 0, sizeof flow->tunnel);
         memset(flow->regs, 0, sizeof flow->regs);
@@ -871,25 +1477,27 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
         if (special) {
             ctx->xout->slow = special;
         } else if (may_receive(peer, ctx)) {
-            if (stp_forward_in_state(peer->stp_state)) {
+            if (xport_stp_forward_state(peer)) {
                 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
             } else {
                 /* Forwarding is disabled by STP.  Let OFPP_NORMAL and the
                  * learning action look at the packet, then drop it. */
                 struct flow old_base_flow = ctx->base_flow;
                 size_t old_size = ctx->xout->odp_actions.size;
+                mirror_mask_t old_mirrors = ctx->xout->mirrors;
                 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
+                ctx->xout->mirrors = old_mirrors;
                 ctx->base_flow = old_base_flow;
                 ctx->xout->odp_actions.size = old_size;
             }
         }
 
         ctx->xin->flow = old_flow;
-        ctx->ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+        ctx->xbridge = xport->xbridge;
 
         if (ctx->xin->resubmit_stats) {
-            netdev_vport_inc_tx(ofport->up.netdev, ctx->xin->resubmit_stats);
-            netdev_vport_inc_rx(peer->up.netdev, ctx->xin->resubmit_stats);
+            netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
+            netdev_vport_inc_rx(peer->netdev, ctx->xin->resubmit_stats);
         }
 
         return;
@@ -899,19 +1507,19 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
     flow_skb_mark = flow->skb_mark;
     flow_nw_tos = flow->nw_tos;
 
-    if (ofproto_dpif_dscp_from_priority(ofport, flow->skb_priority, &dscp)) {
+    if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
         wc->masks.nw_tos |= IP_ECN_MASK;
         flow->nw_tos &= ~IP_DSCP_MASK;
         flow->nw_tos |= dscp;
     }
 
-    if (ofport->tnl_port) {
+    if (xport->is_tunnel) {
          /* Save tunnel metadata so that changes made due to
           * the Logical (tunnel) Port are not visible for any further
           * matches, while explicit set actions on tunnel metadata are.
           */
         struct flow_tnl flow_tnl = flow->tunnel;
-        odp_port = tnl_port_send(ofport->tnl_port, flow, &ctx->xout->wc);
+        odp_port = tnl_port_send(xport->ofport, flow, &ctx->xout->wc);
         if (odp_port == ODPP_NONE) {
             xlate_report(ctx, "Tunneling decided against output");
             goto out; /* restore flow_nw_tos */
@@ -921,7 +1529,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
             goto out; /* restore flow_nw_tos */
         }
         if (ctx->xin->resubmit_stats) {
-            netdev_vport_inc_tx(ofport->up.netdev, ctx->xin->resubmit_stats);
+            netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
         }
         out_port = odp_port;
         commit_odp_tunnel_action(flow, &ctx->base_flow,
@@ -930,16 +1538,16 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
     } else {
         ofp_port_t vlandev_port;
 
-        odp_port = ofport->odp_port;
-        if (!hmap_is_empty(&ctx->ofproto->realdev_vid_map)) {
+        odp_port = xport->odp_port;
+        if (ofproto_has_vlan_splinters(ctx->xbridge->ofproto)) {
             wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
         }
-        vlandev_port = vsp_realdev_to_vlandev(ctx->ofproto, ofp_port,
+        vlandev_port = vsp_realdev_to_vlandev(ctx->xbridge->ofproto, ofp_port,
                                               flow->vlan_tci);
         if (vlandev_port == ofp_port) {
             out_port = odp_port;
         } else {
-            out_port = ofp_port_to_odp_port(ctx->ofproto, vlandev_port);
+            out_port = ofp_port_to_odp_port(ctx->xbridge, vlandev_port);
             flow->vlan_tci = htons(0);
         }
         flow->skb_mark &= ~IPSEC_MARK;
@@ -969,24 +1577,6 @@ compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port)
     compose_output_action__(ctx, ofp_port, true);
 }
 
-static void
-tag_the_flow(struct xlate_ctx *ctx, struct rule_dpif *rule)
-{
-    struct ofproto_dpif *ofproto = ctx->ofproto;
-    uint8_t table_id = ctx->table_id;
-
-    if (table_id > 0 && table_id < N_TABLES) {
-        struct table_dpif *table = &ofproto->tables[table_id];
-        if (table->other_table) {
-            ctx->xout->tags |= (rule && rule->tag
-                                ? rule->tag
-                                : rule_calculate_tag(&ctx->xin->flow,
-                                                     &table->other_table->mask,
-                                                     table->basis));
-        }
-    }
-}
-
 /* Common rule processing in one place to avoid duplicating code. */
 static struct rule_dpif *
 ctx_rule_hooks(struct xlate_ctx *ctx, struct rule_dpif *rule,
@@ -1003,7 +1593,7 @@ ctx_rule_hooks(struct xlate_ctx *ctx, struct rule_dpif *rule,
          * OFPTC_TABLE_MISS_DROP
          * When OF1.0, OFPTC_TABLE_MISS_CONTINUE is used. What to do?
          */
-        rule = rule_dpif_miss_rule(ctx->ofproto, &ctx->xin->flow);
+        rule = rule_dpif_miss_rule(ctx->xbridge->ofproto, &ctx->xin->flow);
     }
     if (rule && ctx->xin->resubmit_stats) {
         rule_credit_stats(rule, ctx->xin->resubmit_stats);
@@ -1024,10 +1614,9 @@ xlate_table_action(struct xlate_ctx *ctx,
 
         /* Look up a flow with 'in_port' as the input port. */
         ctx->xin->flow.in_port.ofp_port = in_port;
-        rule = rule_dpif_lookup_in_table(ctx->ofproto, &ctx->xin->flow,
-                                         &ctx->xout->wc, table_id);
-
-        tag_the_flow(ctx, rule);
+        rule = rule_dpif_lookup_in_table(ctx->xbridge->ofproto,
+                                         &ctx->xin->flow, &ctx->xout->wc,
+                                         table_id);
 
         /* Restore the original input port.  Otherwise OFPP_NORMAL and
          * OFPP_IN_PORT will have surprising behavior. */
@@ -1051,7 +1640,6 @@ xlate_table_action(struct xlate_ctx *ctx,
 
         VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
                     MAX_RESUBMIT_RECURSION);
-        ctx->max_resubmit_trigger = true;
     }
 }
 
@@ -1078,19 +1666,17 @@ xlate_ofpact_resubmit(struct xlate_ctx *ctx,
 static void
 flood_packets(struct xlate_ctx *ctx, bool all)
 {
-    struct ofport_dpif *ofport;
+    const struct xport *xport;
 
-    HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
-        ofp_port_t ofp_port = ofport->up.ofp_port;
-
-        if (ofp_port == ctx->xin->flow.in_port.ofp_port) {
+    HMAP_FOR_EACH (xport, ofp_node, &ctx->xbridge->xports) {
+        if (xport->ofp_port == ctx->xin->flow.in_port.ofp_port) {
             continue;
         }
 
         if (all) {
-            compose_output_action__(ctx, ofp_port, false);
-        } else if (!(ofport->up.pp.config & OFPUTIL_PC_NO_FLOOD)) {
-            compose_output_action(ctx, ofp_port);
+            compose_output_action__(ctx, xport->ofp_port, false);
+        } else if (!(xport->config & OFPUTIL_PC_NO_FLOOD)) {
+            compose_output_action(ctx, xport->ofp_port);
         }
     }
 
@@ -1102,7 +1688,7 @@ execute_controller_action(struct xlate_ctx *ctx, int len,
                           enum ofp_packet_in_reason reason,
                           uint16_t controller_id)
 {
-    struct ofputil_packet_in pin;
+    struct ofputil_packet_in *pin;
     struct ofpbuf *packet;
     struct flow key;
 
@@ -1124,17 +1710,18 @@ execute_controller_action(struct xlate_ctx *ctx, int len,
     odp_execute_actions(NULL, packet, &key, ctx->xout->odp_actions.data,
                         ctx->xout->odp_actions.size, NULL, NULL);
 
-    pin.packet = packet->data;
-    pin.packet_len = packet->size;
-    pin.reason = reason;
-    pin.controller_id = controller_id;
-    pin.table_id = ctx->table_id;
-    pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;
+    pin = xmalloc(sizeof *pin);
+    pin->packet_len = packet->size;
+    pin->packet = ofpbuf_steal_data(packet);
+    pin->reason = reason;
+    pin->controller_id = controller_id;
+    pin->table_id = ctx->table_id;
+    pin->cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;
 
-    pin.send_len = len;
-    flow_get_metadata(&ctx->xin->flow, &pin.fmd);
+    pin->send_len = len;
+    flow_get_metadata(&ctx->xin->flow, &pin->fmd);
 
-    connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin);
+    ofproto_dpif_send_packet_in(ctx->xbridge->ofproto, pin);
     ofpbuf_delete(packet);
 }
 
@@ -1225,6 +1812,7 @@ compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
         return true;
     }
 
+    ctx->xout->wc.masks.mpls_lse |= htonl(MPLS_TTL_MASK);
     set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl);
     return false;
 }
@@ -1329,7 +1917,7 @@ xlate_enqueue_action(struct xlate_ctx *ctx,
     int error;
 
     /* Translate queue to priority. */
-    error = ofproto_dpif_queue_to_priority(ctx->ofproto, queue_id, &priority);
+    error = dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &priority);
     if (error) {
         /* Fall back to ordinary output action. */
         xlate_output_action(ctx, enqueue->port, 0, false);
@@ -1362,8 +1950,7 @@ xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
 {
     uint32_t skb_priority;
 
-    if (!ofproto_dpif_queue_to_priority(ctx->ofproto, queue_id,
-                                        &skb_priority)) {
+    if (!dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &skb_priority)) {
         ctx->xin->flow.skb_priority = skb_priority;
     } else {
         /* Couldn't translate queue to a priority.  Nothing to do.  A warning
@@ -1372,10 +1959,10 @@ xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
 }
 
 static bool
-slave_enabled_cb(ofp_port_t ofp_port, void *ofproto_)
+slave_enabled_cb(ofp_port_t ofp_port, void *xbridge_)
 {
-    struct ofproto_dpif *ofproto = ofproto_;
-    struct ofport_dpif *port;
+    const struct xbridge *xbridge = xbridge_;
+    struct xport *port;
 
     switch (ofp_port) {
     case OFPP_IN_PORT:
@@ -1388,7 +1975,7 @@ slave_enabled_cb(ofp_port_t ofp_port, void *ofproto_)
     case OFPP_CONTROLLER: /* Not supported by the bundle action. */
         return false;
     default:
-        port = get_ofp_port(ofproto, ofp_port);
+        port = get_ofp_port(xbridge, ofp_port);
         return port ? port->may_enable : false;
     }
 }
@@ -1400,9 +1987,11 @@ xlate_bundle_action(struct xlate_ctx *ctx,
     ofp_port_t port;
 
     port = bundle_execute(bundle, &ctx->xin->flow, &ctx->xout->wc,
-                          slave_enabled_cb, ctx->ofproto);
+                          slave_enabled_cb,
+                          CONST_CAST(struct xbridge *, ctx->xbridge));
     if (bundle->dst.field) {
-        nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow);
+        nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow,
+                     &ctx->xout->wc);
     } else {
         xlate_output_action(ctx, port, 0, false);
     }
@@ -1412,11 +2001,8 @@ static void
 xlate_learn_action(struct xlate_ctx *ctx,
                    const struct ofpact_learn *learn)
 {
-    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
-    struct ofputil_flow_mod fm;
-    uint64_t ofpacts_stub[1024 / 8];
+    struct ofputil_flow_mod *fm;
     struct ofpbuf ofpacts;
-    int error;
 
     ctx->xout->has_learn = true;
 
@@ -1426,16 +2012,11 @@ xlate_learn_action(struct xlate_ctx *ctx,
         return;
     }
 
-    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
-    learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
+    fm = xmalloc(sizeof *fm);
+    ofpbuf_init(&ofpacts, 0);
+    learn_execute(learn, &ctx->xin->flow, fm, &ofpacts);
 
-    error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
-    if (error && !VLOG_DROP_WARN(&rl)) {
-        VLOG_WARN("learning action failed to modify flow table (%s)",
-                  ofperr_get_name(error));
-    }
-
-    ofpbuf_uninit(&ofpacts);
+    ofproto_dpif_flow_mod(ctx->xbridge->ofproto, fm);
 }
 
 /* Reduces '*timeout' to no more than 'max'.  A value of zero in either case
@@ -1455,8 +2036,14 @@ xlate_fin_timeout(struct xlate_ctx *ctx,
     if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
         struct rule_dpif *rule = ctx->rule;
 
-        reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
-        reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout);
+        if (list_is_empty(&rule->up.expirable)) {
+            list_insert(&rule->up.ofproto->expirable, &rule->up.expirable);
+        }
+
+        ovs_mutex_lock(&rule->up.timeout_mutex);
+        reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
+        reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout);
+        ovs_mutex_unlock(&rule->up.timeout_mutex);
     }
 }
 
@@ -1474,17 +2061,16 @@ xlate_sample_action(struct xlate_ctx *ctx,
 
   compose_flow_sample_cookie(os->probability, os->collector_set_id,
                              os->obs_domain_id, os->obs_point_id, &cookie);
-  compose_sample_action(ctx->ofproto, &ctx->xout->odp_actions, &ctx->xin->flow,
+  compose_sample_action(ctx->xbridge, &ctx->xout->odp_actions, &ctx->xin->flow,
                         probability, &cookie, sizeof cookie.flow_sample);
 }
 
 static bool
-may_receive(const struct ofport_dpif *port, struct xlate_ctx *ctx)
+may_receive(const struct xport *xport, struct xlate_ctx *ctx)
 {
-    if (port->up.pp.config & (eth_addr_equals(ctx->xin->flow.dl_dst,
-                                              eth_addr_stp)
-                              ? OFPUTIL_PC_NO_RECV_STP
-                              : OFPUTIL_PC_NO_RECV)) {
+    if (xport->config & (eth_addr_equals(ctx->xin->flow.dl_dst, eth_addr_stp)
+                         ? OFPUTIL_PC_NO_RECV_STP
+                         : OFPUTIL_PC_NO_RECV)) {
         return false;
     }
 
@@ -1492,8 +2078,7 @@ may_receive(const struct ofport_dpif *port, struct xlate_ctx *ctx)
      * disabled.  If just learning is enabled, we need to have
      * OFPP_NORMAL and the learning action have a look at the packet
      * before we can drop it. */
-    if (!stp_forward_in_state(port->stp_state)
-            && !stp_learn_in_state(port->stp_state)) {
+    if (!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) {
         return false;
     }
 
@@ -1533,7 +2118,6 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
         ctx->rule->up.evictable = false;
     }
 
- do_xlate_actions_again:
     OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
         struct ofpact_controller *controller;
         const struct ofpact_metadata *metadata;
@@ -1560,12 +2144,14 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
             break;
 
         case OFPACT_SET_VLAN_VID:
+            wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
             flow->vlan_tci &= ~htons(VLAN_VID_MASK);
             flow->vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
                                | htons(VLAN_CFI));
             break;
 
         case OFPACT_SET_VLAN_PCP:
+            wc->masks.vlan_tci |= htons(VLAN_PCP_MASK | VLAN_CFI);
             flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
             flow->vlan_tci |=
                 htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp << VLAN_PCP_SHIFT)
@@ -1573,35 +2159,42 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
             break;
 
         case OFPACT_STRIP_VLAN:
+            memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
             flow->vlan_tci = htons(0);
             break;
 
         case OFPACT_PUSH_VLAN:
             /* XXX 802.1AD(QinQ) */
+            memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
             flow->vlan_tci = htons(VLAN_CFI);
             break;
 
         case OFPACT_SET_ETH_SRC:
+            memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
             memcpy(flow->dl_src, ofpact_get_SET_ETH_SRC(a)->mac, ETH_ADDR_LEN);
             break;
 
         case OFPACT_SET_ETH_DST:
+            memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
             memcpy(flow->dl_dst, ofpact_get_SET_ETH_DST(a)->mac, ETH_ADDR_LEN);
             break;
 
         case OFPACT_SET_IPV4_SRC:
+            memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
             if (flow->dl_type == htons(ETH_TYPE_IP)) {
                 flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
             }
             break;
 
         case OFPACT_SET_IPV4_DST:
+            memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
             if (flow->dl_type == htons(ETH_TYPE_IP)) {
                 flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
             }
             break;
 
         case OFPACT_SET_IPV4_DSCP:
+            wc->masks.nw_tos |= IP_DSCP_MASK;
             /* OpenFlow 1.0 only supports IPv4. */
             if (flow->dl_type == htons(ETH_TYPE_IP)) {
                 flow->nw_tos &= ~IP_DSCP_MASK;
@@ -1611,6 +2204,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
 
         case OFPACT_SET_L4_SRC_PORT:
             memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
+            memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
             if (is_ip_any(flow)) {
                 flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
             }
@@ -1618,6 +2212,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
 
         case OFPACT_SET_L4_DST_PORT:
             memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
+            memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
             if (is_ip_any(flow)) {
                 flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
             }
@@ -1653,7 +2248,8 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
             break;
 
         case OFPACT_STACK_POP:
-            nxm_execute_stack_pop(ofpact_get_STACK_POP(a), flow, &ctx->stack);
+            nxm_execute_stack_pop(ofpact_get_STACK_POP(a), flow, wc,
+                                  &ctx->stack);
             break;
 
         case OFPACT_PUSH_MPLS:
@@ -1678,6 +2274,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
             break;
 
         case OFPACT_DEC_TTL:
+            wc->masks.nw_ttl = 0xff;
             if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
                 goto out;
             }
@@ -1734,33 +2331,10 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
         case OFPACT_GOTO_TABLE: {
             /* It is assumed that goto-table is the last action. */
             struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
-            struct rule_dpif *rule;
 
             ovs_assert(ctx->table_id < ogt->table_id);
-
-            ctx->table_id = ogt->table_id;
-
-            /* Look up a flow from the new table. */
-            rule = rule_dpif_lookup_in_table(ctx->ofproto, flow, wc,
-                                             ctx->table_id);
-
-            tag_the_flow(ctx, rule);
-
-            rule = ctx_rule_hooks(ctx, rule, true);
-
-            if (rule) {
-                if (ctx->rule) {
-                    ctx->rule->up.evictable = was_evictable;
-                }
-                ctx->rule = rule;
-                was_evictable = rule->up.evictable;
-                rule->up.evictable = false;
-
-                /* Tail recursion removal. */
-                ofpacts = rule->up.ofpacts;
-                ofpacts_len = rule->up.ofpacts_len;
-                goto do_xlate_actions_again;
-            }
+            xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
+                               ogt->table_id, true);
             break;
         }
 
@@ -1825,7 +2399,6 @@ void
 xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
 {
     dst->wc = src->wc;
-    dst->tags = src->tags;
     dst->slow = src->slow;
     dst->has_learn = src->has_learn;
     dst->has_normal = src->has_normal;
@@ -1839,28 +2412,74 @@ xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
                src->odp_actions.size);
 }
 \f
+static struct skb_priority_to_dscp *
+get_skb_priority(const struct xport *xport, uint32_t skb_priority)
+{
+    struct skb_priority_to_dscp *pdscp;
+    uint32_t hash;
+
+    hash = hash_int(skb_priority, 0);
+    HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &xport->skb_priorities) {
+        if (pdscp->skb_priority == skb_priority) {
+            return pdscp;
+        }
+    }
+    return NULL;
+}
+
+static bool
+dscp_from_skb_priority(const struct xport *xport, uint32_t skb_priority,
+                       uint8_t *dscp)
+{
+    struct skb_priority_to_dscp *pdscp = get_skb_priority(xport, skb_priority);
+    *dscp = pdscp ? pdscp->dscp : 0;
+    return pdscp != NULL;
+}
+
+static void
+clear_skb_priorities(struct xport *xport)
+{
+    struct skb_priority_to_dscp *pdscp, *next;
+
+    HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &xport->skb_priorities) {
+        hmap_remove(&xport->skb_priorities, &pdscp->hmap_node);
+        free(pdscp);
+    }
+}
+
+static bool
+actions_output_to_local_port(const struct xlate_ctx *ctx)
+{
+    odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->xbridge, OFPP_LOCAL);
+    const struct nlattr *a;
+    unsigned int left;
+
+    NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->xout->odp_actions.data,
+                             ctx->xout->odp_actions.size) {
+        if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
+            && nl_attr_get_odp_port(a) == local_odp_port) {
+            return true;
+        }
+    }
+    return false;
+}
 
 /* Translates the 'ofpacts_len' bytes of "struct ofpacts" starting at 'ofpacts'
  * into datapath actions in 'odp_actions', using 'ctx'. */
 void
 xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
 {
-    /* Normally false.  Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
-     * that in the future we always keep a copy of the original flow for
-     * tracing purposes. */
-    static bool hit_resubmit_limit;
-
     struct flow_wildcards *wc = &xout->wc;
     struct flow *flow = &xin->flow;
 
     enum slow_path_reason special;
     const struct ofpact *ofpacts;
-    struct ofport_dpif *in_port;
+    struct xport *in_port;
     struct flow orig_flow;
     struct xlate_ctx ctx;
     size_t ofpacts_len;
 
-    COVERAGE_INC(ofproto_dpif_xlate);
+    COVERAGE_INC(xlate_actions);
 
     /* Flow initialization rules:
      * - 'base_flow' must match the kernel's view of the packet at the
@@ -1885,8 +2504,21 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
 
     ctx.xin = xin;
     ctx.xout = xout;
+    ctx.xout->slow = 0;
+    ctx.xout->has_learn = false;
+    ctx.xout->has_normal = false;
+    ctx.xout->has_fin_timeout = false;
+    ctx.xout->nf_output_iface = NF_OUT_DROP;
+    ctx.xout->mirrors = 0;
+    ofpbuf_use_stub(&ctx.xout->odp_actions, ctx.xout->odp_actions_stub,
+                    sizeof ctx.xout->odp_actions_stub);
+    ofpbuf_reserve(&ctx.xout->odp_actions, NL_A_U32_SIZE);
+
+    ctx.xbridge = xbridge_lookup(xin->ofproto);
+    if (!ctx.xbridge) {
+        return;
+    }
 
-    ctx.ofproto = xin->ofproto;
     ctx.rule = xin->rule;
 
     ctx.base_flow = *flow;
@@ -1901,25 +2533,15 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
 
     if (tnl_port_should_receive(&ctx.xin->flow)) {
         memset(&wc->masks.tunnel, 0xff, sizeof wc->masks.tunnel);
+        /* skb_mark is currently used only by tunnels but that will likely
+         * change in the future. */
+        memset(&wc->masks.skb_mark, 0xff, sizeof wc->masks.skb_mark);
     }
-    if (xin->ofproto->netflow) {
-        netflow_mask_wc(wc);
+    if (ctx.xbridge->has_netflow) {
+        netflow_mask_wc(flow, wc);
     }
 
-    ctx.xout->tags = 0;
-    ctx.xout->slow = 0;
-    ctx.xout->has_learn = false;
-    ctx.xout->has_normal = false;
-    ctx.xout->has_fin_timeout = false;
-    ctx.xout->nf_output_iface = NF_OUT_DROP;
-    ctx.xout->mirrors = 0;
-
-    ofpbuf_use_stub(&ctx.xout->odp_actions, ctx.xout->odp_actions_stub,
-                    sizeof ctx.xout->odp_actions_stub);
-    ofpbuf_reserve(&ctx.xout->odp_actions, NL_A_U32_SIZE);
-
     ctx.recurse = 0;
-    ctx.max_resubmit_trigger = false;
     ctx.orig_skb_priority = flow->skb_priority;
     ctx.table_id = 0;
     ctx.exit = false;
@@ -1936,14 +2558,14 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
 
     ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);
 
-    if (ctx.ofproto->has_mirrors || hit_resubmit_limit) {
+    if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
         /* Do this conditionally because the copy is expensive enough that it
          * shows up in profiles. */
         orig_flow = *flow;
     }
 
     if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
-        switch (ctx.ofproto->up.frag_handling) {
+        switch (ctx.xbridge->frag) {
         case OFPC_FRAG_NORMAL:
             /* We must pretend that transport ports are unavailable. */
             flow->tp_src = ctx.base_flow.tp_src = htons(0);
@@ -1965,17 +2587,16 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
         }
     }
 
-    in_port = get_ofp_port(ctx.ofproto, flow->in_port.ofp_port);
+    in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
     special = process_special(&ctx, flow, in_port, ctx.xin->packet);
     if (special) {
         ctx.xout->slow = special;
     } else {
-        static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
         size_t sample_actions_len;
-        odp_port_t local_odp_port;
 
         if (flow->in_port.ofp_port
-            != vsp_realdev_to_vlandev(ctx.ofproto, flow->in_port.ofp_port,
+            != vsp_realdev_to_vlandev(ctx.xbridge->ofproto,
+                                      flow->in_port.ofp_port,
                                       flow->vlan_tci)) {
             ctx.base_flow.vlan_tci = 0;
         }
@@ -1989,37 +2610,22 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
 
             /* We've let OFPP_NORMAL and the learning action look at the
              * packet, so drop it now if forwarding is disabled. */
-            if (in_port && !stp_forward_in_state(in_port->stp_state)) {
+            if (in_port && !xport_stp_forward_state(in_port)) {
                 ctx.xout->odp_actions.size = sample_actions_len;
             }
         }
 
-        if (ctx.max_resubmit_trigger && !ctx.xin->resubmit_hook) {
-            if (!hit_resubmit_limit) {
-                /* We didn't record the original flow.  Make sure we do from
-                 * now on. */
-                hit_resubmit_limit = true;
-            } else if (!VLOG_DROP_ERR(&trace_rl)) {
-                struct ds ds = DS_EMPTY_INITIALIZER;
-
-                ofproto_trace(ctx.ofproto, &orig_flow, ctx.xin->packet, &ds);
-                VLOG_ERR("Trace triggered by excessive resubmit "
-                         "recursion:\n%s", ds_cstr(&ds));
-                ds_destroy(&ds);
-            }
-        }
-
-        local_odp_port = ofp_port_to_odp_port(ctx.ofproto, OFPP_LOCAL);
-        if (connmgr_has_in_band(ctx.ofproto->up.connmgr)
-            && !in_band_rule_check(flow, local_odp_port,
-                                   ctx.xout->odp_actions.data,
-                                   ctx.xout->odp_actions.size)) {
+        if (ctx.xbridge->has_in_band
+            && in_band_must_output_to_local_port(flow)
+            && !actions_output_to_local_port(&ctx)) {
             compose_output_action(&ctx, OFPP_LOCAL);
         }
-        if (ctx.ofproto->has_mirrors) {
+
+        fix_sflow_action(&ctx);
+
+        if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
             add_mirror_actions(&ctx, &orig_flow);
         }
-        fix_sflow_action(&ctx);
     }
 
     ofpbuf_uninit(&ctx.stack);