+static struct xbridge *xbridge_lookup(const struct ofproto_dpif *);
+static struct xbundle *xbundle_lookup(const struct ofbundle *);
+static struct xport *xport_lookup(const struct ofport_dpif *);
+static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
+static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
+ uint32_t skb_priority);
+static void clear_skb_priorities(struct xport *);
+static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
+ uint8_t *dscp);
+
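+/* Creates or updates the xlate state for 'ofproto'.  On the first call for a
+ * given ofproto this allocates a new xbridge and inserts it into the global
+ * 'xbridges' map; later calls refresh the bridge's name and configuration.
+ * Each refcounted subsystem handle (MAC learning, mirrors, sFlow, IPFIX,
+ * STP, NetFlow) is re-taken only when it differs from the handle already
+ * held, so an unchanged configuration causes no reference churn. */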
+void
+xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
+ struct dpif *dpif, struct rule_dpif *miss_rule,
+ struct rule_dpif *no_packet_in_rule,
+ const struct mac_learning *ml, struct stp *stp,
+ const struct mbridge *mbridge,
+ const struct dpif_sflow *sflow,
+ const struct dpif_ipfix *ipfix,
+ const struct netflow *netflow, enum ofp_config_flags frag,
+ bool forward_bpdu, bool has_in_band)
+{
+ struct xbridge *xbridge = xbridge_lookup(ofproto);
+
+ if (!xbridge) {
+ xbridge = xzalloc(sizeof *xbridge);
+ xbridge->ofproto = ofproto;
+
+ hmap_insert(&xbridges, &xbridge->hmap_node, hash_pointer(ofproto, 0));
+ hmap_init(&xbridge->xports);
+ list_init(&xbridge->xbundles);
+ }
+
+ if (xbridge->ml != ml) {
+ mac_learning_unref(xbridge->ml);
+ xbridge->ml = mac_learning_ref(ml);
+ }
+
+ if (xbridge->mbridge != mbridge) {
+ mbridge_unref(xbridge->mbridge);
+ xbridge->mbridge = mbridge_ref(mbridge);
+ }
+
+ if (xbridge->sflow != sflow) {
+ dpif_sflow_unref(xbridge->sflow);
+ xbridge->sflow = dpif_sflow_ref(sflow);
+ }
+
+ if (xbridge->ipfix != ipfix) {
+ dpif_ipfix_unref(xbridge->ipfix);
+ xbridge->ipfix = dpif_ipfix_ref(ipfix);
+ }
+
+ if (xbridge->stp != stp) {
+ stp_unref(xbridge->stp);
+ xbridge->stp = stp_ref(stp);
+ }
+
+ if (xbridge->netflow != netflow) {
+ netflow_unref(xbridge->netflow);
+ xbridge->netflow = netflow_ref(netflow);
+ }
+
+ free(xbridge->name);
+ xbridge->name = xstrdup(name);
+
+ xbridge->dpif = dpif;
+ xbridge->forward_bpdu = forward_bpdu;
+ xbridge->has_in_band = has_in_band;
+ xbridge->frag = frag;
+ xbridge->miss_rule = miss_rule;
+ xbridge->no_packet_in_rule = no_packet_in_rule;
+}
+
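+/* Removes the xlate state for 'ofproto', if any.  The bridge's xports and
+ * xbundles are removed first, then its subsystem references are released and
+ * the xbridge itself is freed. */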
+void
+xlate_remove_ofproto(struct ofproto_dpif *ofproto)
+{
+ struct xbridge *xbridge = xbridge_lookup(ofproto);
+ struct xbundle *xbundle, *next_xbundle;
+ struct xport *xport, *next_xport;
+
+ if (!xbridge) {
+ return;
+ }
+
+ HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
+ xlate_ofport_remove(xport->ofport);
+ }
+
+ LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
+ xlate_bundle_remove(xbundle->ofbundle);
+ }
+
+ hmap_remove(&xbridges, &xbridge->hmap_node);
+ mac_learning_unref(xbridge->ml);
+ mbridge_unref(xbridge->mbridge);
+ dpif_sflow_unref(xbridge->sflow);
+ dpif_ipfix_unref(xbridge->ipfix);
+    stp_unref(xbridge->stp);
+    netflow_unref(xbridge->netflow);
+ hmap_destroy(&xbridge->xports);
+ free(xbridge->name);
+ free(xbridge);
+}
+
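+/* Creates or updates the xbundle for 'ofbundle' on 'ofproto's xbridge,
+ * refreshing its name and VLAN configuration and taking new references to
+ * 'bond' and 'lacp' only when they differ from the handles already held. */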
+void
+xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
+ const char *name, enum port_vlan_mode vlan_mode, int vlan,
+ unsigned long *trunks, bool use_priority_tags,
+ const struct bond *bond, const struct lacp *lacp,
+ bool floodable)
+{
+ struct xbundle *xbundle = xbundle_lookup(ofbundle);
+
+ if (!xbundle) {
+ xbundle = xzalloc(sizeof *xbundle);
+ xbundle->ofbundle = ofbundle;
+ xbundle->xbridge = xbridge_lookup(ofproto);
+
+ hmap_insert(&xbundles, &xbundle->hmap_node, hash_pointer(ofbundle, 0));
+ list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
+ list_init(&xbundle->xports);
+ }
+
+ ovs_assert(xbundle->xbridge);
+
+ free(xbundle->name);
+ xbundle->name = xstrdup(name);
+
+ xbundle->vlan_mode = vlan_mode;
+ xbundle->vlan = vlan;
+ xbundle->trunks = trunks;
+ xbundle->use_priority_tags = use_priority_tags;
+ xbundle->floodable = floodable;
+
+ if (xbundle->bond != bond) {
+ bond_unref(xbundle->bond);
+ xbundle->bond = bond_ref(bond);
+ }
+
+ if (xbundle->lacp != lacp) {
+ lacp_unref(xbundle->lacp);
+ xbundle->lacp = lacp_ref(lacp);
+ }
+}
+
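+/* Removes the xbundle for 'ofbundle', if any, detaching each member xport
+ * before releasing the bundle's bond and LACP references. */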
+void
+xlate_bundle_remove(struct ofbundle *ofbundle)
+{
+ struct xbundle *xbundle = xbundle_lookup(ofbundle);
+ struct xport *xport, *next;
+
+ if (!xbundle) {
+ return;
+ }
+
+ LIST_FOR_EACH_SAFE (xport, next, bundle_node, &xbundle->xports) {
+ list_remove(&xport->bundle_node);
+ xport->xbundle = NULL;
+ }
+
+ hmap_remove(&xbundles, &xbundle->hmap_node);
+ list_remove(&xbundle->list_node);
+ bond_unref(xbundle->bond);
+ lacp_unref(xbundle->lacp);
+ free(xbundle->name);
+ free(xbundle);
+}
+
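+/* Creates or updates the xport for 'ofport', refreshing its configuration
+ * and its peer and bundle links.  The port's skb_priority-to-DSCP map is
+ * rebuilt from 'qdscp_list', using dpif_queue_to_priority() to translate
+ * each OpenFlow queue ID into a datapath skb_priority. */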
+void
+xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
+ struct ofport_dpif *ofport, ofp_port_t ofp_port,
+ odp_port_t odp_port, const struct netdev *netdev,
+ const struct cfm *cfm, const struct bfd *bfd,
+ struct ofport_dpif *peer, int stp_port_no,
+ const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
+ enum ofputil_port_config config,
+ enum ofputil_port_state state, bool is_tunnel,
+ bool may_enable)
+{
+ struct xport *xport = xport_lookup(ofport);
+ size_t i;
+
+ if (!xport) {
+ xport = xzalloc(sizeof *xport);
+ xport->ofport = ofport;
+ xport->xbridge = xbridge_lookup(ofproto);
+ xport->ofp_port = ofp_port;
+
+ hmap_init(&xport->skb_priorities);
+ hmap_insert(&xports, &xport->hmap_node, hash_pointer(ofport, 0));
+ hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
+ hash_ofp_port(xport->ofp_port));
+ }
+
+ ovs_assert(xport->ofp_port == ofp_port);
+
+ xport->config = config;
+ xport->state = state;
+ xport->stp_port_no = stp_port_no;
+ xport->is_tunnel = is_tunnel;
+ xport->may_enable = may_enable;
+ xport->odp_port = odp_port;
+
+ if (xport->netdev != netdev) {
+ netdev_close(xport->netdev);
+ xport->netdev = netdev_ref(netdev);
+ }
+
+ if (xport->cfm != cfm) {
+ cfm_unref(xport->cfm);
+ xport->cfm = cfm_ref(cfm);
+ }
+
+ if (xport->bfd != bfd) {
+ bfd_unref(xport->bfd);
+ xport->bfd = bfd_ref(bfd);
+ }
+
+ if (xport->peer) {
+ xport->peer->peer = NULL;
+ }
+ xport->peer = xport_lookup(peer);
+ if (xport->peer) {
+ xport->peer->peer = xport;
+ }
+
+ if (xport->xbundle) {
+ list_remove(&xport->bundle_node);
+ }
+ xport->xbundle = xbundle_lookup(ofbundle);
+ if (xport->xbundle) {
+ list_insert(&xport->xbundle->xports, &xport->bundle_node);
+ }
+
+ clear_skb_priorities(xport);
+ for (i = 0; i < n_qdscp; i++) {
+ struct skb_priority_to_dscp *pdscp;
+ uint32_t skb_priority;
+
+ if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
+ &skb_priority)) {
+ continue;
+ }
+
+ pdscp = xmalloc(sizeof *pdscp);
+ pdscp->skb_priority = skb_priority;
+ pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
+ hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
+ hash_int(pdscp->skb_priority, 0));
+ }
+}
+
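+/* Removes the xport for 'ofport', if any, unlinking it from its peer and
+ * bundle and releasing its netdev, CFM, and BFD references. */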
+void
+xlate_ofport_remove(struct ofport_dpif *ofport)
+{
+ struct xport *xport = xport_lookup(ofport);
+
+ if (!xport) {
+ return;
+ }
+
+ if (xport->peer) {
+ xport->peer->peer = NULL;
+ xport->peer = NULL;
+ }
+
+ if (xport->xbundle) {
+ list_remove(&xport->bundle_node);
+ }
+
+ clear_skb_priorities(xport);
+ hmap_destroy(&xport->skb_priorities);
+
+ hmap_remove(&xports, &xport->hmap_node);
+ hmap_remove(&xport->xbridge->xports, &xport->ofp_node);
+
+ netdev_close(xport->netdev);
+ cfm_unref(xport->cfm);
+ bfd_unref(xport->bfd);
+ free(xport);
+}
+
+/* Given a datapath, packet, and flow metadata ('backer', 'packet', and 'key'
+ * respectively), populates 'flow' with the result of odp_flow_key_to_flow().
+ * Optionally, if nonnull, populates 'fitnessp' with the fitness of 'flow' as
+ * returned by odp_flow_key_to_flow().  Also, optionally populates 'ofproto'
+ * with the ofproto_dpif, 'odp_in_port' with the datapath in_port on which
+ * 'packet' ingressed, and 'ipfix', 'sflow', and 'netflow' with the
+ * appropriate handles for those protocols if they are enabled.  The caller
+ * is responsible for unrefing them.
+ *
+ * If 'ofproto' is nonnull, requires 'flow''s in_port to exist. Otherwise sets
+ * 'flow''s in_port to OFPP_NONE.
+ *
+ * This function does post-processing on data returned from
+ * odp_flow_key_to_flow() to help make VLAN splinters transparent to the rest
+ * of the upcall processing logic. In particular, if the extracted in_port is
+ * a VLAN splinter port, it replaces flow->in_port by the "real" port, sets
+ * flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes
+ * a VLAN header onto 'packet' (if it is nonnull).
+ *
+ * Similarly, this function also includes some logic to help with tunnels. It
+ * may modify 'flow' as necessary to make the tunneling implementation
+ * transparent to the upcall processing logic.
+ *
+ * Returns 0 if successful, ENODEV if the parsed flow has no associated ofport,
+ * or some other positive errno if there are other problems. */
+int
+xlate_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
+ const struct nlattr *key, size_t key_len,
+ struct flow *flow, enum odp_key_fitness *fitnessp,
+ struct ofproto_dpif **ofproto, struct dpif_ipfix **ipfix,
+ struct dpif_sflow **sflow, struct netflow **netflow,
+ odp_port_t *odp_in_port)
+{
+ enum odp_key_fitness fitness;
+ const struct xport *xport;
+ int error = ENODEV;
+
+ ovs_rwlock_rdlock(&xlate_rwlock);
+ fitness = odp_flow_key_to_flow(key, key_len, flow);
+ if (fitness == ODP_FIT_ERROR) {
+ error = EINVAL;
+ goto exit;
+ }
+
+ if (odp_in_port) {
+ *odp_in_port = flow->in_port.odp_port;
+ }
+
+ xport = xport_lookup(tnl_port_should_receive(flow)
+ ? tnl_port_receive(flow)
+ : odp_port_to_ofport(backer, flow->in_port.odp_port));
+
+ flow->in_port.ofp_port = xport ? xport->ofp_port : OFPP_NONE;
+ if (!xport) {
+ goto exit;
+ }
+
+ if (vsp_adjust_flow(xport->xbridge->ofproto, flow)) {
+ if (packet) {
+ /* Make the packet resemble the flow, so that it gets sent to
+ * an OpenFlow controller properly, so that it looks correct
+ * for sFlow, and so that flow_extract() will get the correct
+ * vlan_tci if it is called on 'packet'. */
+ eth_push_vlan(packet, flow->vlan_tci);
+ }
+ /* We can't reproduce 'key' from 'flow'. */
+ fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
+ }
+ error = 0;
+
+ if (ofproto) {
+ *ofproto = xport->xbridge->ofproto;
+ }
+
+ if (ipfix) {
+ *ipfix = dpif_ipfix_ref(xport->xbridge->ipfix);
+ }
+
+ if (sflow) {
+ *sflow = dpif_sflow_ref(xport->xbridge->sflow);
+ }
+
+ if (netflow) {
+ *netflow = netflow_ref(xport->xbridge->netflow);
+ }
+
+exit:
+ if (fitnessp) {
+ *fitnessp = fitness;
+ }
+ ovs_rwlock_unlock(&xlate_rwlock);
+ return error;
+}
+
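+/* The three lookup functions below map an ofproto, ofbundle, or ofport to
+ * its xlate counterpart by searching the hash bucket keyed on the pointer
+ * value itself.  Each returns NULL when passed NULL or when the object has
+ * no xlate state. */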
+static struct xbridge *
+xbridge_lookup(const struct ofproto_dpif *ofproto)
+{
+ struct xbridge *xbridge;
+
+ if (!ofproto) {
+ return NULL;
+ }
+
+ HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
+ &xbridges) {
+ if (xbridge->ofproto == ofproto) {
+ return xbridge;
+ }
+ }
+ return NULL;
+}
+
+static struct xbundle *
+xbundle_lookup(const struct ofbundle *ofbundle)
+{
+ struct xbundle *xbundle;
+
+ if (!ofbundle) {
+ return NULL;
+ }
+
+ HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
+ &xbundles) {
+ if (xbundle->ofbundle == ofbundle) {
+ return xbundle;
+ }
+ }
+ return NULL;
+}
+
+static struct xport *
+xport_lookup(const struct ofport_dpif *ofport)
+{
+ struct xport *xport;
+
+ if (!ofport) {
+ return NULL;
+ }
+
+ HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
+ &xports) {
+ if (xport->ofport == ofport) {
+ return xport;
+ }
+ }
+ return NULL;
+}
+
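+/* Returns the STP port corresponding to 'xport', or NULL if STP is not
+ * enabled on the bridge or 'xport' has no STP port number assigned. */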
+static struct stp_port *
+xport_get_stp_port(const struct xport *xport)
+{
+ return xport->xbridge->stp && xport->stp_port_no != -1
+ ? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
+ : NULL;
+}
+
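+/* The two helpers below report whether 'xport' may learn MAC addresses or
+ * forward traffic in its current STP state.  A port without an STP port is
+ * treated as STP_DISABLED, for which both predicates return true. */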
+static bool
+xport_stp_learn_state(const struct xport *xport)
+{
+ struct stp_port *sp = xport_get_stp_port(xport);
+ return stp_learn_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
+}
+
+static bool
+xport_stp_forward_state(const struct xport *xport)
+{
+ struct stp_port *sp = xport_get_stp_port(xport);
+ return stp_forward_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
+}
+
+/* Returns true if STP should process 'flow'. Sets fields in 'wc' that
+ * were used to make the determination. */
+static bool
+stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
+{
+ memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
+ return eth_addr_equals(flow->dl_dst, eth_addr_stp);
+}
+
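+/* Passes the STP BPDU in 'packet', received on 'xport', to the STP module,
+ * trimming Ethernet padding and the Ethernet/LLC headers first. */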
+static void
+stp_process_packet(const struct xport *xport, const struct ofpbuf *packet)
+{
+ struct stp_port *sp = xport_get_stp_port(xport);
+ struct ofpbuf payload = *packet;
+ struct eth_header *eth = payload.data;
+
+ /* Sink packets on ports that have STP disabled when the bridge has
+ * STP enabled. */
+ if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
+ return;
+ }
+
+ /* Trim off padding on payload. */
+ if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
+ payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
+ }
+
+ if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
+ stp_received_bpdu(sp, payload.data, payload.size);
+ }
+}
+
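+/* Returns the xport on 'xbridge' with OpenFlow port number 'ofp_port', or
+ * NULL if there is none. */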
+static struct xport *
+get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
+{
+ struct xport *xport;
+
+ HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
+ &xbridge->xports) {
+ if (xport->ofp_port == ofp_port) {
+ return xport;
+ }
+ }
+ return NULL;
+}
+
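+/* Returns the datapath port number corresponding to 'ofp_port' on 'xbridge',
+ * or ODPP_NONE if 'ofp_port' does not exist on 'xbridge'. */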
+static odp_port_t
+ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
+{
+ const struct xport *xport = get_ofp_port(xbridge, ofp_port);
+ return xport ? xport->odp_port : ODPP_NONE;
+}
+
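+/* Returns true if the port numbered 'ofp_port' exists, is administratively
+ * up, and has link, i.e. it is "live" in the sense OpenFlow group liveness
+ * checking uses for watch ports. */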
+static bool
+odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
+{
+ struct xport *xport;
+
+ xport = get_ofp_port(ctx->xbridge, ofp_port);
+ if (!xport || xport->config & OFPUTIL_PC_PORT_DOWN ||
+ xport->state & OFPUTIL_PS_LINK_DOWN) {
+ return false;
+ }
+
+ return true;
+}
+
+static const struct ofputil_bucket *
+group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
+ int depth);
+
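+/* Returns true if group 'group_id' exists on the bridge and has at least one
+ * live bucket.  'depth' counts chained group lookups so that liveness
+ * recursion can be bounded. */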
+static bool
+group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
+{
+ struct group_dpif *group;
+ bool hit;
+
+ hit = group_dpif_lookup(ctx->xbridge->ofproto, group_id, &group);
+ if (!hit) {
+ return false;
+ }
+
+ hit = group_first_live_bucket(ctx, group, depth) != NULL;
+
+ group_dpif_release(group);
+ return hit;
+}
+
+#define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */
+
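+/* Returns true if 'bucket' should be treated as live: either the bucket does
+ * not request liveness checking at all, or its watch port or watch group is
+ * alive.  'depth' bounds recursion through chained groups. */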
+static bool
+bucket_is_alive(const struct xlate_ctx *ctx,
+ const struct ofputil_bucket *bucket, int depth)
+{
+ if (depth >= MAX_LIVENESS_RECURSION) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+ VLOG_WARN_RL(&rl, "bucket chaining exceeded %d links",
+ MAX_LIVENESS_RECURSION);
+ return false;
+ }
+
+ return !ofputil_bucket_has_liveness(bucket) ||
+ (bucket->watch_port != OFPP_ANY &&
+ odp_port_is_alive(ctx, bucket->watch_port)) ||
+ (bucket->watch_group != OFPG_ANY &&
+ group_is_alive(ctx, bucket->watch_group, depth + 1));
+}
+
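+/* Returns the first live bucket in 'group', or NULL if no bucket is live,
+ * matching the OpenFlow rule that a fast failover group executes its first
+ * live bucket. */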
+static const struct ofputil_bucket *
+group_first_live_bucket(const struct xlate_ctx *ctx,
+ const struct group_dpif *group, int depth)
+{
+ struct ofputil_bucket *bucket;
+ const struct list *buckets;
+
+ group_dpif_get_buckets(group, &buckets);
+ LIST_FOR_EACH (bucket, list_node, buckets) {
+ if (bucket_is_alive(ctx, bucket, depth)) {
+ return bucket;
+ }
+ }
+
+ return NULL;
+}
+
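+/* Returns the live bucket in 'group' whose weighted hash of 'basis' scores
+ * highest, or NULL if no bucket is live.  Hashing each bucket's index with
+ * 'basis' and scaling by the bucket's weight yields a highest-random-weight
+ * style choice, so equal 'basis' values pick the same bucket while weights
+ * bias the distribution. */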
+static const struct ofputil_bucket *
+group_best_live_bucket(const struct xlate_ctx *ctx,
+ const struct group_dpif *group,
+ uint32_t basis)
+{
+ const struct ofputil_bucket *best_bucket = NULL;
+ uint32_t best_score = 0;
+ int i = 0;
+
+ const struct ofputil_bucket *bucket;
+ const struct list *buckets;
+
+ group_dpif_get_buckets(group, &buckets);
+ LIST_FOR_EACH (bucket, list_node, buckets) {
+ if (bucket_is_alive(ctx, bucket, 0)) {
+ uint32_t score = (hash_int(i, basis) & 0xffff) * bucket->weight;
+ if (score >= best_score) {
+ best_bucket = bucket;
+ best_score = score;
+ }
+ }
+ i++;
+ }
+
+ return best_bucket;
+}