#include <errno.h>
-#include "autopath.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
uint32_t sflow_odp_port; /* Output port for composing sFlow action. */
uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
bool exit; /* No further actions should be processed. */
- struct flow orig_flow; /* Copy of original flow. */
};
static void action_xlate_ctx_init(struct action_xlate_ctx *,
struct list bundle_node; /* In struct ofbundle's "ports" list. */
struct cfm *cfm; /* Connectivity Fault Management, if any. */
tag_type tag; /* Tag associated with this port. */
- uint32_t bond_stable_id; /* stable_id to use as bond slave, or 0. */
bool may_enable; /* May be enabled in bonds. */
long long int carrier_seq; /* Carrier status changes. */
struct tnl_port *tnl_port; /* Tunnel handle, or null. */
struct timer next_expiration;
struct hmap odp_to_ofport_map; /* ODP port to ofport mapping. */
- struct sset tnl_backers; /* Set of dpif ports backing tunnels. */
+ struct simap tnl_backers; /* Set of dpif ports backing tunnels. */
/* Facet revalidation flags applying to facets which use this backer. */
enum revalidate_reason need_revalidate; /* Revalidate every facet. */
drop_key_clear(backer);
}
+ /* Clear the revalidation flags. */
+ tag_set_init(&backer->revalidate_set);
+ backer->need_revalidate = 0;
+
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
struct facet *facet;
continue;
}
- /* Clear the revalidation flags. */
- tag_set_init(&backer->revalidate_set);
- backer->need_revalidate = 0;
-
HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
if (need_revalidate
|| tag_set_intersects(&revalidate_set, facet->tags)) {
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
&all_ofproto_dpifs) {
- if (sset_contains(&ofproto->backer->tnl_backers, devname)) {
+ if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
goto next;
}
}
drop_key_clear(backer);
hmap_destroy(&backer->drop_keys);
- sset_destroy(&backer->tnl_backers);
+ simap_destroy(&backer->tnl_backers);
hmap_destroy(&backer->odp_to_ofport_map);
node = shash_find(&all_dpif_backers, backer->type);
free(backer->type);
hmap_init(&backer->drop_keys);
timer_set_duration(&backer->next_expiration, 1000);
backer->need_revalidate = 0;
- sset_init(&backer->tnl_backers);
+ simap_init(&backer->tnl_backers);
tag_set_init(&backer->revalidate_set);
*backerp = backer;
if (odp_port_to_ofp_port(ofproto, port->odp_port) != OFPP_NONE) {
VLOG_ERR("port %s already has an OpenFlow port number",
dpif_port.name);
+ dpif_port_destroy(&dpif_port);
return EBUSY;
}
hmap_insert(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node,
hash_int(port->odp_port, 0));
}
+ dpif_port_destroy(&dpif_port);
if (ofproto->sflow) {
dpif_sflow_add_port(ofproto->sflow, port_, port->odp_port);
* assumes that removal of attached ports will happen as part of
* destruction. */
dpif_port_del(ofproto->backer->dpif, port->odp_port);
- sset_find_and_delete(&ofproto->backer->tnl_backers, dp_port_name);
+ simap_find_and_delete(&ofproto->backer->tnl_backers, dp_port_name);
}
if (port->odp_port != OVSP_NONE && !port->tnl_port) {
static bool
bundle_add_port(struct ofbundle *bundle, uint32_t ofp_port,
- struct lacp_slave_settings *lacp,
- uint32_t bond_stable_id)
+ struct lacp_slave_settings *lacp)
{
struct ofport_dpif *port;
lacp_slave_register(bundle->lacp, port, lacp);
}
- port->bond_stable_id = bond_stable_id;
-
return true;
}
ok = true;
for (i = 0; i < s->n_slaves; i++) {
if (!bundle_add_port(bundle, s->slaves[i],
- s->lacp ? &s->lacp_slaves[i] : NULL,
- s->bond_stable_ids ? s->bond_stable_ids[i] : 0)) {
+ s->lacp ? &s->lacp_slaves[i] : NULL)) {
ok = false;
}
}
}
LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
- bond_slave_register(bundle->bond, port, port->bond_stable_id,
- port->up.netdev);
+ bond_slave_register(bundle->bond, port, port->up.netdev);
}
} else {
bond_destroy(bundle->bond);
}
if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
- int error = dpif_port_add(ofproto->backer->dpif, netdev, NULL);
+ uint32_t port_no = UINT32_MAX;
+ int error;
+
+ error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
if (error) {
return error;
}
+ if (netdev_get_tunnel_config(netdev)) {
+ simap_put(&ofproto->backer->tnl_backers, dp_port_name, port_no);
+ }
}
if (netdev_get_tunnel_config(netdev)) {
sset_add(&ofproto->ghost_ports, devname);
- sset_add(&ofproto->backer->tnl_backers, dp_port_name);
} else {
sset_add(&ofproto->ports, devname);
}
* from the bond. The client will need to reconfigure everything
* after deleting ports, so then the slave will get re-added. */
dpif_port = netdev_vport_get_dpif_port(ofport->up.netdev);
- sset_find_and_delete(&ofproto->backer->tnl_backers, dpif_port);
+ simap_find_and_delete(&ofproto->backer->tnl_backers, dpif_port);
bundle_remove(&ofport->up);
}
}
uint32_t bucket;
uint32_t offset;
bool ghost;
+
+ struct ofproto_port port;
+ bool has_port;
};
static int
const struct sset *sset;
struct sset_node *node;
+ if (state->has_port) {
+ ofproto_port_destroy(&state->port);
+ state->has_port = false;
+ }
sset = state->ghost ? &ofproto->ghost_ports : &ofproto->ports;
while ((node = sset_at_position(sset, &state->bucket, &state->offset))) {
int error;
- error = port_query_by_name(ofproto_, node->name, port);
- if (error != ENODEV) {
+ error = port_query_by_name(ofproto_, node->name, &state->port);
+ if (!error) {
+ *port = state->port;
+ state->has_port = true;
+ return 0;
+ } else if (error != ENODEV) {
return error;
}
}
{
struct port_dump_state *state = state_;
+ if (state->has_port) {
+ ofproto_port_destroy(&state->port);
+ }
free(state);
return 0;
}
static enum slow_path_reason
process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
- const struct ofpbuf *packet)
+ const struct ofport_dpif *ofport, const struct ofpbuf *packet)
{
- struct ofport_dpif *ofport = get_ofp_port(ofproto, flow->in_port);
-
if (!ofport) {
return 0;
- }
-
- if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
+ } else if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
if (packet) {
cfm_process_heartbeat(ofport->cfm, packet);
}
stp_process_packet(ofport, packet);
}
return SLOW_STP;
+ } else {
+ return 0;
}
- return 0;
}
static struct flow_miss *
\f
/* OpenFlow to datapath action translation. */
+static bool may_receive(const struct ofport_dpif *, struct action_xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
struct action_xlate_ctx *);
static void xlate_normal(struct action_xlate_ctx *);
/* If 'struct flow' gets additional metadata, we'll need to zero it out
* before traversing a patch port. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 18);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 19);
if (!ofport) {
xlate_report(ctx, "Nonexistent output port");
struct ofport_dpif *peer = ofport_get_peer(ofport);
struct flow old_flow = ctx->flow;
const struct ofproto_dpif *peer_ofproto;
+ struct ofport_dpif *in_port;
if (!peer) {
xlate_report(ctx, "Nonexistent patch port peer");
ctx->flow.metadata = htonll(0);
memset(&ctx->flow.tunnel, 0, sizeof ctx->flow.tunnel);
memset(ctx->flow.regs, 0, sizeof ctx->flow.regs);
- xlate_table_action(ctx, ctx->flow.in_port, 0, true);
+
+ in_port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
+ if (!in_port || may_receive(in_port, ctx)) {
+ if (!in_port || stp_forward_in_state(in_port->stp_state)) {
+ xlate_table_action(ctx, ctx->flow.in_port, 0, true);
+ } else {
+ /* Forwarding is disabled by STP. Let OFPP_NORMAL and the
+ * learning action look at the packet, then drop it. */
+ struct flow old_base_flow = ctx->base_flow;
+ size_t old_size = ctx->odp_actions->size;
+ xlate_table_action(ctx, ctx->flow.in_port, 0, true);
+ ctx->base_flow = old_base_flow;
+ ctx->odp_actions->size = old_size;
+ }
+ }
+
ctx->flow = old_flow;
ctx->ofproto = ofproto_dpif_cast(ofport->up.ofproto);
if (packet->l2 && packet->l3) {
struct eth_header *eh;
+ uint16_t mpls_depth;
eth_pop_vlan(packet);
eh = packet->l2;
- /* If the Ethernet type is less than ETH_TYPE_MIN, it's likely an 802.2
- * LLC frame. Calculating the Ethernet type of these frames is more
- * trouble than seems appropriate for a simple assertion. */
- ovs_assert(ntohs(eh->eth_type) < ETH_TYPE_MIN
- || eh->eth_type == ctx->flow.dl_type);
-
memcpy(eh->eth_src, ctx->flow.dl_src, sizeof eh->eth_src);
memcpy(eh->eth_dst, ctx->flow.dl_dst, sizeof eh->eth_dst);
eth_push_vlan(packet, ctx->flow.vlan_tci);
}
+ mpls_depth = eth_mpls_depth(packet);
+
+ if (mpls_depth < ctx->flow.mpls_depth) {
+ push_mpls(packet, ctx->flow.dl_type, ctx->flow.mpls_lse);
+ } else if (mpls_depth > ctx->flow.mpls_depth) {
+ pop_mpls(packet, ctx->flow.dl_type);
+ } else if (mpls_depth) {
+ set_mpls_lse(packet, ctx->flow.mpls_lse);
+ }
+
if (packet->l4) {
if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
packet_set_ipv4(packet, ctx->flow.nw_src, ctx->flow.nw_dst,
ofpbuf_delete(packet);
}
+/* Translates an OFPACT_PUSH_MPLS action into changes to ctx->flow.
+ *
+ * If the flow already carries an MPLS label (depth nonzero), the current
+ * top-of-stack LSE loses its bottom-of-stack bit and the depth is
+ * incremented.  Otherwise a fresh bottom-of-stack LSE is synthesized: the
+ * label is the IPv4/IPv6 Explicit Null reserved label, TC is taken from
+ * the IP DSCP bits, and TTL is copied from the IP TTL (0x40 when the flow
+ * has no TTL).  The flow's pre-push Ethertype is saved in encap_dl_type
+ * so that a later pop can know the inner payload type.  In both cases the
+ * flow's dl_type becomes the pushed MPLS Ethertype. */
+static void
+execute_mpls_push_action(struct action_xlate_ctx *ctx, ovs_be16 eth_type)
+{
+    ovs_assert(eth_type_mpls(eth_type));
+
+    /* NOTE(review): the condition tests base_flow.mpls_depth while the
+     * mutations target ctx->flow — confirm this asymmetry is intended
+     * (base_flow reflects the packet as received, flow the current
+     * translated state). */
+    if (ctx->base_flow.mpls_depth) {
+        /* Pushing onto an existing stack: the old top LSE is no longer
+         * bottom-of-stack. */
+        ctx->flow.mpls_lse &= ~htonl(MPLS_BOS_MASK);
+        ctx->flow.mpls_depth++;
+    } else {
+        ovs_be32 label;
+        uint8_t tc, ttl;
+
+        if (ctx->flow.dl_type == htons(ETH_TYPE_IPV6)) {
+            label = htonl(0x2); /* IPV6 Explicit Null. */
+        } else {
+            label = htonl(0x0); /* IPV4 Explicit Null. */
+        }
+        /* Inherit QoS/TTL from the IP header; 0x40 is the fallback TTL
+         * when the flow carries none. */
+        tc = (ctx->flow.nw_tos & IP_DSCP_MASK) >> 2;
+        ttl = ctx->flow.nw_ttl ? ctx->flow.nw_ttl : 0x40;
+        ctx->flow.mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
+        ctx->flow.encap_dl_type = ctx->flow.dl_type;
+        ctx->flow.mpls_depth = 1;
+    }
+    ctx->flow.dl_type = eth_type;
+}
+
+/* Translates an OFPACT_POP_MPLS action into changes to ctx->flow.
+ *
+ * Requires the flow currently to have an MPLS Ethertype and 'eth_type'
+ * (the type exposed after the pop) to be a non-MPLS one, per the
+ * assertions.  Popping decrements the tracked depth and zeroes the
+ * cached LSE; when the last label is removed, the flow's dl_type is set
+ * to 'eth_type' and the saved inner Ethertype is cleared.  A pop at
+ * depth 0 is a no-op beyond the assertions. */
+static void
+execute_mpls_pop_action(struct action_xlate_ctx *ctx, ovs_be16 eth_type)
+{
+    ovs_assert(eth_type_mpls(ctx->flow.dl_type));
+    ovs_assert(!eth_type_mpls(eth_type));
+
+    if (ctx->flow.mpls_depth) {
+        ctx->flow.mpls_depth--;
+        /* The popped LSE is gone; inner LSEs are not tracked in the flow,
+         * so the cached value is simply cleared. */
+        ctx->flow.mpls_lse = htonl(0);
+        if (!ctx->flow.mpls_depth) {
+            /* Stack now empty: the flow reverts to a non-MPLS type. */
+            ctx->flow.dl_type = eth_type;
+            ctx->flow.encap_dl_type = htons(0);
+        }
+    }
+}
+
static bool
compose_dec_ttl(struct action_xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
{
ovs_be64 tun_id;
};
-static void
-xlate_autopath(struct action_xlate_ctx *ctx,
- const struct ofpact_autopath *ap)
-{
- uint16_t ofp_port = ap->port;
- struct ofport_dpif *port = get_ofp_port(ctx->ofproto, ofp_port);
-
- if (!port || !port->bundle) {
- ofp_port = OFPP_NONE;
- } else if (port->bundle->bond) {
- /* Autopath does not support VLAN hashing. */
- struct ofport_dpif *slave = bond_choose_output_slave(
- port->bundle->bond, &ctx->flow, 0, &ctx->tags);
- if (slave) {
- ofp_port = slave->up.ofp_port;
- }
- }
- nxm_reg_load(&ap->dst, ofp_port, &ctx->flow);
-}
-
static bool
slave_enabled_cb(uint16_t ofp_port, void *ofproto_)
{
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
struct action_xlate_ctx *ctx)
{
- const struct ofport_dpif *port;
bool was_evictable = true;
const struct ofpact *a;
- port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
- if (port && !may_receive(port, ctx)) {
- /* Drop this flow. */
- return;
- }
-
if (ctx->rule) {
/* Don't let the rule we're working on get evicted underneath us. */
was_evictable = ctx->rule->up.evictable;
nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->flow);
break;
+ case OFPACT_PUSH_MPLS:
+ execute_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);
+ break;
+
+ case OFPACT_POP_MPLS:
+ execute_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
+ break;
+
case OFPACT_DEC_TTL:
if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
goto out;
multipath_execute(ofpact_get_MULTIPATH(a), &ctx->flow);
break;
- case OFPACT_AUTOPATH:
- xlate_autopath(ctx, ofpact_get_AUTOPATH(a));
- break;
-
case OFPACT_BUNDLE:
ctx->ofproto->has_bundle_action = true;
xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
}
out:
- /* We've let OFPP_NORMAL and the learning action look at the packet,
- * so drop it now if forwarding is disabled. */
- if (port && !stp_forward_in_state(port->stp_state)) {
- ofpbuf_clear(ctx->odp_actions);
- add_sflow_action(ctx);
- }
if (ctx->rule) {
ctx->rule->up.evictable = was_evictable;
}
static bool hit_resubmit_limit;
enum slow_path_reason special;
+ struct ofport_dpif *in_port;
+ struct flow orig_flow;
COVERAGE_INC(ofproto_dpif_xlate);
if (ctx->ofproto->has_mirrors || hit_resubmit_limit) {
/* Do this conditionally because the copy is expensive enough that it
- * shows up in profiles.
- *
- * We keep orig_flow in 'ctx' only because I couldn't make GCC 4.4
- * believe that I wasn't using it without initializing it if I kept it
- * in a local variable. */
- ctx->orig_flow = ctx->flow;
+ * shows up in profiles. */
+ orig_flow = ctx->flow;
}
if (ctx->flow.nw_frag & FLOW_NW_FRAG_ANY) {
}
}
- special = process_special(ctx->ofproto, &ctx->flow, ctx->packet);
+ in_port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
+ special = process_special(ctx->ofproto, &ctx->flow, in_port, ctx->packet);
if (special) {
ctx->slow |= special;
} else {
uint32_t local_odp_port;
add_sflow_action(ctx);
- do_xlate_actions(ofpacts, ofpacts_len, ctx);
+
+ if (!in_port || may_receive(in_port, ctx)) {
+ do_xlate_actions(ofpacts, ofpacts_len, ctx);
+
+ /* We've let OFPP_NORMAL and the learning action look at the
+ * packet, so drop it now if forwarding is disabled. */
+ if (in_port && !stp_forward_in_state(in_port->stp_state)) {
+ ofpbuf_clear(ctx->odp_actions);
+ add_sflow_action(ctx);
+ }
+ }
if (ctx->max_resubmit_trigger && !ctx->resubmit_hook) {
if (!hit_resubmit_limit) {
} else if (!VLOG_DROP_ERR(&trace_rl)) {
struct ds ds = DS_EMPTY_INITIALIZER;
- ofproto_trace(ctx->ofproto, &ctx->orig_flow, ctx->packet,
+ ofproto_trace(ctx->ofproto, &orig_flow, ctx->packet,
initial_tci, &ds);
VLOG_ERR("Trace triggered by excessive resubmit "
"recursion:\n%s", ds_cstr(&ds));
}
}
if (ctx->ofproto->has_mirrors) {
- add_mirror_actions(ctx, &ctx->orig_flow);
+ add_mirror_actions(ctx, &orig_flow);
}
fix_sflow_action(ctx);
}