uint64_t packets, uint64_t bytes,
long long int used);
-static uint32_t rule_calculate_tag(const struct flow *,
+static tag_type rule_calculate_tag(const struct flow *,
const struct flow_wildcards *,
uint32_t basis);
static void rule_invalidate(const struct rule_dpif *);
uint64_t packets, uint64_t bytes);
struct ofbundle {
- struct ofproto_dpif *ofproto; /* Owning ofproto. */
struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
+ struct ofproto_dpif *ofproto; /* Owning ofproto. */
void *aux; /* Key supplied by ofproto's client. */
char *name; /* Identifier for log messages. */
bool use_priority_tags; /* Use 802.1p tag for frames in VLAN 0? */
/* Status. */
- bool floodable; /* True if no port has OFPPC_NO_FLOOD set. */
+ bool floodable; /* True if no port has OFPUTIL_PC_NO_FLOOD set. */
/* Port mirroring info. */
mirror_mask_t src_mirrors; /* Mirrors triggered when packet received. */
* revalidating without a packet to refer to. */
const struct ofpbuf *packet;
- /* Should OFPP_NORMAL MAC learning and NXAST_LEARN actions execute? We
- * want to execute them if we are actually processing a packet, or if we
- * are accounting for packets that the datapath has processed, but not if
- * we are just revalidating. */
- bool may_learn;
+ /* Should OFPP_NORMAL update the MAC learning table? We want to update it
+ * if we are actually processing a packet, or if we are accounting for
+ * packets that the datapath has processed, but not if we are just
+ * revalidating. */
+ bool may_learn_macs;
+
+ /* Should "learn" actions update the flow table? We want to update it if
+ * we are actually processing a packet, or in most cases if we are
+ * accounting for packets that the datapath has processed, but not if we
+ * are just revalidating. */
+ bool may_flow_mod;
/* The rule that we are currently translating, or NULL. */
struct rule_dpif *rule;
* timeouts.) */
uint8_t tcp_flags;
- /* If nonnull, called just before executing a resubmit action.
+ /* If nonnull, called just before executing a resubmit action. In
+ * addition, disables logging of traces when the recursion depth is
+ * exceeded.
*
* This is normally null so the client has to set it manually after
* calling action_xlate_ctx_init(). */
* reason to look at them. */
int recurse; /* Recursion level, via xlate_table_action. */
+ bool max_resubmit_trigger; /* Recursed too deeply during translation. */
struct flow base_flow; /* Flow at the last commit. */
uint32_t orig_skb_priority; /* Priority when packet arrived. */
uint8_t table_id; /* OpenFlow table ID where flow was found. */
static void facet_update_time(struct facet *, long long int used);
static void facet_reset_counters(struct facet *);
static void facet_push_stats(struct facet *);
-static void facet_account(struct facet *);
+static void facet_account(struct facet *, bool may_flow_mod);
static bool facet_is_controller_flow(struct facet *);
tag_type tag; /* Tag associated with this port. */
uint32_t bond_stable_id; /* stable_id to use as bond slave, or 0. */
bool may_enable; /* May be enabled in bonds. */
+ long long int carrier_seq; /* Carrier status changes. */
/* Spanning tree. */
struct stp_port *stp_port; /* Spanning Tree Protocol, if any. */
uint16_t ofp_port);
static struct ofport_dpif *get_odp_port(struct ofproto_dpif *,
uint32_t odp_port);
+static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
+ const struct ofpbuf *, ovs_be16 initial_tci,
+ struct ds *);
/* Packet processing. */
static void update_learning_table(struct ofproto_dpif *,
static void
get_features(struct ofproto *ofproto_ OVS_UNUSED,
- bool *arp_match_ip, uint32_t *actions)
+ bool *arp_match_ip, enum ofputil_action_bitmap *actions)
{
*arp_match_ip = true;
- *actions = ((1u << OFPAT_OUTPUT) |
- (1u << OFPAT_SET_VLAN_VID) |
- (1u << OFPAT_SET_VLAN_PCP) |
- (1u << OFPAT_STRIP_VLAN) |
- (1u << OFPAT_SET_DL_SRC) |
- (1u << OFPAT_SET_DL_DST) |
- (1u << OFPAT_SET_NW_SRC) |
- (1u << OFPAT_SET_NW_DST) |
- (1u << OFPAT_SET_NW_TOS) |
- (1u << OFPAT_SET_TP_SRC) |
- (1u << OFPAT_SET_TP_DST) |
- (1u << OFPAT_ENQUEUE));
+ *actions = (OFPUTIL_A_OUTPUT |
+ OFPUTIL_A_SET_VLAN_VID |
+ OFPUTIL_A_SET_VLAN_PCP |
+ OFPUTIL_A_STRIP_VLAN |
+ OFPUTIL_A_SET_DL_SRC |
+ OFPUTIL_A_SET_DL_DST |
+ OFPUTIL_A_SET_NW_SRC |
+ OFPUTIL_A_SET_NW_DST |
+ OFPUTIL_A_SET_NW_TOS |
+ OFPUTIL_A_SET_TP_SRC |
+ OFPUTIL_A_SET_TP_DST |
+ OFPUTIL_A_ENQUEUE);
}
static void
hmap_init(&port->priorities);
port->realdev_ofp_port = 0;
port->vlandev_vid = 0;
+ port->carrier_seq = netdev_get_carrier_resets(port->up.netdev);
if (ofproto->sflow) {
dpif_sflow_add_port(ofproto->sflow, port_);
}
static void
-port_reconfigured(struct ofport *port_, ovs_be32 old_config)
+port_reconfigured(struct ofport *port_, enum ofputil_port_config old_config)
{
struct ofport_dpif *port = ofport_dpif_cast(port_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
- ovs_be32 changed = old_config ^ port->up.opp.config;
+ enum ofputil_port_config changed = old_config ^ port->up.pp.config;
- if (changed & htonl(OFPPC_NO_RECV | OFPPC_NO_RECV_STP |
- OFPPC_NO_FWD | OFPPC_NO_FLOOD)) {
+ if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
+ OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD)) {
ofproto->need_revalidate = true;
- if (changed & htonl(OFPPC_NO_FLOOD) && port->bundle) {
+ if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
bundle_update(port->bundle);
}
}
/* Update state. */
if (ofport->stp_state != state) {
- ovs_be32 of_state;
+ enum ofputil_port_state of_state;
bool fwd_change;
VLOG_DBG_RL(&rl, "port %s: STP state changed from %s to %s",
}
/* Update the STP state bits in the OpenFlow port description. */
- of_state = (ofport->up.opp.state & htonl(~OFPPS_STP_MASK))
- | htonl(state == STP_LISTENING ? OFPPS_STP_LISTEN
- : state == STP_LEARNING ? OFPPS_STP_LEARN
- : state == STP_FORWARDING ? OFPPS_STP_FORWARD
- : state == STP_BLOCKING ? OFPPS_STP_BLOCK
- : 0);
+ of_state = ofport->up.pp.state & ~OFPUTIL_PS_STP_MASK;
+ of_state |= (state == STP_LISTENING ? OFPUTIL_PS_STP_LISTEN
+ : state == STP_LEARNING ? OFPUTIL_PS_STP_LEARN
+ : state == STP_FORWARDING ? OFPUTIL_PS_STP_FORWARD
+ : state == STP_BLOCKING ? OFPUTIL_PS_STP_BLOCK
+ : 0);
ofproto_port_set_state(&ofport->up, of_state);
}
}
bundle->floodable = true;
LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
- if (port->up.opp.config & htonl(OFPPC_NO_FLOOD)) {
+ if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
+ || !stp_forward_in_state(port->stp_state)) {
bundle->floodable = false;
break;
}
port->bundle = bundle;
list_push_back(&bundle->ports, &port->bundle_node);
- if (port->up.opp.config & htonl(OFPPC_NO_FLOOD)) {
+ if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
+ || !stp_forward_in_state(port->stp_state)) {
bundle->floodable = false;
}
}
static void
port_run(struct ofport_dpif *ofport)
{
+ long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
+ bool carrier_changed = carrier_seq != ofport->carrier_seq;
bool enable = netdev_get_carrier(ofport->up.netdev);
+ ofport->carrier_seq = carrier_seq;
+
if (ofport->cfm) {
cfm_run(ofport->cfm);
struct ofpbuf packet;
ofpbuf_init(&packet, 0);
- cfm_compose_ccm(ofport->cfm, &packet, ofport->up.opp.hw_addr);
+ cfm_compose_ccm(ofport->cfm, &packet, ofport->up.pp.hw_addr);
send_packet(ofport, &packet);
ofpbuf_uninit(&packet);
}
if (ofport->bundle) {
enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
+ if (carrier_changed) {
+ lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
+ }
}
if (ofport->may_enable != enable) {
pin.packet = packet->data;
pin.packet_len = packet->size;
- pin.total_len = packet->size;
pin.reason = OFPR_NO_MATCH;
pin.controller_id = 0;
pin.table_id = 0;
pin.cookie = 0;
- pin.buffer_id = 0; /* not yet known */
pin.send_len = 0; /* not used for flow table misses */
flow_get_metadata(flow, &pin.fmd);
/* Registers aren't meaningful on a miss. */
memset(pin.fmd.reg_masks, 0, sizeof pin.fmd.reg_masks);
- connmgr_send_packet_in(ofproto->up.connmgr, &pin, flow);
+ connmgr_send_packet_in(ofproto->up.connmgr, &pin);
}
static bool
rule = rule_dpif_lookup(ofproto, flow, 0);
if (!rule) {
- /* Don't send a packet-in if OFPPC_NO_PACKET_IN asserted. */
+ /* Don't send a packet-in if OFPUTIL_PC_NO_PACKET_IN asserted. */
struct ofport_dpif *port = get_ofp_port(ofproto, flow->in_port);
if (port) {
- if (port->up.opp.config & htonl(OFPPC_NO_PACKET_IN)) {
+ if (port->up.pp.config & OFPUTIL_PC_NO_PACKET_IN) {
COVERAGE_INC(ofproto_dpif_no_packet_in);
/* XXX install 'drop' flow entry */
return;
facet->tcp_flags |= stats->tcp_flags;
subfacet_update_time(subfacet, stats->used);
- facet_account(facet);
+ facet_account(facet, true);
facet_push_stats(facet);
} else {
if (!VLOG_DROP_WARN(&rl)) {
}
static void
-facet_account(struct facet *facet)
+facet_account(struct facet *facet, bool may_flow_mod)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
uint64_t n_bytes;
action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
facet->flow.vlan_tci,
facet->rule, facet->tcp_flags, NULL);
- ctx.may_learn = true;
+ ctx.may_learn_macs = true;
+ ctx.may_flow_mod = may_flow_mod;
ofpbuf_delete(xlate_actions(&ctx, facet->rule->up.actions,
facet->rule->up.n_actions));
}
}
facet_push_stats(facet);
- facet_account(facet);
+ facet_account(facet, false);
if (ofproto->netflow && !facet_is_controller_flow(facet)) {
struct ofexpired expired;
if (ofport) {
struct priority_to_dscp *pdscp;
- if (ofport->up.opp.config & htonl(OFPPC_NO_FWD)
+ if (ofport->up.pp.config & OFPUTIL_PC_NO_FWD
|| (check_stp && !stp_forward_in_state(ofport->stp_state))) {
return;
}
if (table_id > 0 && table_id < N_TABLES) {
struct table_dpif *table = &ofproto->tables[table_id];
if (table->other_table) {
- ctx->tags |= (rule
+ ctx->tags |= (rule && rule->tag
? rule->tag
: rule_calculate_tag(&ctx->flow,
&table->other_table->wc,
VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
MAX_RESUBMIT_RECURSION);
+ ctx->max_resubmit_trigger = true;
}
}
if (all) {
compose_output_action__(ctx, ofp_port, false);
- } else if (!(ofport->up.opp.config & htonl(OFPPC_NO_FLOOD))) {
+ } else if (!(ofport->up.pp.config & OFPUTIL_PC_NO_FLOOD)) {
compose_output_action(ctx, ofp_port);
}
}
eth_pop_vlan(packet);
eh = packet->l2;
- assert(eh->eth_type == ctx->flow.dl_type);
+
+ /* If the Ethernet type is less than ETH_TYPE_MIN, it's likely an 802.2
+ * LLC frame. Calculating the Ethernet type of these frames is more
+ * trouble than seems appropriate for a simple assertion. */
+ assert(ntohs(eh->eth_type) < ETH_TYPE_MIN
+ || eh->eth_type == ctx->flow.dl_type);
+
memcpy(eh->eth_src, ctx->flow.dl_src, sizeof eh->eth_src);
memcpy(eh->eth_dst, ctx->flow.dl_dst, sizeof eh->eth_dst);
pin.table_id = ctx->table_id;
pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;
- pin.buffer_id = 0;
pin.send_len = len;
- pin.total_len = packet->size;
flow_get_metadata(&ctx->flow, &pin.fmd);
- connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin, &ctx->flow);
+ connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin);
ofpbuf_delete(packet);
}
static bool
may_receive(const struct ofport_dpif *port, struct action_xlate_ctx *ctx)
{
- if (port->up.opp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
- ? htonl(OFPPC_NO_RECV_STP)
- : htonl(OFPPC_NO_RECV))) {
+ if (port->up.pp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
+ ? OFPUTIL_PC_NO_RECV_STP
+ : OFPUTIL_PC_NO_RECV)) {
return false;
}
code = ofputil_decode_action_unsafe(ia);
switch (code) {
- case OFPUTIL_OFPAT_OUTPUT:
+ case OFPUTIL_OFPAT10_OUTPUT:
xlate_output_action(ctx, &ia->output);
break;
- case OFPUTIL_OFPAT_SET_VLAN_VID:
+ case OFPUTIL_OFPAT10_SET_VLAN_VID:
ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
ctx->flow.vlan_tci |= ia->vlan_vid.vlan_vid | htons(VLAN_CFI);
break;
- case OFPUTIL_OFPAT_SET_VLAN_PCP:
+ case OFPUTIL_OFPAT10_SET_VLAN_PCP:
ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
ctx->flow.vlan_tci |= htons(
(ia->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
break;
- case OFPUTIL_OFPAT_STRIP_VLAN:
+ case OFPUTIL_OFPAT10_STRIP_VLAN:
ctx->flow.vlan_tci = htons(0);
break;
- case OFPUTIL_OFPAT_SET_DL_SRC:
+ case OFPUTIL_OFPAT10_SET_DL_SRC:
oada = ((struct ofp_action_dl_addr *) ia);
memcpy(ctx->flow.dl_src, oada->dl_addr, ETH_ADDR_LEN);
break;
- case OFPUTIL_OFPAT_SET_DL_DST:
+ case OFPUTIL_OFPAT10_SET_DL_DST:
oada = ((struct ofp_action_dl_addr *) ia);
memcpy(ctx->flow.dl_dst, oada->dl_addr, ETH_ADDR_LEN);
break;
- case OFPUTIL_OFPAT_SET_NW_SRC:
+ case OFPUTIL_OFPAT10_SET_NW_SRC:
ctx->flow.nw_src = ia->nw_addr.nw_addr;
break;
- case OFPUTIL_OFPAT_SET_NW_DST:
+ case OFPUTIL_OFPAT10_SET_NW_DST:
ctx->flow.nw_dst = ia->nw_addr.nw_addr;
break;
- case OFPUTIL_OFPAT_SET_NW_TOS:
+ case OFPUTIL_OFPAT10_SET_NW_TOS:
/* OpenFlow 1.0 only supports IPv4. */
if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
ctx->flow.nw_tos &= ~IP_DSCP_MASK;
}
break;
- case OFPUTIL_OFPAT_SET_TP_SRC:
+ case OFPUTIL_OFPAT10_SET_TP_SRC:
ctx->flow.tp_src = ia->tp_port.tp_port;
break;
- case OFPUTIL_OFPAT_SET_TP_DST:
+ case OFPUTIL_OFPAT10_SET_TP_DST:
ctx->flow.tp_dst = ia->tp_port.tp_port;
break;
- case OFPUTIL_OFPAT_ENQUEUE:
+ case OFPUTIL_OFPAT10_ENQUEUE:
xlate_enqueue_action(ctx, (const struct ofp_action_enqueue *) ia);
break;
case OFPUTIL_NXAST_LEARN:
ctx->has_learn = true;
- if (ctx->may_learn) {
+ if (ctx->may_flow_mod) {
xlate_learn_action(ctx, (const struct nx_action_learn *) ia);
}
break;
ctx->base_flow.vlan_tci = initial_tci;
ctx->rule = rule;
ctx->packet = packet;
- ctx->may_learn = packet != NULL;
+ ctx->may_learn_macs = packet != NULL;
+ ctx->may_flow_mod = packet != NULL;
ctx->tcp_flags = tcp_flags;
ctx->resubmit_hook = NULL;
}
ctx->nf_output_iface = NF_OUT_DROP;
ctx->mirrors = 0;
ctx->recurse = 0;
+ ctx->max_resubmit_trigger = false;
ctx->orig_skb_priority = ctx->flow.skb_priority;
ctx->table_id = 0;
ctx->exit = false;
ctx->may_set_up_flow = false;
return ctx->odp_actions;
} else {
+ static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
+ struct flow original_flow = ctx->flow;
+ ovs_be16 initial_tci = ctx->base_flow.vlan_tci;
+
add_sflow_action(ctx);
do_xlate_actions(in, n_in, ctx);
+ if (ctx->max_resubmit_trigger && !ctx->resubmit_hook
+ && !VLOG_DROP_ERR(&trace_rl)) {
+ struct ds ds = DS_EMPTY_INITIALIZER;
+
+ ofproto_trace(ctx->ofproto, &original_flow, ctx->packet,
+ initial_tci, &ds);
+ VLOG_ERR("Trace triggered by excessive resubmit recursion:\n%s",
+ ds_cstr(&ds));
+ ds_destroy(&ds);
+ }
+
if (!connmgr_may_set_up_flow(ctx->ofproto->up.connmgr, &ctx->flow,
ctx->odp_actions->data,
ctx->odp_actions->size)) {
}
/* Learn source MAC. */
- if (ctx->may_learn) {
+ if (ctx->may_learn_macs) {
update_learning_table(ctx->ofproto, &ctx->flow, vlan, in_bundle);
}
/* Calculates the tag to use for 'flow' and wildcards 'wc' when it is inserted
* into an OpenFlow table with the given 'basis'. */
-static uint32_t
+static tag_type
rule_calculate_tag(const struct flow *flow, const struct flow_wildcards *wc,
uint32_t secret)
{
ds_destroy(&ds);
}
-struct ofproto_trace {
+struct trace_ctx {
struct action_xlate_ctx ctx;
struct flow flow;
struct ds *result;
static void
trace_format_flow(struct ds *result, int level, const char *title,
- struct ofproto_trace *trace)
+ struct trace_ctx *trace)
{
ds_put_char_multiple(result, '\t', level);
ds_put_format(result, "%s: ", title);
static void
trace_format_regs(struct ds *result, int level, const char *title,
- struct ofproto_trace *trace)
+ struct trace_ctx *trace)
{
size_t i;
static void
trace_format_odp(struct ds *result, int level, const char *title,
- struct ofproto_trace *trace)
+ struct trace_ctx *trace)
{
struct ofpbuf *odp_actions = trace->ctx.odp_actions;
static void
trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
{
- struct ofproto_trace *trace = CONTAINER_OF(ctx, struct ofproto_trace, ctx);
+ struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
struct ds *result = trace->result;
ds_put_char(result, '\n');
struct ofproto_dpif *ofproto;
struct ofpbuf odp_key;
struct ofpbuf *packet;
- struct rule_dpif *rule;
ovs_be16 initial_tci;
struct ds result;
struct flow flow;
goto exit;
}
- ds_put_cstr(&result, "Flow: ");
- flow_format(&result, &flow);
- ds_put_char(&result, '\n');
+ ofproto_trace(ofproto, &flow, packet, initial_tci, &result);
+ unixctl_command_reply(conn, ds_cstr(&result));
+
+exit:
+ ds_destroy(&result);
+ ofpbuf_delete(packet);
+ ofpbuf_uninit(&odp_key);
+}
+
+static void
+ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
+ const struct ofpbuf *packet, ovs_be16 initial_tci,
+ struct ds *ds)
+{
+ struct rule_dpif *rule;
- rule = rule_dpif_lookup(ofproto, &flow, 0);
- trace_format_rule(&result, 0, 0, rule);
+ ds_put_cstr(ds, "Flow: ");
+ flow_format(ds, flow);
+ ds_put_char(ds, '\n');
+
+ rule = rule_dpif_lookup(ofproto, flow, 0);
+ trace_format_rule(ds, 0, 0, rule);
if (rule) {
- struct ofproto_trace trace;
+ struct trace_ctx trace;
struct ofpbuf *odp_actions;
uint8_t tcp_flags;
- tcp_flags = packet ? packet_get_tcp_flags(packet, &flow) : 0;
- trace.result = &result;
- trace.flow = flow;
- action_xlate_ctx_init(&trace.ctx, ofproto, &flow, initial_tci,
+ tcp_flags = packet ? packet_get_tcp_flags(packet, flow) : 0;
+ trace.result = ds;
+ trace.flow = *flow;
+ action_xlate_ctx_init(&trace.ctx, ofproto, flow, initial_tci,
rule, tcp_flags, packet);
trace.ctx.resubmit_hook = trace_resubmit;
odp_actions = xlate_actions(&trace.ctx,
rule->up.actions, rule->up.n_actions);
- ds_put_char(&result, '\n');
- trace_format_flow(&result, 0, "Final flow", &trace);
- ds_put_cstr(&result, "Datapath actions: ");
- format_odp_actions(&result, odp_actions->data, odp_actions->size);
+ ds_put_char(ds, '\n');
+ trace_format_flow(ds, 0, "Final flow", &trace);
+ ds_put_cstr(ds, "Datapath actions: ");
+ format_odp_actions(ds, odp_actions->data, odp_actions->size);
ofpbuf_delete(odp_actions);
if (!trace.ctx.may_set_up_flow) {
if (packet) {
- ds_put_cstr(&result, "\nThis flow is not cachable.");
+ ds_put_cstr(ds, "\nThis flow is not cachable.");
} else {
- ds_put_cstr(&result, "\nThe datapath actions are incomplete--"
+ ds_put_cstr(ds, "\nThe datapath actions are incomplete--"
"for complete actions, please supply a packet.");
}
}
}
-
- unixctl_command_reply(conn, ds_cstr(&result));
-
-exit:
- ds_destroy(&result);
- ofpbuf_delete(packet);
- ofpbuf_uninit(&odp_key);
}
static void
return hash_2words(realdev_ofp_port, vid);
}
+/* Returns the ODP port number of the Linux VLAN device that corresponds to
+ * 'vlan_tci' on the network device with port number 'realdev_odp_port' in
+ * 'ofproto'. For example, given 'realdev_odp_port' of eth0 and 'vlan_tci' 9,
+ * it would return the port number of eth0.9.
+ *
+ * Unless VLAN splinters are enabled for port 'realdev_odp_port', this
+ * function just returns its 'realdev_odp_port' argument. */
static uint32_t
vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
uint32_t realdev_odp_port, ovs_be16 vlan_tci)
return NULL;
}
+/* Returns the OpenFlow port number of the "real" device underlying the Linux
+ * VLAN device with OpenFlow port number 'vlandev_ofp_port' and stores the
+ * VLAN VID of the Linux VLAN device in '*vid'. For example, given
+ * 'vlandev_ofp_port' of eth0.9, it would return the OpenFlow port number of
+ * eth0 and store 9 in '*vid'.
+ *
+ * Returns 0 and does not modify '*vid' if 'vlandev_ofp_port' is not a Linux
+ * VLAN device. Unless VLAN splinters are enabled, this is what this function
+ * always does. */
static uint16_t
vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto,
- uint16_t vlandev_ofp_port, int *vid)
+ uint16_t vlandev_ofp_port, int *vid)
{
if (!hmap_is_empty(&ofproto->vlandev_map)) {
const struct vlan_splinter *vsp;