#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
+#include "netdev-vport.h"
#include "netdev.h"
#include "netlink.h"
#include "nx-match.h"
static void xlate_actions_for_side_effects(struct action_xlate_ctx *,
const struct ofpact *ofpacts,
size_t ofpacts_len);
+static void xlate_table_action(struct action_xlate_ctx *, uint16_t in_port,
+ uint8_t table_id, bool may_packet_in);
static size_t put_userspace_action(const struct ofproto_dpif *,
struct ofpbuf *odp_actions,
struct dpif *dpif;
struct timer next_expiration;
struct hmap odp_to_ofport_map; /* ODP port to ofport mapping. */
+
+ /* Facet revalidation flags applying to facets which use this backer. */
+ enum revalidate_reason need_revalidate; /* Revalidate every facet. */
+ struct tag_set revalidate_set; /* Revalidate only matching facets. */
};
/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
/* Revalidation. */
struct table_dpif tables[N_TABLES];
- enum revalidate_reason need_revalidate;
- struct tag_set revalidate_set;
/* Support for debugging async flow mods. */
struct list completions;
struct hmap vlandev_map; /* vlandev -> (realdev,vid). */
/* Ports. */
- struct sset ports; /* Set of port names. */
+ struct sset ports; /* Set of standard port names. */
+ struct sset ghost_ports; /* Ports with no datapath port. */
struct sset port_poll_set; /* Queued names for port_poll() reply. */
int port_poll_errno; /* Last errno for port_poll() reply. */
};
dpif_run(backer->dpif);
+    if (backer->need_revalidate
+        || !tag_set_is_empty(&backer->revalidate_set)) {
+        /* Snapshot the flags so facet_revalidate() below may safely set new
+         * ones for a later pass. */
+        struct tag_set revalidate_set = backer->revalidate_set;
+        bool need_revalidate = backer->need_revalidate;
+        struct ofproto_dpif *ofproto;
+
+        switch (backer->need_revalidate) {
+        case REV_RECONFIGURE: COVERAGE_INC(rev_reconfigure); break;
+        case REV_STP: COVERAGE_INC(rev_stp); break;
+        case REV_PORT_TOGGLED: COVERAGE_INC(rev_port_toggled); break;
+        case REV_FLOW_TABLE: COVERAGE_INC(rev_flow_table); break;
+        case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
+        }
+
+        /* Clear the revalidation flags before iterating: clearing them
+         * inside the loop would leave them set forever when no ofproto uses
+         * this backer (the 'continue' would skip the clearing), causing a
+         * perpetual revalidation wake-up. */
+        tag_set_init(&backer->revalidate_set);
+        backer->need_revalidate = 0;
+
+        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+            struct facet *facet;
+
+            if (ofproto->backer != backer) {
+                continue;
+            }
+
+            /* Revalidate every facet when a full revalidation was requested;
+             * otherwise only facets tagged in the snapshot. */
+            HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
+                if (need_revalidate
+                    || tag_set_intersects(&revalidate_set, facet->tags)) {
+                    facet_revalidate(facet);
+                }
+            }
+        }
+    }
+
+
if (timer_expired(&backer->next_expiration)) {
int delay = expire(backer);
timer_set_duration(&backer->next_expiration, delay);
if (error) {
VLOG_ERR("failed to open datapath of type %s: %s", type,
strerror(error));
+ free(backer);
return error;
}
backer->refcount = 1;
hmap_init(&backer->odp_to_ofport_map);
timer_set_duration(&backer->next_expiration, 1000);
+ backer->need_revalidate = 0;
+ tag_set_init(&backer->revalidate_set);
*backerp = backer;
dpif_flow_flush(backer->dpif);
table->other_table = NULL;
table->basis = random_uint32();
}
- ofproto->need_revalidate = 0;
- tag_set_init(&ofproto->revalidate_set);
list_init(&ofproto->completions);
hmap_init(&ofproto->realdev_vid_map);
sset_init(&ofproto->ports);
+ sset_init(&ofproto->ghost_ports);
sset_init(&ofproto->port_poll_set);
ofproto->port_poll_errno = 0;
hmap_destroy(&ofproto->realdev_vid_map);
sset_destroy(&ofproto->ports);
+ sset_destroy(&ofproto->ghost_ports);
sset_destroy(&ofproto->port_poll_set);
close_dpif_backer(ofproto->backer);
}
stp_run(ofproto);
- mac_learning_run(ofproto->ml, &ofproto->revalidate_set);
-
- /* Now revalidate if there's anything to do. */
- if (ofproto->need_revalidate
- || !tag_set_is_empty(&ofproto->revalidate_set)) {
- struct tag_set revalidate_set = ofproto->revalidate_set;
- bool revalidate_all = ofproto->need_revalidate;
- struct facet *facet;
-
- switch (ofproto->need_revalidate) {
- case REV_RECONFIGURE: COVERAGE_INC(rev_reconfigure); break;
- case REV_STP: COVERAGE_INC(rev_stp); break;
- case REV_PORT_TOGGLED: COVERAGE_INC(rev_port_toggled); break;
- case REV_FLOW_TABLE: COVERAGE_INC(rev_flow_table); break;
- case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
- }
-
- /* Clear the revalidation flags. */
- tag_set_init(&ofproto->revalidate_set);
- ofproto->need_revalidate = 0;
-
- HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
- if (revalidate_all
- || tag_set_intersects(&revalidate_set, facet->tags)) {
- facet_revalidate(facet);
- }
- }
- }
+ mac_learning_run(ofproto->ml, &ofproto->backer->revalidate_set);
/* Check the consistency of a random facet, to aid debugging. */
- if (!hmap_is_empty(&ofproto->facets) && !ofproto->need_revalidate) {
+ if (!hmap_is_empty(&ofproto->facets)
+ && !ofproto->backer->need_revalidate) {
struct facet *facet;
facet = CONTAINER_OF(hmap_random_node(&ofproto->facets),
struct facet, hmap_node);
- if (!tag_set_intersects(&ofproto->revalidate_set, facet->tags)) {
+ if (!tag_set_intersects(&ofproto->backer->revalidate_set,
+ facet->tags)) {
if (!facet_check_consistency(facet)) {
- ofproto->need_revalidate = REV_INCONSISTENCY;
+ ofproto->backer->need_revalidate = REV_INCONSISTENCY;
}
}
}
if (ofproto->sflow) {
dpif_sflow_wait(ofproto->sflow);
}
- if (!tag_set_is_empty(&ofproto->revalidate_set)) {
+ if (!tag_set_is_empty(&ofproto->backer->revalidate_set)) {
poll_immediate_wake();
}
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
}
mac_learning_wait(ofproto->ml);
stp_wait(ofproto);
- if (ofproto->need_revalidate) {
+ if (ofproto->backer->need_revalidate) {
/* Shouldn't happen, but if it does just go around again. */
VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
poll_immediate_wake();
struct dpif_port dpif_port;
int error;
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
port->bundle = NULL;
port->cfm = NULL;
port->tag = tag_create_random();
port->vlandev_vid = 0;
port->carrier_seq = netdev_get_carrier_resets(port->up.netdev);
+ if (netdev_vport_is_patch(port->up.netdev)) {
+ /* XXX By bailing out here, we don't do required sFlow work. */
+ port->odp_port = OVSP_NONE;
+ return 0;
+ }
+
error = dpif_port_query_by_name(ofproto->backer->dpif,
netdev_get_name(port->up.netdev),
&dpif_port);
dpif_port_del(ofproto->backer->dpif, port->odp_port);
}
+ if (port->odp_port != OVSP_NONE) {
+ hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node);
+ }
+
sset_find_and_delete(&ofproto->ports, devname);
- hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node);
- ofproto->need_revalidate = REV_RECONFIGURE;
+ sset_find_and_delete(&ofproto->ghost_ports, devname);
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
bundle_remove(port_);
set_cfm(port_, NULL);
if (ofproto->sflow) {
if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD |
OFPUTIL_PC_NO_PACKET_IN)) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
bundle_update(port->bundle);
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
dpif_sflow_add_port(ds, &ofport->up, ofport->odp_port);
}
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
dpif_sflow_set_options(ds, sflow_options);
} else {
if (ds) {
dpif_sflow_destroy(ds);
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
ofproto->sflow = NULL;
}
}
struct ofproto_dpif *ofproto;
ofproto = ofproto_dpif_cast(ofport->up.ofproto);
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
ofport->cfm = cfm_create(netdev_get_name(ofport->up.netdev));
}
/* Only revalidate flows if the configuration changed. */
if (!s != !ofproto->stp) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
if (s) {
if (stp_learn_in_state(ofport->stp_state)
!= stp_learn_in_state(state)) {
/* xxx Learning action flows should also be flushed. */
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml,
+ &ofproto->backer->revalidate_set);
}
fwd_change = stp_forward_in_state(ofport->stp_state)
!= stp_forward_in_state(state);
- ofproto->need_revalidate = REV_STP;
+ ofproto->backer->need_revalidate = REV_STP;
ofport->stp_state = state;
ofport->stp_state_entered = time_msec();
}
if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
}
}
}
pdscp = xmalloc(sizeof *pdscp);
pdscp->priority = priority;
pdscp->dscp = dscp;
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
if (pdscp->dscp != dscp) {
pdscp->dscp = dscp;
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0));
if (!hmap_is_empty(&ofport->priorities)) {
ofport_clear_priorities(ofport);
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
hmap_swap(&new, &ofport->priorities);
struct mac_learning *ml = ofproto->ml;
struct mac_entry *mac, *next_mac;
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
if (mac->port.p == bundle) {
if (all_ofprotos) {
e = mac_learning_lookup(o->ml, mac->mac, mac->vlan,
NULL);
if (e) {
- tag_set_add(&o->revalidate_set, e->tag);
mac_learning_expire(o->ml, e);
}
}
{
struct ofbundle *bundle = port->bundle;
- bundle->ofproto->need_revalidate = REV_RECONFIGURE;
+ bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
list_remove(&port->bundle_node);
port->bundle = NULL;
}
if (port->bundle != bundle) {
- bundle->ofproto->need_revalidate = REV_RECONFIGURE;
+ bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
if (port->bundle) {
bundle_del_port(port);
}
}
}
if (lacp) {
- port->bundle->ofproto->need_revalidate = REV_RECONFIGURE;
+ bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
lacp_slave_register(bundle->lacp, port, lacp);
}
mirror_destroy(m);
} else if (hmapx_find_and_delete(&m->srcs, bundle)
|| hmapx_find_and_delete(&m->dsts, bundle)) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
}
}
/* LACP. */
if (s->lacp) {
if (!bundle->lacp) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
bundle->lacp = lacp_create();
}
lacp_configure(bundle->lacp, s->lacp);
bundle->ofproto->has_bonded_bundles = true;
if (bundle->bond) {
if (bond_reconfigure(bundle->bond, s->bond)) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
} else {
bundle->bond = bond_create(s->bond);
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
}
- bond_run(bundle->bond, &bundle->ofproto->revalidate_set,
+ bond_run(bundle->bond, &bundle->ofproto->backer->revalidate_set,
lacp_status(bundle->lacp));
if (bond_should_send_learning_packets(bundle->bond)) {
bundle_send_learning_packets(bundle);
}
}
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
ofproto->has_mirrors = true;
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml,
+ &ofproto->backer->revalidate_set);
mirror_update_dups(ofproto);
return 0;
}
ofproto = mirror->ofproto;
- ofproto->need_revalidate = REV_RECONFIGURE;
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
}
return 0;
}
forward_bpdu_changed(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
static void
ofproto_port->ofp_port = odp_port_to_ofp_port(ofproto, dpif_port->port_no);
}
+/* Returns the ofport_dpif whose netdev name equals the configured patch-port
+ * peer of 'ofport_dpif', searching every registered ofproto_dpif.  Returns
+ * NULL if 'ofport_dpif' has no peer configured (e.g. it is not a patch port)
+ * or if no port with the peer's name belongs to an ofproto_dpif bridge. */
+static struct ofport_dpif *
+ofport_get_peer(const struct ofport_dpif *ofport_dpif)
+{
+    const struct ofproto_dpif *ofproto;
+    const char *peer;
+
+    peer = netdev_vport_patch_peer(ofport_dpif->up.netdev);
+    if (!peer) {
+        return NULL;
+    }
+
+    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+        struct ofport *ofport;
+
+        ofport = shash_find_data(&ofproto->up.port_by_name, peer);
+        /* Only accept the match if it is backed by this provider; a port
+         * belonging to another ofproto class cannot be cast below. */
+        if (ofport && ofport->ofproto->ofproto_class == &ofproto_dpif_class) {
+            return ofport_dpif_cast(ofport);
+        }
+    }
+    return NULL;
+}
+
static void
port_run_fast(struct ofport_dpif *ofport)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
if (ofproto->has_bundle_action) {
- ofproto->need_revalidate = REV_PORT_TOGGLED;
+ ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
}
}
struct dpif_port dpif_port;
int error;
+ if (sset_contains(&ofproto->ghost_ports, devname)) {
+ const char *type = netdev_get_type_from_name(devname);
+
+ /* We may be called before ofproto->up.port_by_name is populated with
+ * the appropriate ofport. For this reason, we must get the name and
+ * type from the netdev layer directly. */
+ if (type) {
+ const struct ofport *ofport;
+
+ ofport = shash_find_data(&ofproto->up.port_by_name, devname);
+ ofproto_port->ofp_port = ofport ? ofport->ofp_port : OFPP_NONE;
+ ofproto_port->name = xstrdup(devname);
+ ofproto_port->type = xstrdup(type);
+ return 0;
+ }
+ return ENODEV;
+ }
+
if (!sset_contains(&ofproto->ports, devname)) {
return ENODEV;
}
uint32_t odp_port = UINT32_MAX;
int error;
+ if (netdev_vport_is_patch(netdev)) {
+ sset_add(&ofproto->ghost_ports, netdev_get_name(netdev));
+ return 0;
+ }
+
error = dpif_port_add(ofproto->backer->dpif, netdev, &odp_port);
if (!error) {
sset_add(&ofproto->ports, netdev_get_name(netdev));
struct port_dump_state {
uint32_t bucket;
uint32_t offset;
+ bool ghost;
};
static int
port_dump_start(const struct ofproto *ofproto_ OVS_UNUSED, void **statep)
{
-    struct port_dump_state *state;
-
-    *statep = state = xmalloc(sizeof *state);
-    state->bucket = 0;
-    state->offset = 0;
+    /* xzalloc() zero-initializes every field, including 'ghost', so the
+     * dump begins with the standard (non-ghost) port set at position 0. */
+    *statep = xzalloc(sizeof(struct port_dump_state));
    return 0;
}
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct port_dump_state *state = state_;
+ const struct sset *sset;
struct sset_node *node;
- while ((node = sset_at_position(&ofproto->ports, &state->bucket,
- &state->offset))) {
+ sset = state->ghost ? &ofproto->ghost_ports : &ofproto->ports;
+ while ((node = sset_at_position(sset, &state->bucket, &state->offset))) {
int error;
error = port_query_by_name(ofproto_, node->name, port);
}
}
+ if (!state->ghost) {
+ state->ghost = true;
+ state->bucket = 0;
+ state->offset = 0;
+ return port_dump_next(ofproto_, state_, port);
+ }
+
return EOF;
}
update_stats(backer);
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
- struct rule_dpif *rule, *next_rule;
- struct oftable *table;
+ struct rule *rule, *next_rule;
int dp_max_idle;
if (ofproto->backer != backer) {
/* Expire OpenFlow flows whose idle_timeout or hard_timeout
* has passed. */
- OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
- struct cls_cursor cursor;
-
- cls_cursor_init(&cursor, &table->cls, NULL);
- CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
- rule_expire(rule);
- }
+ LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
+ &ofproto->up.expirable) {
+ rule_expire(rule_dpif_cast(rule));
}
/* All outstanding data in existing flows has been accounted, so it's a
HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
if (bundle->bond) {
- bond_rebalance(bundle->bond, &ofproto->revalidate_set);
+ bond_rebalance(bundle->bond, &backer->revalidate_set);
}
}
}
}
/* Executes, within 'ofproto', the 'n_actions' actions in 'actions' on
- * 'packet', which arrived on 'in_port'.
- *
- * Takes ownership of 'packet'. */
+ * 'packet', which arrived on 'in_port'. */
static bool
execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
const struct nlattr *odp_actions, size_t actions_len,
error = dpif_execute(ofproto->backer->dpif, key.data, key.size,
odp_actions, actions_len, packet);
-
- ofpbuf_delete(packet);
return !error;
}
facet = facet_find(ofproto, flow, hash);
if (facet
- && (ofproto->need_revalidate
- || tag_set_intersects(&ofproto->revalidate_set, facet->tags))) {
+ && (ofproto->backer->need_revalidate
+ || tag_set_intersects(&ofproto->backer->revalidate_set,
+ facet->tags))) {
facet_revalidate(facet);
}
}
}
-static enum ofperr
-rule_execute(struct rule *rule_, const struct flow *flow,
- struct ofpbuf *packet)
+static void
+rule_dpif_execute(struct rule_dpif *rule, const struct flow *flow,
+ struct ofpbuf *packet)
{
- struct rule_dpif *rule = rule_dpif_cast(rule_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
struct dpif_flow_stats stats;
odp_actions.size, packet);
ofpbuf_uninit(&odp_actions);
+}
+/* ofproto-provider 'rule_execute' callback: executes 'rule' on 'packet' via
+ * rule_dpif_execute().  Takes ownership of 'packet' and frees it here, then
+ * reports success unconditionally. */
+static enum ofperr
+rule_execute(struct rule *rule, const struct flow *flow,
+             struct ofpbuf *packet)
+{
+    rule_dpif_execute(rule_dpif_cast(rule), flow, packet);
+    ofpbuf_delete(packet);
    return 0;
}
int error;
flow_extract(packet, 0, 0, NULL, OFPP_LOCAL, &flow);
+ if (netdev_vport_is_patch(ofport->up.netdev)) {
+ struct ofproto_dpif *peer_ofproto;
+ struct dpif_flow_stats stats;
+ struct ofport_dpif *peer;
+ struct rule_dpif *rule;
+
+ peer = ofport_get_peer(ofport);
+ if (!peer) {
+ return ENODEV;
+ }
+
+ dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);
+ netdev_vport_patch_inc_tx(ofport->up.netdev, &stats);
+ netdev_vport_patch_inc_rx(peer->up.netdev, &stats);
+
+ flow.in_port = peer->up.ofp_port;
+ peer_ofproto = ofproto_dpif_cast(peer->up.ofproto);
+ rule = rule_dpif_lookup(peer_ofproto, &flow);
+ rule_dpif_execute(rule, &flow, packet);
+
+ return 0;
+ }
+
odp_port = vsp_realdev_to_vlandev(ofproto, ofport->odp_port,
flow.vlan_tci);
if (odp_port != ofport->odp_port) {
bool check_stp)
{
const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
- uint32_t odp_port = ofp_port_to_odp_port(ctx->ofproto, ofp_port);
ovs_be16 flow_vlan_tci = ctx->flow.vlan_tci;
uint8_t flow_nw_tos = ctx->flow.nw_tos;
struct priority_to_dscp *pdscp;
- uint32_t out_port;
+ uint32_t out_port, odp_port;
+
+ /* If 'struct flow' gets additional metadata, we'll need to zero it out
+ * before traversing a patch port. */
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 18);
if (!ofport) {
xlate_report(ctx, "Nonexistent output port");
return;
}
+ if (netdev_vport_is_patch(ofport->up.netdev)) {
+ struct ofport_dpif *peer = ofport_get_peer(ofport);
+ struct flow old_flow = ctx->flow;
+ const struct ofproto_dpif *peer_ofproto;
+
+ if (!peer) {
+ xlate_report(ctx, "Nonexistent patch port peer");
+ return;
+ }
+
+ peer_ofproto = ofproto_dpif_cast(peer->up.ofproto);
+ if (peer_ofproto->backer != ctx->ofproto->backer) {
+ xlate_report(ctx, "Patch port peer on a different datapath");
+ return;
+ }
+
+ ctx->ofproto = ofproto_dpif_cast(peer->up.ofproto);
+ ctx->flow.in_port = peer->up.ofp_port;
+ ctx->flow.metadata = htonll(0);
+ memset(&ctx->flow.tunnel, 0, sizeof ctx->flow.tunnel);
+ memset(ctx->flow.regs, 0, sizeof ctx->flow.regs);
+ xlate_table_action(ctx, ctx->flow.in_port, 0, true);
+ ctx->flow = old_flow;
+ ctx->ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+
+ if (ctx->resubmit_stats) {
+ netdev_vport_patch_inc_tx(ofport->up.netdev, ctx->resubmit_stats);
+ netdev_vport_patch_inc_rx(peer->up.netdev, ctx->resubmit_stats);
+ }
+
+ return;
+ }
+
pdscp = get_priority(ofport, ctx->flow.skb_priority);
if (pdscp) {
ctx->flow.nw_tos &= ~IP_DSCP_MASK;
ctx->flow.nw_tos |= pdscp->dscp;
}
+ odp_port = ofp_port_to_odp_port(ctx->ofproto, ofp_port);
out_port = vsp_realdev_to_vlandev(ctx->ofproto, odp_port,
ctx->flow.vlan_tci);
if (out_port != odp_port) {
in_bundle->name, vlan);
mac->port.p = in_bundle;
- tag_set_add(&ofproto->revalidate_set,
+ tag_set_add(&ofproto->backer->revalidate_set,
mac_learning_changed(ofproto->ml, mac));
}
}
if (table->catchall_table != catchall || table->other_table != other) {
table->catchall_table = catchall;
table->other_table = other;
- ofproto->need_revalidate = REV_FLOW_TABLE;
+ ofproto->backer->need_revalidate = REV_FLOW_TABLE;
}
}
table_update_taggable(ofproto, rule->up.table_id);
- if (!ofproto->need_revalidate) {
+ if (!ofproto->backer->need_revalidate) {
struct table_dpif *table = &ofproto->tables[rule->up.table_id];
if (table->other_table && rule->tag) {
- tag_set_add(&ofproto->revalidate_set, rule->tag);
+ tag_set_add(&ofproto->backer->revalidate_set, rule->tag);
} else {
- ofproto->need_revalidate = REV_FLOW_TABLE;
+ ofproto->backer->need_revalidate = REV_FLOW_TABLE;
}
}
}
enum ofp_config_flags frag_handling)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
-
if (frag_handling != OFPC_FRAG_REASM) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
return true;
} else {
return false;
unixctl_command_reply_error(conn, "no such bridge");
return;
}
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
} else {
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
}
}
}
}
if (errors) {
- ofproto->need_revalidate = REV_INCONSISTENCY;
+ ofproto->backer->need_revalidate = REV_INCONSISTENCY;
}
if (errors) {
struct ofport *ofport = node->data;
const char *name = netdev_get_name(ofport->netdev);
const char *type = netdev_get_type(ofport->netdev);
+ uint32_t odp_port;
+
+ ds_put_format(ds, "\t%s %u/", name, ofport->ofp_port);
+
+ odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
+ if (odp_port != OVSP_NONE) {
+ ds_put_format(ds, "%"PRIu32":", odp_port);
+ } else {
+ ds_put_cstr(ds, "none:");
+ }
- ds_put_format(ds, "\t%s %u/%u:", name, ofport->ofp_port,
- ofp_port_to_odp_port(ofproto, ofport->ofp_port));
if (strcmp(type, "system")) {
struct netdev *netdev;
int error;
return;
}
+ update_stats(ofproto->backer);
+
HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
struct odputil_keybuf keybuf;
struct ofpbuf key;
return 0;
}
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
if (ofport->realdev_ofp_port) {
vsp_remove(ofport);