ofproto_dpif_unixctl_init();
ofproto->has_mirrors = false;
- ofproto->has_bundle_action = false;
-
hmap_init(&ofproto->vlandev_map);
hmap_init(&ofproto->realdev_vid_map);
}
netflow_destroy(ofproto->netflow);
- dpif_sflow_destroy(ofproto->sflow);
+ dpif_sflow_unref(ofproto->sflow);
hmap_destroy(&ofproto->bundles);
- mac_learning_destroy(ofproto->ml);
+ mac_learning_unref(ofproto->ml);
classifier_destroy(&ofproto->facets);
}
hmap_insert(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node,
- hash_int(odp_to_u32(port->odp_port), 0));
+ hash_odp_port(port->odp_port));
}
dpif_port_destroy(&dpif_port);
char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
const char *dp_port_name;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+
dp_port_name = netdev_vport_get_dpif_port(port->up.netdev, namebuf,
sizeof namebuf);
if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
if (!port->tnl_port) {
dpif_port_del(ofproto->backer->dpif, port->odp_port);
}
- ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
if (port->peer) {
tnl_port_del(port->tnl_port);
sset_find_and_delete(&ofproto->ports, devname);
sset_find_and_delete(&ofproto->ghost_ports, devname);
- ofproto->backer->need_revalidate = REV_RECONFIGURE;
bundle_remove(port_);
set_cfm(port_, NULL);
set_bfd(port_, NULL);
if (port->tnl_port && tnl_port_reconfigure(&port->up, port->odp_port,
&port->tnl_port)) {
- ofproto_dpif_cast(port->up.ofproto)->backer->need_revalidate = true;
+ ofproto_dpif_cast(port->up.ofproto)->backer->need_revalidate =
+ REV_RECONFIGURE;
}
ofport_update_peer(port);
dpif_sflow_set_options(ds, sflow_options);
} else {
if (ds) {
- dpif_sflow_destroy(ds);
+ dpif_sflow_unref(ds);
ofproto->backer->need_revalidate = REV_RECONFIGURE;
ofproto->sflow = NULL;
}
n_flow_exporters_options);
} else {
if (di) {
- dpif_ipfix_destroy(di);
+ dpif_ipfix_unref(di);
ofproto->ipfix = NULL;
}
}
error = EINVAL;
}
- cfm_destroy(ofport->cfm);
+ cfm_unref(ofport->cfm);
ofport->cfm = NULL;
return error;
}
hmap_remove(&ofproto->bundles, &bundle->hmap_node);
free(bundle->name);
free(bundle->trunks);
- lacp_destroy(bundle->lacp);
- bond_destroy(bundle->bond);
+ lacp_unref(bundle->lacp);
+ bond_unref(bundle->bond);
free(bundle);
}
}
lacp_configure(bundle->lacp, s->lacp);
} else {
- lacp_destroy(bundle->lacp);
+ lacp_unref(bundle->lacp);
bundle->lacp = NULL;
}
bond_slave_register(bundle->bond, port, port->up.netdev);
}
} else {
- bond_destroy(bundle->bond);
+ bond_unref(bundle->bond);
bundle->bond = NULL;
}
if (list_is_empty(&bundle->ports)) {
bundle_destroy(bundle);
} else if (list_is_short(&bundle->ports)) {
- bond_destroy(bundle->bond);
+ bond_unref(bundle->bond);
bundle->bond = NULL;
}
}
}
backer = ofproto_dpif_cast(ofport->up.ofproto)->backer;
- backer->need_revalidate = true;
+ backer->need_revalidate = REV_RECONFIGURE;
if (ofport->peer) {
ofport->peer->peer = NULL;
if (ofport->may_enable != enable) {
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
-
- if (ofproto->has_bundle_action) {
- ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
- }
+ ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
}
ofport->may_enable = enable;
struct xlate_out xout;
bool xout_garbage; /* 'xout' needs to be uninitialized? */
+ struct ofpbuf mask; /* Flow mask for "put" ops. */
+ struct odputil_keybuf maskbuf;
+
/* If this is a "put" op, then a pointer to the subfacet that should
* be marked as uninstalled if the operation fails. */
struct subfacet *subfacet;
op->dpif_op.u.execute.key = miss->key;
op->dpif_op.u.execute.key_len = miss->key_len;
op->dpif_op.u.execute.packet = packet;
+ ofpbuf_use_stack(&op->mask, &op->maskbuf, sizeof op->maskbuf);
}
/* Helper for handle_flow_miss_without_facet() and
struct dpif_backer *backer = miss->ofproto->backer;
uint32_t hash;
+ switch (flow_miss_model) {
+ case OFPROTO_HANDLE_MISS_AUTO:
+ break;
+ case OFPROTO_HANDLE_MISS_WITH_FACETS:
+ return true;
+ case OFPROTO_HANDLE_MISS_WITHOUT_FACETS:
+ return false;
+ }
+
if (!backer->governor) {
size_t n_subfacets;
subfacet->path = want_path;
+ ofpbuf_use_stack(&op->mask, &op->maskbuf, sizeof op->maskbuf);
+ odp_flow_key_from_mask(&op->mask, &facet->xout.wc.masks,
+ &miss->flow, UINT32_MAX);
+
op->xout_garbage = false;
op->dpif_op.type = DPIF_OP_FLOW_PUT;
op->subfacet = subfacet;
put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
put->key = miss->key;
put->key_len = miss->key_len;
- put->mask = NULL;
- put->mask_len = 0;
+ put->mask = op->mask.data;
+ put->mask_len = op->mask.size;
+
if (want_path == SF_FAST_PATH) {
put->actions = facet->xout.odp_actions.data;
put->actions_len = facet->xout.odp_actions.size;
{
const struct dpif_flow_stats *stats;
struct dpif_flow_dump dump;
- const struct nlattr *key;
- size_t key_len;
+ const struct nlattr *key, *mask;
+ size_t key_len, mask_len;
dpif_flow_dump_start(&dump, backer->dpif);
while (dpif_flow_dump_next(&dump, &key, &key_len,
- NULL, NULL, NULL, NULL, &stats)) {
+ &mask, &mask_len, NULL, NULL, &stats)) {
struct subfacet *subfacet;
uint32_t key_hash;
enum subfacet_path path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
const struct nlattr *actions = odp_actions->data;
size_t actions_len = odp_actions->size;
+ struct odputil_keybuf maskbuf;
+ struct ofpbuf mask;
uint64_t slow_path_stub[128 / 8];
enum dpif_flow_put_flags flags;
&actions, &actions_len);
}
- ret = dpif_flow_put(ofproto->backer->dpif, flags, subfacet->key,
- subfacet->key_len, NULL, 0,
+ ofpbuf_use_stack(&mask, &maskbuf, sizeof maskbuf);
+ odp_flow_key_from_mask(&mask, &facet->xout.wc.masks,
+ &facet->flow, UINT32_MAX);
+
+ ret = dpif_flow_put(subfacet->backer->dpif, flags, subfacet->key,
+ subfacet->key_len, mask.data, mask.size,
actions, actions_len, stats);
if (stats) {
}
if (wc) {
+ memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
}
}
}
+/* Returns the revalidation tag to associate with 'flow' for table
+ * 'table_id' within 'ofproto'.
+ *
+ * Table 0 and out-of-range table IDs never yield a tag (returns 0), and
+ * neither does a table whose 'other_table' classifier is null.  Otherwise,
+ * if 'rule' is nonnull and already carries a nonzero cached tag, that tag
+ * is reused; if not, a tag is computed from 'flow' with the table's
+ * 'other_table' mask and hash 'basis' via rule_calculate_tag().
+ *
+ * NOTE(review): the exact meaning of 'other_table' (presumably the
+ * non-exact-match classifier used for tag-based optimized revalidation)
+ * is defined elsewhere in this file — confirm against the table_dpif
+ * declaration. */
+tag_type
+calculate_flow_tag(struct ofproto_dpif *ofproto, const struct flow *flow,
+ uint8_t table_id, struct rule_dpif *rule)
+{
+ if (table_id > 0 && table_id < N_TABLES) {
+ struct table_dpif *table = &ofproto->tables[table_id];
+ if (table->other_table) {
+ /* Prefer the rule's cached tag; fall back to computing one from
+ * the flow, the table's wildcard mask, and its hash basis. */
+ return (rule && rule->tag
+ ? rule->tag
+ : rule_calculate_tag(flow, &table->other_table->mask,
+ table->basis));
+ }
+ }
+
+ /* No tag: table 0, invalid table ID, or no 'other_table' classifier. */
+ return 0;
+}
\f
/* Optimized flow revalidation.
*
ds_put_cstr(&ds, " port VLAN MAC Age\n");
LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
struct ofbundle *bundle = e->port.p;
- ds_put_format(&ds, "%5d %4d "ETH_ADDR_FMT" %3d\n",
- ofbundle_get_a_port(bundle)->odp_port,
- e->vlan, ETH_ADDR_ARGS(e->mac),
+ char name[OFP_MAX_PORT_NAME_LEN];
+
+ ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
+ name, sizeof name);
+ ds_put_format(&ds, "%5s %4d "ETH_ADDR_FMT" %3d\n",
+ name, e->vlan, ETH_ADDR_ARGS(e->mac),
mac_entry_age(ofproto->ml, e));
}
unixctl_command_reply(conn, ds_cstr(&ds));
struct vlan_splinter *vsp;
HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node,
- hash_int(ofp_to_u16(vlandev_ofp_port), 0),
+ hash_ofp_port(vlandev_ofp_port),
&ofproto->vlandev_map) {
if (vsp->vlandev_ofp_port == vlandev_ofp_port) {
return vsp;
vsp = xmalloc(sizeof *vsp);
hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node,
- hash_int(ofp_to_u16(port->up.ofp_port), 0));
+ hash_ofp_port(port->up.ofp_port));
hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node,
hash_realdev_vid(realdev_ofp_port, vid));
vsp->realdev_ofp_port = realdev_ofp_port;
{
struct ofport_dpif *port;
- HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node,
- hash_int(odp_to_u32(odp_port), 0),
+ HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node, hash_odp_port(odp_port),
&backer->odp_to_ofport_map) {
if (port->odp_port == odp_port) {
return port;
forward_bpdu_changed,
set_mac_table_config,
set_realdev,
+ NULL, /* meter_get_features */
+ NULL, /* meter_set */
+ NULL, /* meter_get */
+ NULL, /* meter_del */
};