struct ofport_dpif;
struct ofproto_dpif;
struct flow_miss;
+struct facet;
struct rule_dpif {
struct rule up;
static void rule_credit_stats(struct rule_dpif *,
const struct dpif_flow_stats *);
-static void flow_push_stats(struct rule_dpif *, const struct flow *,
- const struct dpif_flow_stats *);
+static void flow_push_stats(struct facet *, const struct dpif_flow_stats *);
static tag_type rule_calculate_tag(const struct flow *,
const struct minimask *, uint32_t basis);
static void rule_invalidate(const struct rule_dpif *);
* this flow when actions change header fields. */
struct flow flow;
+ /* stack for the push and pop actions.
+ * Each stack element is of the type "union mf_subvalue". */
+ struct ofpbuf stack;
+ union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
+
/* The packet corresponding to 'flow', or a null pointer if we are
* revalidating without a packet to refer to. */
const struct ofpbuf *packet;
bool exit; /* No further actions should be processed. */
};
+/* Initial values of fields of the packet that may be changed during
+ * flow processing and needed later.
+ *
+ * A copy of this struct is stored per-subfacet so that actions can later be
+ * re-translated (e.g. on revalidation) exactly as they were for the packet
+ * as originally received. */
+struct initial_vals {
+    /* This is the value of vlan_tci in the packet as actually received from
+     * dpif. This is the same as the facet's flow.vlan_tci unless the packet
+     * was received via a VLAN splinter. In that case, this value is 0
+     * (because the packet as actually received from the dpif had no 802.1Q
+     * tag) but the facet's flow.vlan_tci is set to the VLAN that the splinter
+     * represents.
+     *
+     * This member should be removed when the VLAN splinters feature is no
+     * longer needed. */
+    ovs_be16 vlan_tci;
+
+    /* If received on a tunnel, the IP TOS value of the tunnel.
+     * (Presumably 0 when the packet was not received over a tunnel --
+     * callers that build this struct by hand set it from
+     * flow->tunnel.ip_tos or to a literal 0.) */
+    uint8_t tunnel_ip_tos;
+};
+
static void action_xlate_ctx_init(struct action_xlate_ctx *,
struct ofproto_dpif *, const struct flow *,
- ovs_be16 initial_tci, struct rule_dpif *,
+ const struct initial_vals *initial_vals,
+ struct rule_dpif *,
uint8_t tcp_flags, const struct ofpbuf *);
static void xlate_actions(struct action_xlate_ctx *,
const struct ofpact *ofpacts, size_t ofpacts_len,
struct list list_node; /* In struct facet's 'facets' list. */
struct facet *facet; /* Owning facet. */
- /* Key.
- *
- * To save memory in the common case, 'key' is NULL if 'key_fitness' is
- * ODP_FIT_PERFECT, that is, odp_flow_key_from_flow() can accurately
- * regenerate the ODP flow key from ->facet->flow. */
enum odp_key_fitness key_fitness;
struct nlattr *key;
int key_len;
enum slow_path_reason slow; /* 0 if fast path may be used. */
enum subfacet_path path; /* Installed in datapath? */
- /* This value is normally the same as ->facet->flow.vlan_tci. Only VLAN
- * splinters can cause it to differ. This value should be removed when
- * the VLAN splinters feature is no longer needed. */
- ovs_be16 initial_tci; /* Initial VLAN TCI value. */
+ /* Initial values of the packet that may be needed later. */
+ struct initial_vals initial_vals;
/* Datapath port the packet arrived on. This is needed to remove
* flows for ports that are no longer part of the bridge. Since the
long long int now);
static struct subfacet *subfacet_find(struct ofproto_dpif *,
const struct nlattr *key, size_t key_len,
- uint32_t key_hash,
- const struct flow *flow);
+ uint32_t key_hash);
static void subfacet_destroy(struct subfacet *);
static void subfacet_destroy__(struct subfacet *);
static void subfacet_destroy_batch(struct ofproto_dpif *,
struct subfacet **, int n);
-static void subfacet_get_key(struct subfacet *, struct odputil_keybuf *,
- struct ofpbuf *key);
static void subfacet_reset_dp_stats(struct subfacet *,
struct dpif_flow_stats *);
static void subfacet_update_time(struct subfacet *, long long int used);
/* Storage for a single subfacet, to reduce malloc() time and space
* overhead. (A facet always has at least one subfacet and in the common
- * case has exactly one subfacet.) */
+ * case has exactly one subfacet. However, 'one_subfacet' may not
+ * always be valid, since it could have been removed after newer
+ * subfacets were pushed onto the 'subfacets' list.) */
struct subfacet one_subfacet;
};
static struct ofport_dpif *get_odp_port(const struct ofproto_dpif *,
uint32_t odp_port);
static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
- const struct ofpbuf *, ovs_be16 initial_tci,
- struct ds *);
-static bool may_dpif_port_del(struct ofport_dpif *);
+ const struct ofpbuf *,
+ const struct initial_vals *, struct ds *);
/* Packet processing. */
static void update_learning_table(struct ofproto_dpif *,
struct tag_set revalidate_set = backer->revalidate_set;
bool need_revalidate = backer->need_revalidate;
struct ofproto_dpif *ofproto;
+ struct simap_node *node;
+ struct simap tmp_backers;
+
+ /* Handle tunnel garbage collection. */
+ simap_init(&tmp_backers);
+ simap_swap(&backer->tnl_backers, &tmp_backers);
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ struct ofport_dpif *iter;
+
+ if (backer != ofproto->backer) {
+ continue;
+ }
+
+ HMAP_FOR_EACH (iter, up.hmap_node, &ofproto->up.ports) {
+ const char *dp_port;
+
+ if (!iter->tnl_port) {
+ continue;
+ }
+
+ dp_port = netdev_vport_get_dpif_port(iter->up.netdev);
+ node = simap_find(&tmp_backers, dp_port);
+ if (node) {
+ simap_put(&backer->tnl_backers, dp_port, node->data);
+ simap_delete(&tmp_backers, node);
+ node = simap_find(&backer->tnl_backers, dp_port);
+ } else {
+ node = simap_find(&backer->tnl_backers, dp_port);
+ if (!node) {
+ uint32_t odp_port = UINT32_MAX;
+
+ if (!dpif_port_add(backer->dpif, iter->up.netdev,
+ &odp_port)) {
+ simap_put(&backer->tnl_backers, dp_port, odp_port);
+ node = simap_find(&backer->tnl_backers, dp_port);
+ }
+ }
+ }
+
+ iter->odp_port = node ? node->data : OVSP_NONE;
+ if (tnl_port_reconfigure(&iter->up, iter->odp_port,
+ &iter->tnl_port)) {
+ backer->need_revalidate = REV_RECONFIGURE;
+ }
+ }
+ }
+
+ SIMAP_FOR_EACH (node, &tmp_backers) {
+ dpif_port_del(backer->dpif, node->data);
+ }
+ simap_destroy(&tmp_backers);
switch (backer->need_revalidate) {
case REV_RECONFIGURE: COVERAGE_INC(rev_reconfigure); break;
backer->need_revalidate = 0;
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
- struct facet *facet;
+ struct facet *facet, *next;
if (ofproto->backer != backer) {
continue;
}
- HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
+ HMAP_FOR_EACH_SAFE (facet, next, hmap_node, &ofproto->facets) {
if (need_revalidate
|| tag_set_intersects(&revalidate_set, facet->tags)) {
facet_revalidate(facet);
const char *dp_port_name = netdev_vport_get_dpif_port(port->up.netdev);
const char *devname = netdev_get_name(port->up.netdev);
- if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)
- && may_dpif_port_del(port)) {
+ if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
/* The underlying device is still there, so delete it. This
* happens when the ofproto is being destroyed, since the caller
* assumes that removal of attached ports will happen as part of
* destruction. */
- dpif_port_del(ofproto->backer->dpif, port->odp_port);
- simap_find_and_delete(&ofproto->backer->tnl_backers, dp_port_name);
+ if (!port->tnl_port) {
+ dpif_port_del(ofproto->backer->dpif, port->odp_port);
+ }
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
if (port->odp_port != OVSP_NONE && !port->tnl_port) {
return error;
}
-static int
-get_cfm_fault(const struct ofport *ofport_)
-{
-    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
-
-    return ofport->cfm ? cfm_get_fault(ofport->cfm) : -1;
-}
-
-static int
-get_cfm_opup(const struct ofport *ofport_)
-{
-    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
-
-    return ofport->cfm ? cfm_get_opup(ofport->cfm) : -1;
-}
-
-static int
-get_cfm_remote_mpids(const struct ofport *ofport_, const uint64_t **rmps,
-                     size_t *n_rmps)
+/* Consolidated replacement for the former get_cfm_fault(), get_cfm_opup(),
+ * get_cfm_remote_mpids(), and get_cfm_health() callbacks: fills '*status'
+ * with 'ofport_''s complete CFM status in one call.
+ *
+ * Returns true and populates '*status' if CFM is enabled on 'ofport_',
+ * otherwise returns false and leaves '*status' untouched. */
+static bool
+get_cfm_status(const struct ofport *ofport_,
+               struct ofproto_cfm_status *status)
 {
     struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
 
     if (ofport->cfm) {
-        cfm_get_remote_mpids(ofport->cfm, rmps, n_rmps);
-        return 0;
+        status->faults = cfm_get_fault(ofport->cfm);
+        status->remote_opstate = cfm_get_opup(ofport->cfm);
+        status->health = cfm_get_health(ofport->cfm);
+        cfm_get_remote_mpids(ofport->cfm, &status->rmps, &status->n_rmps);
+        return true;
     } else {
-        return -1;
+        return false;
     }
 }
-
-static int
-get_cfm_health(const struct ofport *ofport_)
-{
-    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
-
-    return ofport->cfm ? cfm_get_health(ofport->cfm) : -1;
-}
\f
/* Spanning Tree. */
return 0;
}
-/* Returns true if the odp_port backing 'ofport' may be deleted from the
- * datapath. In most cases, this function simply returns true. However, for
- * tunnels it's possible that multiple ofports use the same odp_port, in which
- * case we need to keep the odp_port backer around until the last ofport is
- * deleted. */
-static bool
-may_dpif_port_del(struct ofport_dpif *ofport)
-{
- struct dpif_backer *backer = ofproto_dpif_cast(ofport->up.ofproto)->backer;
- struct ofproto_dpif *ofproto_iter;
-
- if (!ofport->tnl_port) {
- return true;
- }
-
- HMAP_FOR_EACH (ofproto_iter, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
- struct ofport_dpif *iter;
-
- if (backer != ofproto_iter->backer) {
- continue;
- }
-
- HMAP_FOR_EACH (iter, up.hmap_node, &ofproto_iter->up.ports) {
- if (ofport == iter) {
- continue;
- }
-
- if (!strcmp(netdev_vport_get_dpif_port(ofport->up.netdev),
- netdev_vport_get_dpif_port(iter->up.netdev))) {
- return false;
- }
- }
- }
-
- return true;
-}
-
static int
port_del(struct ofproto *ofproto_, uint16_t ofp_port)
{
sset_find_and_delete(&ofproto->ghost_ports,
netdev_get_name(ofport->up.netdev));
- if (may_dpif_port_del(ofport)) {
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ if (!ofport->tnl_port) {
error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port);
if (!error) {
- const char *dpif_port;
-
/* The caller is going to close ofport->up.netdev. If this is a
* bonded port, then the bond is using that netdev, so remove it
* from the bond. The client will need to reconfigure everything
* after deleting ports, so then the slave will get re-added. */
- dpif_port = netdev_vport_get_dpif_port(ofport->up.netdev);
- simap_find_and_delete(&ofproto->backer->tnl_backers, dpif_port);
bundle_remove(&ofport->up);
}
}
enum odp_key_fitness key_fitness;
const struct nlattr *key;
size_t key_len;
- ovs_be16 initial_tci;
+ struct initial_vals initial_vals;
struct list packets;
enum dpif_upcall_type upcall_type;
uint32_t odp_in_port;
struct flow_miss_op {
struct dpif_op dpif_op;
- struct subfacet *subfacet; /* Subfacet */
void *garbage; /* Pointer to pass to free(), NULL if none. */
uint64_t stub[1024 / 8]; /* Temporary buffer. */
};
init_flow_miss_execute_op(struct flow_miss *miss, struct ofpbuf *packet,
struct flow_miss_op *op)
{
- if (miss->flow.vlan_tci != miss->initial_tci) {
+ if (miss->flow.vlan_tci != miss->initial_vals.vlan_tci) {
/* This packet was received on a VLAN splinter port. We
* added a VLAN to the packet to make the packet resemble
* the flow, but the actions were composed assuming that
eth_pop_vlan(packet);
}
- op->subfacet = NULL;
op->garbage = NULL;
op->dpif_op.type = DPIF_OP_EXECUTE;
op->dpif_op.u.execute.key = miss->key;
dpif_flow_stats_extract(&miss->flow, packet, now, &stats);
rule_credit_stats(rule, &stats);
- action_xlate_ctx_init(&ctx, ofproto, &miss->flow, miss->initial_tci,
- rule, 0, packet);
+ action_xlate_ctx_init(&ctx, ofproto, &miss->flow,
+ &miss->initial_vals, rule, 0, packet);
ctx.resubmit_stats = &stats;
xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
&odp_actions);
struct dpif_execute *execute = &op->dpif_op.u.execute;
init_flow_miss_execute_op(miss, packet, op);
- op->subfacet = subfacet;
if (!subfacet->slow) {
execute->actions = subfacet->actions;
execute->actions_len = subfacet->actions_len;
struct flow_miss_op *op = &ops[(*n_ops)++];
struct dpif_flow_put *put = &op->dpif_op.u.flow_put;
- op->subfacet = subfacet;
+ subfacet->path = want_path;
+
op->garbage = NULL;
op->dpif_op.type = DPIF_OP_FLOW_PUT;
put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
* flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes
* a VLAN header onto 'packet' (if it is nonnull).
*
- * Optionally, if nonnull, sets '*initial_tci' to the VLAN TCI with which the
- * packet was really received, that is, the actual VLAN TCI extracted by
- * odp_flow_key_to_flow(). (This differs from the value returned in
- * flow->vlan_tci only for packets received on VLAN splinters.)
+ * Optionally, if 'initial_vals' is nonnull, sets 'initial_vals->vlan_tci'
+ * to the VLAN TCI with which the packet was really received, that is, the
+ * actual VLAN TCI extracted by odp_flow_key_to_flow(). (This differs from
+ * the value returned in flow->vlan_tci only for packets received on
+ * VLAN splinters.) Also, if received on an IP tunnel, sets
+ * 'initial_vals->tunnel_ip_tos' to the tunnel's IP TOS.
*
* Similarly, this function also includes some logic to help with tunnels. It
* may modify 'flow' as necessary to make the tunneling implementation
const struct nlattr *key, size_t key_len,
struct flow *flow, enum odp_key_fitness *fitnessp,
struct ofproto_dpif **ofproto, uint32_t *odp_in_port,
- ovs_be16 *initial_tci)
+ struct initial_vals *initial_vals)
{
const struct ofport_dpif *port;
enum odp_key_fitness fitness;
goto exit;
}
- if (initial_tci) {
- *initial_tci = flow->vlan_tci;
+ if (initial_vals) {
+ initial_vals->vlan_tci = flow->vlan_tci;
+ initial_vals->tunnel_ip_tos = flow->tunnel.ip_tos;
}
if (odp_in_port) {
error = ofproto_receive(backer, upcall->packet, upcall->key,
upcall->key_len, &flow, &miss->key_fitness,
- &ofproto, &odp_in_port, &miss->initial_tci);
+ &ofproto, &odp_in_port, &miss->initial_vals);
if (error == ENODEV) {
struct drop_key *drop_key;
}
dpif_operate(backer->dpif, dpif_ops, n_ops);
- /* Free memory and update facets. */
+ /* Free memory. */
for (i = 0; i < n_ops; i++) {
- struct flow_miss_op *op = &flow_miss_ops[i];
-
- switch (op->dpif_op.type) {
- case DPIF_OP_EXECUTE:
- break;
-
- case DPIF_OP_FLOW_PUT:
- if (!op->dpif_op.error) {
- op->subfacet->path = subfacet_want_path(op->subfacet->slow);
- }
- break;
-
- case DPIF_OP_FLOW_DEL:
- NOT_REACHED();
- }
-
- free(op->garbage);
+ free(flow_miss_ops[i].garbage);
}
hmap_destroy(&todo);
}
}
key_hash = odp_flow_key_hash(key, key_len);
- subfacet = subfacet_find(ofproto, key, key_len, key_hash, &flow);
+ subfacet = subfacet_find(ofproto, key, key_len, key_hash);
switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) {
case SF_FAST_PATH:
update_subfacet_stats(subfacet, stats);
facet_learn(struct facet *facet)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
+ struct subfacet *subfacet= CONTAINER_OF(list_front(&facet->subfacets),
+ struct subfacet, list_node);
struct action_xlate_ctx ctx;
if (!facet->has_learn
}
action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
- facet->flow.vlan_tci,
+ &subfacet->initial_vals,
facet->rule, facet->tcp_flags, NULL);
ctx.may_learn = true;
xlate_actions_for_side_effects(&ctx, facet->rule->up.ofpacts,
|| tag_set_intersects(&ofproto->backer->revalidate_set,
facet->tags))) {
facet_revalidate(facet);
+
+ /* facet_revalidate() may have destroyed 'facet'. */
+ facet = facet_find(ofproto, flow, hash);
}
return facet;
ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
enum subfacet_path want_path;
- struct odputil_keybuf keybuf;
struct action_xlate_ctx ctx;
- struct ofpbuf key;
struct ds s;
action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
- subfacet->initial_tci, rule, 0, NULL);
+ &subfacet->initial_vals, rule, 0, NULL);
xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
&odp_actions);
}
ds_init(&s);
- subfacet_get_key(subfacet, &keybuf, &key);
- odp_flow_key_format(key.data, key.size, &s);
+ odp_flow_key_format(subfacet->key, subfacet->key_len, &s);
ds_put_cstr(&s, ": inconsistency in subfacet");
if (want_path != subfacet->path) {
* 'facet' to the new rule and recompiles its actions.
*
* - If the rule found is the same as 'facet''s current rule, leaves 'facet'
- * where it is and recompiles its actions anyway. */
+ * where it is and recompiles its actions anyway.
+ *
+ * - If any of 'facet''s subfacets correspond to a new flow according to
+ * ofproto_receive(), 'facet' is removed. */
static void
facet_revalidate(struct facet *facet)
{
COVERAGE_INC(facet_revalidate);
+ /* Check that child subfacets still correspond to this facet. Tunnel
+ * configuration changes could cause a subfacet's OpenFlow in_port to
+ * change. */
+ LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
+ struct ofproto_dpif *recv_ofproto;
+ struct flow recv_flow;
+ int error;
+
+ error = ofproto_receive(ofproto->backer, NULL, subfacet->key,
+ subfacet->key_len, &recv_flow, NULL,
+ &recv_ofproto, NULL, NULL);
+ if (error
+ || recv_ofproto != ofproto
+ || memcmp(&recv_flow, &facet->flow, sizeof recv_flow)) {
+ facet_remove(facet);
+ return;
+ }
+ }
+
new_rule = rule_dpif_lookup(ofproto, &facet->flow);
/* Calculate new datapath actions.
enum slow_path_reason slow;
action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
- subfacet->initial_tci, new_rule, 0, NULL);
+ &subfacet->initial_vals, new_rule, 0, NULL);
xlate_actions(&ctx, new_rule->up.ofpacts, new_rule->up.ofpacts_len,
&odp_actions);
facet->prev_byte_count = facet->byte_count;
facet->prev_used = facet->used;
- flow_push_stats(facet->rule, &facet->flow, &stats);
+ flow_push_stats(facet, &stats);
update_mirror_stats(ofproto_dpif_cast(facet->rule->up.ofproto),
facet->mirrors, stats.n_packets, stats.n_bytes);
ofproto_rule_update_used(&rule->up, stats->used);
}
-/* Pushes flow statistics to the rules which 'flow' resubmits into given
- * 'rule''s actions and mirrors. */
+/* Pushes flow statistics to the rules which 'facet->flow' resubmits
+ * into given 'facet->rule''s actions and mirrors. */
static void
-flow_push_stats(struct rule_dpif *rule,
- const struct flow *flow, const struct dpif_flow_stats *stats)
+flow_push_stats(struct facet *facet, const struct dpif_flow_stats *stats)
{
+ struct rule_dpif *rule = facet->rule;
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
+ struct subfacet *subfacet = CONTAINER_OF(list_front(&facet->subfacets),
+ struct subfacet, list_node);
struct action_xlate_ctx ctx;
ofproto_rule_update_used(&rule->up, stats->used);
- action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, rule,
- 0, NULL);
+ action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
+ &subfacet->initial_vals, rule, 0, NULL);
ctx.resubmit_stats = stats;
xlate_actions_for_side_effects(&ctx, rule->up.ofpacts,
rule->up.ofpacts_len);
static struct subfacet *
subfacet_find(struct ofproto_dpif *ofproto,
- const struct nlattr *key, size_t key_len, uint32_t key_hash,
- const struct flow *flow)
+ const struct nlattr *key, size_t key_len, uint32_t key_hash)
{
struct subfacet *subfacet;
HMAP_FOR_EACH_WITH_HASH (subfacet, hmap_node, key_hash,
&ofproto->subfacets) {
- if (subfacet->key
- ? (subfacet->key_len == key_len
- && !memcmp(key, subfacet->key, key_len))
- : flow_equal(flow, &subfacet->facet->flow)) {
+ if (subfacet->key_len == key_len
+ && !memcmp(key, subfacet->key, key_len)) {
return subfacet;
}
}
if (list_is_empty(&facet->subfacets)) {
subfacet = &facet->one_subfacet;
} else {
- subfacet = subfacet_find(ofproto, key, key_len, key_hash,
- &facet->flow);
+ subfacet = subfacet_find(ofproto, key, key_len, key_hash);
if (subfacet) {
if (subfacet->facet == facet) {
return subfacet;
list_push_back(&facet->subfacets, &subfacet->list_node);
subfacet->facet = facet;
subfacet->key_fitness = key_fitness;
- if (key_fitness != ODP_FIT_PERFECT) {
- subfacet->key = xmemdup(key, key_len);
- subfacet->key_len = key_len;
- } else {
- subfacet->key = NULL;
- subfacet->key_len = 0;
- }
+ subfacet->key = xmemdup(key, key_len);
+ subfacet->key_len = key_len;
subfacet->used = now;
subfacet->dp_packet_count = 0;
subfacet->dp_byte_count = 0;
? SLOW_MATCH
: 0);
subfacet->path = SF_NOT_INSTALLED;
- subfacet->initial_tci = miss->initial_tci;
+ subfacet->initial_vals = miss->initial_vals;
subfacet->odp_in_port = miss->odp_in_port;
return subfacet;
subfacet_destroy_batch(struct ofproto_dpif *ofproto,
struct subfacet **subfacets, int n)
{
- struct odputil_keybuf keybufs[SUBFACET_DESTROY_MAX_BATCH];
struct dpif_op ops[SUBFACET_DESTROY_MAX_BATCH];
struct dpif_op *opsp[SUBFACET_DESTROY_MAX_BATCH];
- struct ofpbuf keys[SUBFACET_DESTROY_MAX_BATCH];
struct dpif_flow_stats stats[SUBFACET_DESTROY_MAX_BATCH];
int i;
for (i = 0; i < n; i++) {
ops[i].type = DPIF_OP_FLOW_DEL;
- subfacet_get_key(subfacets[i], &keybufs[i], &keys[i]);
- ops[i].u.flow_del.key = keys[i].data;
- ops[i].u.flow_del.key_len = keys[i].size;
+ ops[i].u.flow_del.key = subfacets[i]->key;
+ ops[i].u.flow_del.key_len = subfacets[i]->key_len;
ops[i].u.flow_del.stats = &stats[i];
opsp[i] = &ops[i];
}
}
}
-/* Initializes 'key' with the sequence of OVS_KEY_ATTR_* Netlink attributes
- * that can be used to refer to 'subfacet'. The caller must provide 'keybuf'
- * for use as temporary storage. */
-static void
-subfacet_get_key(struct subfacet *subfacet, struct odputil_keybuf *keybuf,
- struct ofpbuf *key)
-{
-
- if (!subfacet->key) {
- struct flow *flow = &subfacet->facet->flow;
-
- ofpbuf_use_stack(key, keybuf, sizeof *keybuf);
- odp_flow_key_from_flow(key, flow, subfacet->odp_in_port);
- } else {
- ofpbuf_use_const(key, subfacet->key, subfacet->key_len);
- }
-}
-
/* Composes the datapath actions for 'subfacet' based on its rule's actions.
* Translates the actions into 'odp_actions', which the caller must have
* initialized and is responsible for uninitializing. */
struct action_xlate_ctx ctx;
- action_xlate_ctx_init(&ctx, ofproto, &facet->flow, subfacet->initial_tci,
- rule, 0, packet);
+ action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
+ &subfacet->initial_vals, rule, 0, packet);
xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, odp_actions);
facet->tags = ctx.tags;
facet->has_learn = ctx.has_learn;
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
enum subfacet_path path = subfacet_want_path(slow);
uint64_t slow_path_stub[128 / 8];
- struct odputil_keybuf keybuf;
enum dpif_flow_put_flags flags;
- struct ofpbuf key;
int ret;
flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
&actions, &actions_len);
}
- subfacet_get_key(subfacet, &keybuf, &key);
- ret = dpif_flow_put(ofproto->backer->dpif, flags, key.data, key.size,
- actions, actions_len, stats);
+ ret = dpif_flow_put(ofproto->backer->dpif, flags, subfacet->key,
+ subfacet->key_len, actions, actions_len, stats);
if (stats) {
subfacet_reset_dp_stats(subfacet, stats);
if (subfacet->path != SF_NOT_INSTALLED) {
struct rule_dpif *rule = subfacet->facet->rule;
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
- struct odputil_keybuf keybuf;
struct dpif_flow_stats stats;
- struct ofpbuf key;
int error;
- subfacet_get_key(subfacet, &keybuf, &key);
- error = dpif_flow_del(ofproto->backer->dpif,
- key.data, key.size, &stats);
+ error = dpif_flow_del(ofproto->backer->dpif, subfacet->key,
+ subfacet->key_len, &stats);
subfacet_reset_dp_stats(subfacet, &stats);
if (!error) {
subfacet_update_stats(subfacet, &stats);
struct ofpbuf *packet)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
-
+ struct initial_vals initial_vals;
struct dpif_flow_stats stats;
-
struct action_xlate_ctx ctx;
uint64_t odp_actions_stub[1024 / 8];
struct ofpbuf odp_actions;
dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
rule_credit_stats(rule, &stats);
+ initial_vals.vlan_tci = flow->vlan_tci;
+ initial_vals.tunnel_ip_tos = flow->tunnel.ip_tos;
ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
- action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci,
+ action_xlate_ctx_init(&ctx, ofproto, flow, &initial_vals,
rule, stats.tcp_flags, packet);
ctx.resubmit_stats = &stats;
xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, &odp_actions);
}
}
+/* Implements OFPACT_SET_MPLS_TTL.
+ *
+ * If the packet is not MPLS, returns true without modifying it; the caller
+ * treats a true return as "stop processing further actions" (it jumps out of
+ * the action loop).  Otherwise sets the MPLS TTL in the flow's label stack
+ * entry to 'ttl' and returns false so that processing continues. */
+static bool
+execute_set_mpls_ttl_action(struct action_xlate_ctx *ctx, uint8_t ttl)
+{
+    if (!eth_type_mpls(ctx->flow.dl_type)) {
+        return true;
+    }
+
+    set_mpls_lse_ttl(&ctx->flow.mpls_lse, ttl);
+    return false;
+}
+
+/* Implements OFPACT_DEC_MPLS_TTL.
+ *
+ * If the packet is MPLS with TTL greater than 1, decrements the TTL and
+ * returns false (continue processing).  If the TTL would expire (1 or 0),
+ * invokes execute_controller_action() with reason OFPR_INVALID_TTL instead
+ * and returns true so the caller stops processing the current table.
+ *
+ * For non-MPLS packets this returns false (continue), whereas
+ * execute_set_mpls_ttl_action() returns true (stop) in the same situation.
+ * NOTE(review): asymmetric on purpose? -- TODO confirm. */
+static bool
+execute_dec_mpls_ttl_action(struct action_xlate_ctx *ctx)
+{
+    /* Read the TTL up front; only meaningful when the packet is MPLS. */
+    uint8_t ttl = mpls_lse_to_ttl(ctx->flow.mpls_lse);
+
+    if (!eth_type_mpls(ctx->flow.dl_type)) {
+        return false;
+    }
+
+    if (ttl > 1) {
+        ttl--;
+        set_mpls_lse_ttl(&ctx->flow.mpls_lse, ttl);
+        return false;
+    } else {
+        execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
+
+        /* Stop processing for current table. */
+        return true;
+    }
+}
+
static void
xlate_output_action(struct action_xlate_ctx *ctx,
uint16_t port, uint16_t max_len, bool may_packet_in)
return true;
}
+/* ECN handling for packets received over an IP tunnel.
+ *
+ * If the outer tunnel header carried the CE (congestion experienced) mark
+ * but the inner packet is not ECN-capable (ECT codepoint 0), logs a
+ * rate-limited warning and returns false so that the caller drops the
+ * packet.  If the inner packet is ECN-capable, the CE mark is propagated
+ * into the inner IP header instead.
+ *
+ * Returns true if processing of the packet may continue.  (This looks like
+ * the decapsulation behavior specified by RFC 6040 -- TODO confirm.) */
+static bool
+tunnel_ecn_ok(struct action_xlate_ctx *ctx)
+{
+    if (is_ip_any(&ctx->base_flow)
+        && (ctx->base_flow.tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
+        if ((ctx->base_flow.nw_tos & IP_ECN_MASK) == IP_ECN_NOT_ECT) {
+            VLOG_WARN_RL(&rl, "dropping tunnel packet marked ECN CE"
+                         " but is not ECN capable");
+            return false;
+        } else {
+            /* Set the ECN CE value in the tunneled packet. */
+            ctx->flow.nw_tos |= IP_ECN_CE;
+        }
+    }
+
+    return true;
+}
+
static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
struct action_xlate_ctx *ctx)
nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->flow);
break;
+ case OFPACT_STACK_PUSH:
+ nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), &ctx->flow,
+ &ctx->stack);
+ break;
+
+ case OFPACT_STACK_POP:
+ nxm_execute_stack_pop(ofpact_get_STACK_POP(a), &ctx->flow,
+ &ctx->stack);
+ break;
+
case OFPACT_PUSH_MPLS:
execute_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);
break;
execute_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
break;
+ case OFPACT_SET_MPLS_TTL:
+ if (execute_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl)) {
+ goto out;
+ }
+ break;
+
+ case OFPACT_DEC_MPLS_TTL:
+ if (execute_dec_mpls_ttl_action(ctx)) {
+ goto out;
+ }
+ break;
+
case OFPACT_DEC_TTL:
if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
goto out;
static void
action_xlate_ctx_init(struct action_xlate_ctx *ctx,
struct ofproto_dpif *ofproto, const struct flow *flow,
- ovs_be16 initial_tci, struct rule_dpif *rule,
+ const struct initial_vals *initial_vals,
+ struct rule_dpif *rule,
uint8_t tcp_flags, const struct ofpbuf *packet)
{
ovs_be64 initial_tun_id = flow->tunnel.tun_id;
ctx->flow = *flow;
memset(&ctx->flow.tunnel, 0, sizeof ctx->flow.tunnel);
ctx->base_flow = ctx->flow;
- ctx->base_flow.vlan_tci = initial_tci;
+ ctx->base_flow.vlan_tci = initial_vals->vlan_tci;
+ ctx->base_flow.tunnel.ip_tos = initial_vals->tunnel_ip_tos;
ctx->flow.tunnel.tun_id = initial_tun_id;
ctx->rule = rule;
ctx->packet = packet;
ctx->table_id = 0;
ctx->exit = false;
+ ofpbuf_use_stub(&ctx->stack, ctx->init_stack, sizeof ctx->init_stack);
+
if (ctx->ofproto->has_mirrors || hit_resubmit_limit) {
/* Do this conditionally because the copy is expensive enough that it
* shows up in profiles. */
ctx->slow |= special;
} else {
static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
- ovs_be16 initial_tci = ctx->base_flow.vlan_tci;
+ struct initial_vals initial_vals;
uint32_t local_odp_port;
+ initial_vals.vlan_tci = ctx->base_flow.vlan_tci;
+ initial_vals.tunnel_ip_tos = ctx->base_flow.tunnel.ip_tos;
+
add_sflow_action(ctx);
- if (!in_port || may_receive(in_port, ctx)) {
+ if (tunnel_ecn_ok(ctx) && (!in_port || may_receive(in_port, ctx))) {
do_xlate_actions(ofpacts, ofpacts_len, ctx);
/* We've let OFPP_NORMAL and the learning action look at the
struct ds ds = DS_EMPTY_INITIALIZER;
ofproto_trace(ctx->ofproto, &orig_flow, ctx->packet,
- initial_tci, &ds);
+ &initial_vals, &ds);
VLOG_ERR("Trace triggered by excessive resubmit "
"recursion:\n%s", ds_cstr(&ds));
ds_destroy(&ds);
}
fix_sflow_action(ctx);
}
+
+ ofpbuf_uninit(&ctx->stack);
}
/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
const struct ofpact *ofpacts, size_t ofpacts_len)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ struct initial_vals initial_vals;
struct odputil_keybuf keybuf;
struct dpif_flow_stats stats;
dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
- action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, NULL,
+ initial_vals.vlan_tci = flow->vlan_tci;
+ initial_vals.tunnel_ip_tos = 0;
+ action_xlate_ctx_init(&ctx, ofproto, flow, &initial_vals, NULL,
packet_get_tcp_flags(packet, flow), packet);
ctx.resubmit_stats = &stats;
struct ofproto_dpif *ofproto;
struct ofpbuf odp_key;
struct ofpbuf *packet;
- ovs_be16 initial_tci;
+ struct initial_vals initial_vals;
struct ds result;
struct flow flow;
char *s;
* ability to specify the ofproto. */
if (ofproto_receive(ofproto->backer, NULL, odp_key.data,
odp_key.size, &flow, NULL, NULL, NULL,
- &initial_tci)) {
+ &initial_vals)) {
unixctl_command_reply_error(conn, "Invalid flow");
goto exit;
}
goto exit;
}
- initial_tci = flow.vlan_tci;
+ initial_vals.vlan_tci = flow.vlan_tci;
+ initial_vals.tunnel_ip_tos = flow.tunnel.ip_tos;
}
/* Generate a packet, if requested. */
flow_extract(packet, priority, mark, NULL, in_port, &flow);
flow.tunnel.tun_id = tun_id;
- initial_tci = flow.vlan_tci;
+ initial_vals.vlan_tci = flow.vlan_tci;
+ initial_vals.tunnel_ip_tos = flow.tunnel.ip_tos;
} else {
unixctl_command_reply_error(conn, "Bad command syntax");
goto exit;
}
- ofproto_trace(ofproto, &flow, packet, initial_tci, &result);
+ ofproto_trace(ofproto, &flow, packet, &initial_vals, &result);
unixctl_command_reply(conn, ds_cstr(&result));
exit:
static void
ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
- const struct ofpbuf *packet, ovs_be16 initial_tci,
- struct ds *ds)
+ const struct ofpbuf *packet,
+ const struct initial_vals *initial_vals, struct ds *ds)
{
struct rule_dpif *rule;
trace.flow = *flow;
ofpbuf_use_stub(&odp_actions,
odp_actions_stub, sizeof odp_actions_stub);
- action_xlate_ctx_init(&trace.ctx, ofproto, flow, initial_tci,
+ action_xlate_ctx_init(&trace.ctx, ofproto, flow, initial_vals,
rule, tcp_flags, packet);
trace.ctx.resubmit_hook = trace_resubmit;
trace.ctx.report_hook = trace_report;
update_stats(ofproto->backer);
HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
- struct odputil_keybuf keybuf;
- struct ofpbuf key;
-
- subfacet_get_key(subfacet, &keybuf, &key);
- odp_flow_key_format(key.data, key.size, &ds);
+ odp_flow_key_format(subfacet->key, subfacet->key_len, &ds);
ds_put_format(&ds, ", packets:%"PRIu64", bytes:%"PRIu64", used:",
subfacet->dp_packet_count, subfacet->dp_byte_count);
get_netflow_ids,
set_sflow,
set_cfm,
- get_cfm_fault,
- get_cfm_opup,
- get_cfm_remote_mpids,
- get_cfm_health,
+ get_cfm_status,
set_stp,
get_stp_status,
set_stp_port,