/* Maximum depth of flow table recursion (due to resubmit actions) in a
* flow translation. */
#define MAX_RESUBMIT_RECURSION 64
+#define MAX_INTERNAL_RESUBMITS 1 /* Max resubmits allowed using rules in
+                                    the internal table. */
/* Maximum number of resubmit actions in a flow translation, whether they are
* recursive or not. */
bool has_in_band; /* Bridge has in band control? */
bool forward_bpdu; /* Bridge forwards STP BPDUs? */
+ /* True if the datapath supports recirculation. */
+ bool enable_recirc;
+
/* True if the datapath supports variable-length
* OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions.
* False if the datapath supports only 8-byte (or shorter) userdata. */
uint8_t dscp; /* DSCP bits to mark outgoing traffic with. */
};
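+/* Type of a translation cache entry; selects which member of the union in
+ * 'struct xc_entry' below is in use. */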
+enum xc_type {
+ XC_RULE,
+ XC_BOND,
+ XC_NETDEV,
+ XC_NETFLOW,
+ XC_MIRROR,
+ XC_LEARN,
+ XC_NORMAL,
+ XC_FIN_TIMEOUT,
+};
+
+/* xlate_cache entries hold enough information to perform the side effects of
+ * xlate_actions() for a rule, without needing to perform rule translation
+ * from scratch. The primary usage of these is to submit statistics to objects
+ * that a flow relates to, although they may be used for other effects as well
+ * (for instance, refreshing hard timeouts for learned flows). */
+struct xc_entry {
+ enum xc_type type;
+ union {
+ struct rule_dpif *rule;
+ struct {
+ struct netdev *tx;
+ struct netdev *rx;
+ struct bfd *bfd;
+ } dev;
+ struct {
+ struct netflow *netflow;
+ struct flow *flow;
+ ofp_port_t iface;
+ } nf;
+ struct {
+ struct mbridge *mbridge;
+ mirror_mask_t mirrors;
+ } mirror;
+ struct {
+ struct bond *bond;
+ struct flow *flow;
+ uint16_t vid;
+ } bond;
+ struct {
+ struct ofproto_dpif *ofproto;
+ struct rule_dpif *rule;
+ } learn;
+ struct {
+ struct ofproto_dpif *ofproto;
+ struct flow *flow;
+ int vlan;
+ } normal;
+ struct {
+ struct rule_dpif *rule;
+ uint16_t idle;
+ uint16_t hard;
+ } fin;
+ } u;
+};
+
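+/* Iterates through the entries in 'xcache'. 'entries' is a local
+ * 'struct ofpbuf' that walks a copy of the cache's buffer, so iteration
+ * does not modify the cache itself. */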
+#define XC_ENTRY_FOR_EACH(entry, entries, xcache) \
+ entries = xcache->entries; \
+ for (entry = ofpbuf_try_pull(&entries, sizeof *entry); \
+ entry; \
+ entry = ofpbuf_try_pull(&entries, sizeof *entry))
+
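+/* A translation cache: a sequence of 'struct xc_entry's stored in an
+ * ofpbuf. */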
+struct xlate_cache {
+ struct ofpbuf entries;
+};
+
static struct hmap xbridges = HMAP_INITIALIZER(&xbridges);
static struct hmap xbundles = HMAP_INITIALIZER(&xbundles);
static struct hmap xports = HMAP_INITIALIZER(&xports);
struct xlate_ctx *);
static void xlate_actions__(struct xlate_in *, struct xlate_out *)
OVS_REQ_RDLOCK(xlate_rwlock);
- static void xlate_normal(struct xlate_ctx *);
- static void xlate_report(struct xlate_ctx *, const char *);
+static void xlate_normal(struct xlate_ctx *);
+static void xlate_report(struct xlate_ctx *, const char *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
uint8_t table_id, bool may_packet_in,
bool honor_table_miss);
static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
uint8_t *dscp);
+static struct xc_entry *xlate_cache_add_entry(struct xlate_cache *xc,
+ enum xc_type type);
+
void
xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
struct dpif *dpif, struct rule_dpif *miss_rule,
const struct dpif_ipfix *ipfix,
const struct netflow *netflow, enum ofp_config_flags frag,
bool forward_bpdu, bool has_in_band,
+ bool enable_recirc,
bool variable_length_userdata,
size_t max_mpls_depth)
{
xbridge->frag = frag;
xbridge->miss_rule = miss_rule;
xbridge->no_packet_in_rule = no_packet_in_rule;
+ xbridge->enable_recirc = enable_recirc;
xbridge->variable_length_userdata = variable_length_userdata;
xbridge->max_mpls_depth = max_mpls_depth;
}
static bool
stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
{
+ /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
- return eth_addr_equals(flow->dl_dst, eth_addr_stp);
+ return is_stp(flow);
}
static void
{
struct stp_port *sp = xport_get_stp_port(xport);
struct ofpbuf payload = *packet;
- struct eth_header *eth = payload.data;
+ struct eth_header *eth = ofpbuf_data(&payload);
/* Sink packets on ports that have STP disabled when the bridge has
* STP enabled. */
}
/* Trim off padding on payload. */
- if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
- payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
+ if (ofpbuf_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
+ ofpbuf_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
}
if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
- stp_received_bpdu(sp, payload.data, payload.size);
+ stp_received_bpdu(sp, ofpbuf_data(&payload), ofpbuf_size(&payload));
}
}
/* Partially configured bundle with no slaves. Drop the packet. */
return;
} else if (!out_xbundle->bond) {
+ ctx->xout->use_recirc = false;
xport = CONTAINER_OF(list_front(&out_xbundle->xports), struct xport,
bundle_node);
} else {
struct ofport_dpif *ofport;
+ struct xlate_recirc *xr = &ctx->xout->recirc;
+ struct flow_wildcards *wc = &ctx->xout->wc;
- ofport = bond_choose_output_slave(out_xbundle->bond, &ctx->xin->flow,
- &ctx->xout->wc, vid);
+ if (ctx->xbridge->enable_recirc) {
+ ctx->xout->use_recirc = bond_may_recirc(
+ out_xbundle->bond, &xr->recirc_id, &xr->hash_basis);
+
+ if (ctx->xout->use_recirc) {
+ /* Only TCP mode uses recirculation. */
+ xr->hash_alg = OVS_HASH_ALG_L4;
+ bond_update_post_recirc_rules(out_xbundle->bond, false);
+
+ /* Recirculation does not require unmasking hash fields. */
+ wc = NULL;
+ }
+ }
+
+ ofport = bond_choose_output_slave(out_xbundle->bond,
+ &ctx->xin->flow, wc, vid);
xport = xport_lookup(ofport);
if (!xport) {
return;
}
- if (ctx->xin->resubmit_stats) {
- bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
- ctx->xin->resubmit_stats->n_bytes);
+ /* If ctx->xout->use_recirc is set, the main thread will handle stats
+ * accounting for this bond. */
+ if (!ctx->xout->use_recirc) {
+ if (ctx->xin->resubmit_stats) {
+ bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
+ ctx->xin->resubmit_stats->n_bytes);
+ }
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+ struct flow *flow;
+
+ flow = &ctx->xin->flow;
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
+ entry->u.bond.bond = bond_ref(out_xbundle->bond);
+ entry->u.bond.flow = xmemdup(flow, sizeof *flow);
+ entry->u.bond.vid = vid;
+ }
}
}
if (ctx->xin->may_learn) {
update_learning_table(ctx->xbridge, flow, wc, vlan, in_xbundle);
}
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+
+ /* Save enough info to update the MAC learning table later. */
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
+ entry->u.normal.ofproto = ctx->xin->ofproto;
+ entry->u.normal.flow = xmemdup(flow, sizeof *flow);
+ entry->u.normal.vlan = vlan;
+ }
/* Determine output bundle. */
ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
odp_port = ofp_port_to_odp_port(xbridge, flow->in_port.ofp_port);
- pid = dpif_port_get_pid(xbridge->dpif, odp_port, 0);
- cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size, odp_actions);
+ pid = dpif_port_get_pid(xbridge->dpif, odp_port,
+ flow_hash_5tuple(flow, 0));
+ cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size,
+ odp_actions);
nl_msg_end_nested(odp_actions, actions_offset);
nl_msg_end_nested(odp_actions, sample_offset);
/* If 'struct flow' gets additional metadata, we'll need to zero it out
* before traversing a patch port. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 25);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 26);
if (!xport) {
xlate_report(ctx, "Nonexistent output port");
xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
return;
} else if (check_stp) {
- if (eth_addr_equals(ctx->base_flow.dl_dst, eth_addr_stp)) {
+ if (is_stp(&ctx->base_flow)) {
if (!xport_stp_listen_state(xport)) {
xlate_report(ctx, "STP not in listening state, "
"skipping bpdu output");
/* Forwarding is disabled by STP. Let OFPP_NORMAL and the
* learning action look at the packet, then drop it. */
struct flow old_base_flow = ctx->base_flow;
- size_t old_size = ctx->xout->odp_actions.size;
+ size_t old_size = ofpbuf_size(&ctx->xout->odp_actions);
mirror_mask_t old_mirrors = ctx->xout->mirrors;
xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
ctx->xout->mirrors = old_mirrors;
ctx->base_flow = old_base_flow;
- ctx->xout->odp_actions.size = old_size;
+ ofpbuf_set_size(&ctx->xout->odp_actions, old_size);
}
}
bfd_account_rx(peer->bfd, ctx->xin->resubmit_stats);
}
}
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
+ entry->u.dev.tx = netdev_ref(xport->netdev);
+ entry->u.dev.rx = netdev_ref(peer->netdev);
+ entry->u.dev.bfd = bfd_ref(peer->bfd);
+ }
return;
}
flow_nw_tos = flow->nw_tos;
if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
- wc->masks.nw_tos |= IP_ECN_MASK;
+ wc->masks.nw_tos |= IP_DSCP_MASK;
flow->nw_tos &= ~IP_DSCP_MASK;
flow->nw_tos |= dscp;
}
if (ctx->xin->resubmit_stats) {
netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
}
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
+ entry->u.dev.tx = netdev_ref(xport->netdev);
+ }
out_port = odp_port;
commit_odp_tunnel_action(flow, &ctx->base_flow,
&ctx->xout->odp_actions);
ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
&ctx->xout->odp_actions,
&ctx->xout->wc);
- nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
- out_port);
+
+ if (ctx->xout->use_recirc) {
+ struct ovs_action_hash *act_hash;
+ struct xlate_recirc *xr = &ctx->xout->recirc;
+
+ /* Hash action. */
+ act_hash = nl_msg_put_unspec_uninit(&ctx->xout->odp_actions,
+ OVS_ACTION_ATTR_HASH,
+ sizeof *act_hash);
+ act_hash->hash_alg = xr->hash_alg;
+ act_hash->hash_basis = xr->hash_basis;
+
+ /* Recirc action. */
+ nl_msg_put_u32(&ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC,
+ xr->recirc_id);
+ } else {
+ nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
+ out_port);
+ }
ctx->sflow_odp_port = odp_port;
ctx->sflow_n_outputs++;
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
- if (ctx->recurse >= MAX_RESUBMIT_RECURSION) {
+ if (ctx->recurse >= MAX_RESUBMIT_RECURSION + MAX_INTERNAL_RESUBMITS) {
VLOG_ERR_RL(&rl, "resubmit actions recursed over %d times",
MAX_RESUBMIT_RECURSION);
- } else if (ctx->resubmits >= MAX_RESUBMITS) {
+ } else if (ctx->resubmits >= MAX_RESUBMITS + MAX_INTERNAL_RESUBMITS) {
VLOG_ERR_RL(&rl, "over %d resubmit actions", MAX_RESUBMITS);
- } else if (ctx->xout->odp_actions.size > UINT16_MAX) {
+ } else if (ofpbuf_size(&ctx->xout->odp_actions) > UINT16_MAX) {
VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of actions");
- } else if (ctx->stack.size >= 65536) {
+ } else if (ofpbuf_size(&ctx->stack) >= 65536) {
VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of stack");
} else {
return true;
!skip_wildcards
? &ctx->xout->wc : NULL,
honor_table_miss,
- &ctx->table_id, &rule);
+ &ctx->table_id, &rule,
+ ctx->xin->xcache != NULL);
ctx->xin->flow.in_port.ofp_port = old_in_port;
if (ctx->xin->resubmit_hook) {
case RULE_DPIF_LOOKUP_VERDICT_DROP:
config = OFPUTIL_PC_NO_PACKET_IN;
break;
+ case RULE_DPIF_LOOKUP_VERDICT_DEFAULT:
+ if (!ofproto_dpif_wants_packet_in_on_miss(ctx->xbridge->ofproto)) {
+ config = OFPUTIL_PC_NO_PACKET_IN;
+ }
+ break;
default:
OVS_NOT_REACHED();
}
choose_miss_rule(config, ctx->xbridge->miss_rule,
- ctx->xbridge->no_packet_in_rule, &rule);
+ ctx->xbridge->no_packet_in_rule, &rule,
+ ctx->xin->xcache != NULL);
match:
if (rule) {
+ /* Fill in the cache entry here instead of in xlate_recursively()
+ * to make the reference counting more explicit. We take a
+ * reference in the lookups above if we are going to cache the
+ * rule. */
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
+ entry->u.rule = rule;
+ }
xlate_recursively(ctx, rule);
- rule_dpif_unref(rule);
}
ctx->table_id = old_table_id;
ofpacts_execute_action_set(&action_list, &action_set);
ctx->recurse++;
- do_xlate_actions(action_list.data, action_list.size, ctx);
+ do_xlate_actions(ofpbuf_data(&action_list), ofpbuf_size(&action_list), ctx);
ctx->recurse--;
ofpbuf_uninit(&action_set);
const struct ofputil_bucket *bucket;
uint32_t basis;
- basis = hash_bytes(ctx->xin->flow.dl_dst, sizeof ctx->xin->flow.dl_dst, 0);
+ basis = hash_mac(ctx->xin->flow.dl_dst, 0, 0);
bucket = group_best_live_bucket(ctx, group, basis);
if (bucket) {
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
{
ofp_port_t in_port;
uint8_t table_id;
+ bool may_packet_in = false;
+ bool honor_table_miss = false;
+
+ if (ctx->rule && rule_dpif_is_internal(ctx->rule)) {
+ /* Still allow missed packets to be sent to the controller
+ * if resubmitting from an internal table. */
+ may_packet_in = true;
+ honor_table_miss = true;
+ }
in_port = resubmit->in_port;
if (in_port == OFPP_IN_PORT) {
table_id = ctx->table_id;
}
- xlate_table_action(ctx, in_port, table_id, false, false);
+ xlate_table_action(ctx, in_port, table_id, may_packet_in,
+ honor_table_miss);
}
static void
&ctx->xout->odp_actions,
&ctx->xout->wc);
- odp_execute_actions(NULL, packet, false, &md, ctx->xout->odp_actions.data,
- ctx->xout->odp_actions.size, NULL);
+ odp_execute_actions(NULL, packet, false, &md,
+ ofpbuf_data(&ctx->xout->odp_actions),
+ ofpbuf_size(&ctx->xout->odp_actions), NULL);
pin = xmalloc(sizeof *pin);
- pin->up.packet_len = packet->size;
+ pin->up.packet_len = ofpbuf_size(packet);
pin->up.packet = ofpbuf_steal_data(packet);
pin->up.reason = reason;
pin->up.table_id = ctx->table_id;
learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
ofproto_dpif_flow_mod(ctx->xbridge->ofproto, &fm);
ofpbuf_uninit(&ofpacts);
+
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
+ entry->u.learn.ofproto = ctx->xin->ofproto;
+ /* Look up the learned rule, taking a reference on it. The reference
+ * is released when this cache entry is deleted. */
+ rule_dpif_lookup(ctx->xbridge->ofproto, &ctx->xin->flow, NULL,
+ &entry->u.learn.rule, true);
+ }
+}
+
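+/* Reduces 'rule's timeouts to the given values if 'tcp_flags' indicate the
+ * end of the TCP flow (FIN or RST). */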
+static void
+xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
+ uint16_t idle_timeout, uint16_t hard_timeout)
+{
+ if (tcp_flags & (TCP_FIN | TCP_RST)) {
+ rule_dpif_reduce_timeouts(rule, idle_timeout, hard_timeout);
+ }
}
static void
xlate_fin_timeout(struct xlate_ctx *ctx,
const struct ofpact_fin_timeout *oft)
{
- if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
- rule_dpif_reduce_timeouts(ctx->rule, oft->fin_idle_timeout,
- oft->fin_hard_timeout);
+ if (ctx->rule) {
+ xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
+ oft->fin_idle_timeout, oft->fin_hard_timeout);
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
+ /* XC_RULE already holds a reference on the rule, so none is taken
+ * here. */
+ entry->u.fin.rule = ctx->rule;
+ entry->u.fin.idle = oft->fin_idle_timeout;
+ entry->u.fin.hard = oft->fin_hard_timeout;
+ }
}
}
static bool
may_receive(const struct xport *xport, struct xlate_ctx *ctx)
{
- if (xport->config & (eth_addr_equals(ctx->xin->flow.dl_dst, eth_addr_stp)
+ if (xport->config & (is_stp(&ctx->xin->flow)
? OFPUTIL_PC_NO_RECV_STP
: OFPUTIL_PC_NO_RECV)) {
return false;
ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);
ofpacts_execute_action_set(&action_list, &ctx->action_set);
- do_xlate_actions(action_list.data, action_list.size, ctx);
+ do_xlate_actions(ofpbuf_data(&action_list), ofpbuf_size(&action_list), ctx);
ofpbuf_uninit(&action_list);
}
case OFPACT_SET_FIELD:
set_field = ofpact_get_SET_FIELD(a);
mf = set_field->field;
- mf_mask_field_and_prereqs(mf, &wc->masks);
/* Set field action only ever overwrites packet's outermost
* applicable header fields. Do nothing if no header exists. */
- if ((mf->id != MFF_VLAN_VID || flow->vlan_tci & htons(VLAN_CFI))
- && ((mf->id != MFF_MPLS_LABEL && mf->id != MFF_MPLS_TC)
- || eth_type_mpls(flow->dl_type))) {
- mf_set_flow_value(mf, &set_field->value, flow);
+ if (mf->id == MFF_VLAN_VID) {
+ wc->masks.vlan_tci |= htons(VLAN_CFI);
+ if (!(flow->vlan_tci & htons(VLAN_CFI))) {
+ break;
+ }
+ } else if ((mf->id == MFF_MPLS_LABEL || mf->id == MFF_MPLS_TC)
+ /* 'dl_type' is already unwildcarded. */
+ && !eth_type_mpls(flow->dl_type)) {
+ break;
}
+
+ mf_mask_field_and_prereqs(mf, &wc->masks);
+ mf_set_flow_value(mf, &set_field->value, flow);
break;
case OFPACT_STACK_PUSH:
xin->packet = packet;
xin->may_learn = packet != NULL;
xin->rule = rule;
+ xin->xcache = NULL;
xin->ofpacts = NULL;
xin->ofpacts_len = 0;
xin->tcp_flags = tcp_flags;
ofpbuf_use_stub(&dst->odp_actions, dst->odp_actions_stub,
sizeof dst->odp_actions_stub);
- ofpbuf_put(&dst->odp_actions, src->odp_actions.data,
- src->odp_actions.size);
+ ofpbuf_put(&dst->odp_actions, ofpbuf_data(&src->odp_actions),
+ ofpbuf_size(&src->odp_actions));
}
\f
static struct skb_priority_to_dscp *
const struct nlattr *a;
unsigned int left;
- NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->xout->odp_actions.data,
- ctx->xout->odp_actions.size) {
+ NL_ATTR_FOR_EACH_UNSAFE (a, left, ofpbuf_data(&ctx->xout->odp_actions),
+ ofpbuf_size(&ctx->xout->odp_actions)) {
if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
&& nl_attr_get_odp_port(a) == local_odp_port) {
return true;
ctx.xbridge = xbridge_lookup(xin->ofproto);
if (!ctx.xbridge) {
- goto out;
+ return;
}
ctx.rule = xin->rule;
if (!xin->ofpacts && !ctx.rule) {
ctx.table_id = rule_dpif_lookup(ctx.xbridge->ofproto, flow,
!xin->skip_wildcards ? wc : NULL,
- &rule);
+ &rule, ctx.xin->xcache != NULL);
if (ctx.xin->resubmit_stats) {
rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
}
+ if (ctx.xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
+ entry->u.rule = rule;
+ }
ctx.rule = rule;
}
xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);
+ xout->use_recirc = false;
if (xin->ofpacts) {
ofpacts = xin->ofpacts;
break;
case OFPC_FRAG_DROP:
- goto out;
+ return;
case OFPC_FRAG_REASM:
OVS_NOT_REACHED();
}
in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
- if (in_port && in_port->is_tunnel && ctx.xin->resubmit_stats) {
- netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
- if (in_port->bfd) {
- bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
+ if (in_port && in_port->is_tunnel) {
+ if (ctx.xin->resubmit_stats) {
+ netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
+ if (in_port->bfd) {
+ bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
+ }
+ }
+ if (ctx.xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
+ entry->u.dev.rx = netdev_ref(in_port->netdev);
+ entry->u.dev.bfd = bfd_ref(in_port->bfd);
}
}
add_sflow_action(&ctx);
add_ipfix_action(&ctx);
- sample_actions_len = ctx.xout->odp_actions.size;
+ sample_actions_len = ofpbuf_size(&ctx.xout->odp_actions);
if (tnl_may_send && (!in_port || may_receive(in_port, &ctx))) {
do_xlate_actions(ofpacts, ofpacts_len, &ctx);
/* We've let OFPP_NORMAL and the learning action look at the
* packet, so drop it now if forwarding is disabled. */
if (in_port && !xport_stp_forward_state(in_port)) {
- ctx.xout->odp_actions.size = sample_actions_len;
+ ofpbuf_set_size(&ctx.xout->odp_actions, sample_actions_len);
}
}
- if (ctx.action_set.size) {
+ if (ofpbuf_size(&ctx.action_set)) {
xlate_action_set(&ctx);
}
}
}
- if (nl_attr_oversized(ctx.xout->odp_actions.size)) {
+ if (nl_attr_oversized(ofpbuf_size(&ctx.xout->odp_actions))) {
/* These datapath actions are too big for a Netlink attribute, so we
* can't hand them to the kernel directly. dpif_execute() can execute
* them one by one with help, so just mark the result as SLOW_ACTION to
ctx.xout->slow |= SLOW_ACTION;
}
- if (ctx.xin->resubmit_stats) {
- mirror_update_stats(ctx.xbridge->mbridge, xout->mirrors,
- ctx.xin->resubmit_stats->n_packets,
- ctx.xin->resubmit_stats->n_bytes);
-
- if (ctx.xbridge->netflow) {
- const struct ofpact *ofpacts;
- size_t ofpacts_len;
-
- ofpacts_len = actions->ofpacts_len;
- ofpacts = actions->ofpacts;
- if (ofpacts_len == 0
- || ofpacts->type != OFPACT_CONTROLLER
- || ofpact_next(ofpacts) < ofpact_end(ofpacts, ofpacts_len)) {
- /* Only update netflow if we don't have controller flow. We don't
- * report NetFlow expiration messages for such facets because they
- * are just part of the control logic for the network, not real
- * traffic. */
+ if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
+ if (ctx.xin->resubmit_stats) {
+ mirror_update_stats(ctx.xbridge->mbridge, xout->mirrors,
+ ctx.xin->resubmit_stats->n_packets,
+ ctx.xin->resubmit_stats->n_bytes);
+ }
+ if (ctx.xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx.xin->xcache, XC_MIRROR);
+ entry->u.mirror.mbridge = mbridge_ref(ctx.xbridge->mbridge);
+ entry->u.mirror.mirrors = xout->mirrors;
+ }
+ }
+
+ if (ctx.xbridge->netflow) {
+ const struct ofpact *ofpacts = actions->ofpacts;
+ size_t ofpacts_len = actions->ofpacts_len;
+
+ /* Only update netflow if we don't have controller flow. We don't
+ * report NetFlow expiration messages for such facets because they
+ * are just part of the control logic for the network, not real
+ * traffic. */
+ if (ofpacts_len == 0
+ || ofpacts->type != OFPACT_CONTROLLER
+ || ofpact_next(ofpacts) < ofpact_end(ofpacts, ofpacts_len)) {
+ if (ctx.xin->resubmit_stats) {
netflow_flow_update(ctx.xbridge->netflow, flow,
xout->nf_output_iface,
ctx.xin->resubmit_stats);
}
+ if (ctx.xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
+ entry->u.nf.netflow = netflow_ref(ctx.xbridge->netflow);
+ entry->u.nf.flow = xmemdup(flow, sizeof *flow);
+ entry->u.nf.iface = xout->nf_output_iface;
+ }
}
}
wc->masks.tp_src &= htons(UINT8_MAX);
wc->masks.tp_dst &= htons(UINT8_MAX);
}
-
-out:
- rule_dpif_unref(rule);
}
/* Sends 'packet' out 'ofport'.
&output.ofpact, sizeof output,
packet);
}
+
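+/* Allocates and returns a new, empty translation cache. */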
+struct xlate_cache *
+xlate_cache_new(void)
+{
+ struct xlate_cache *xcache = xmalloc(sizeof *xcache);
+
+ ofpbuf_init(&xcache->entries, 512);
+ return xcache;
+}
+
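+/* Appends a new, zeroed entry of the given 'type' to 'xcache' and returns
+ * it for the caller to fill in. */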
+static struct xc_entry *
+xlate_cache_add_entry(struct xlate_cache *xcache, enum xc_type type)
+{
+ struct xc_entry *entry;
+
+ entry = ofpbuf_put_zeros(&xcache->entries, sizeof *entry);
+ entry->type = type;
+
+ return entry;
+}
+
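+/* Credits 'stats' to the netdevs and BFD session held by an XC_NETDEV
+ * entry. */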
+static void
+xlate_cache_netdev(struct xc_entry *entry, const struct dpif_flow_stats *stats)
+{
+ if (entry->u.dev.tx) {
+ netdev_vport_inc_tx(entry->u.dev.tx, stats);
+ }
+ if (entry->u.dev.rx) {
+ netdev_vport_inc_rx(entry->u.dev.rx, stats);
+ }
+ if (entry->u.dev.bfd) {
+ bfd_account_rx(entry->u.dev.bfd, stats);
+ }
+}
+
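+/* Replays the MAC learning side effect of an OFPP_NORMAL action for a
+ * cached flow. */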
+static void
+xlate_cache_normal(struct ofproto_dpif *ofproto, struct flow *flow, int vlan)
+{
+ struct xbridge *xbridge;
+ struct xbundle *xbundle;
+ struct flow_wildcards wc;
+
+ xbridge = xbridge_lookup(ofproto);
+ if (!xbridge) {
+ return;
+ }
+
+ xbundle = lookup_input_bundle(xbridge, flow->in_port.ofp_port, false,
+ NULL);
+ if (!xbundle) {
+ return;
+ }
+
+ update_learning_table(xbridge, flow, &wc, vlan, xbundle);
+}
+
+/* Push stats and perform side effects of flow translation. */
+void
+xlate_push_stats(struct xlate_cache *xcache, bool may_learn,
+ const struct dpif_flow_stats *stats)
+{
+ struct xc_entry *entry;
+ struct ofpbuf entries = xcache->entries;
+
+ XC_ENTRY_FOR_EACH (entry, entries, xcache) {
+ switch (entry->type) {
+ case XC_RULE:
+ rule_dpif_credit_stats(entry->u.rule, stats);
+ break;
+ case XC_BOND:
+ bond_account(entry->u.bond.bond, entry->u.bond.flow,
+ entry->u.bond.vid, stats->n_bytes);
+ break;
+ case XC_NETDEV:
+ xlate_cache_netdev(entry, stats);
+ break;
+ case XC_NETFLOW:
+ netflow_flow_update(entry->u.nf.netflow, entry->u.nf.flow,
+ entry->u.nf.iface, stats);
+ break;
+ case XC_MIRROR:
+ mirror_update_stats(entry->u.mirror.mbridge,
+ entry->u.mirror.mirrors,
+ stats->n_packets, stats->n_bytes);
+ break;
+ case XC_LEARN:
+ if (may_learn) {
+ struct rule_dpif *rule = entry->u.learn.rule;
+
+ /* Reset the modified time for a rule that is equivalent to
+ * the currently cached rule. If the rule is not the exact
+ * rule we have cached, update the reference that we have. */
+ entry->u.learn.rule = ofproto_dpif_refresh_rule(rule);
+ }
+ break;
+ case XC_NORMAL:
+ xlate_cache_normal(entry->u.normal.ofproto, entry->u.normal.flow,
+ entry->u.normal.vlan);
+ break;
+ case XC_FIN_TIMEOUT:
+ xlate_fin_timeout__(entry->u.fin.rule, stats->tcp_flags,
+ entry->u.fin.idle, entry->u.fin.hard);
+ break;
+ default:
+ OVS_NOT_REACHED();
+ }
+ }
+}
+
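+/* Releases the netdev and BFD references held by an XC_NETDEV entry. */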
+static void
+xlate_dev_unref(struct xc_entry *entry)
+{
+ if (entry->u.dev.tx) {
+ netdev_close(entry->u.dev.tx);
+ }
+ if (entry->u.dev.rx) {
+ netdev_close(entry->u.dev.rx);
+ }
+ if (entry->u.dev.bfd) {
+ bfd_unref(entry->u.dev.bfd);
+ }
+}
+
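+/* Expires and clears the cached NetFlow flow, then drops the references
+ * taken when the XC_NETFLOW entry was added. */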
+static void
+xlate_cache_clear_netflow(struct netflow *netflow, struct flow *flow)
+{
+ netflow_expire(netflow, flow);
+ netflow_flow_clear(netflow, flow);
+ netflow_unref(netflow);
+ free(flow);
+}
+
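+/* Releases the references held by the entries in 'xcache' and empties it,
+ * leaving the cache ready for reuse. */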
+void
+xlate_cache_clear(struct xlate_cache *xcache)
+{
+ struct xc_entry *entry;
+ struct ofpbuf entries;
+
+ if (!xcache) {
+ return;
+ }
+
+ XC_ENTRY_FOR_EACH (entry, entries, xcache) {
+ switch (entry->type) {
+ case XC_RULE:
+ rule_dpif_unref(entry->u.rule);
+ break;
+ case XC_BOND:
+ free(entry->u.bond.flow);
+ bond_unref(entry->u.bond.bond);
+ break;
+ case XC_NETDEV:
+ xlate_dev_unref(entry);
+ break;
+ case XC_NETFLOW:
+ xlate_cache_clear_netflow(entry->u.nf.netflow, entry->u.nf.flow);
+ break;
+ case XC_MIRROR:
+ mbridge_unref(entry->u.mirror.mbridge);
+ break;
+ case XC_LEARN:
+ /* 'u.learn.rule' is the learned rule. */
+ rule_dpif_unref(entry->u.learn.rule);
+ break;
+ case XC_NORMAL:
+ free(entry->u.normal.flow);
+ break;
+ case XC_FIN_TIMEOUT:
+ /* 'u.fin.rule' is always already held as an XC_RULE, which
+ * has already released its reference above. */
+ break;
+ default:
+ OVS_NOT_REACHED();
+ }
+ }
+
+ ofpbuf_clear(&xcache->entries);
+}
+
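+/* Clears 'xcache' and then frees it. */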
+void
+xlate_cache_delete(struct xlate_cache *xcache)
+{
+ xlate_cache_clear(xcache);
+ ofpbuf_uninit(&xcache->entries);
+ free(xcache);
+}