-/* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "ofp-actions.h"
#include "ofproto/ofproto-dpif-ipfix.h"
#include "ofproto/ofproto-dpif-mirror.h"
+#include "ofproto/ofproto-dpif-monitor.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"
COVERAGE_DEFINE(xlate_actions);
COVERAGE_DEFINE(xlate_actions_oversize);
+COVERAGE_DEFINE(xlate_actions_mpls_overflow);
VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);
/* Maximum depth of flow table recursion (due to resubmit actions) in a
* flow translation. */
#define MAX_RESUBMIT_RECURSION 64
+#define MAX_INTERNAL_RESUBMITS 1 /* Max resubmits allowed using rules in
+ internal table. */
/* Maximum number of resubmit actions in a flow translation, whether they are
* recursive or not. */
enum ofp_config_flags frag; /* Fragmentation handling. */
bool has_in_band; /* Bridge has in band control? */
bool forward_bpdu; /* Bridge forwards STP BPDUs? */
+
+ /* True if the datapath supports recirculation. */
+ bool enable_recirc;
+
+ /* True if the datapath supports variable-length
+ * OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions.
+ * False if the datapath supports only 8-byte (or shorter) userdata. */
+ bool variable_length_userdata;
+
+ /* Number of MPLS label stack entries that the datapath supports
+ * in matches. */
+ size_t max_mpls_depth;
};
struct xbundle {
/* The rule that we are currently translating, or NULL. */
struct rule_dpif *rule;
- int mpls_depth_delta; /* Delta of the mpls stack depth since
- * actions were last committed.
- * Must be between -1 and 1 inclusive. */
- ovs_be32 pre_push_mpls_lse; /* Used to record the top-most MPLS LSE
- * prior to an mpls_push so that it may be
- * used for a subsequent mpls_pop. */
-
/* Resubmit statistics, via xlate_table_action(). */
int recurse; /* Current resubmit nesting depth. */
int resubmits; /* Total number of resubmits. */
+ bool in_group; /* Currently translating ofgroup, if true. */
uint32_t orig_skb_priority; /* Priority when packet arrived. */
uint8_t table_id; /* OpenFlow table ID where flow was found. */
uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
bool exit; /* No further actions should be processed. */
+ bool use_recirc; /* Should generate recirc? */
+ struct xlate_recirc recirc; /* Information used for generating
+ * recirculation actions. */
+
/* OpenFlow 1.1+ action set.
*
* 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
* it did not arrive on a "real" port. 'ofpp_none_bundle' exists for
* when an input bundle is needed for validation (e.g., mirroring or
* OFPP_NORMAL processing). It is not connected to an 'ofproto', nor does it have
- * any 'port' structs, so care must be taken when dealing with it.
- * The bundle's name and vlan mode are initialized in lookup_input_bundle() */
-static struct xbundle ofpp_none_bundle;
+ * any 'port' structs, so care must be taken when dealing with it. */
+static struct xbundle ofpp_none_bundle = {
+ .name = "OFPP_NONE",
+ .vlan_mode = PORT_VLAN_TRUNK
+};
/* Node in 'xport''s 'skb_priorities' map. Used to maintain a map from
* 'priority' (the datapath's term for QoS queue) to the dscp bits which all
uint8_t dscp; /* DSCP bits to mark outgoing traffic with. */
};
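+/* Kinds of cached translation side effects.  Each value tags the member of
+ * the 'u' union in struct xc_entry (below) that carries the corresponding
+ * state. */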
+enum xc_type {
+ XC_RULE,
+ XC_BOND,
+ XC_NETDEV,
+ XC_NETFLOW,
+ XC_MIRROR,
+ XC_LEARN,
+ XC_NORMAL,
+ XC_FIN_TIMEOUT,
+};
+
+/* xlate_cache entries hold enough information to perform the side effects of
+ * xlate_actions() for a rule, without needing to perform rule translation
+ * from scratch. The primary usage of these is to submit statistics to objects
+ * that a flow relates to, although they may be used for other effects as well
+ * (for instance, refreshing hard timeouts for learned flows). */
+struct xc_entry {
+ enum xc_type type;
+ union {
+ struct rule_dpif *rule;
+ struct {
+ struct netdev *tx;
+ struct netdev *rx;
+ struct bfd *bfd;
+ } dev;
+ struct {
+ struct netflow *netflow;
+ struct flow *flow;
+ ofp_port_t iface;
+ } nf;
+ struct {
+ struct mbridge *mbridge;
+ mirror_mask_t mirrors;
+ } mirror;
+ struct {
+ struct bond *bond;
+ struct flow *flow;
+ uint16_t vid;
+ } bond;
+ struct {
+ struct ofproto_dpif *ofproto;
+ struct rule_dpif *rule;
+ } learn;
+ struct {
+ struct ofproto_dpif *ofproto;
+ struct flow *flow;
+ int vlan;
+ } normal;
+ struct {
+ struct rule_dpif *rule;
+ uint16_t idle;
+ uint16_t hard;
+ } fin;
+ } u;
+};
+
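+/* Iterates 'entry' over each cached entry, in insertion order.  'entries'
+ * must be a caller-provided 'struct ofpbuf' lvalue: the macro copies
+ * xcache->entries into it and pulls fixed-size xc_entry structs from that
+ * copy, so the cache's own buffer is left untouched. */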
+#define XC_ENTRY_FOR_EACH(entry, entries, xcache) \
+ entries = xcache->entries; \
+ for (entry = ofpbuf_try_pull(&entries, sizeof *entry); \
+ entry; \
+ entry = ofpbuf_try_pull(&entries, sizeof *entry))
+
+struct xlate_cache {
+ struct ofpbuf entries;
+};
+
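+/* A rough sketch of the intended xlate_cache lifecycle, using only functions
+ * defined in this file (exactly how the ofproto-dpif caller wires these
+ * together is an assumption and is not shown here):
+ *
+ *     struct xlate_cache *xcache = xlate_cache_new();
+ *     xin.xcache = xcache;               (set when initializing the xlate_in)
+ *     xlate_actions(&xin, &xout);        (translation fills in the cache)
+ *     ...
+ *     xlate_push_stats(xcache, may_learn, &stats);
+ *     ...
+ *     xlate_cache_delete(xcache);
+ */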
static struct hmap xbridges = HMAP_INITIALIZER(&xbridges);
static struct hmap xbundles = HMAP_INITIALIZER(&xbundles);
static struct hmap xports = HMAP_INITIALIZER(&xports);
struct xlate_ctx *);
static void xlate_actions__(struct xlate_in *, struct xlate_out *)
OVS_REQ_RDLOCK(xlate_rwlock);
- static void xlate_normal(struct xlate_ctx *);
- static void xlate_report(struct xlate_ctx *, const char *);
- static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
- uint8_t table_id, bool may_packet_in);
+static void xlate_normal(struct xlate_ctx *);
+static void xlate_report(struct xlate_ctx *, const char *);
+static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
+ uint8_t table_id, bool may_packet_in,
+ bool honor_table_miss);
static bool input_vid_is_valid(uint16_t vid, struct xbundle *, bool warn);
static uint16_t input_vid_to_vlan(const struct xbundle *, uint16_t vid);
static void output_normal(struct xlate_ctx *, const struct xbundle *,
static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
uint8_t *dscp);
+static struct xc_entry *xlate_cache_add_entry(struct xlate_cache *xc,
+ enum xc_type type);
+
void
xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
struct dpif *dpif, struct rule_dpif *miss_rule,
const struct dpif_sflow *sflow,
const struct dpif_ipfix *ipfix,
const struct netflow *netflow, enum ofp_config_flags frag,
- bool forward_bpdu, bool has_in_band)
+ bool forward_bpdu, bool has_in_band,
+ bool enable_recirc,
+ bool variable_length_userdata,
+ size_t max_mpls_depth)
{
struct xbridge *xbridge = xbridge_lookup(ofproto);
xbridge->frag = frag;
xbridge->miss_rule = miss_rule;
xbridge->no_packet_in_rule = no_packet_in_rule;
+ xbridge->enable_recirc = enable_recirc;
+ xbridge->variable_length_userdata = variable_length_userdata;
+ xbridge->max_mpls_depth = max_mpls_depth;
}
void
/* Given a datapath, packet, and flow metadata ('backer', 'packet', and 'key'
* respectively), populates 'flow' with the result of odp_flow_key_to_flow().
- * Optionally, if nonnull, populates 'fitnessp' with the fitness of 'flow' as
- * returned by odp_flow_key_to_flow(). Also, optionally populates 'ofproto'
- * with the ofproto_dpif, 'odp_in_port' with the datapath in_port, that
- * 'packet' ingressed, and 'ipfix', 'sflow', and 'netflow' with the appropriate
- * handles for those protocols if they're enabled. Caller is responsible for
- * unrefing them.
+ * Optionally populates 'ofproto' with the ofproto_dpif, 'odp_in_port' with
+ * the datapath in_port that 'packet' ingressed on, and 'ipfix', 'sflow', and
+ * 'netflow' with the appropriate handles for those protocols if they're
+ * enabled. Caller is responsible for unrefing them.
*
* If 'ofproto' is nonnull, requires 'flow''s in_port to exist. Otherwise sets
* 'flow''s in_port to OFPP_NONE.
* or some other positive errno if there are other problems. */
int
xlate_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
- const struct nlattr *key, size_t key_len,
- struct flow *flow, enum odp_key_fitness *fitnessp,
+ const struct nlattr *key, size_t key_len, struct flow *flow,
struct ofproto_dpif **ofproto, struct dpif_ipfix **ipfix,
struct dpif_sflow **sflow, struct netflow **netflow,
odp_port_t *odp_in_port)
{
- enum odp_key_fitness fitness;
const struct xport *xport;
int error = ENODEV;
ovs_rwlock_rdlock(&xlate_rwlock);
- fitness = odp_flow_key_to_flow(key, key_len, flow);
- if (fitness == ODP_FIT_ERROR) {
+ if (odp_flow_key_to_flow(key, key_len, flow) == ODP_FIT_ERROR) {
error = EINVAL;
goto exit;
}
* an OpenFlow controller properly, so that it looks correct
* for sFlow, and so that flow_extract() will get the correct
* vlan_tci if it is called on 'packet'. */
- eth_push_vlan(packet, flow->vlan_tci);
+ eth_push_vlan(packet, htons(ETH_TYPE_VLAN), flow->vlan_tci);
}
- /* We can't reproduce 'key' from 'flow'. */
- fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
}
error = 0;
}
exit:
- if (fitnessp) {
- *fitnessp = fitness;
- }
ovs_rwlock_unlock(&xlate_rwlock);
return error;
}
: NULL;
}
-static enum stp_state
+static bool
xport_stp_learn_state(const struct xport *xport)
{
struct stp_port *sp = xport_get_stp_port(xport);
return stp_forward_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
}
+static bool
+xport_stp_listen_state(const struct xport *xport)
+{
+ struct stp_port *sp = xport_get_stp_port(xport);
+ return stp_listen_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
+}
+
/* Returns true if STP should process 'flow'. Sets fields in 'wc' that
* were used to make the determination.*/
static bool
stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
{
+ /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
- return eth_addr_equals(flow->dl_dst, eth_addr_stp);
+ return is_stp(flow);
}
static void
{
struct stp_port *sp = xport_get_stp_port(xport);
struct ofpbuf payload = *packet;
- struct eth_header *eth = payload.data;
+ struct eth_header *eth = ofpbuf_data(&payload);
/* Sink packets on ports that have STP disabled when the bridge has
* STP enabled. */
}
/* Trim off padding on payload. */
- if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
- payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
+ if (ofpbuf_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
+ ofpbuf_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
}
if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
- stp_received_bpdu(sp, payload.data, payload.size);
+ stp_received_bpdu(sp, ofpbuf_data(&payload), ofpbuf_size(&payload));
}
}
return xport->xbundle;
}
- /* Special-case OFPP_NONE, which a controller may use as the ingress
- * port for traffic that it is sourcing. */
- if (in_port == OFPP_NONE) {
- ofpp_none_bundle.name = "OFPP_NONE";
- ofpp_none_bundle.vlan_mode = PORT_VLAN_TRUNK;
+ /* Special-case OFPP_NONE (OF1.0) and OFPP_CONTROLLER (OF1.1+),
+ * which a controller may use as the ingress port for traffic that
+ * it is sourcing. */
+ if (in_port == OFPP_CONTROLLER || in_port == OFPP_NONE) {
return &ofpp_none_bundle;
}
bool has_mirror;
int out_vlan;
- has_mirror = mirror_get(xbridge->mbridge, mirror_mask_ffs(mirrors) - 1,
+ has_mirror = mirror_get(xbridge->mbridge, raw_ctz(mirrors),
&vlans, &dup_mirrors, &out, &out_vlan);
ovs_assert(has_mirror);
/* Partially configured bundle with no slaves. Drop the packet. */
return;
} else if (!out_xbundle->bond) {
+ ctx->use_recirc = false;
xport = CONTAINER_OF(list_front(&out_xbundle->xports), struct xport,
bundle_node);
} else {
struct ofport_dpif *ofport;
+ struct xlate_recirc *xr = &ctx->recirc;
+ struct flow_wildcards *wc = &ctx->xout->wc;
+
+ if (ctx->xbridge->enable_recirc) {
+ ctx->use_recirc = bond_may_recirc(
+ out_xbundle->bond, &xr->recirc_id, &xr->hash_basis);
+
+ if (ctx->use_recirc) {
+ /* Only TCP mode uses recirculation. */
+ xr->hash_alg = OVS_HASH_ALG_L4;
+ bond_update_post_recirc_rules(out_xbundle->bond, false);
- ofport = bond_choose_output_slave(out_xbundle->bond, &ctx->xin->flow,
- &ctx->xout->wc, vid);
+ /* Recirculation does not require unmasking hash fields. */
+ wc = NULL;
+ }
+ }
+
+ ofport = bond_choose_output_slave(out_xbundle->bond,
+ &ctx->xin->flow, wc, vid);
xport = xport_lookup(ofport);
if (!xport) {
return;
}
- if (ctx->xin->resubmit_stats) {
- bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
- ctx->xin->resubmit_stats->n_bytes);
+ /* If ctx->use_recirc is set, the main thread will handle stats
+ * accounting for this bond. */
+ if (!ctx->use_recirc) {
+ if (ctx->xin->resubmit_stats) {
+ bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
+ ctx->xin->resubmit_stats->n_bytes);
+ }
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+ struct flow *flow;
+
+ flow = &ctx->xin->flow;
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
+ entry->u.bond.bond = bond_ref(out_xbundle->bond);
+ entry->u.bond.flow = xmemdup(flow, sizeof *flow);
+ entry->u.bond.vid = vid;
+ }
}
}
if (ctx->xin->may_learn) {
update_learning_table(ctx->xbridge, flow, wc, vlan, in_xbundle);
}
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+
+ /* Save enough info to update the mac learning table later. */
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
+ entry->u.normal.ofproto = ctx->xin->ofproto;
+ entry->u.normal.flow = xmemdup(flow, sizeof *flow);
+ entry->u.normal.vlan = vlan;
+ }
/* Determine output bundle. */
ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
odp_port = ofp_port_to_odp_port(xbridge, flow->in_port.ofp_port);
- pid = dpif_port_get_pid(xbridge->dpif, odp_port);
- cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size, odp_actions);
+ pid = dpif_port_get_pid(xbridge->dpif, odp_port,
+ flow_hash_5tuple(flow, 0));
+ cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size,
+ odp_actions);
nl_msg_end_nested(odp_actions, actions_offset);
nl_msg_end_nested(odp_actions, sample_offset);
} else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
if (packet) {
bfd_process_packet(xport->bfd, flow, packet);
+ /* If a POLL was received, immediately send a FINAL back. */
+ if (bfd_should_send_packet(xport->bfd)) {
+ ofproto_dpif_monitor_port_send_soon(xport->ofport);
+ }
}
return SLOW_BFD;
} else if (xport->xbundle && xport->xbundle->lacp
/* If 'struct flow' gets additional metadata, we'll need to zero it out
* before traversing a patch port. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 23);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 26);
if (!xport) {
xlate_report(ctx, "Nonexistent output port");
} else if (xport->config & OFPUTIL_PC_NO_FWD) {
xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
return;
- } else if (check_stp && !xport_stp_forward_state(xport)) {
- xlate_report(ctx, "STP not in forwarding state, skipping output");
- return;
+ } else if (check_stp) {
+ if (is_stp(&ctx->base_flow)) {
+ if (!xport_stp_listen_state(xport)) {
+ xlate_report(ctx, "STP not in listening state, "
+ "skipping bpdu output");
+ return;
+ }
+ } else if (!xport_stp_forward_state(xport)) {
+ xlate_report(ctx, "STP not in forwarding state, "
+ "skipping output");
+ return;
+ }
}
if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
ctx->xout->slow |= special;
} else if (may_receive(peer, ctx)) {
if (xport_stp_forward_state(peer)) {
- xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
+ xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
} else {
/* Forwarding is disabled by STP. Let OFPP_NORMAL and the
* learning action look at the packet, then drop it. */
struct flow old_base_flow = ctx->base_flow;
- size_t old_size = ctx->xout->odp_actions.size;
+ size_t old_size = ofpbuf_size(&ctx->xout->odp_actions);
mirror_mask_t old_mirrors = ctx->xout->mirrors;
- xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
+ xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
ctx->xout->mirrors = old_mirrors;
ctx->base_flow = old_base_flow;
- ctx->xout->odp_actions.size = old_size;
+ ofpbuf_set_size(&ctx->xout->odp_actions, old_size);
}
}
bfd_account_rx(peer->bfd, ctx->xin->resubmit_stats);
}
}
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
+ entry->u.dev.tx = netdev_ref(xport->netdev);
+ entry->u.dev.rx = netdev_ref(peer->netdev);
+ entry->u.dev.bfd = bfd_ref(peer->bfd);
+ }
return;
}
flow_nw_tos = flow->nw_tos;
if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
- wc->masks.nw_tos |= IP_ECN_MASK;
+ wc->masks.nw_tos |= IP_DSCP_MASK;
flow->nw_tos &= ~IP_DSCP_MASK;
flow->nw_tos |= dscp;
}
if (ctx->xin->resubmit_stats) {
netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
}
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
+ entry->u.dev.tx = netdev_ref(xport->netdev);
+ }
out_port = odp_port;
commit_odp_tunnel_action(flow, &ctx->base_flow,
&ctx->xout->odp_actions);
flow->tunnel = flow_tnl; /* Restore tunnel metadata */
} else {
- ofp_port_t vlandev_port;
-
odp_port = xport->odp_port;
+ out_port = odp_port;
if (ofproto_has_vlan_splinters(ctx->xbridge->ofproto)) {
+ ofp_port_t vlandev_port;
+
wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
- }
- vlandev_port = vsp_realdev_to_vlandev(ctx->xbridge->ofproto, ofp_port,
- flow->vlan_tci);
- if (vlandev_port == ofp_port) {
- out_port = odp_port;
- } else {
- out_port = ofp_port_to_odp_port(ctx->xbridge, vlandev_port);
- flow->vlan_tci = htons(0);
+ vlandev_port = vsp_realdev_to_vlandev(ctx->xbridge->ofproto,
+ ofp_port, flow->vlan_tci);
+ if (vlandev_port != ofp_port) {
+ out_port = ofp_port_to_odp_port(ctx->xbridge, vlandev_port);
+ flow->vlan_tci = htons(0);
+ }
}
}
if (out_port != ODPP_NONE) {
ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
&ctx->xout->odp_actions,
- &ctx->xout->wc,
- &ctx->mpls_depth_delta);
- nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
- out_port);
+ &ctx->xout->wc);
+
+ if (ctx->use_recirc) {
+ struct ovs_action_hash *act_hash;
+ struct xlate_recirc *xr = &ctx->recirc;
+
+ /* Hash action. */
+ act_hash = nl_msg_put_unspec_uninit(&ctx->xout->odp_actions,
+ OVS_ACTION_ATTR_HASH,
+ sizeof *act_hash);
+ act_hash->hash_alg = xr->hash_alg;
+ act_hash->hash_basis = xr->hash_basis;
+
+ /* Recirc action. */
+ nl_msg_put_u32(&ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC,
+ xr->recirc_id);
+ } else {
+ nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
+ out_port);
+ }
ctx->sflow_odp_port = odp_port;
ctx->sflow_n_outputs++;
xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule)
{
struct rule_dpif *old_rule = ctx->rule;
- struct rule_actions *actions;
+ const struct rule_actions *actions;
if (ctx->xin->resubmit_stats) {
rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
ctx->rule = rule;
actions = rule_dpif_get_actions(rule);
do_xlate_actions(actions->ofpacts, actions->ofpacts_len, ctx);
- rule_actions_unref(actions);
ctx->rule = old_rule;
ctx->recurse--;
}
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
- if (ctx->recurse >= MAX_RESUBMIT_RECURSION) {
+ if (ctx->recurse >= MAX_RESUBMIT_RECURSION + MAX_INTERNAL_RESUBMITS) {
VLOG_ERR_RL(&rl, "resubmit actions recursed over %d times",
MAX_RESUBMIT_RECURSION);
- } else if (ctx->resubmits >= MAX_RESUBMITS) {
+ } else if (ctx->resubmits >= MAX_RESUBMITS + MAX_INTERNAL_RESUBMITS) {
VLOG_ERR_RL(&rl, "over %d resubmit actions", MAX_RESUBMITS);
- } else if (ctx->xout->odp_actions.size > UINT16_MAX) {
+ } else if (ofpbuf_size(&ctx->xout->odp_actions) > UINT16_MAX) {
VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of actions");
- } else if (ctx->stack.size >= 65536) {
+ } else if (ofpbuf_size(&ctx->stack) >= 65536) {
VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of stack");
} else {
return true;
}
static void
-xlate_table_action(struct xlate_ctx *ctx,
- ofp_port_t in_port, uint8_t table_id, bool may_packet_in)
+xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
+ bool may_packet_in, bool honor_table_miss)
{
if (xlate_resubmit_resource_check(ctx)) {
ofp_port_t old_in_port = ctx->xin->flow.in_port.ofp_port;
bool skip_wildcards = ctx->xin->skip_wildcards;
uint8_t old_table_id = ctx->table_id;
struct rule_dpif *rule;
+ enum rule_dpif_lookup_verdict verdict;
+ enum ofputil_port_config config = 0;
ctx->table_id = table_id;
* original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
* have surprising behavior). */
ctx->xin->flow.in_port.ofp_port = in_port;
- rule_dpif_lookup_in_table(ctx->xbridge->ofproto, &ctx->xin->flow,
- !skip_wildcards ? &ctx->xout->wc : NULL,
- table_id, &rule);
+ verdict = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
+ &ctx->xin->flow,
+ !skip_wildcards
+ ? &ctx->xout->wc : NULL,
+ honor_table_miss,
+ &ctx->table_id, &rule,
+ ctx->xin->xcache != NULL);
ctx->xin->flow.in_port.ofp_port = old_in_port;
if (ctx->xin->resubmit_hook) {
ctx->xin->resubmit_hook(ctx->xin, rule, ctx->recurse);
}
- if (!rule && may_packet_in) {
- struct xport *xport;
-
- /* XXX
- * check if table configuration flags
- * OFPTC11_TABLE_MISS_CONTROLLER, default.
- * OFPTC11_TABLE_MISS_CONTINUE,
- * OFPTC11_TABLE_MISS_DROP
- * When OF1.0, OFPTC11_TABLE_MISS_CONTINUE is used. What to do? */
- xport = get_ofp_port(ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
- choose_miss_rule(xport ? xport->config : 0,
- ctx->xbridge->miss_rule,
- ctx->xbridge->no_packet_in_rule, &rule);
+ switch (verdict) {
+ case RULE_DPIF_LOOKUP_VERDICT_MATCH:
+ goto match;
+ case RULE_DPIF_LOOKUP_VERDICT_CONTROLLER:
+ if (may_packet_in) {
+ struct xport *xport;
+
+ xport = get_ofp_port(ctx->xbridge,
+ ctx->xin->flow.in_port.ofp_port);
+ config = xport ? xport->config : 0;
+ break;
+ }
+ /* Fall through to drop */
+ case RULE_DPIF_LOOKUP_VERDICT_DROP:
+ config = OFPUTIL_PC_NO_PACKET_IN;
+ break;
+ case RULE_DPIF_LOOKUP_VERDICT_DEFAULT:
+ if (!ofproto_dpif_wants_packet_in_on_miss(ctx->xbridge->ofproto)) {
+ config = OFPUTIL_PC_NO_PACKET_IN;
+ }
+ break;
+ default:
+ OVS_NOT_REACHED();
}
+
+ choose_miss_rule(config, ctx->xbridge->miss_rule,
+ ctx->xbridge->no_packet_in_rule, &rule,
+ ctx->xin->xcache != NULL);
+
+match:
if (rule) {
+ /* Fill in the cache entry here instead of in xlate_recursively()
+ * to make the reference counting more explicit. We take a
+ * reference in the lookups above if we are going to cache the
+ * rule. */
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
+ entry->u.rule = rule;
+ }
xlate_recursively(ctx, rule);
- rule_dpif_unref(rule);
}
ctx->table_id = old_table_id;
ofpacts_execute_action_set(&action_list, &action_set);
ctx->recurse++;
- do_xlate_actions(action_list.data, action_list.size, ctx);
+ do_xlate_actions(ofpbuf_data(&action_list), ofpbuf_size(&action_list), ctx);
ctx->recurse--;
ofpbuf_uninit(&action_set);
const struct ofputil_bucket *bucket;
uint32_t basis;
- basis = hash_bytes(ctx->xin->flow.dl_dst, sizeof ctx->xin->flow.dl_dst, 0);
+ basis = hash_mac(ctx->xin->flow.dl_dst, 0, 0);
bucket = group_best_live_bucket(ctx, group, basis);
if (bucket) {
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
static void
xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group)
{
+ ctx->in_group = true;
+
switch (group_dpif_get_type(group)) {
case OFPGT11_ALL:
case OFPGT11_INDIRECT:
OVS_NOT_REACHED();
}
group_dpif_release(group);
+
+ ctx->in_group = false;
+}
+
+static bool
+xlate_group_resource_check(struct xlate_ctx *ctx)
+{
+ if (!xlate_resubmit_resource_check(ctx)) {
+ return false;
+ } else if (ctx->in_group) {
+ /* Prevent nested translation of OpenFlow groups.
+ *
+ * OpenFlow allows this restriction. We enforce this restriction only
+ * because, with the current architecture, we would otherwise have to
+ * take a possibly recursive read lock on the ofgroup rwlock, which is
+ * unsafe given that POSIX allows taking a read lock to block if there
+ * is a thread blocked on taking the write lock. Other solutions
+ * without this restriction are also possible, but seem unwarranted
+ * given the current limited use of groups. */
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+ VLOG_ERR_RL(&rl, "cannot recursively translate OpenFlow group");
+ return false;
+ } else {
+ return true;
+ }
}
static bool
xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id)
{
- if (xlate_resubmit_resource_check(ctx)) {
+ if (xlate_group_resource_check(ctx)) {
struct group_dpif *group;
bool got_group;
{
ofp_port_t in_port;
uint8_t table_id;
+ bool may_packet_in = false;
+ bool honor_table_miss = false;
+
+ if (ctx->rule && rule_dpif_is_internal(ctx->rule)) {
+ /* Still allow missed packets to be sent to the controller
+ * if resubmitting from an internal table. */
+ may_packet_in = true;
+ honor_table_miss = true;
+ }
in_port = resubmit->in_port;
if (in_port == OFPP_IN_PORT) {
table_id = ctx->table_id;
}
- xlate_table_action(ctx, in_port, table_id, false);
+ xlate_table_action(ctx, in_port, table_id, may_packet_in,
+ honor_table_miss);
}
static void
{
struct ofproto_packet_in *pin;
struct ofpbuf *packet;
- struct flow key;
+ struct pkt_metadata md = PKT_METADATA_INITIALIZER(0);
ctx->xout->slow |= SLOW_CONTROLLER;
if (!ctx->xin->packet) {
packet = ofpbuf_clone(ctx->xin->packet);
- key.skb_priority = 0;
- key.pkt_mark = 0;
- memset(&key.tunnel, 0, sizeof key.tunnel);
-
ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
&ctx->xout->odp_actions,
- &ctx->xout->wc,
- &ctx->mpls_depth_delta);
+ &ctx->xout->wc);
- odp_execute_actions(NULL, packet, &key, ctx->xout->odp_actions.data,
- ctx->xout->odp_actions.size, NULL, NULL);
+ odp_execute_actions(NULL, packet, false, &md,
+ ofpbuf_data(&ctx->xout->odp_actions),
+ ofpbuf_size(&ctx->xout->odp_actions), NULL);
pin = xmalloc(sizeof *pin);
- pin->up.packet_len = packet->size;
+ pin->up.packet_len = ofpbuf_size(packet);
pin->up.packet = ofpbuf_steal_data(packet);
pin->up.reason = reason;
pin->up.table_id = ctx->table_id;
pin->controller_id = controller_id;
pin->send_len = len;
- pin->generated_by_table_miss = (ctx->rule
- && rule_dpif_is_table_miss(ctx->rule));
+ /* If the rule is a table-miss rule then this is
+ * a table miss handled by a table-miss rule.
+ *
+ * Else, if the rule is internal and has a controller action,
+ * the latter being implied by the rule being processed here,
+ * then this is a table miss handled without a table-miss rule.
+ *
+ * Otherwise this is not a table miss. */
+ pin->miss_type = OFPROTO_PACKET_IN_NO_MISS;
+ if (ctx->rule) {
+ if (rule_dpif_is_table_miss(ctx->rule)) {
+ pin->miss_type = OFPROTO_PACKET_IN_MISS_FLOW;
+ } else if (rule_dpif_is_internal(ctx->rule)) {
+ pin->miss_type = OFPROTO_PACKET_IN_MISS_WITHOUT_FLOW;
+ }
+ }
ofproto_dpif_send_packet_in(ctx->xbridge->ofproto, pin);
ofpbuf_delete(packet);
}
-static bool
-compose_mpls_push_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
+static void
+compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
{
struct flow_wildcards *wc = &ctx->xout->wc;
struct flow *flow = &ctx->xin->flow;
+ int n;
- ovs_assert(eth_type_mpls(eth_type));
-
- /* If mpls_depth_delta is negative then an MPLS POP action has been
- * composed and the resulting MPLS label stack is unknown. This means
- * an MPLS PUSH action can't be composed as it needs to know either the
- * top-most MPLS LSE to use as a template for the new MPLS LSE, or that
- * there is no MPLS label stack present. Thus, stop processing.
- *
- * If mpls_depth_delta is positive then an MPLS PUSH action has been
- * composed and no further MPLS PUSH action may be performed without
- * losing MPLS LSE and ether type information held in xtx->xin->flow.
- * Thus, stop processing.
- *
- * If the MPLS LSE of the flow and base_flow differ then the MPLS LSE
- * has been updated. Performing a MPLS PUSH action may be would result in
- * losing MPLS LSE and ether type information held in xtx->xin->flow.
- * Thus, stop processing.
- *
- * It is planned that in the future this case will be handled
- * by recirculation */
- if (ctx->mpls_depth_delta ||
- ctx->xin->flow.mpls_lse != ctx->base_flow.mpls_lse) {
- return true;
- }
-
- memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
-
- ctx->pre_push_mpls_lse = ctx->xin->flow.mpls_lse;
-
- if (eth_type_mpls(ctx->xin->flow.dl_type)) {
- flow->mpls_lse &= ~htonl(MPLS_BOS_MASK);
- } else {
- ovs_be32 label;
- uint8_t tc, ttl;
+ ovs_assert(eth_type_mpls(mpls->ethertype));
- if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
- label = htonl(0x2); /* IPV6 Explicit Null. */
- } else {
- label = htonl(0x0); /* IPV4 Explicit Null. */
+ n = flow_count_mpls_labels(flow, wc);
+ if (!n) {
+ ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
+ &ctx->xout->odp_actions,
+ &ctx->xout->wc);
+ } else if (n >= FLOW_MAX_MPLS_LABELS) {
+ if (ctx->xin->packet != NULL) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+ VLOG_WARN_RL(&rl, "bridge %s: dropping packet on which an "
+ "MPLS push action can't be performed as it would "
+ "have more MPLS LSEs than the %d supported.",
+ ctx->xbridge->name, FLOW_MAX_MPLS_LABELS);
}
- wc->masks.nw_tos |= IP_DSCP_MASK;
- wc->masks.nw_ttl = 0xff;
- tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
- ttl = flow->nw_ttl ? flow->nw_ttl : 0x40;
- flow->mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
+ ctx->exit = true;
+ return;
+ } else if (n >= ctx->xbridge->max_mpls_depth) {
+ COVERAGE_INC(xlate_actions_mpls_overflow);
+ ctx->xout->slow |= SLOW_ACTION;
}
- flow->dl_type = eth_type;
- ctx->mpls_depth_delta++;
- return false;
+ flow_push_mpls(flow, n, mpls->ethertype, wc);
}
-static bool
+static void
compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
struct flow_wildcards *wc = &ctx->xout->wc;
+ struct flow *flow = &ctx->xin->flow;
+ int n = flow_count_mpls_labels(flow, wc);
- if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
- return true;
- }
-
- /* If mpls_depth_delta is negative then an MPLS POP action has been
- * composed. Performing another MPLS POP action
- * would result in losing ether type that results from
- * the already composed MPLS POP. Thus, stop processing.
- *
- * It is planned that in the future this case will be handled
- * by recirculation */
- if (ctx->mpls_depth_delta < 0) {
- return true;
- }
-
- memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
-
- /* If mpls_depth_delta is positive then an MPLS PUSH action has been
- * executed and the previous MPLS LSE saved in ctx->pre_push_mpls_lse. The
- * flow's MPLS LSE should be restored to that value to allow any
- * subsequent actions that update of the LSE to be executed correctly.
- */
- if (ctx->mpls_depth_delta > 0) {
- ctx->xin->flow.mpls_lse = ctx->pre_push_mpls_lse;
+ if (!flow_pop_mpls(flow, n, eth_type, wc) && n >= FLOW_MAX_MPLS_LABELS) {
+ if (ctx->xin->packet != NULL) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+ VLOG_WARN_RL(&rl, "bridge %s: dropping packet on which an "
+ "MPLS pop action can't be performed as it has "
+ "more MPLS LSEs than the %d supported.",
+ ctx->xbridge->name, FLOW_MAX_MPLS_LABELS);
+ }
+ ctx->exit = true;
+ ofpbuf_clear(&ctx->xout->odp_actions);
}
-
- ctx->xin->flow.dl_type = eth_type;
- ctx->mpls_depth_delta--;
-
- return false;
}
static bool
}
}
-static bool
+static void
compose_set_mpls_label_action(struct xlate_ctx *ctx, ovs_be32 label)
{
- if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
- return true;
- }
-
- /* If mpls_depth_delta is negative then an MPLS POP action has been
- * executed and the resulting MPLS label stack is unknown. This means
- * a SET MPLS LABEL action can't be executed as it needs to manipulate
- * the top-most MPLS LSE. Thus, stop processing.
- *
- * It is planned that in the future this case will be handled
- * by recirculation.
- */
- if (ctx->mpls_depth_delta < 0) {
- return true;
+ if (eth_type_mpls(ctx->xin->flow.dl_type)) {
+ ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
+ set_mpls_lse_label(&ctx->xin->flow.mpls_lse[0], label);
}
-
- ctx->xout->wc.masks.mpls_lse |= htonl(MPLS_LABEL_MASK);
- set_mpls_lse_label(&ctx->xin->flow.mpls_lse, label);
- return false;
}
-static bool
+static void
compose_set_mpls_tc_action(struct xlate_ctx *ctx, uint8_t tc)
{
- if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
- return true;
- }
-
- /* If mpls_depth_delta is negative then an MPLS POP action has been
- * executed and the resulting MPLS label stack is unknown. This means
- * a SET MPLS TC action can't be executed as it needs to manipulate
- * the top-most MPLS LSE. Thus, stop processing.
- *
- * It is planned that in the future this case will be handled
- * by recirculation.
- */
- if (ctx->mpls_depth_delta < 0) {
- return true;
+ if (eth_type_mpls(ctx->xin->flow.dl_type)) {
+ ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
+ set_mpls_lse_tc(&ctx->xin->flow.mpls_lse[0], tc);
}
-
- ctx->xout->wc.masks.mpls_lse |= htonl(MPLS_TC_MASK);
- set_mpls_lse_tc(&ctx->xin->flow.mpls_lse, tc);
- return false;
}
-static bool
+static void
compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
{
- if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
- return true;
- }
-
- /* If mpls_depth_delta is negative then an MPLS POP action has been
- * executed and the resulting MPLS label stack is unknown. This means
- * a SET MPLS TTL push action can't be executed as it needs to manipulate
- * the top-most MPLS LSE. Thus, stop processing.
- *
- * It is planned that in the future this case will be handled
- * by recirculation.
- */
- if (ctx->mpls_depth_delta < 0) {
- return true;
+ if (eth_type_mpls(ctx->xin->flow.dl_type)) {
+ ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
+ set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse[0], ttl);
}
-
- ctx->xout->wc.masks.mpls_lse |= htonl(MPLS_TTL_MASK);
- set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl);
- return false;
}
static bool
compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
{
struct flow *flow = &ctx->xin->flow;
- uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse);
+ uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);
struct flow_wildcards *wc = &ctx->xout->wc;
memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
+ if (eth_type_mpls(flow->dl_type)) {
+ if (ttl > 1) {
+ ttl--;
+ set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
+ return false;
+ } else {
+ execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
- if (!eth_type_mpls(flow->dl_type)) {
- return false;
- }
-
- if (ttl > 1) {
- ttl--;
- set_mpls_lse_ttl(&flow->mpls_lse, ttl);
- return false;
+ /* Stop processing for current table. */
+ return true;
+ }
} else {
- execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
-
- /* Stop processing for current table. */
return true;
}
}
break;
case OFPP_TABLE:
xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
- 0, may_packet_in);
+ 0, may_packet_in, true);
break;
case OFPP_NORMAL:
xlate_normal(ctx);
learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
ofproto_dpif_flow_mod(ctx->xbridge->ofproto, &fm);
ofpbuf_uninit(&ofpacts);
+
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
+ entry->u.learn.ofproto = ctx->xin->ofproto;
+ /* Look up the learned rule, taking a reference on it. The reference
+ * is released when this cache entry is deleted. */
+ rule_dpif_lookup(ctx->xbridge->ofproto, &ctx->xin->flow, NULL,
+ &entry->u.learn.rule, true);
+ }
+}
+
+static void
+xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
+ uint16_t idle_timeout, uint16_t hard_timeout)
+{
+ if (tcp_flags & (TCP_FIN | TCP_RST)) {
+ rule_dpif_reduce_timeouts(rule, idle_timeout, hard_timeout);
+ }
}
static void
xlate_fin_timeout(struct xlate_ctx *ctx,
const struct ofpact_fin_timeout *oft)
{
- if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
- rule_dpif_reduce_timeouts(ctx->rule, oft->fin_idle_timeout,
- oft->fin_hard_timeout);
+ if (ctx->rule) {
+ xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
+ oft->fin_idle_timeout, oft->fin_hard_timeout);
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
+ /* XC_RULE already holds a reference on the rule; none is taken
+ * here. */
+ entry->u.fin.rule = ctx->rule;
+ entry->u.fin.idle = oft->fin_idle_timeout;
+ entry->u.fin.hard = oft->fin_hard_timeout;
+ }
}
}
* the same percentage. */
uint32_t probability = (os->probability << 16) | os->probability;
+ if (!ctx->xbridge->variable_length_userdata) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+ VLOG_ERR_RL(&rl, "ignoring NXAST_SAMPLE action because datapath "
+ "lacks support (needs Linux 3.10+ or kernel module from "
+ "OVS 1.11+)");
+ return;
+ }
+
ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
&ctx->xout->odp_actions,
- &ctx->xout->wc,
- &ctx->mpls_depth_delta);
+ &ctx->xout->wc);
compose_flow_sample_cookie(os->probability, os->collector_set_id,
os->obs_domain_id, os->obs_point_id, &cookie);
static bool
may_receive(const struct xport *xport, struct xlate_ctx *ctx)
{
- if (xport->config & (eth_addr_equals(ctx->xin->flow.dl_dst, eth_addr_stp)
+ if (xport->config & (is_stp(&ctx->xin->flow)
? OFPUTIL_PC_NO_RECV_STP
: OFPUTIL_PC_NO_RECV)) {
return false;
ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);
ofpacts_execute_action_set(&action_list, &ctx->action_set);
- do_xlate_actions(action_list.data, action_list.size, ctx);
+ do_xlate_actions(ofpbuf_data(&action_list), ofpbuf_size(&action_list), ctx);
ofpbuf_uninit(&action_list);
}
case OFPACT_SET_FIELD:
set_field = ofpact_get_SET_FIELD(a);
mf = set_field->field;
- mf_mask_field_and_prereqs(mf, &wc->masks);
/* Set field action only ever overwrites packet's outermost
* applicable header fields. Do nothing if no header exists. */
- if ((mf->id != MFF_VLAN_VID || flow->vlan_tci & htons(VLAN_CFI))
- && ((mf->id != MFF_MPLS_LABEL && mf->id != MFF_MPLS_TC)
- || flow->mpls_lse)) {
- mf_set_flow_value(mf, &set_field->value, flow);
+ if (mf->id == MFF_VLAN_VID) {
+ wc->masks.vlan_tci |= htons(VLAN_CFI);
+ if (!(flow->vlan_tci & htons(VLAN_CFI))) {
+ break;
+ }
+ } else if ((mf->id == MFF_MPLS_LABEL || mf->id == MFF_MPLS_TC)
+ /* 'dl_type' is already unwildcarded. */
+ && !eth_type_mpls(flow->dl_type)) {
+ break;
}
+
+ mf_mask_field_and_prereqs(mf, &wc->masks);
+ mf_set_flow_value(mf, &set_field->value, flow);
break;
case OFPACT_STACK_PUSH:
break;
case OFPACT_PUSH_MPLS:
- if (compose_mpls_push_action(ctx,
- ofpact_get_PUSH_MPLS(a)->ethertype)) {
- return;
- }
+ compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a));
break;
case OFPACT_POP_MPLS:
- if (compose_mpls_pop_action(ctx,
- ofpact_get_POP_MPLS(a)->ethertype)) {
- return;
- }
+ compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
break;
case OFPACT_SET_MPLS_LABEL:
- if (compose_set_mpls_label_action(ctx,
- ofpact_get_SET_MPLS_LABEL(a)->label)) {
- return;
- }
- break;
+ compose_set_mpls_label_action(
+ ctx, ofpact_get_SET_MPLS_LABEL(a)->label);
+ break;
case OFPACT_SET_MPLS_TC:
- if (compose_set_mpls_tc_action(ctx,
- ofpact_get_SET_MPLS_TC(a)->tc)) {
- return;
- }
+ compose_set_mpls_tc_action(ctx, ofpact_get_SET_MPLS_TC(a)->tc);
break;
case OFPACT_SET_MPLS_TTL:
- if (compose_set_mpls_ttl_action(ctx,
- ofpact_get_SET_MPLS_TTL(a)->ttl)) {
- return;
- }
+ compose_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl);
break;
case OFPACT_DEC_MPLS_TTL:
ovs_assert(ctx->table_id < ogt->table_id);
xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
- ogt->table_id, true);
+ ogt->table_id, true, true);
break;
}
xin->packet = packet;
xin->may_learn = packet != NULL;
xin->rule = rule;
+ xin->xcache = NULL;
xin->ofpacts = NULL;
xin->ofpacts_len = 0;
xin->tcp_flags = tcp_flags;
ofpbuf_use_stub(&dst->odp_actions, dst->odp_actions_stub,
sizeof dst->odp_actions_stub);
- ofpbuf_put(&dst->odp_actions, src->odp_actions.data,
- src->odp_actions.size);
+ ofpbuf_put(&dst->odp_actions, ofpbuf_data(&src->odp_actions),
+ ofpbuf_size(&src->odp_actions));
}
\f
static struct skb_priority_to_dscp *
const struct nlattr *a;
unsigned int left;
- NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->xout->odp_actions.data,
- ctx->xout->odp_actions.size) {
+ NL_ATTR_FOR_EACH_UNSAFE (a, left, ofpbuf_data(&ctx->xout->odp_actions),
+ ofpbuf_size(&ctx->xout->odp_actions)) {
if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
&& nl_attr_get_odp_port(a) == local_odp_port) {
return true;
/* Thread safe call to xlate_actions__(). */
void
xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
+ OVS_EXCLUDED(xlate_rwlock)
{
ovs_rwlock_rdlock(&xlate_rwlock);
xlate_actions__(xin, xout);
struct flow *flow = &xin->flow;
struct rule_dpif *rule = NULL;
- struct rule_actions *actions = NULL;
+ const struct rule_actions *actions = NULL;
enum slow_path_reason special;
const struct ofpact *ofpacts;
struct xport *in_port;
struct xlate_ctx ctx;
size_t ofpacts_len;
bool tnl_may_send;
+ bool is_icmp;
COVERAGE_INC(xlate_actions);
ctx.xbridge = xbridge_lookup(xin->ofproto);
if (!ctx.xbridge) {
- goto out;
+ return;
}
ctx.rule = xin->rule;
memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port);
memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
- wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
+ if (is_ip_any(flow)) {
+ wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
+ }
+ is_icmp = is_icmpv4(flow) || is_icmpv6(flow);
tnl_may_send = tnl_xlate_init(&ctx.base_flow, flow, wc);
if (ctx.xbridge->netflow) {
ctx.recurse = 0;
ctx.resubmits = 0;
+ ctx.in_group = false;
ctx.orig_skb_priority = flow->skb_priority;
ctx.table_id = 0;
ctx.exit = false;
- ctx.mpls_depth_delta = 0;
+ ctx.use_recirc = false;
if (!xin->ofpacts && !ctx.rule) {
- rule_dpif_lookup(ctx.xbridge->ofproto, flow,
- !xin->skip_wildcards ? wc : NULL, &rule);
+ ctx.table_id = rule_dpif_lookup(ctx.xbridge->ofproto, flow,
+ !xin->skip_wildcards ? wc : NULL,
+ &rule, ctx.xin->xcache != NULL);
if (ctx.xin->resubmit_stats) {
rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
}
+ if (ctx.xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
+ entry->u.rule = rule;
+ }
ctx.rule = rule;
}
xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);
break;
case OFPC_FRAG_DROP:
- goto out;
+ return;
case OFPC_FRAG_REASM:
OVS_NOT_REACHED();
}
in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
- if (in_port && in_port->is_tunnel && ctx.xin->resubmit_stats) {
- netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
- if (in_port->bfd) {
- bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
+ if (in_port && in_port->is_tunnel) {
+ if (ctx.xin->resubmit_stats) {
+ netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
+ if (in_port->bfd) {
+ bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
+ }
+ }
+ if (ctx.xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
+ entry->u.dev.rx = netdev_ref(in_port->netdev);
+ entry->u.dev.bfd = bfd_ref(in_port->bfd);
}
}
add_sflow_action(&ctx);
add_ipfix_action(&ctx);
- sample_actions_len = ctx.xout->odp_actions.size;
+ sample_actions_len = ofpbuf_size(&ctx.xout->odp_actions);
if (tnl_may_send && (!in_port || may_receive(in_port, &ctx))) {
do_xlate_actions(ofpacts, ofpacts_len, &ctx);
/* We've let OFPP_NORMAL and the learning action look at the
* packet, so drop it now if forwarding is disabled. */
if (in_port && !xport_stp_forward_state(in_port)) {
- ctx.xout->odp_actions.size = sample_actions_len;
+ ofpbuf_set_size(&ctx.xout->odp_actions, sample_actions_len);
}
}
- if (ctx.action_set.size) {
+ if (ofpbuf_size(&ctx.action_set)) {
xlate_action_set(&ctx);
}
}
}
- if (nl_attr_oversized(ctx.xout->odp_actions.size)) {
+ if (nl_attr_oversized(ofpbuf_size(&ctx.xout->odp_actions))) {
/* These datapath actions are too big for a Netlink attribute, so we
* can't hand them to the kernel directly. dpif_execute() can execute
* them one by one with help, so just mark the result as SLOW_ACTION to
ctx.xout->slow |= SLOW_ACTION;
}
- if (ctx.xin->resubmit_stats) {
- mirror_update_stats(ctx.xbridge->mbridge, xout->mirrors,
- ctx.xin->resubmit_stats->n_packets,
- ctx.xin->resubmit_stats->n_bytes);
-
- if (ctx.xbridge->netflow) {
- const struct ofpact *ofpacts;
- size_t ofpacts_len;
-
- ofpacts_len = actions->ofpacts_len;
- ofpacts = actions->ofpacts;
- if (ofpacts_len == 0
- || ofpacts->type != OFPACT_CONTROLLER
- || ofpact_next(ofpacts) < ofpact_end(ofpacts, ofpacts_len)) {
- /* Only update netflow if we don't have controller flow. We don't
- * report NetFlow expiration messages for such facets because they
- * are just part of the control logic for the network, not real
- * traffic. */
+ if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
+ if (ctx.xin->resubmit_stats) {
+ mirror_update_stats(ctx.xbridge->mbridge, xout->mirrors,
+ ctx.xin->resubmit_stats->n_packets,
+ ctx.xin->resubmit_stats->n_bytes);
+ }
+ if (ctx.xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx.xin->xcache, XC_MIRROR);
+ entry->u.mirror.mbridge = mbridge_ref(ctx.xbridge->mbridge);
+ entry->u.mirror.mirrors = xout->mirrors;
+ }
+ }
+
+ if (ctx.xbridge->netflow) {
+ const struct ofpact *ofpacts = actions->ofpacts;
+ size_t ofpacts_len = actions->ofpacts_len;
+
+ /* Only update netflow if we don't have a controller flow. We don't
+ * report NetFlow expiration messages for such facets because they
+ * are just part of the control logic for the network, not real
+ * traffic. */
+ if (ofpacts_len == 0
+ || ofpacts->type != OFPACT_CONTROLLER
+ || ofpact_next(ofpacts) < ofpact_end(ofpacts, ofpacts_len)) {
+ if (ctx.xin->resubmit_stats) {
netflow_flow_update(ctx.xbridge->netflow, flow,
xout->nf_output_iface,
ctx.xin->resubmit_stats);
}
+ if (ctx.xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
+ entry->u.nf.netflow = netflow_ref(ctx.xbridge->netflow);
+ entry->u.nf.flow = xmemdup(flow, sizeof *flow);
+ entry->u.nf.iface = xout->nf_output_iface;
+ }
}
}
* use non-header fields as part of the cache. */
flow_wildcards_clear_non_packet_fields(wc);
-out:
- rule_actions_unref(actions);
- rule_dpif_unref(rule);
+ /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. struct flow uses
+ * the low 8 bits of the 16-bit tp_src and tp_dst members to represent
+ * these fields. The datapath interface, on the other hand, represents
+ * them with just 8 bits each. This means that if the high 8 bits of the
+ * masks for these fields somehow become set, then they will get chopped
+ * off by a round trip through the datapath, and revalidation will spot
+ * that as an inconsistency and delete the flow. Avoid the problem here by
+ * making sure that only the low 8 bits of either field can be unwildcarded
+ * for ICMP.
+ */
+ if (is_icmp) {
+ wc->masks.tp_src &= htons(UINT8_MAX);
+ wc->masks.tp_dst &= htons(UINT8_MAX);
+ }
}
/* Sends 'packet' out 'ofport'.
struct xport *xport;
struct ofpact_output output;
struct flow flow;
- union flow_in_port in_port_;
- int error;
ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
/* Use OFPP_NONE as the in_port to avoid special packet processing. */
- in_port_.ofp_port = OFPP_NONE;
- flow_extract(packet, 0, 0, NULL, &in_port_, &flow);
+ flow_extract(packet, NULL, &flow);
+ flow.in_port.ofp_port = OFPP_NONE;
ovs_rwlock_rdlock(&xlate_rwlock);
xport = xport_lookup(ofport);
}
output.port = xport->ofp_port;
output.max_len = 0;
- error = ofproto_dpif_execute_actions(xport->xbridge->ofproto, &flow, NULL,
- &output.ofpact, sizeof output,
- packet);
ovs_rwlock_unlock(&xlate_rwlock);
- return error;
+
+ return ofproto_dpif_execute_actions(xport->xbridge->ofproto, &flow, NULL,
+ &output.ofpact, sizeof output,
+ packet);
+}
+
+struct xlate_cache *
+xlate_cache_new(void)
+{
+ struct xlate_cache *xcache = xmalloc(sizeof *xcache);
+
+ ofpbuf_init(&xcache->entries, 512);
+ return xcache;
+}
+
+static struct xc_entry *
+xlate_cache_add_entry(struct xlate_cache *xcache, enum xc_type type)
+{
+ struct xc_entry *entry;
+
+ entry = ofpbuf_put_zeros(&xcache->entries, sizeof *entry);
+ entry->type = type;
+
+ return entry;
+}
+
+static void
+xlate_cache_netdev(struct xc_entry *entry, const struct dpif_flow_stats *stats)
+{
+ if (entry->u.dev.tx) {
+ netdev_vport_inc_tx(entry->u.dev.tx, stats);
+ }
+ if (entry->u.dev.rx) {
+ netdev_vport_inc_rx(entry->u.dev.rx, stats);
+ }
+ if (entry->u.dev.bfd) {
+ bfd_account_rx(entry->u.dev.bfd, stats);
+ }
+}
+
+static void
+xlate_cache_normal(struct ofproto_dpif *ofproto, struct flow *flow, int vlan)
+{
+ struct xbridge *xbridge;
+ struct xbundle *xbundle;
+ struct flow_wildcards wc;
+
+ xbridge = xbridge_lookup(ofproto);
+ if (!xbridge) {
+ return;
+ }
+
+ xbundle = lookup_input_bundle(xbridge, flow->in_port.ofp_port, false,
+ NULL);
+ if (!xbundle) {
+ return;
+ }
+
+ update_learning_table(xbridge, flow, &wc, vlan, xbundle);
+}
+
+/* Push stats and perform side effects of flow translation. */
+void
+xlate_push_stats(struct xlate_cache *xcache, bool may_learn,
+ const struct dpif_flow_stats *stats)
+{
+ struct xc_entry *entry;
+ struct ofpbuf entries = xcache->entries;
+
+ XC_ENTRY_FOR_EACH (entry, entries, xcache) {
+ switch (entry->type) {
+ case XC_RULE:
+ rule_dpif_credit_stats(entry->u.rule, stats);
+ break;
+ case XC_BOND:
+ bond_account(entry->u.bond.bond, entry->u.bond.flow,
+ entry->u.bond.vid, stats->n_bytes);
+ break;
+ case XC_NETDEV:
+ xlate_cache_netdev(entry, stats);
+ break;
+ case XC_NETFLOW:
+ netflow_flow_update(entry->u.nf.netflow, entry->u.nf.flow,
+ entry->u.nf.iface, stats);
+ break;
+ case XC_MIRROR:
+ mirror_update_stats(entry->u.mirror.mbridge,
+ entry->u.mirror.mirrors,
+ stats->n_packets, stats->n_bytes);
+ break;
+ case XC_LEARN:
+ if (may_learn) {
+ struct rule_dpif *rule = entry->u.learn.rule;
+
+ /* Reset the modified time for a rule that is equivalent to
+ * the currently cached rule. If the rule is not the exact
+ * rule we have cached, update the reference that we have. */
+ entry->u.learn.rule = ofproto_dpif_refresh_rule(rule);
+ }
+ break;
+ case XC_NORMAL:
+ xlate_cache_normal(entry->u.normal.ofproto, entry->u.normal.flow,
+ entry->u.normal.vlan);
+ break;
+ case XC_FIN_TIMEOUT:
+ xlate_fin_timeout__(entry->u.fin.rule, stats->tcp_flags,
+ entry->u.fin.idle, entry->u.fin.hard);
+ break;
+ default:
+ OVS_NOT_REACHED();
+ }
+ }
+}
+
+static void
+xlate_dev_unref(struct xc_entry *entry)
+{
+ if (entry->u.dev.tx) {
+ netdev_close(entry->u.dev.tx);
+ }
+ if (entry->u.dev.rx) {
+ netdev_close(entry->u.dev.rx);
+ }
+ if (entry->u.dev.bfd) {
+ bfd_unref(entry->u.dev.bfd);
+ }
+}
+
+static void
+xlate_cache_clear_netflow(struct netflow *netflow, struct flow *flow)
+{
+ netflow_expire(netflow, flow);
+ netflow_flow_clear(netflow, flow);
+ netflow_unref(netflow);
+ free(flow);
+}
+
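+/* Releases the references held by 'xcache''s entries and empties the entry
+ * buffer without freeing it, so that the cache can be refilled by a later
+ * translation.  xlate_cache_delete() below additionally frees the buffer and
+ * 'xcache' itself. */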
+void
+xlate_cache_clear(struct xlate_cache *xcache)
+{
+ struct xc_entry *entry;
+ struct ofpbuf entries;
+
+ if (!xcache) {
+ return;
+ }
+
+ XC_ENTRY_FOR_EACH (entry, entries, xcache) {
+ switch (entry->type) {
+ case XC_RULE:
+ rule_dpif_unref(entry->u.rule);
+ break;
+ case XC_BOND:
+ free(entry->u.bond.flow);
+ bond_unref(entry->u.bond.bond);
+ break;
+ case XC_NETDEV:
+ xlate_dev_unref(entry);
+ break;
+ case XC_NETFLOW:
+ xlate_cache_clear_netflow(entry->u.nf.netflow, entry->u.nf.flow);
+ break;
+ case XC_MIRROR:
+ mbridge_unref(entry->u.mirror.mbridge);
+ break;
+ case XC_LEARN:
+ /* 'u.learn.rule' is the learned rule. */
+ rule_dpif_unref(entry->u.learn.rule);
+ break;
+ case XC_NORMAL:
+ free(entry->u.normal.flow);
+ break;
+ case XC_FIN_TIMEOUT:
+ /* 'u.fin.rule' is always already held as an XC_RULE, whose
+ * reference has already been released above. */
+ break;
+ default:
+ OVS_NOT_REACHED();
+ }
+ }
+
+ ofpbuf_clear(&xcache->entries);
+}
+
+void
+xlate_cache_delete(struct xlate_cache *xcache)
+{
+ xlate_cache_clear(xcache);
+ ofpbuf_uninit(&xcache->entries);
+ free(xcache);
}