#include "fail-open.h"
#include "hmapx.h"
#include "lacp.h"
+#include "learn.h"
#include "mac-learning.h"
#include "multipath.h"
#include "netdev.h"
* flow translation. */
#define MAX_RESUBMIT_RECURSION 16
+/* Number of implemented OpenFlow tables. */
+enum { N_TABLES = 255 };
+BUILD_ASSERT_DECL(N_TABLES >= 1 && N_TABLES <= 255);
+
struct ofport_dpif;
struct ofproto_dpif;
uint64_t packet_count; /* Number of packets received. */
uint64_t byte_count; /* Number of bytes received. */
+ tag_type tag; /* Caches rule_calculate_tag() result. */
+
struct list facets; /* List of "struct facet"s. */
};
/* Configuration. */
struct list ports; /* Contains "struct ofport"s. */
+    enum port_vlan_mode vlan_mode; /* VLAN mode. */
int vlan; /* -1=trunk port, else a 12-bit VLAN ID. */
unsigned long *trunks; /* Bitmap of trunked VLANs, if 'vlan' == -1.
* NULL if all VLANs are trunked. */
* revalidating without a packet to refer to. */
const struct ofpbuf *packet;
+ /* Should OFPP_NORMAL MAC learning and NXAST_LEARN actions execute? We
+ * want to execute them if we are actually processing a packet, or if we
+ * are accounting for packets that the datapath has processed, but not if
+ * we are just revalidating. */
+ bool may_learn;
+
/* If nonnull, called just before executing a resubmit action.
*
* This is normally null so the client has to set it manually after
* to look at them after it returns. */
struct ofpbuf *odp_actions; /* Datapath actions. */
- tag_type tags; /* Tags associated with OFPP_NORMAL actions. */
+ tag_type tags; /* Tags associated with actions. */
bool may_set_up_flow; /* True ordinarily; false if the actions must
* be reassessed for every packet. */
+ bool has_learn; /* Actions include NXAST_LEARN? */
+ bool has_normal; /* Actions output to OFPP_NORMAL? */
uint16_t nf_output_iface; /* Output interface index for NetFlow. */
/* xlate_actions() initializes and uses these members, but the client has no
struct flow base_flow; /* Flow at the last commit. */
uint32_t base_priority; /* Priority at the last commit. */
uint8_t table_id; /* OpenFlow table ID where flow was found. */
+ uint32_t sflow_n_outputs; /* Number of output ports. */
+ uint16_t sflow_odp_port; /* Output port for composing sFlow action. */
+ uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
};
static void action_xlate_ctx_init(struct action_xlate_ctx *,
bool installed; /* Installed in datapath? */
bool may_install; /* True ordinarily; false if actions must
* be reassessed for every packet. */
+ bool has_learn; /* Actions include NXAST_LEARN? */
+ bool has_normal; /* Actions output to OFPP_NORMAL? */
size_t actions_len; /* Number of bytes in actions[]. */
struct nlattr *actions; /* Datapath actions. */
tag_type tags; /* Tags. */
struct flow *, uint64_t packets, uint64_t bytes,
long long int used);
+static uint32_t rule_calculate_tag(const struct flow *,
+ const struct flow_wildcards *,
+ uint32_t basis);
+static void rule_invalidate(const struct rule_dpif *);
+
struct ofport_dpif {
struct ofport up;
struct ofoperation *op;
};
+/* Extra information about a classifier table.
+ * Currently used just for optimized flow revalidation. */
+struct table_dpif {
+ /* If either of these is nonnull, then this table has a form that allows
+ * flows to be tagged to avoid revalidating most flows for the most common
+ * kinds of flow table changes. */
+ struct cls_table *catchall_table; /* Table that wildcards all fields. */
+ struct cls_table *other_table; /* Table with any other wildcard set. */
+ uint32_t basis; /* Keeps each table's tags separate. */
+};
+
struct ofproto_dpif {
struct ofproto up;
struct dpif *dpif;
/* Facets. */
struct hmap facets;
+
+ /* Revalidation. */
+ struct table_dpif tables[N_TABLES];
bool need_revalidate;
struct tag_set revalidate_set;
/* Utilities. */
static int send_packet(struct ofproto_dpif *, uint32_t odp_port,
const struct ofpbuf *packet);
-
+static size_t
+compose_sflow_action(const struct ofproto_dpif *, struct ofpbuf *odp_actions,
+ const struct flow *, uint32_t odp_port);
/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
\f
ofproto->max_ports = dpif_get_max_ports(ofproto->dpif);
ofproto->n_matches = 0;
+ dpif_flow_flush(ofproto->dpif);
+ dpif_recv_purge(ofproto->dpif);
+
error = dpif_recv_set_mask(ofproto->dpif,
((1u << DPIF_UC_MISS) |
- (1u << DPIF_UC_ACTION) |
- (1u << DPIF_UC_SAMPLE)));
+ (1u << DPIF_UC_ACTION)));
if (error) {
VLOG_ERR("failed to listen on datapath %s: %s", name, strerror(error));
dpif_close(ofproto->dpif);
return error;
}
- dpif_flow_flush(ofproto->dpif);
- dpif_recv_purge(ofproto->dpif);
ofproto->netflow = NULL;
ofproto->sflow = NULL;
timer_set_duration(&ofproto->next_expiration, 1000);
hmap_init(&ofproto->facets);
+
+ for (i = 0; i < N_TABLES; i++) {
+ struct table_dpif *table = &ofproto->tables[i];
+
+ table->catchall_table = NULL;
+ table->other_table = NULL;
+ table->basis = random_uint32();
+ }
ofproto->need_revalidate = false;
tag_set_init(&ofproto->revalidate_set);
ofproto->has_bundle_action = false;
- *n_tablesp = 255;
+ *n_tablesp = N_TABLES;
return 0;
}
get_tables(struct ofproto *ofproto_, struct ofp_table_stats *ots)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct ovs_dp_stats s;
+ struct dpif_dp_stats s;
strcpy(ots->name, "classifier");
struct ofport_dpif *port = ofport_dpif_cast(port_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
+ ofproto->need_revalidate = true;
port->odp_port = ofp_port_to_odp_port(port->up.ofp_port);
port->bundle = NULL;
port->cfm = NULL;
struct ofport_dpif *port = ofport_dpif_cast(port_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
+ ofproto->need_revalidate = true;
bundle_remove(port_);
set_cfm(port_, NULL);
if (ofproto->sflow) {
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct dpif_sflow *ds = ofproto->sflow;
+
if (sflow_options) {
if (!ds) {
struct ofport_dpif *ofport;
dpif_sflow_add_port(ds, ofport->odp_port,
netdev_get_name(ofport->up.netdev));
}
+ ofproto->need_revalidate = true;
}
dpif_sflow_set_options(ds, sflow_options);
} else {
- dpif_sflow_destroy(ds);
- ofproto->sflow = NULL;
+ if (ds) {
+ dpif_sflow_destroy(ds);
+ ofproto->need_revalidate = true;
+ ofproto->sflow = NULL;
+ }
}
return 0;
}
error = 0;
} else {
if (!ofport->cfm) {
+ struct ofproto_dpif *ofproto;
+
+ ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+ ofproto->need_revalidate = true;
ofport->cfm = cfm_create(netdev_get_name(ofport->up.netdev));
}
}
}
if (lacp) {
+ port->bundle->ofproto->need_revalidate = true;
lacp_slave_register(bundle->lacp, port, lacp);
}
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
bool need_flush = false;
- const unsigned long *trunks;
struct ofport_dpif *port;
struct ofbundle *bundle;
+ unsigned long *trunks;
+ int vlan;
size_t i;
bool ok;
bundle->name = NULL;
list_init(&bundle->ports);
+ bundle->vlan_mode = PORT_VLAN_TRUNK;
bundle->vlan = -1;
bundle->trunks = NULL;
bundle->lacp = NULL;
/* LACP. */
if (s->lacp) {
if (!bundle->lacp) {
+ ofproto->need_revalidate = true;
bundle->lacp = lacp_create();
}
lacp_configure(bundle->lacp, s->lacp);
return EINVAL;
}
+    /* Set VLAN tagging mode. */
+ if (s->vlan_mode != bundle->vlan_mode) {
+ bundle->vlan_mode = s->vlan_mode;
+ need_flush = true;
+ }
+
/* Set VLAN tag. */
- if (s->vlan != bundle->vlan) {
- bundle->vlan = s->vlan;
+ vlan = (s->vlan_mode == PORT_VLAN_TRUNK ? -1
+ : s->vlan >= 0 && s->vlan <= 4095 ? s->vlan
+ : 0);
+ if (vlan != bundle->vlan) {
+ bundle->vlan = vlan;
need_flush = true;
}
/* Get trunked VLANs. */
- trunks = s->vlan == -1 ? NULL : s->trunks;
+ switch (s->vlan_mode) {
+ case PORT_VLAN_ACCESS:
+ trunks = NULL;
+ break;
+
+ case PORT_VLAN_TRUNK:
+ trunks = (unsigned long *) s->trunks;
+ break;
+
+ case PORT_VLAN_NATIVE_UNTAGGED:
+ case PORT_VLAN_NATIVE_TAGGED:
+ if (vlan != 0 && (!s->trunks
+ || !bitmap_is_set(s->trunks, vlan)
+ || bitmap_is_set(s->trunks, 0))) {
+ /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
+ if (s->trunks) {
+ trunks = bitmap_clone(s->trunks, 4096);
+ } else {
+ trunks = bitmap_allocate1(4096);
+ }
+ bitmap_set1(trunks, vlan);
+ bitmap_set0(trunks, 0);
+ } else {
+ trunks = (unsigned long *) s->trunks;
+ }
+ break;
+
+ default:
+ NOT_REACHED();
+ }
if (!vlan_bitmap_equal(trunks, bundle->trunks)) {
free(bundle->trunks);
- bundle->trunks = vlan_bitmap_clone(trunks);
+ if (trunks == s->trunks) {
+ bundle->trunks = vlan_bitmap_clone(trunks);
+ } else {
+ bundle->trunks = trunks;
+ trunks = NULL;
+ }
need_flush = true;
}
+ if (trunks != s->trunks) {
+ free(trunks);
+ }
/* Bonding. */
if (!list_is_short(&bundle->ports)) {
ofpbuf_uninit(&packet);
}
- enable = enable && !cfm_get_fault(ofport->cfm);
+ enable = enable && !cfm_get_fault(ofport->cfm)
+ && cfm_get_opup(ofport->cfm);
}
if (ofport->bundle) {
const struct flow *flow, bool clone)
{
struct ofputil_packet_in pin;
+ struct user_action_cookie cookie;
pin.packet = upcall->packet;
pin.in_port = flow->in_port;
pin.reason = upcall->type == DPIF_UC_MISS ? OFPR_NO_MATCH : OFPR_ACTION;
pin.buffer_id = 0; /* not yet known */
- pin.send_len = upcall->userdata;
+
+ memcpy(&cookie, &upcall->userdata, sizeof(cookie));
+ pin.send_len = cookie.data;
connmgr_send_packet_in(ofproto->up.connmgr, &pin, flow,
clone ? NULL : upcall->packet);
}
}
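+/* Handles an upcall whose 'userdata' carries a user action cookie,
+ * dispatching on the cookie type: sFlow cookies are passed to the sFlow
+ * module, controller cookies become "packet in" messages. */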
static void
-handle_upcall(struct ofproto_dpif *ofproto, struct dpif_upcall *upcall)
+handle_userspace_upcall(struct ofproto_dpif *ofproto,
+ struct dpif_upcall *upcall)
{
struct flow flow;
+ struct user_action_cookie cookie;
- switch (upcall->type) {
- case DPIF_UC_ACTION:
- COVERAGE_INC(ofproto_dpif_ctlr_action);
- odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
- send_packet_in(ofproto, upcall, &flow, false);
- break;
+ memcpy(&cookie, &upcall->userdata, sizeof(cookie));
- case DPIF_UC_SAMPLE:
+ if (cookie.type == USER_ACTION_COOKIE_SFLOW) {
if (ofproto->sflow) {
odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
- dpif_sflow_received(ofproto->sflow, upcall, &flow);
+ dpif_sflow_received(ofproto->sflow, upcall->packet, &flow, &cookie);
}
ofpbuf_delete(upcall->packet);
+
+ } else if (cookie.type == USER_ACTION_COOKIE_CONTROLLER) {
+ COVERAGE_INC(ofproto_dpif_ctlr_action);
+ odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
+ send_packet_in(ofproto, upcall, &flow, false);
+ } else {
+        VLOG_WARN_RL(&rl, "invalid user cookie: 0x%"PRIx64, upcall->userdata);
+ }
+}
+
+static void
+handle_upcall(struct ofproto_dpif *ofproto, struct dpif_upcall *upcall)
+{
+ switch (upcall->type) {
+ case DPIF_UC_ACTION:
+ handle_userspace_upcall(ofproto, upcall);
break;
case DPIF_UC_MISS:
/* Has 'rule' expired? */
now = time_msec();
if (rule->up.hard_timeout
- && now > rule->up.created + rule->up.hard_timeout * 1000) {
+ && now > rule->up.modified + rule->up.hard_timeout * 1000) {
reason = OFPRR_HARD_TIMEOUT;
} else if (rule->up.idle_timeout && list_is_empty(&rule->facets)
&& now > rule->used + rule->up.idle_timeout * 1000) {
const struct nlattr *odp_actions, size_t actions_len,
struct ofpbuf *packet)
{
- if (actions_len == NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t))
- && odp_actions->nla_type == OVS_ACTION_ATTR_USERSPACE) {
- /* As an optimization, avoid a round-trip from userspace to kernel to
- * userspace. This also avoids possibly filling up kernel packet
- * buffers along the way. */
- struct dpif_upcall upcall;
-
- upcall.type = DPIF_UC_ACTION;
- upcall.packet = packet;
- upcall.key = NULL;
- upcall.key_len = 0;
- upcall.userdata = nl_attr_get_u64(odp_actions);
- upcall.sample_pool = 0;
- upcall.actions = NULL;
- upcall.actions_len = 0;
-
- send_packet_in(ofproto, &upcall, flow, false);
+ struct odputil_keybuf keybuf;
+ struct ofpbuf key;
+ int error;
- return true;
- } else {
- struct odputil_keybuf keybuf;
- struct ofpbuf key;
- int error;
+ if (odp_actions->nla_type == OVS_ACTION_ATTR_USERSPACE
+ && NLA_ALIGN(odp_actions->nla_len) == actions_len) {
+ struct user_action_cookie cookie;
+ struct dpif_upcall upcall;
+ uint64_t cookie_u64;
+
+ cookie_u64 = nl_attr_get_u64(nl_attr_find_nested(
+ odp_actions,
+ OVS_USERSPACE_ATTR_USERDATA));
+ memcpy(&cookie, &cookie_u64, sizeof cookie);
+ if (cookie.type == USER_ACTION_COOKIE_CONTROLLER) {
+ /* As an optimization, avoid a round-trip from userspace to kernel
+ * to userspace. This also avoids possibly filling up kernel packet
+ * buffers along the way.
+         * This optimization does not take effect when sFlow is enabled,
+         * because then the first action is the sFlow SAMPLE action rather
+         * than the controller action. */
+
+ upcall.type = DPIF_UC_ACTION;
+ upcall.packet = packet;
+ upcall.key = NULL;
+ upcall.key_len = 0;
+        upcall.userdata = cookie_u64;
+
+ send_packet_in(ofproto, &upcall, flow, false);
+ return true;
+ }
+ }
- ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
- odp_flow_key_from_flow(&key, flow);
+ ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
+ odp_flow_key_from_flow(&key, flow);
- error = dpif_execute(ofproto->dpif, key.data, key.size,
- odp_actions, actions_len, packet);
+ error = dpif_execute(ofproto->dpif, key.data, key.size,
+ odp_actions, actions_len, packet);
- ofpbuf_delete(packet);
- return !error;
- }
+ ofpbuf_delete(packet);
+ return !error;
}
/* Executes the actions indicated by 'facet' on 'packet' and credits 'facet''s
assert(ofpbuf_headroom(packet) >= sizeof(struct ofp_packet_in));
- flow_extract_stats(&facet->flow, packet, &stats);
+ dpif_flow_stats_extract(&facet->flow, packet, &stats);
stats.used = time_msec();
if (execute_odp_actions(ofproto, &facet->flow,
facet->actions, facet->actions_len, packet)) {
odp_actions = xlate_actions(&ctx, rule->up.actions, rule->up.n_actions);
facet->tags = ctx.tags;
facet->may_install = ctx.may_set_up_flow;
+ facet->has_learn = ctx.has_learn;
+ facet->has_normal = ctx.has_normal;
facet->nf_flow.output_iface = ctx.nf_output_iface;
if (facet->actions_len != odp_actions->size
}
}
-static int
-vlan_tci_to_openflow_vlan(ovs_be16 vlan_tci)
-{
- return vlan_tci != htons(0) ? vlan_tci_to_vid(vlan_tci) : OFP_VLAN_NONE;
-}
-
static void
facet_account(struct ofproto_dpif *ofproto, struct facet *facet)
{
uint64_t n_bytes;
- struct ofbundle *in_bundle;
const struct nlattr *a;
- tag_type dummy = 0;
unsigned int left;
ovs_be16 vlan_tci;
- int vlan;
if (facet->byte_count <= facet->accounted_bytes) {
return;
n_bytes = facet->byte_count - facet->accounted_bytes;
facet->accounted_bytes = facet->byte_count;
- /* Test that 'tags' is nonzero to ensure that only flows that include an
- * OFPP_NORMAL action are used for learning and bond slave rebalancing.
- * This works because OFPP_NORMAL always sets a nonzero tag value.
- *
- * Feed information from the active flows back into the learning table to
+ /* Feed information from the active flows back into the learning table to
* ensure that table is always in sync with what is actually flowing
* through the datapath. */
- if (!facet->tags
- || !is_admissible(ofproto, &facet->flow, false, &dummy,
- &vlan, &in_bundle)) {
- return;
- }
+ if (facet->has_learn || facet->has_normal) {
+ struct action_xlate_ctx ctx;
- update_learning_table(ofproto, &facet->flow, vlan, in_bundle);
+ action_xlate_ctx_init(&ctx, ofproto, &facet->flow, NULL);
+ ctx.may_learn = true;
+ ofpbuf_delete(xlate_actions(&ctx, facet->rule->up.actions,
+ facet->rule->up.n_actions));
+ }
- if (!ofproto->has_bonded_bundles) {
+ if (!facet->has_normal || !ofproto->has_bonded_bundles) {
return;
}
port = get_odp_port(ofproto, nl_attr_get_u32(a));
if (port && port->bundle && port->bundle->bond) {
bond_account(port->bundle->bond, &facet->flow,
- vlan_tci_to_openflow_vlan(vlan_tci), n_bytes);
+ vlan_tci_to_vid(vlan_tci), n_bytes);
}
break;
facet->tags = ctx.tags;
facet->nf_flow.output_iface = ctx.nf_output_iface;
facet->may_install = ctx.may_set_up_flow;
+ facet->has_learn = ctx.has_learn;
+ facet->has_normal = ctx.has_normal;
if (actions_changed) {
free(facet->actions);
facet->actions_len = odp_actions->size;
rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow,
uint8_t table_id)
{
+ if (table_id >= N_TABLES) {
+ return NULL;
+ }
+
return rule_dpif_cast(rule_from_cls_rule(
classifier_lookup(&ofproto->up.tables[table_id],
flow)));
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
- ofproto->need_revalidate = true;
+ rule_invalidate(rule);
if (clogged) {
struct dpif_completion *c = xmalloc(sizeof *c);
c->op = rule->up.pending;
struct rule_dpif *rule = rule_dpif_cast(rule_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
struct rule_dpif *victim;
+ uint8_t table_id;
int error;
error = validate_actions(rule->up.actions, rule->up.n_actions,
list_init(&rule->facets);
}
+ table_id = rule->up.table_id;
+ rule->tag = (victim ? victim->tag
+ : table_id == 0 ? 0
+ : rule_calculate_tag(&rule->up.cr.flow, &rule->up.cr.wc,
+ ofproto->tables[table_id].basis));
+
complete_operation(rule);
return 0;
}
complete_operation(rule);
}
\f
-/* Sends 'packet' out of port 'odp_port' within 'p'.
+/* Sends 'packet' out of port 'odp_port' within 'ofproto'.
* Returns 0 if successful, otherwise a positive errno value. */
static int
send_packet(struct ofproto_dpif *ofproto, uint32_t odp_port,
odp_flow_key_from_flow(&key, &flow);
ofpbuf_init(&odp_actions, 32);
+ compose_sflow_action(ofproto, &odp_actions, &flow, odp_port);
+
nl_msg_put_u32(&odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
error = dpif_execute(ofproto->dpif,
key.data, key.size,
struct action_xlate_ctx *ctx);
static void xlate_normal(struct action_xlate_ctx *);
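+/* Composes an OVS_ACTION_ATTR_USERSPACE action that stores 'cookie' as its
+ * OVS_USERSPACE_ATTR_USERDATA, directed to the pid that handles upcalls for
+ * 'flow''s input port.  Returns the offset within 'odp_actions' at which the
+ * cookie is stored, so that fix_sflow_action() can rewrite it in place. */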
+static size_t
+put_userspace_action(const struct ofproto_dpif *ofproto,
+ struct ofpbuf *odp_actions,
+ const struct flow *flow,
+ const struct user_action_cookie *cookie)
+{
+ size_t offset;
+ uint32_t pid;
+
+ pid = dpif_port_get_pid(ofproto->dpif,
+ ofp_port_to_odp_port(flow->in_port));
+
+ offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_USERSPACE);
+ nl_msg_put_u32(odp_actions, OVS_USERSPACE_ATTR_PID, pid);
+ nl_msg_put_unspec(odp_actions, OVS_USERSPACE_ATTR_USERDATA,
+ cookie, sizeof *cookie);
+ nl_msg_end_nested(odp_actions, offset);
+
+ return odp_actions->size - NLA_ALIGN(sizeof *cookie);
+}
+
+/* Composes a SAMPLE action for sFlow, if sFlow is enabled.  Returns the
+ * offset within 'odp_actions' of the user action cookie, or 0 if no SAMPLE
+ * action was composed (e.g. because sFlow is disabled). */
+static size_t
+compose_sflow_action(const struct ofproto_dpif *ofproto,
+ struct ofpbuf *odp_actions,
+ const struct flow *flow,
+ uint32_t odp_port)
+{
+ uint32_t port_ifindex;
+ uint32_t probability;
+ struct user_action_cookie cookie;
+ size_t sample_offset, actions_offset;
+ int cookie_offset, n_output;
+
+ if (!ofproto->sflow || flow->in_port == OFPP_NONE) {
+ return 0;
+ }
+
+ if (odp_port == OVSP_NONE) {
+ port_ifindex = 0;
+ n_output = 0;
+ } else {
+ port_ifindex = dpif_sflow_odp_port_to_ifindex(ofproto->sflow, odp_port);
+ n_output = 1;
+ }
+
+ sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);
+
+ /* Number of packets out of UINT_MAX to sample. */
+ probability = dpif_sflow_get_probability(ofproto->sflow);
+ nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);
+
+ actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
+
+ cookie.type = USER_ACTION_COOKIE_SFLOW;
+ cookie.data = port_ifindex;
+ cookie.n_output = n_output;
+ cookie.vlan_tci = 0;
+ cookie_offset = put_userspace_action(ofproto, odp_actions, flow, &cookie);
+
+ nl_msg_end_nested(odp_actions, actions_offset);
+ nl_msg_end_nested(odp_actions, sample_offset);
+ return cookie_offset;
+}
+
+/* The SAMPLE action must come first in any given list of ODP actions.  At
+ * this point we do not yet have all of the information required to build it
+ * completely, so we compose it as fully as we can and fix up the rest later
+ * in fix_sflow_action(). */
+static void
+add_sflow_action(struct action_xlate_ctx *ctx)
+{
+ ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto,
+ ctx->odp_actions,
+ &ctx->flow, OVSP_NONE);
+ ctx->sflow_odp_port = 0;
+ ctx->sflow_n_outputs = 0;
+}
+
+/* Fixes up the SAMPLE action according to data collected while composing the
+ * other ODP actions: rewrites the user action cookie in the nested USERSPACE
+ * action inside the SAMPLE action's OVS_SAMPLE_ATTR_ACTIONS attribute, since
+ * sFlow needs the final output port, output count, and VLAN. */
+static void
+fix_sflow_action(struct action_xlate_ctx *ctx)
+{
+ const struct flow *base = &ctx->base_flow;
+ struct user_action_cookie *cookie;
+
+ if (!ctx->user_cookie_offset) {
+ return;
+ }
+
+ cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
+ sizeof(*cookie));
+ assert(cookie != NULL);
+ assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
+
+ if (ctx->sflow_n_outputs) {
+ cookie->data = dpif_sflow_odp_port_to_ifindex(ctx->ofproto->sflow,
+ ctx->sflow_odp_port);
+ }
+ if (ctx->sflow_n_outputs >= 255) {
+ cookie->n_output = 255;
+ } else {
+ cookie->n_output = ctx->sflow_n_outputs;
+ }
+ cookie->vlan_tci = base->vlan_tci;
+}
+
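+/* Appends the ODP actions needed to bring the packet's 802.1Q header in line
+ * with 'vlan_tci' (popping and, if necessary, pushing a tag), then records
+ * 'vlan_tci' in the base flow. */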
+static void
+commit_vlan_tci(struct action_xlate_ctx *ctx, ovs_be16 vlan_tci)
+{
+ struct flow *base = &ctx->base_flow;
+ struct ofpbuf *odp_actions = ctx->odp_actions;
+
+ if (base->vlan_tci != vlan_tci) {
+ if (!(vlan_tci & htons(VLAN_CFI))) {
+ nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
+ } else {
+ if (base->vlan_tci != htons(0)) {
+ nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
+ }
+ nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_PUSH_VLAN,
+ vlan_tci & ~htons(VLAN_CFI));
+ }
+ base->vlan_tci = vlan_tci;
+ }
+}
+
static void
commit_odp_actions(struct action_xlate_ctx *ctx)
{
base->nw_tos = flow->nw_tos;
}
- if (base->vlan_tci != flow->vlan_tci) {
- if (!(flow->vlan_tci & htons(VLAN_CFI))) {
- nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
- } else {
- if (base->vlan_tci != OFP_VLAN_NONE) {
- nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
- }
- nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_PUSH_VLAN,
- flow->vlan_tci & ~htons(VLAN_CFI));
- }
- base->vlan_tci = flow->vlan_tci;
- }
+ commit_vlan_tci(ctx, flow->vlan_tci);
if (base->tp_src != flow->tp_src) {
nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_SET_TP_SRC, flow->tp_src);
}
}
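+/* Appends an OVS_ACTION_ATTR_OUTPUT action for 'odp_port' and records the
+ * output so that fix_sflow_action() can later fill in the sFlow cookie. */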
+static void
+compose_output_action(struct action_xlate_ctx *ctx, uint16_t odp_port)
+{
+ nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
+ ctx->sflow_odp_port = odp_port;
+ ctx->sflow_n_outputs++;
+}
+
static void
add_output_action(struct action_xlate_ctx *ctx, uint16_t ofp_port)
{
}
commit_odp_actions(ctx);
- nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
+ compose_output_action(ctx, odp_port);
ctx->nf_output_iface = ofp_port;
}
uint16_t in_port, uint8_t table_id)
{
if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
+ struct ofproto_dpif *ofproto = ctx->ofproto;
struct rule_dpif *rule;
uint16_t old_in_port;
uint8_t old_table_id;
old_table_id = ctx->table_id;
ctx->table_id = table_id;
- /* Look up a flow with 'in_port' as the input port. Then restore the
- * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
- * have surprising behavior). */
+ /* Look up a flow with 'in_port' as the input port. */
old_in_port = ctx->flow.in_port;
ctx->flow.in_port = in_port;
- rule = rule_dpif_lookup(ctx->ofproto, &ctx->flow, table_id);
+ rule = rule_dpif_lookup(ofproto, &ctx->flow, table_id);
+
+ /* Tag the flow. */
+ if (table_id > 0 && table_id < N_TABLES) {
+ struct table_dpif *table = &ofproto->tables[table_id];
+ if (table->other_table) {
+ ctx->tags |= (rule
+ ? rule->tag
+ : rule_calculate_tag(&ctx->flow,
+ &table->other_table->wc,
+ table->basis));
+ }
+ }
+
+ /* Restore the original input port. Otherwise OFPP_NORMAL and
+ * OFPP_IN_PORT will have surprising behavior. */
ctx->flow.in_port = old_in_port;
if (ctx->resubmit_hook) {
HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
uint16_t ofp_port = ofport->up.ofp_port;
if (ofp_port != ctx->flow.in_port && !(ofport->up.opp.config & mask)) {
- nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT,
- ofport->odp_port);
+ compose_output_action(ctx, ofport->odp_port);
}
}
ctx->nf_output_iface = NF_OUT_FLOOD;
}
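+/* Composes a "userspace" ODP action whose cookie marks the packet for
+ * delivery to the OpenFlow controller, with 'len' as the maximum number of
+ * bytes to send. */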
+static void
+compose_controller_action(struct action_xlate_ctx *ctx, int len)
+{
+ struct user_action_cookie cookie;
+
+ cookie.type = USER_ACTION_COOKIE_CONTROLLER;
+ cookie.data = len;
+ cookie.n_output = 0;
+ cookie.vlan_tci = 0;
+ put_userspace_action(ctx->ofproto, ctx->odp_actions, &ctx->flow, &cookie);
+}
+
static void
xlate_output_action__(struct action_xlate_ctx *ctx,
uint16_t port, uint16_t max_len)
break;
case OFPP_CONTROLLER:
commit_odp_actions(ctx);
- nl_msg_put_u64(ctx->odp_actions, OVS_ACTION_ATTR_USERSPACE, max_len);
+ compose_controller_action(ctx, max_len);
break;
case OFPP_LOCAL:
add_output_action(ctx, OFPP_LOCAL);
} else if (port->bundle->bond) {
/* Autopath does not support VLAN hashing. */
struct ofport_dpif *slave = bond_choose_output_slave(
- port->bundle->bond, &ctx->flow, OFP_VLAN_NONE, &ctx->tags);
+ port->bundle->bond, &ctx->flow, 0, &ctx->tags);
if (slave) {
ofp_port = slave->up.ofp_port;
}
}
}
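+/* Translates an NXAST_LEARN action by constructing the flow_mod that it
+ * specifies for the current flow and applying it to the OpenFlow tables,
+ * logging (rate-limited) any failure. */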
+static void
+xlate_learn_action(struct action_xlate_ctx *ctx,
+ const struct nx_action_learn *learn)
+{
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+ struct ofputil_flow_mod fm;
+ int error;
+
+ learn_execute(learn, &ctx->flow, &fm);
+
+ error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
+ if (error && !VLOG_DROP_WARN(&rl)) {
+ char *msg = ofputil_error_to_string(error);
+ VLOG_WARN("learning action failed to modify flow table (%s)", msg);
+ free(msg);
+ }
+
+ free(fm.actions);
+}
+
static void
do_xlate_actions(const union ofp_action *in, size_t n_in,
struct action_xlate_ctx *ctx)
naor = (const struct nx_action_output_reg *) ia;
xlate_output_reg_action(ctx, naor);
break;
+
+ case OFPUTIL_NXAST_LEARN:
+ ctx->has_learn = true;
+ if (ctx->may_learn) {
+ xlate_learn_action(ctx, (const struct nx_action_learn *) ia);
+ }
+ break;
}
}
}
ctx->ofproto = ofproto;
ctx->flow = *flow;
ctx->packet = packet;
+ ctx->may_learn = packet != NULL;
ctx->resubmit_hook = NULL;
}
ctx->odp_actions = ofpbuf_new(512);
ctx->tags = 0;
ctx->may_set_up_flow = true;
+ ctx->has_learn = false;
+ ctx->has_normal = false;
ctx->nf_output_iface = NF_OUT_DROP;
ctx->recurse = 0;
ctx->priority = 0;
ctx->base_priority = 0;
ctx->base_flow = ctx->flow;
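+    /* Zero the base tun_id so that a nonzero flow tun_id is always committed
+     * with an explicit set-tunnel action; the input tunnel ID does not carry
+     * over to output on its own. */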
+ ctx->base_flow.tun_id = 0;
ctx->table_id = 0;
if (process_special(ctx->ofproto, &ctx->flow, ctx->packet)) {
ctx->may_set_up_flow = false;
} else {
+ add_sflow_action(ctx);
do_xlate_actions(in, n_in, ctx);
+ fix_sflow_action(ctx);
}
/* Check with in-band control to see if we're allowed to set up this
struct dst {
struct ofport_dpif *port;
- uint16_t vlan;
+ uint16_t vid;
};
struct dst_set {
static struct ofport_dpif *ofbundle_get_a_port(const struct ofbundle *);
+/* Given 'vid', the VID obtained from the 802.1Q header that was received as
+ * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle',
+ * the bundle on which the packet was received, returns the VLAN to which the
+ * packet belongs.
+ *
+ * Both 'vid' and the return value are in the range 0...4095. */
+static uint16_t
+input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
+{
+ switch (in_bundle->vlan_mode) {
+ case PORT_VLAN_ACCESS:
+ return in_bundle->vlan;
+
+ case PORT_VLAN_TRUNK:
+ return vid;
+
+ case PORT_VLAN_NATIVE_UNTAGGED:
+ case PORT_VLAN_NATIVE_TAGGED:
+ return vid ? vid : in_bundle->vlan;
+
+ default:
+ NOT_REACHED();
+ }
+}
+
+/* Given 'vlan', the VLAN that a packet belongs to, and
+ * 'out_bundle', a bundle on which the packet is to be output, returns the VID
+ * that should be included in the 802.1Q header. (If the return value is 0,
+ * then the 802.1Q header should only be included in the packet if there is a
+ * nonzero PCP.)
+ *
+ * Both 'vlan' and the return value are in the range 0...4095. */
+static uint16_t
+output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
+{
+ switch (out_bundle->vlan_mode) {
+ case PORT_VLAN_ACCESS:
+ return 0;
+
+ case PORT_VLAN_TRUNK:
+ case PORT_VLAN_NATIVE_TAGGED:
+ return vlan;
+
+ case PORT_VLAN_NATIVE_UNTAGGED:
+ return vlan == out_bundle->vlan ? 0 : vlan;
+
+ default:
+ NOT_REACHED();
+ }
+}
+
static bool
set_dst(struct action_xlate_ctx *ctx, struct dst *dst,
const struct ofbundle *in_bundle, const struct ofbundle *out_bundle)
{
- dst->vlan = (out_bundle->vlan >= 0 ? OFP_VLAN_NONE
- : in_bundle->vlan >= 0 ? in_bundle->vlan
- : ctx->flow.vlan_tci == 0 ? OFP_VLAN_NONE
- : vlan_tci_to_vid(ctx->flow.vlan_tci));
+ uint16_t vlan;
+
+ vlan = input_vid_to_vlan(in_bundle, vlan_tci_to_vid(ctx->flow.vlan_tci));
+ dst->vid = output_vlan_to_vid(out_bundle, vlan);
dst->port = (!out_bundle->bond
? ofbundle_get_a_port(out_bundle)
: bond_choose_output_slave(out_bundle->bond, &ctx->flow,
- dst->vlan, &ctx->tags));
-
+ dst->vid, &ctx->tags));
return dst->port != NULL;
}
{
size_t i;
for (i = 0; i < set->n; i++) {
- if (set->dsts[i].vlan == test->vlan
+ if (set->dsts[i].vid == test->vid
&& set->dsts[i].port == test->port) {
return true;
}
static bool
ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
- return (bundle->vlan < 0
+ return (bundle->vlan_mode != PORT_VLAN_ACCESS
&& (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}
{
struct ofproto_dpif *ofproto = ctx->ofproto;
mirror_mask_t mirrors;
- int flow_vlan;
+ uint16_t flow_vid;
size_t i;
mirrors = in_bundle->src_mirrors;
return;
}
- flow_vlan = vlan_tci_to_vid(ctx->flow.vlan_tci);
- if (flow_vlan == 0) {
- flow_vlan = OFP_VLAN_NONE;
- }
-
+ flow_vid = vlan_tci_to_vid(ctx->flow.vlan_tci);
while (mirrors) {
struct ofmirror *m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
if (vlan_is_mirrored(m, vlan)) {
if (ofbundle_includes_vlan(bundle, m->out_vlan)
&& set_dst(ctx, &dst, in_bundle, bundle))
{
- if (bundle->vlan < 0) {
- dst.vlan = m->out_vlan;
- }
+ /* set_dst() got dst->vid from the input packet's VLAN,
+ * not from m->out_vlan, so recompute it. */
+ dst.vid = output_vlan_to_vid(bundle, m->out_vlan);
+
if (dst_is_duplicate(set, &dst)) {
continue;
}
- /* Use the vlan tag on the original flow instead of
- * the one passed in the vlan parameter. This ensures
- * that we compare the vlan from before any implicit
- * tagging tags place. This is necessary because
- * dst->vlan is the final vlan, after removing implicit
- * tags. */
- if (bundle == in_bundle && dst.vlan == flow_vlan) {
+ if (bundle == in_bundle && dst.vid == flow_vid) {
/* Don't send out input port on same VLAN. */
continue;
}
const struct ofbundle *in_bundle,
const struct ofbundle *out_bundle)
{
- uint16_t initial_vlan, cur_vlan;
+ uint16_t initial_vid, cur_vid;
const struct dst *dst;
struct dst_set set;
dst_set_init(&set);
compose_dsts(ctx, vlan, in_bundle, out_bundle, &set);
compose_mirror_dsts(ctx, vlan, in_bundle, &set);
+ if (!set.n) {
+ dst_set_free(&set);
+ return;
+ }
/* Output all the packets we can without having to change the VLAN. */
- initial_vlan = vlan_tci_to_vid(ctx->flow.vlan_tci);
- if (initial_vlan == 0) {
- initial_vlan = OFP_VLAN_NONE;
- }
+ commit_odp_actions(ctx);
+ initial_vid = vlan_tci_to_vid(ctx->flow.vlan_tci);
for (dst = set.dsts; dst < &set.dsts[set.n]; dst++) {
- if (dst->vlan != initial_vlan) {
+ if (dst->vid != initial_vid) {
continue;
}
- nl_msg_put_u32(ctx->odp_actions,
- OVS_ACTION_ATTR_OUTPUT, dst->port->odp_port);
+ compose_output_action(ctx, dst->port->odp_port);
}
/* Then output the rest. */
- cur_vlan = initial_vlan;
+ cur_vid = initial_vid;
for (dst = set.dsts; dst < &set.dsts[set.n]; dst++) {
- if (dst->vlan == initial_vlan) {
+ if (dst->vid == initial_vid) {
continue;
}
- if (dst->vlan != cur_vlan) {
- if (dst->vlan == OFP_VLAN_NONE) {
- nl_msg_put_flag(ctx->odp_actions, OVS_ACTION_ATTR_POP_VLAN);
- } else {
- ovs_be16 tci;
+ if (dst->vid != cur_vid) {
+ ovs_be16 tci;
- if (cur_vlan != OFP_VLAN_NONE) {
- nl_msg_put_flag(ctx->odp_actions, OVS_ACTION_ATTR_POP_VLAN);
- }
- tci = htons(dst->vlan & VLAN_VID_MASK);
- tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK);
- nl_msg_put_be16(ctx->odp_actions,
- OVS_ACTION_ATTR_PUSH_VLAN, tci);
+ tci = htons(dst->vid);
+ tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK);
+ if (tci) {
+ tci |= htons(VLAN_CFI);
}
- cur_vlan = dst->vlan;
+ commit_vlan_tci(ctx, tci);
+
+ cur_vid = dst->vid;
}
- nl_msg_put_u32(ctx->odp_actions,
- OVS_ACTION_ATTR_OUTPUT, dst->port->odp_port);
+ compose_output_action(ctx, dst->port->odp_port);
}
dst_set_free(&set);
struct ofbundle *in_bundle, bool have_packet)
{
int vlan = vlan_tci_to_vid(flow->vlan_tci);
- if (in_bundle->vlan >= 0) {
- if (vlan) {
+ if (vlan) {
+ if (in_bundle->vlan_mode == PORT_VLAN_ACCESS) {
+            /* Drop tagged packets received on an access port. */
if (have_packet) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %d tagged "
in_bundle->name, in_bundle->vlan);
}
return -1;
- }
- vlan = in_bundle->vlan;
- } else {
- if (!ofbundle_includes_vlan(in_bundle, vlan)) {
+ } else if (ofbundle_includes_vlan(in_bundle, vlan)) {
+ return vlan;
+ } else {
+            /* Drop packets from a VLAN that this trunk does not carry. */
if (have_packet) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %d tagged "
}
return -1;
}
+ } else {
+ if (in_bundle->vlan_mode != PORT_VLAN_TRUNK) {
+ return in_bundle->vlan;
+ } else {
+ return ofbundle_includes_vlan(in_bundle, 0) ? 0 : -1;
+ }
}
-
- return vlan;
}
/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
"port %"PRIu16,
ofproto->up.name, flow->in_port);
}
+ *vlanp = -1;
return false;
}
*vlanp = vlan = flow_get_vlan(ofproto, flow, in_bundle, have_packet);
struct mac_entry *mac;
int vlan;
+ ctx->has_normal = true;
+
/* Check whether we should drop packets in this flow. */
if (!is_admissible(ctx->ofproto, &ctx->flow, ctx->packet != NULL,
&ctx->tags, &vlan, &in_bundle)) {
goto done;
}
- /* Learn source MAC (but don't try to learn from revalidation). */
- if (ctx->packet) {
+ /* Learn source MAC. */
+ if (ctx->may_learn) {
update_learning_table(ctx->ofproto, &ctx->flow, vlan, in_bundle);
}
}
}
\f
+/* Optimized flow revalidation.
+ *
+ * It's a difficult problem, in general, to tell which facets need to have
+ * their actions recalculated whenever the OpenFlow flow table changes. We
+ * don't try to solve that general problem: for most kinds of OpenFlow flow
+ * table changes, we recalculate the actions for every facet. This is
+ * relatively expensive, but it's good enough if the OpenFlow flow table
+ * doesn't change very often.
+ *
+ * However, we can expect one particular kind of OpenFlow flow table change to
+ * happen frequently: changes caused by MAC learning. To avoid wasting a lot
+ * of CPU on revalidating every facet whenever MAC learning modifies the flow
+ * table, we add a special case that applies to flow tables in which every rule
+ * has the same form (that is, the same wildcards), except that the table is
+ * also allowed to have a single "catch-all" flow that matches all packets. We
+ * optimize this case by tagging all of the facets that resubmit into the table
+ * and invalidating the same tag whenever a flow changes in that table. The
+ * end result is that we revalidate just the facets that need it (and sometimes
+ * a few more, but not all of the facets or even all of the facets that
+ * resubmit to the table modified by MAC learning). */
+
+/* Calculates the tag to use for 'flow' and wildcards 'wc' when it is inserted
+ * into an OpenFlow table with the given 'basis'. */
+static uint32_t
+rule_calculate_tag(const struct flow *flow, const struct flow_wildcards *wc,
+                   uint32_t basis)
+{
+ if (flow_wildcards_is_catchall(wc)) {
+ return 0;
+ } else {
+ struct flow tag_flow = *flow;
+ flow_zero_wildcards(&tag_flow, wc);
+        return tag_create_deterministic(flow_hash(&tag_flow, basis));
+ }
+}
+
+/* Following a change to OpenFlow table 'table_id' in 'ofproto', update the
+ * taggability of that table.
+ *
+ * This function must be called after *each* change to a flow table. If you
+ * skip calling it on some changes then the pointer comparisons at the end can
+ * be invalid if you get unlucky. For example, if a flow removal causes a
+ * cls_table to be destroyed and then a flow insertion causes a cls_table with
+ * different wildcards to be created with the same address, then this function
+ * will incorrectly skip revalidation. */
+static void
+table_update_taggable(struct ofproto_dpif *ofproto, uint8_t table_id)
+{
+ struct table_dpif *table = &ofproto->tables[table_id];
+ const struct classifier *cls = &ofproto->up.tables[table_id];
+ struct cls_table *catchall, *other;
+ struct cls_table *t;
+
+ catchall = other = NULL;
+
+ switch (hmap_count(&cls->tables)) {
+ case 0:
+ /* We could tag this OpenFlow table but it would make the logic a
+ * little harder and it's a corner case that doesn't seem worth it
+ * yet. */
+ break;
+
+ case 1:
+ case 2:
+ HMAP_FOR_EACH (t, hmap_node, &cls->tables) {
+ if (cls_table_is_catchall(t)) {
+ catchall = t;
+ } else if (!other) {
+ other = t;
+ } else {
+ /* Indicate that we can't tag this by setting both tables to
+ * NULL. (We know that 'catchall' is already NULL.) */
+ other = NULL;
+ }
+ }
+ break;
+
+ default:
+ /* Can't tag this table. */
+ break;
+ }
+
+ if (table->catchall_table != catchall || table->other_table != other) {
+ table->catchall_table = catchall;
+ table->other_table = other;
+ ofproto->need_revalidate = true;
+ }
+}
+
+/* Given 'rule' that has changed in some way (either it is a rule being
+ * inserted, a rule being deleted, or a rule whose actions are being
+ * modified), marks facets for revalidation to ensure that packets will be
+ * forwarded correctly according to the new state of the flow table.
+ *
+ * This function must be called after *each* change to a flow table. See
+ * the comment on table_update_taggable() for more information. */
+static void
+rule_invalidate(const struct rule_dpif *rule)
+{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
+
+ table_update_taggable(ofproto, rule->up.table_id);
+
+ if (!ofproto->need_revalidate) {
+ struct table_dpif *table = &ofproto->tables[rule->up.table_id];
+
+ if (table->other_table && rule->tag) {
+ tag_set_add(&ofproto->revalidate_set, rule->tag);
+ } else {
+ ofproto->need_revalidate = true;
+ }
+ }
+}
+\f
static bool
get_drop_frags(struct ofproto *ofproto_)
{
arg1 = strtok_r(NULL, " ", &save_ptr);
arg2 = strtok_r(NULL, " ", &save_ptr);
arg3 = strtok_r(NULL, "", &save_ptr); /* Get entire rest of line. */
- if (dpname && arg1 && !arg2 && !arg3) {
- /* ofproto/trace dpname flow */
+ if (dpname && arg1 && (!arg2 || !strcmp(arg2, "-generate")) && !arg3) {
+ /* ofproto/trace dpname flow [-generate] */
int error;
/* Convert string to datapath key. */
unixctl_command_reply(conn, 501, "Invalid flow");
goto exit;
}
+
+ /* Generate a packet, if requested. */
+ if (arg2) {
+ packet = ofpbuf_new(0);
+ flow_compose(packet, &flow);
+ }
} else if (dpname && arg1 && arg2 && arg3) {
/* ofproto/trace dpname tun_id in_port packet */
uint16_t in_port;
}
registered = true;
- unixctl_command_register("ofproto/trace", ofproto_unixctl_trace, NULL);
- unixctl_command_register("fdb/show", ofproto_unixctl_fdb_show, NULL);
-
- unixctl_command_register("ofproto/clog", ofproto_dpif_clog, NULL);
- unixctl_command_register("ofproto/unclog", ofproto_dpif_unclog, NULL);
+ unixctl_command_register("ofproto/trace",
+ "bridge {tun_id in_port packet | odp_flow [-generate]}",
+ ofproto_unixctl_trace, NULL);
+ unixctl_command_register("fdb/show", "bridge", ofproto_unixctl_fdb_show,
+ NULL);
+ unixctl_command_register("ofproto/clog", "", ofproto_dpif_clog, NULL);
+ unixctl_command_register("ofproto/unclog", "", ofproto_dpif_unclog, NULL);
}
\f
const struct ofproto_class ofproto_dpif_class = {