/*
- * Copyright (c) 2009, 2010, 2011 Nicira Networks.
+ * Copyright (c) 2009, 2010, 2011, 2012 Nicira Networks.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
/* Maximum depth of flow table recursion (due to resubmit actions) in a
* flow translation. */
-#define MAX_RESUBMIT_RECURSION 16
+#define MAX_RESUBMIT_RECURSION 32
/* Number of implemented OpenFlow tables. */
enum { N_TABLES = 255 };
/* Configuration. */
struct list ports; /* Contains "struct ofport"s. */
+ enum port_vlan_mode vlan_mode; /* VLAN mode. */
int vlan; /* -1=trunk port, else a 12-bit VLAN ID. */
unsigned long *trunks; /* Bitmap of trunked VLANs, if 'vlan' == -1.
* NULL if all VLANs are trunked. */
};
static void bundle_remove(struct ofport *);
+static void bundle_update(struct ofbundle *);
static void bundle_destroy(struct ofbundle *);
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);
+static void stp_run(struct ofproto_dpif *ofproto);
+static void stp_wait(struct ofproto_dpif *ofproto);
+
struct action_xlate_ctx {
/* action_xlate_ctx_init() initializes these members. */
struct flow base_flow; /* Flow at the last commit. */
uint32_t base_priority; /* Priority at the last commit. */
uint8_t table_id; /* OpenFlow table ID where flow was found. */
+ uint32_t sflow_n_outputs; /* Number of output ports. */
+ uint16_t sflow_odp_port; /* Output port for composing sFlow action. */
+ uint16_t user_cookie_offset; /* Used for user_action_cookie fixup. */
};
static void action_xlate_ctx_init(struct action_xlate_ctx *,
struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
};
-static struct facet *facet_create(struct rule_dpif *, const struct flow *,
- const struct ofpbuf *packet);
+static struct facet *facet_create(struct rule_dpif *, const struct flow *);
static void facet_remove(struct ofproto_dpif *, struct facet *);
static void facet_free(struct facet *);
const struct flow *);
static bool facet_revalidate(struct ofproto_dpif *, struct facet *);
+static bool execute_controller_action(struct ofproto_dpif *,
+ const struct flow *,
+ const struct nlattr *odp_actions,
+ size_t actions_len,
+ struct ofpbuf *packet, bool clone);
static void facet_execute(struct ofproto_dpif *, struct facet *,
struct ofpbuf *packet);
tag_type tag; /* Tag associated with this port. */
uint32_t bond_stable_id; /* stable_id to use as bond slave, or 0. */
bool may_enable; /* May be enabled in bonds. */
+
+ struct stp_port *stp_port; /* Spanning Tree Protocol, if any. */
+ enum stp_state stp_state; /* Always STP_DISABLED if STP not in use. */
+ long long int stp_state_entered; /* Time (in msec) 'stp_state' was entered. */
};
static struct ofport_dpif *
struct list completions;
bool has_bundle_action; /* True when the first bundle action appears. */
+
+ /* Spanning tree. */
+ struct stp *stp;
+ long long int stp_last_tick; /* Time (in msec) of the last STP tick. */
};
/* Defer flow mod completion until "ovs-appctl ofproto/unclog"? (Useful only
static bool is_admissible(struct ofproto_dpif *, const struct flow *,
bool have_packet, tag_type *, int *vlanp,
struct ofbundle **in_bundlep);
-static void handle_upcall(struct ofproto_dpif *, struct dpif_upcall *);
+
+/* Upcalls. */
+#define FLOW_MISS_MAX_BATCH 50
+static int handle_upcalls(struct ofproto_dpif *, unsigned int max_batch);
/* Flow expiration. */
static int expire(struct ofproto_dpif *);
/* Utilities. */
static int send_packet(struct ofproto_dpif *, uint32_t odp_port,
const struct ofpbuf *packet);
-
+static size_t
+compose_sflow_action(const struct ofproto_dpif *, struct ofpbuf *odp_actions,
+ const struct flow *, uint32_t odp_port);
/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
\f
ofproto->max_ports = dpif_get_max_ports(ofproto->dpif);
ofproto->n_matches = 0;
+ dpif_flow_flush(ofproto->dpif);
+ dpif_recv_purge(ofproto->dpif);
+
error = dpif_recv_set_mask(ofproto->dpif,
((1u << DPIF_UC_MISS) |
- (1u << DPIF_UC_ACTION) |
- (1u << DPIF_UC_SAMPLE)));
+ (1u << DPIF_UC_ACTION)));
if (error) {
VLOG_ERR("failed to listen on datapath %s: %s", name, strerror(error));
dpif_close(ofproto->dpif);
return error;
}
- dpif_flow_flush(ofproto->dpif);
- dpif_recv_purge(ofproto->dpif);
ofproto->netflow = NULL;
ofproto->sflow = NULL;
+ ofproto->stp = NULL;
hmap_init(&ofproto->bundles);
ofproto->ml = mac_learning_create();
for (i = 0; i < MAX_MIRRORS; i++) {
dpif_close(ofproto->dpif);
}
+static int
+run_fast(struct ofproto *ofproto_)
+{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ unsigned int work;
+
+ /* Handle one or more batches of upcalls, until there's nothing left to do
+ * or until we do a fixed total amount of work.
+ *
+ * We do work in batches because it can be much cheaper to set up a number
+ * of flows and fire off their packets all at once. We do multiple batches
+ * because in some cases handling a packet can cause another packet to be
+ * queued almost immediately as part of the return flow. Both
+ * optimizations can make major improvements on some benchmarks and
+ * presumably for real traffic as well. */
+ work = 0;
+ while (work < FLOW_MISS_MAX_BATCH) {
+ int retval = handle_upcalls(ofproto, FLOW_MISS_MAX_BATCH - work);
+ if (retval <= 0) {
+ return -retval;
+ }
+ work += retval;
+ }
+ return 0;
+}
+
static int
run(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct ofport_dpif *ofport;
struct ofbundle *bundle;
- int i;
+ int error;
if (!clogged) {
complete_operations(ofproto);
}
dpif_run(ofproto->dpif);
- for (i = 0; i < 50; i++) {
- struct dpif_upcall packet;
- int error;
-
- error = dpif_recv(ofproto->dpif, &packet);
- if (error) {
- if (error == ENODEV) {
- /* Datapath destroyed. */
- return error;
- }
- break;
- }
-
- handle_upcall(ofproto, &packet);
+ error = run_fast(ofproto_);
+ if (error) {
+ return error;
}
if (timer_expired(&ofproto->next_expiration)) {
bundle_run(bundle);
}
+ stp_run(ofproto);
mac_learning_run(ofproto->ml, &ofproto->revalidate_set);
/* Now revalidate if there's anything to do. */
bundle_wait(bundle);
}
mac_learning_wait(ofproto->ml);
+ stp_wait(ofproto);
if (ofproto->need_revalidate) {
/* Shouldn't happen, but if it does just go around again. */
VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
get_tables(struct ofproto *ofproto_, struct ofp_table_stats *ots)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct ovs_dp_stats s;
+ struct dpif_dp_stats s;
strcpy(ots->name, "classifier");
struct ofport_dpif *port = ofport_dpif_cast(port_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
+ ofproto->need_revalidate = true;
port->odp_port = ofp_port_to_odp_port(port->up.ofp_port);
port->bundle = NULL;
port->cfm = NULL;
port->tag = tag_create_random();
port->may_enable = true;
+ port->stp_port = NULL;
+ port->stp_state = STP_DISABLED;
if (ofproto->sflow) {
dpif_sflow_add_port(ofproto->sflow, port->odp_port,
struct ofport_dpif *port = ofport_dpif_cast(port_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
+ ofproto->need_revalidate = true;
bundle_remove(port_);
set_cfm(port_, NULL);
if (ofproto->sflow) {
if (changed & htonl(OFPPC_NO_RECV | OFPPC_NO_RECV_STP |
OFPPC_NO_FWD | OFPPC_NO_FLOOD)) {
ofproto->need_revalidate = true;
+
+ if (changed & htonl(OFPPC_NO_FLOOD) && port->bundle) {
+ bundle_update(port->bundle);
+ }
}
}
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct dpif_sflow *ds = ofproto->sflow;
+
if (sflow_options) {
if (!ds) {
struct ofport_dpif *ofport;
dpif_sflow_add_port(ds, ofport->odp_port,
netdev_get_name(ofport->up.netdev));
}
+ ofproto->need_revalidate = true;
}
dpif_sflow_set_options(ds, sflow_options);
} else {
- dpif_sflow_destroy(ds);
- ofproto->sflow = NULL;
+ if (ds) {
+ dpif_sflow_destroy(ds);
+ ofproto->need_revalidate = true;
+ ofproto->sflow = NULL;
+ }
}
return 0;
}
}
}
\f
+/* Spanning Tree. */
+
+static void
+send_bpdu_cb(struct ofpbuf *pkt, int port_num, void *ofproto_)
+{
+ struct ofproto_dpif *ofproto = ofproto_;
+ struct stp_port *sp = stp_get_port(ofproto->stp, port_num);
+ struct ofport_dpif *ofport;
+
+ ofport = stp_port_get_aux(sp);
+ if (!ofport) {
+ VLOG_WARN_RL(&rl, "%s: cannot send BPDU on unknown port %d",
+ ofproto->up.name, port_num);
+ } else {
+ struct eth_header *eth = pkt->l2;
+
+ netdev_get_etheraddr(ofport->up.netdev, eth->eth_src);
+ if (eth_addr_is_zero(eth->eth_src)) {
+ VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d "
+ "with unknown MAC", ofproto->up.name, port_num);
+ } else {
+ int error = netdev_send(ofport->up.netdev, pkt);
+ if (error) {
+ VLOG_WARN_RL(&rl, "%s: sending BPDU on port %s failed (%s)",
+ ofproto->up.name,
+ netdev_get_name(ofport->up.netdev),
+ strerror(error));
+ }
+ }
+ }
+ ofpbuf_delete(pkt);
+}
+
+/* Configures STP on 'ofproto_' using the settings defined in 's'. */
+static int
+set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s)
+{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+
+ /* Only revalidate flows if the configuration changed. */
+ if (!s != !ofproto->stp) {
+ ofproto->need_revalidate = true;
+ }
+
+ if (s) {
+ if (!ofproto->stp) {
+ ofproto->stp = stp_create(ofproto_->name, s->system_id,
+ send_bpdu_cb, ofproto);
+ ofproto->stp_last_tick = time_msec();
+ }
+
+ stp_set_bridge_id(ofproto->stp, s->system_id);
+ stp_set_bridge_priority(ofproto->stp, s->priority);
+ stp_set_hello_time(ofproto->stp, s->hello_time);
+ stp_set_max_age(ofproto->stp, s->max_age);
+ stp_set_forward_delay(ofproto->stp, s->fwd_delay);
+ } else {
+ stp_destroy(ofproto->stp);
+ ofproto->stp = NULL;
+ }
+
+ return 0;
+}
+
+static int
+get_stp_status(struct ofproto *ofproto_, struct ofproto_stp_status *s)
+{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+
+ if (ofproto->stp) {
+ s->enabled = true;
+ s->bridge_id = stp_get_bridge_id(ofproto->stp);
+ s->designated_root = stp_get_designated_root(ofproto->stp);
+ s->root_path_cost = stp_get_root_path_cost(ofproto->stp);
+ } else {
+ s->enabled = false;
+ }
+
+ return 0;
+}
+
+static void
+update_stp_port_state(struct ofport_dpif *ofport)
+{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+ enum stp_state state;
+
+ /* Figure out new state. */
+ state = ofport->stp_port ? stp_port_get_state(ofport->stp_port)
+ : STP_DISABLED;
+
+ /* Update state. */
+ if (ofport->stp_state != state) {
+ ovs_be32 of_state;
+ bool fwd_change;
+
+ VLOG_DBG_RL(&rl, "port %s: STP state changed from %s to %s",
+ netdev_get_name(ofport->up.netdev),
+ stp_state_name(ofport->stp_state),
+ stp_state_name(state));
+ if (stp_learn_in_state(ofport->stp_state)
+ != stp_learn_in_state(state)) {
+ /* xxx Learning action flows should also be flushed. */
+ mac_learning_flush(ofproto->ml);
+ }
+ fwd_change = stp_forward_in_state(ofport->stp_state)
+ != stp_forward_in_state(state);
+
+ ofproto->need_revalidate = true;
+ ofport->stp_state = state;
+ ofport->stp_state_entered = time_msec();
+
+ if (fwd_change && ofport->bundle) {
+ bundle_update(ofport->bundle);
+ }
+
+ /* Update the STP state bits in the OpenFlow port description. */
+ of_state = (ofport->up.opp.state & htonl(~OFPPS_STP_MASK))
+ | htonl(state == STP_LISTENING ? OFPPS_STP_LISTEN
+ : state == STP_LEARNING ? OFPPS_STP_LEARN
+ : state == STP_FORWARDING ? OFPPS_STP_FORWARD
+ : state == STP_BLOCKING ? OFPPS_STP_BLOCK
+ : 0);
+ ofproto_port_set_state(&ofport->up, of_state);
+ }
+}
+
+/* Configures STP on 'ofport_' using the settings defined in 's'. The
+ * caller is responsible for assigning STP port numbers and ensuring
+ * there are no duplicates. */
+static int
+set_stp_port(struct ofport *ofport_,
+ const struct ofproto_port_stp_settings *s)
+{
+ struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+ struct stp_port *sp = ofport->stp_port;
+
+ if (!s || !s->enable) {
+ if (sp) {
+ ofport->stp_port = NULL;
+ stp_port_disable(sp);
+ update_stp_port_state(ofport);
+ }
+ return 0;
+ } else if (sp && stp_port_no(sp) != s->port_num
+ && ofport == stp_port_get_aux(sp)) {
+ /* The port-id changed, so disable the old one if it's not
+ * already in use by another port. */
+ stp_port_disable(sp);
+ }
+
+ sp = ofport->stp_port = stp_get_port(ofproto->stp, s->port_num);
+ stp_port_enable(sp);
+
+ stp_port_set_aux(sp, ofport);
+ stp_port_set_priority(sp, s->priority);
+ stp_port_set_path_cost(sp, s->path_cost);
+
+ update_stp_port_state(ofport);
+
+ return 0;
+}
+
+static int
+get_stp_port_status(struct ofport *ofport_,
+ struct ofproto_port_stp_status *s)
+{
+ struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+ struct stp_port *sp = ofport->stp_port;
+
+ if (!ofproto->stp || !sp) {
+ s->enabled = false;
+ return 0;
+ }
+
+ s->enabled = true;
+ s->port_id = stp_port_get_id(sp);
+ s->state = stp_port_get_state(sp);
+ s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000;
+ s->role = stp_port_get_role(sp);
+ stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count);
+
+ return 0;
+}
+
+static void
+stp_run(struct ofproto_dpif *ofproto)
+{
+ if (ofproto->stp) {
+ long long int now = time_msec();
+ long long int elapsed = now - ofproto->stp_last_tick;
+ struct stp_port *sp;
+
+ if (elapsed > 0) {
+ stp_tick(ofproto->stp, MIN(INT_MAX, elapsed));
+ ofproto->stp_last_tick = now;
+ }
+ while (stp_get_changed_port(ofproto->stp, &sp)) {
+ struct ofport_dpif *ofport = stp_port_get_aux(sp);
+
+ if (ofport) {
+ update_stp_port_state(ofport);
+ }
+ }
+ }
+}
+
+static void
+stp_wait(struct ofproto_dpif *ofproto)
+{
+ if (ofproto->stp) {
+ poll_timer_wait(1000);
+ }
+}
+
+/* Returns true if STP should process 'flow'. */
+static bool
+stp_should_process_flow(const struct flow *flow)
+{
+ return eth_addr_equals(flow->dl_dst, eth_addr_stp);
+}
+
+static void
+stp_process_packet(const struct ofport_dpif *ofport,
+ const struct ofpbuf *packet)
+{
+ struct ofpbuf payload = *packet;
+ struct eth_header *eth = payload.data;
+ struct stp_port *sp = ofport->stp_port;
+
+ /* Sink packets on ports that have STP disabled when the bridge has
+ * STP enabled. */
+ if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
+ return;
+ }
+
+ /* Trim off padding on payload. */
+ if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
+ payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
+ }
+
+ if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
+ stp_received_bpdu(sp, payload.data, payload.size);
+ }
+}
+\f
/* Bundles. */
/* Expires all MAC learning entries associated with 'port' and forces ofproto
}
}
+static void
+bundle_update(struct ofbundle *bundle)
+{
+ struct ofport_dpif *port;
+
+ bundle->floodable = true;
+ LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
+ if (port->up.opp.config & htonl(OFPPC_NO_FLOOD)
+ || !stp_forward_in_state(port->stp_state)) {
+ bundle->floodable = false;
+ break;
+ }
+ }
+}
+
static void
bundle_del_port(struct ofport_dpif *port)
{
bond_slave_unregister(bundle->bond, port);
}
- bundle->floodable = true;
- LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
- if (port->up.opp.config & htonl(OFPPC_NO_FLOOD)) {
- bundle->floodable = false;
- }
- }
+ bundle_update(bundle);
}
static bool
port->bundle = bundle;
list_push_back(&bundle->ports, &port->bundle_node);
- if (port->up.opp.config & htonl(OFPPC_NO_FLOOD)) {
+ if (port->up.opp.config & htonl(OFPPC_NO_FLOOD)
+ || !stp_forward_in_state(port->stp_state)) {
bundle->floodable = false;
}
}
if (lacp) {
+ port->bundle->ofproto->need_revalidate = true;
lacp_slave_register(bundle->lacp, port, lacp);
}
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
bool need_flush = false;
- const unsigned long *trunks;
struct ofport_dpif *port;
struct ofbundle *bundle;
+ unsigned long *trunks;
+ int vlan;
size_t i;
bool ok;
bundle->name = NULL;
list_init(&bundle->ports);
+ bundle->vlan_mode = PORT_VLAN_TRUNK;
bundle->vlan = -1;
bundle->trunks = NULL;
bundle->lacp = NULL;
return EINVAL;
}
+ /* Set VLAN tagging mode. */
+ if (s->vlan_mode != bundle->vlan_mode) {
+ bundle->vlan_mode = s->vlan_mode;
+ need_flush = true;
+ }
+
/* Set VLAN tag. */
- if (s->vlan != bundle->vlan) {
- bundle->vlan = s->vlan;
+ vlan = (s->vlan_mode == PORT_VLAN_TRUNK ? -1
+ : s->vlan >= 0 && s->vlan <= 4095 ? s->vlan
+ : 0);
+ if (vlan != bundle->vlan) {
+ bundle->vlan = vlan;
need_flush = true;
}
/* Get trunked VLANs. */
- trunks = s->vlan == -1 ? NULL : s->trunks;
+ switch (s->vlan_mode) {
+ case PORT_VLAN_ACCESS:
+ trunks = NULL;
+ break;
+
+ case PORT_VLAN_TRUNK:
+ trunks = (unsigned long *) s->trunks;
+ break;
+
+ case PORT_VLAN_NATIVE_UNTAGGED:
+ case PORT_VLAN_NATIVE_TAGGED:
+ if (vlan != 0 && (!s->trunks
+ || !bitmap_is_set(s->trunks, vlan)
+ || bitmap_is_set(s->trunks, 0))) {
+ /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
+ if (s->trunks) {
+ trunks = bitmap_clone(s->trunks, 4096);
+ } else {
+ trunks = bitmap_allocate1(4096);
+ }
+ bitmap_set1(trunks, vlan);
+ bitmap_set0(trunks, 0);
+ } else {
+ trunks = (unsigned long *) s->trunks;
+ }
+ break;
+
+ default:
+ NOT_REACHED();
+ }
if (!vlan_bitmap_equal(trunks, bundle->trunks)) {
free(bundle->trunks);
- bundle->trunks = vlan_bitmap_clone(trunks);
+ if (trunks == s->trunks) {
+ bundle->trunks = vlan_bitmap_clone(trunks);
+ } else {
+ bundle->trunks = trunks;
+ trunks = NULL;
+ }
need_flush = true;
}
+ if (trunks != s->trunks) {
+ free(trunks);
+ }
/* Bonding. */
if (!list_is_short(&bundle->ports)) {
}
static bool
-is_mirror_output_bundle(struct ofproto *ofproto_, void *aux)
+is_mirror_output_bundle(const struct ofproto *ofproto_, void *aux)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct ofbundle *bundle = bundle_lookup(ofproto, aux);
ofpbuf_uninit(&packet);
}
- enable = enable && !cfm_get_fault(ofport->cfm);
+ enable = enable && !cfm_get_fault(ofport->cfm)
+ && cfm_get_opup(ofport->cfm);
}
if (ofport->bundle) {
\f
/* Upcall handling. */
-/* Given 'upcall', of type DPIF_UC_ACTION or DPIF_UC_MISS, sends an
- * OFPT_PACKET_IN message to each OpenFlow controller as necessary according to
- * their individual configurations.
+/* Flow miss batching.
+ *
+ * Some dpifs implement operations faster when you hand them off in a batch.
+ * To allow batching, "struct flow_miss" queues the dpif-related work needed
+ * for a given flow. Each "struct flow_miss" corresponds to sending one or
+ * more packets, plus possibly installing the flow in the dpif.
+ *
+ * So far we only batch the operations that affect flow setup time the most.
+ * It's possible to batch more than that, but the benefit might be minimal. */
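+/* (A sketch of the batching flow: handle_miss_upcalls() buckets upcalls by
+ * flow into a "todo" hmap so that packets in the same flow share one
+ * "struct flow_miss"; handle_flow_miss() turns each flow_miss into
+ * DPIF_OP_EXECUTE operations plus at most one DPIF_OP_FLOW_PUT; and a single
+ * dpif_operate() call then executes the whole batch.) */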
+struct flow_miss {
+ struct hmap_node hmap_node; /* In the "todo" hmap of handle_miss_upcalls(). */
+ struct flow flow; /* Flow shared by all of the batched packets. */
+ const struct nlattr *key; /* ODP flow key. */
+ size_t key_len; /* Length of 'key', in bytes. */
+ struct list packets; /* Contains "struct ofpbuf"s of the packets. */
+};
+
+struct flow_miss_op {
+ union dpif_op dpif_op; /* Operation to hand to dpif_operate(). */
+ struct facet *facet; /* Facet that the operation applies to. */
+};
+
+/* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each
+ * OpenFlow controller as necessary according to their individual
+ * configurations.
+ *
+ * If 'clone' is true, the caller retains ownership of 'packet'. Otherwise,
+ * ownership is transferred to this function. */
+static void
+send_packet_in_miss(struct ofproto_dpif *ofproto, struct ofpbuf *packet,
+ const struct flow *flow, bool clone)
+{
+ struct ofputil_packet_in pin;
+
+ pin.packet = packet;
+ pin.in_port = flow->in_port;
+ pin.reason = OFPR_NO_MATCH;
+ pin.buffer_id = 0; /* not yet known */
+ pin.send_len = 0; /* not used for flow table misses */
+ connmgr_send_packet_in(ofproto->up.connmgr, &pin, flow,
+ clone ? NULL : packet);
+}
+
+/* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_ACTION to each
+ * OpenFlow controller as necessary according to their individual
+ * configurations.
+ *
+ * 'send_len' should be the number of bytes of 'packet' to send to the
+ * controller, as specified in the action that caused the packet to be sent.
*
- * If 'clone' is true, the caller retains ownership of 'upcall->packet'.
+ * If 'clone' is true, the caller retains ownership of 'packet'.
* Otherwise, ownership is transferred to this function. */
static void
-send_packet_in(struct ofproto_dpif *ofproto, struct dpif_upcall *upcall,
- const struct flow *flow, bool clone)
+send_packet_in_action(struct ofproto_dpif *ofproto, struct ofpbuf *packet,
+ uint64_t userdata, const struct flow *flow, bool clone)
{
struct ofputil_packet_in pin;
+ struct user_action_cookie cookie;
+
+ memcpy(&cookie, &userdata, sizeof(cookie));
- pin.packet = upcall->packet;
+ pin.packet = packet;
pin.in_port = flow->in_port;
- pin.reason = upcall->type == DPIF_UC_MISS ? OFPR_NO_MATCH : OFPR_ACTION;
+ pin.reason = OFPR_ACTION;
pin.buffer_id = 0; /* not yet known */
- pin.send_len = upcall->userdata;
+ pin.send_len = cookie.data;
connmgr_send_packet_in(ofproto->up.connmgr, &pin, flow,
- clone ? NULL : upcall->packet);
+ clone ? NULL : packet);
}
static bool
lacp_process_packet(ofport->bundle->lacp, ofport, packet);
}
return true;
+ } else if (ofproto->stp && stp_should_process_flow(flow)) {
+ if (packet) {
+ stp_process_packet(ofport, packet);
+ }
+ return true;
}
return false;
}
-static void
-handle_miss_upcall(struct ofproto_dpif *ofproto, struct dpif_upcall *upcall)
+static struct flow_miss *
+flow_miss_create(struct hmap *todo, const struct flow *flow,
+ const struct nlattr *key, size_t key_len)
{
- struct facet *facet;
- struct flow flow;
-
- /* Obtain in_port and tun_id, at least. */
- odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
+ uint32_t hash = flow_hash(flow, 0);
+ struct flow_miss *miss;
- /* Set header pointers in 'flow'. */
- flow_extract(upcall->packet, flow.tun_id, flow.in_port, &flow);
-
- /* Handle 802.1ag and LACP. */
- if (process_special(ofproto, &flow, upcall->packet)) {
- ofpbuf_delete(upcall->packet);
- ofproto->n_matches++;
- return;
+ HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
+ if (flow_equal(&miss->flow, flow)) {
+ return miss;
+ }
}
- /* Check with in-band control to see if this packet should be sent
- * to the local port regardless of the flow table. */
- if (connmgr_msg_in_hook(ofproto->up.connmgr, &flow, upcall->packet)) {
- send_packet(ofproto, OVSP_LOCAL, upcall->packet);
- }
+ miss = xmalloc(sizeof *miss);
+ hmap_insert(todo, &miss->hmap_node, hash);
+ miss->flow = *flow;
+ miss->key = key;
+ miss->key_len = key_len;
+ list_init(&miss->packets);
+ return miss;
+}
+
+static void
+handle_flow_miss(struct ofproto_dpif *ofproto, struct flow_miss *miss,
+ struct flow_miss_op *ops, size_t *n_ops)
+{
+ const struct flow *flow = &miss->flow;
+ struct ofpbuf *packet, *next_packet;
+ struct facet *facet;
- facet = facet_lookup_valid(ofproto, &flow);
+ facet = facet_lookup_valid(ofproto, flow);
if (!facet) {
- struct rule_dpif *rule = rule_dpif_lookup(ofproto, &flow, 0);
+ struct rule_dpif *rule;
+
+ rule = rule_dpif_lookup(ofproto, flow, 0);
if (!rule) {
/* Don't send a packet-in if OFPPC_NO_PACKET_IN asserted. */
- struct ofport_dpif *port = get_ofp_port(ofproto, flow.in_port);
+ struct ofport_dpif *port = get_ofp_port(ofproto, flow->in_port);
if (port) {
if (port->up.opp.config & htonl(OFPPC_NO_PACKET_IN)) {
COVERAGE_INC(ofproto_dpif_no_packet_in);
/* XXX install 'drop' flow entry */
- ofpbuf_delete(upcall->packet);
return;
}
} else {
VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16,
- flow.in_port);
+ flow->in_port);
+ }
+
+ LIST_FOR_EACH_SAFE (packet, next_packet, list_node,
+ &miss->packets) {
+ list_remove(&packet->list_node);
+ send_packet_in_miss(ofproto, packet, flow, false);
}
- send_packet_in(ofproto, upcall, &flow, false);
return;
}
- facet = facet_create(rule, &flow, upcall->packet);
- } else if (!facet->may_install) {
- /* The facet is not installable, that is, we need to process every
- * packet, so process the current packet's actions into 'facet'. */
- facet_make_actions(ofproto, facet, upcall->packet);
+ facet = facet_create(rule, flow);
}
- if (facet->rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
- /*
- * Extra-special case for fail-open mode.
- *
- * We are in fail-open mode and the packet matched the fail-open rule,
- * but we are connected to a controller too. We should send the packet
- * up to the controller in the hope that it will try to set up a flow
- * and thereby allow us to exit fail-open.
- *
- * See the top-level comment in fail-open.c for more information.
- */
- send_packet_in(ofproto, upcall, &flow, true);
+ LIST_FOR_EACH_SAFE (packet, next_packet, list_node, &miss->packets) {
+ list_remove(&packet->list_node);
+ ofproto->n_matches++;
+
+ if (facet->rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
+ /*
+ * Extra-special case for fail-open mode.
+ *
+ * We are in fail-open mode and the packet matched the fail-open
+ * rule, but we are connected to a controller too. We should send
+ * the packet up to the controller in the hope that it will try to
+ * set up a flow and thereby allow us to exit fail-open.
+ *
+ * See the top-level comment in fail-open.c for more information.
+ */
+ send_packet_in_miss(ofproto, packet, flow, true);
+ }
+
+ if (!facet->may_install) {
+ facet_make_actions(ofproto, facet, packet);
+ }
+ if (!execute_controller_action(ofproto, &facet->flow,
+ facet->actions, facet->actions_len,
+ packet, true)) {
+ struct flow_miss_op *op = &ops[(*n_ops)++];
+ struct dpif_execute *execute = &op->dpif_op.execute;
+
+ op->facet = facet;
+ execute->type = DPIF_OP_EXECUTE;
+ execute->key = miss->key;
+ execute->key_len = miss->key_len;
+ execute->actions
+ = (facet->may_install
+ ? facet->actions
+ : xmemdup(facet->actions, facet->actions_len));
+ execute->actions_len = facet->actions_len;
+ execute->packet = packet;
+ }
}
- facet_execute(ofproto, facet, upcall->packet);
- facet_install(ofproto, facet, false);
- ofproto->n_matches++;
+ if (facet->may_install) {
+ struct flow_miss_op *op = &ops[(*n_ops)++];
+ struct dpif_flow_put *put = &op->dpif_op.flow_put;
+
+ op->facet = facet;
+ put->type = DPIF_OP_FLOW_PUT;
+ put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
+ put->key = miss->key;
+ put->key_len = miss->key_len;
+ put->actions = facet->actions;
+ put->actions_len = facet->actions_len;
+ put->stats = NULL;
+ }
}
static void
-handle_upcall(struct ofproto_dpif *ofproto, struct dpif_upcall *upcall)
+handle_miss_upcalls(struct ofproto_dpif *ofproto, struct dpif_upcall *upcalls,
+ size_t n_upcalls)
+{
+ struct dpif_upcall *upcall;
+ struct flow_miss *miss, *next_miss;
+ struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH * 2];
+ union dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2];
+ struct hmap todo;
+ size_t n_ops;
+ size_t i;
+
+ if (!n_upcalls) {
+ return;
+ }
+
+ /* Construct the to-do list.
+ *
+ * This just amounts to extracting the flow from each packet and sticking
+ * the packets that have the same flow in the same "flow_miss" structure so
+ * that we can process them together. */
+ hmap_init(&todo);
+ for (upcall = upcalls; upcall < &upcalls[n_upcalls]; upcall++) {
+ struct flow_miss *miss;
+ struct flow flow;
+
+ /* Obtain in_port and tun_id, at least, then set 'flow''s header
+ * pointers. */
+ odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
+ flow_extract(upcall->packet, flow.tun_id, flow.in_port, &flow);
+
+ /* Handle 802.1ag, LACP, and STP specially. */
+ if (process_special(ofproto, &flow, upcall->packet)) {
+ ofpbuf_delete(upcall->packet);
+ ofproto->n_matches++;
+ continue;
+ }
+
+ /* Add other packets to a to-do list. */
+ miss = flow_miss_create(&todo, &flow, upcall->key, upcall->key_len);
+ list_push_back(&miss->packets, &upcall->packet->list_node);
+ }
+
+ /* Process each element in the to-do list, constructing the set of
+ * operations to batch. */
+ n_ops = 0;
+ HMAP_FOR_EACH_SAFE (miss, next_miss, hmap_node, &todo) {
+ handle_flow_miss(ofproto, miss, flow_miss_ops, &n_ops);
+ ofpbuf_list_delete(&miss->packets);
+ hmap_remove(&todo, &miss->hmap_node);
+ free(miss);
+ }
+ assert(n_ops <= ARRAY_SIZE(flow_miss_ops));
+ hmap_destroy(&todo);
+
+ /* Execute batch. */
+ for (i = 0; i < n_ops; i++) {
+ dpif_ops[i] = &flow_miss_ops[i].dpif_op;
+ }
+ dpif_operate(ofproto->dpif, dpif_ops, n_ops);
+
+ /* Free memory and update facets. */
+ for (i = 0; i < n_ops; i++) {
+ struct flow_miss_op *op = &flow_miss_ops[i];
+ struct dpif_execute *execute;
+ struct dpif_flow_put *put;
+
+ switch (op->dpif_op.type) {
+ case DPIF_OP_EXECUTE:
+ execute = &op->dpif_op.execute;
+ if (op->facet->actions != execute->actions) {
+ free((struct nlattr *) execute->actions);
+ }
+ ofpbuf_delete((struct ofpbuf *) execute->packet);
+ break;
+
+ case DPIF_OP_FLOW_PUT:
+ put = &op->dpif_op.flow_put;
+ if (!put->error) {
+ op->facet->installed = true;
+ }
+ break;
+ }
+ }
+}
+
+static void
+handle_userspace_upcall(struct ofproto_dpif *ofproto,
+ struct dpif_upcall *upcall)
{
struct flow flow;
+ struct user_action_cookie cookie;
- switch (upcall->type) {
- case DPIF_UC_ACTION:
- COVERAGE_INC(ofproto_dpif_ctlr_action);
- odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
- send_packet_in(ofproto, upcall, &flow, false);
- break;
+ memcpy(&cookie, &upcall->userdata, sizeof(cookie));
- case DPIF_UC_SAMPLE:
+ if (cookie.type == USER_ACTION_COOKIE_SFLOW) {
if (ofproto->sflow) {
odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
- dpif_sflow_received(ofproto->sflow, upcall, &flow);
+ dpif_sflow_received(ofproto->sflow, upcall->packet, &flow, &cookie);
}
ofpbuf_delete(upcall->packet);
- break;
- case DPIF_UC_MISS:
- handle_miss_upcall(ofproto, upcall);
- break;
+ } else if (cookie.type == USER_ACTION_COOKIE_CONTROLLER) {
+ COVERAGE_INC(ofproto_dpif_ctlr_action);
+ odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
+ send_packet_in_action(ofproto, upcall->packet, upcall->userdata,
+ &flow, false);
+ } else {
+ VLOG_WARN_RL(&rl, "invalid user cookie : 0x%"PRIx64, upcall->userdata);
+ }
+}
- case DPIF_N_UC_TYPES:
- default:
- VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type);
- break;
+static int
+handle_upcalls(struct ofproto_dpif *ofproto, unsigned int max_batch)
+{
+ struct dpif_upcall misses[FLOW_MISS_MAX_BATCH];
+ int n_misses;
+ int i;
+
+ assert(max_batch <= FLOW_MISS_MAX_BATCH);
+
+ n_misses = 0;
+ for (i = 0; i < max_batch; i++) {
+ struct dpif_upcall *upcall = &misses[n_misses];
+ int error;
+
+ error = dpif_recv(ofproto->dpif, upcall);
+ if (error) {
+ break;
+ }
+
+ switch (upcall->type) {
+ case DPIF_UC_ACTION:
+ handle_userspace_upcall(ofproto, upcall);
+ break;
+
+ case DPIF_UC_MISS:
+ /* Handle it later. */
+ n_misses++;
+ break;
+
+ case DPIF_N_UC_TYPES:
+ default:
+ VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32,
+ upcall->type);
+ break;
+ }
}
+
+ handle_miss_upcalls(ofproto, misses, n_misses);
+
+ return i;
}
\f
/* Flow expiration. */
\f
/* Facets. */
-/* Creates and returns a new facet owned by 'rule', given a 'flow' and an
- * example 'packet' within that flow.
+/* Creates and returns a new facet owned by 'rule', given a 'flow'.
*
* The caller must already have determined that no facet with an identical
* 'flow' exists in 'ofproto' and that 'flow' is the best match for 'rule' in
- * the ofproto's classifier table. */
+ * the ofproto's classifier table.
+ *
+ * The facet will initially have no ODP actions. The caller should fix that
+ * by calling facet_make_actions(). */
static struct facet *
-facet_create(struct rule_dpif *rule, const struct flow *flow,
- const struct ofpbuf *packet)
+facet_create(struct rule_dpif *rule, const struct flow *flow)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
struct facet *facet;
netflow_flow_init(&facet->nf_flow);
netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
- facet_make_actions(ofproto, facet, packet);
-
return facet;
}
free(facet);
}
+/* If the 'actions_len' bytes of actions in 'odp_actions' are just a single
+ * OVS_ACTION_ATTR_USERSPACE action, executes it internally and returns true.
+ * Otherwise, returns false without doing anything.
+ *
+ * If 'clone' is true, the caller always retains ownership of 'packet'.
+ * Otherwise, ownership is transferred to this function if it returns true. */
+static bool
+execute_controller_action(struct ofproto_dpif *ofproto,
+ const struct flow *flow,
+ const struct nlattr *odp_actions, size_t actions_len,
+ struct ofpbuf *packet, bool clone)
+{
+ if (actions_len
+ && odp_actions->nla_type == OVS_ACTION_ATTR_USERSPACE
+ && NLA_ALIGN(odp_actions->nla_len) == actions_len) {
+ /* As an optimization, avoid a round-trip from userspace to kernel to
+ * userspace. This also avoids possibly filling up kernel packet
+ * buffers along the way.
+ *
+ * This optimization will not accidentally catch sFlow
+ * OVS_ACTION_ATTR_USERSPACE actions, since those are encapsulated
+ * inside OVS_ACTION_ATTR_SAMPLE. */
+ const struct nlattr *nla;
+
+ nla = nl_attr_find_nested(odp_actions, OVS_USERSPACE_ATTR_USERDATA);
+ send_packet_in_action(ofproto, packet, nl_attr_get_u64(nla), flow,
+ clone);
+ return true;
+ } else {
+ return false;
+ }
+}
+
/* Executes, within 'ofproto', the 'n_actions' actions in 'actions' on
* 'packet', which arrived on 'in_port'.
*
const struct nlattr *odp_actions, size_t actions_len,
struct ofpbuf *packet)
{
- if (actions_len == NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t))
- && odp_actions->nla_type == OVS_ACTION_ATTR_USERSPACE) {
- /* As an optimization, avoid a round-trip from userspace to kernel to
- * userspace. This also avoids possibly filling up kernel packet
- * buffers along the way. */
- struct dpif_upcall upcall;
-
- upcall.type = DPIF_UC_ACTION;
- upcall.packet = packet;
- upcall.key = NULL;
- upcall.key_len = 0;
- upcall.userdata = nl_attr_get_u64(odp_actions);
- upcall.sample_pool = 0;
- upcall.actions = NULL;
- upcall.actions_len = 0;
-
- send_packet_in(ofproto, &upcall, flow, false);
+ struct odputil_keybuf keybuf;
+ struct ofpbuf key;
+ int error;
+ if (execute_controller_action(ofproto, flow, odp_actions, actions_len,
+ packet, false)) {
return true;
- } else {
- struct odputil_keybuf keybuf;
- struct ofpbuf key;
- int error;
+ }
- ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
- odp_flow_key_from_flow(&key, flow);
+ ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
+ odp_flow_key_from_flow(&key, flow);
- error = dpif_execute(ofproto->dpif, key.data, key.size,
- odp_actions, actions_len, packet);
+ error = dpif_execute(ofproto->dpif, key.data, key.size,
+ odp_actions, actions_len, packet);
- ofpbuf_delete(packet);
- return !error;
- }
+ ofpbuf_delete(packet);
+ return !error;
}
/* Executes the actions indicated by 'facet' on 'packet' and credits 'facet''s
assert(ofpbuf_headroom(packet) >= sizeof(struct ofp_packet_in));
- flow_extract_stats(&facet->flow, packet, &stats);
+ dpif_flow_stats_extract(&facet->flow, packet, &stats);
stats.used = time_msec();
if (execute_odp_actions(ofproto, &facet->flow,
facet->actions, facet->actions_len, packet)) {
}
}
-static int
-vlan_tci_to_openflow_vlan(ovs_be16 vlan_tci)
-{
- return vlan_tci != htons(0) ? vlan_tci_to_vid(vlan_tci) : OFP_VLAN_NONE;
-}
-
static void
facet_account(struct ofproto_dpif *ofproto, struct facet *facet)
{
struct ofport_dpif *port;
switch (nl_attr_type(a)) {
+ const struct nlattr *nested;
case OVS_ACTION_ATTR_OUTPUT:
port = get_odp_port(ofproto, nl_attr_get_u32(a));
if (port && port->bundle && port->bundle->bond) {
bond_account(port->bundle->bond, &facet->flow,
- vlan_tci_to_openflow_vlan(vlan_tci), n_bytes);
+ vlan_tci_to_vid(vlan_tci), n_bytes);
}
break;
- case OVS_ACTION_ATTR_POP_VLAN:
- vlan_tci = htons(0);
+ case OVS_ACTION_ATTR_POP:
+ if (nl_attr_get_u16(a) == OVS_KEY_ATTR_8021Q) {
+ vlan_tci = htons(0);
+ }
break;
- case OVS_ACTION_ATTR_PUSH_VLAN:
- vlan_tci = nl_attr_get_be16(a);
+ case OVS_ACTION_ATTR_PUSH:
+ nested = nl_attr_get(a);
+ if (nl_attr_type(nested) == OVS_KEY_ATTR_8021Q) {
+ const struct ovs_key_8021q *q_key;
+
+ q_key = nl_attr_get_unspec(nested, sizeof(*q_key));
+ vlan_tci = q_key->q_tci;
+ }
break;
}
}
/* The facet we found might not be valid, since we could be in need of
* revalidation. If it is not valid, don't return it. */
if (facet
- && ofproto->need_revalidate
+ && (ofproto->need_revalidate
+ || tag_set_intersects(&ofproto->revalidate_set, facet->tags))
&& !facet_revalidate(ofproto, facet)) {
COVERAGE_INC(facet_invalidated);
return NULL;
rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow,
uint8_t table_id)
{
+ struct cls_rule *cls_rule;
+ struct classifier *cls;
+
if (table_id >= N_TABLES) {
return NULL;
}
- return rule_dpif_cast(rule_from_cls_rule(
- classifier_lookup(&ofproto->up.tables[table_id],
- flow)));
+ cls = &ofproto->up.tables[table_id];
+ if (flow->tos_frag & FLOW_FRAG_ANY
+ && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
+ /* For OFPC_FRAG_NORMAL frag_handling, we must pretend that transport ports
+ * are unavailable. */
+ struct flow ofpc_normal_flow = *flow;
+ ofpc_normal_flow.tp_src = htons(0);
+ ofpc_normal_flow.tp_dst = htons(0);
+ cls_rule = classifier_lookup(cls, &ofpc_normal_flow);
+ } else {
+ cls_rule = classifier_lookup(cls, flow);
+ }
+ return rule_dpif_cast(rule_from_cls_rule(cls_rule));
}
static void
/* First look for a related facet. If we find one, account it to that. */
facet = facet_lookup_valid(ofproto, flow);
if (facet && facet->rule == rule) {
+ if (!facet->may_install) {
+ facet_make_actions(ofproto, facet, packet);
+ }
facet_execute(ofproto, facet, packet);
return 0;
}
/* Otherwise, if 'rule' is in fact the correct rule for 'packet', then
* create a new facet for it and use that. */
if (rule_dpif_lookup(ofproto, flow, 0) == rule) {
- facet = facet_create(rule, flow, packet);
+ facet = facet_create(rule, flow);
+ facet_make_actions(ofproto, facet, packet);
facet_execute(ofproto, facet, packet);
facet_install(ofproto, facet, true);
return 0;
complete_operation(rule);
}
\f
-/* Sends 'packet' out of port 'odp_port' within 'p'.
+/* Sends 'packet' out of port 'odp_port' within 'ofproto'.
* Returns 0 if successful, otherwise a positive errno value. */
static int
send_packet(struct ofproto_dpif *ofproto, uint32_t odp_port,
odp_flow_key_from_flow(&key, &flow);
ofpbuf_init(&odp_actions, 32);
+ compose_sflow_action(ofproto, &odp_actions, &flow, odp_port);
+
nl_msg_put_u32(&odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
error = dpif_execute(ofproto->dpif,
key.data, key.size,
struct action_xlate_ctx *ctx);
static void xlate_normal(struct action_xlate_ctx *);
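+/* Composes an OVS_ACTION_ATTR_USERSPACE action into 'odp_actions' that
+ * directs packets to the pid of 'ofproto''s dpif port for 'flow''s input
+ * port, carrying 'cookie' as userdata. Returns the offset of the stored
+ * cookie within 'odp_actions', so that fix_sflow_action() can rewrite the
+ * cookie later. */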
+static size_t
+put_userspace_action(const struct ofproto_dpif *ofproto,
+ struct ofpbuf *odp_actions,
+ const struct flow *flow,
+ const struct user_action_cookie *cookie)
+{
+ size_t offset;
+ uint32_t pid;
+
+ pid = dpif_port_get_pid(ofproto->dpif,
+ ofp_port_to_odp_port(flow->in_port));
+
+ offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_USERSPACE);
+ nl_msg_put_u32(odp_actions, OVS_USERSPACE_ATTR_PID, pid);
+ nl_msg_put_unspec(odp_actions, OVS_USERSPACE_ATTR_USERDATA,
+ cookie, sizeof *cookie);
+ nl_msg_end_nested(odp_actions, offset);
+
+ return odp_actions->size - NLA_ALIGN(sizeof *cookie);
+}
+
+/* Compose SAMPLE action for sFlow. */
+static size_t
+compose_sflow_action(const struct ofproto_dpif *ofproto,
+ struct ofpbuf *odp_actions,
+ const struct flow *flow,
+ uint32_t odp_port)
+{
+ uint32_t port_ifindex;
+ uint32_t probability;
+ struct user_action_cookie cookie;
+ size_t sample_offset, actions_offset;
+ int cookie_offset, n_output;
+
+ if (!ofproto->sflow || flow->in_port == OFPP_NONE) {
+ return 0;
+ }
+
+ if (odp_port == OVSP_NONE) {
+ port_ifindex = 0;
+ n_output = 0;
+ } else {
+ port_ifindex = dpif_sflow_odp_port_to_ifindex(ofproto->sflow, odp_port);
+ n_output = 1;
+ }
+
+ sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);
+
+ /* Number of packets out of UINT_MAX to sample. */
+ probability = dpif_sflow_get_probability(ofproto->sflow);
+ nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);
+
+ actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
+
+ cookie.type = USER_ACTION_COOKIE_SFLOW;
+ cookie.data = port_ifindex;
+ cookie.n_output = n_output;
+ cookie.vlan_tci = 0;
+ cookie_offset = put_userspace_action(ofproto, odp_actions, flow, &cookie);
+
+ nl_msg_end_nested(odp_actions, actions_offset);
+ nl_msg_end_nested(odp_actions, sample_offset);
+ return cookie_offset;
+}
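+
+/* Schematically, the action composed above is
+ *
+ *   SAMPLE(PROBABILITY, ACTIONS(USERSPACE(PID, USERDATA=<cookie>)))
+ *
+ * with the returned offset pointing at the cookie, so that fix_sflow_action()
+ * can patch in the output port, output count, and VLAN once they are known. */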
+
+/* A SAMPLE action must be the first action in any given list of actions.
+ * At this point we do not yet have all of the information required to build
+ * it, so we build it as completely as possible here and let
+ * fix_sflow_action() fill in the rest afterward. */
static void
-commit_odp_actions(struct action_xlate_ctx *ctx)
+add_sflow_action(struct action_xlate_ctx *ctx)
+{
+ ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto,
+ ctx->odp_actions,
+ &ctx->flow, OVSP_NONE);
+ ctx->sflow_odp_port = 0;
+ ctx->sflow_n_outputs = 0;
+}
+
+/* Fixes up the SAMPLE action according to data collected while composing ODP
+ * actions: it rewrites the user cookie in the nested USERSPACE action inside
+ * the SAMPLE action's OVS_SAMPLE_ATTR_ACTIONS attribute, which sFlow
+ * requires. */
+static void
+fix_sflow_action(struct action_xlate_ctx *ctx)
+{
+ const struct flow *base = &ctx->base_flow;
+ struct user_action_cookie *cookie;
+
+ if (!ctx->user_cookie_offset) {
+ return;
+ }
+
+ cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
+ sizeof(*cookie));
+ assert(cookie != NULL);
+ assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
+
+ if (ctx->sflow_n_outputs) {
+ cookie->data = dpif_sflow_odp_port_to_ifindex(ctx->ofproto->sflow,
+ ctx->sflow_odp_port);
+ }
+ if (ctx->sflow_n_outputs >= 255) {
+ cookie->n_output = 255;
+ } else {
+ cookie->n_output = ctx->sflow_n_outputs;
+ }
+ cookie->vlan_tci = base->vlan_tci;
+}
+
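+/* Appends to 'odp_actions' a nested action of type 'act_type' whose body is a
+ * single key attribute of type 'key_type', with the 'key_size' bytes at 'key'
+ * as its value. Helper for the commit functions below. */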
+static void
+commit_action__(struct ofpbuf *odp_actions,
+ enum ovs_action_attr act_type,
+ enum ovs_key_attr key_type,
+ const void *key, size_t key_size)
+{
+ size_t offset = nl_msg_start_nested(odp_actions, act_type);
+
+ nl_msg_put_unspec(odp_actions, key_type, key, key_size);
+ nl_msg_end_nested(odp_actions, offset);
+}
+
+static void
+commit_set_tun_id_action(const struct flow *flow, struct flow *base,
+ struct ofpbuf *odp_actions)
+{
+ if (base->tun_id == flow->tun_id) {
+ return;
+ }
+ base->tun_id = flow->tun_id;
+
+ commit_action__(odp_actions, OVS_ACTION_ATTR_SET,
+ OVS_KEY_ATTR_TUN_ID, &base->tun_id, sizeof(base->tun_id));
+}
+
+static void
+commit_set_ether_addr_action(const struct flow *flow, struct flow *base,
+ struct ofpbuf *odp_actions)
+{
+ struct ovs_key_ethernet eth_key;
+
+ if (eth_addr_equals(base->dl_src, flow->dl_src) &&
+ eth_addr_equals(base->dl_dst, flow->dl_dst)) {
+ return;
+ }
+
+ memcpy(base->dl_src, flow->dl_src, ETH_ADDR_LEN);
+ memcpy(base->dl_dst, flow->dl_dst, ETH_ADDR_LEN);
+
+ memcpy(eth_key.eth_src, base->dl_src, ETH_ADDR_LEN);
+ memcpy(eth_key.eth_dst, base->dl_dst, ETH_ADDR_LEN);
+
+ commit_action__(odp_actions, OVS_ACTION_ATTR_SET,
+ OVS_KEY_ATTR_ETHERNET, &eth_key, sizeof(eth_key));
+}
+
+static void
+commit_vlan_action(struct action_xlate_ctx *ctx, ovs_be16 new_tci)
{
- const struct flow *flow = &ctx->flow;
struct flow *base = &ctx->base_flow;
- struct ofpbuf *odp_actions = ctx->odp_actions;
- if (base->tun_id != flow->tun_id) {
- nl_msg_put_be64(odp_actions, OVS_ACTION_ATTR_SET_TUNNEL, flow->tun_id);
- base->tun_id = flow->tun_id;
+ if (base->vlan_tci == new_tci) {
+ return;
}
- if (base->nw_src != flow->nw_src) {
- nl_msg_put_be32(odp_actions, OVS_ACTION_ATTR_SET_NW_SRC, flow->nw_src);
- base->nw_src = flow->nw_src;
+ if (base->vlan_tci & htons(VLAN_CFI)) {
+ nl_msg_put_u16(ctx->odp_actions, OVS_ACTION_ATTR_POP,
+ OVS_KEY_ATTR_8021Q);
}
- if (base->nw_dst != flow->nw_dst) {
- nl_msg_put_be32(odp_actions, OVS_ACTION_ATTR_SET_NW_DST, flow->nw_dst);
- base->nw_dst = flow->nw_dst;
+ if (new_tci & htons(VLAN_CFI)) {
+ struct ovs_key_8021q q_key;
+
+ q_key.q_tpid = htons(ETH_TYPE_VLAN);
+ q_key.q_tci = new_tci & ~htons(VLAN_CFI);
+
+ commit_action__(ctx->odp_actions, OVS_ACTION_ATTR_PUSH,
+ OVS_KEY_ATTR_8021Q, &q_key, sizeof(q_key));
}
+ base->vlan_tci = new_tci;
+}
+
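+/* (Illustrative: retagging a packet from VLAN 1 to VLAN 2 therefore emits an
+ * OVS_ACTION_ATTR_POP of OVS_KEY_ATTR_8021Q followed by an
+ * OVS_ACTION_ATTR_PUSH of OVS_KEY_ATTR_8021Q carrying the new TCI.) */
+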
+static void
+commit_set_nw_action(const struct flow *flow, struct flow *base,
+ struct ofpbuf *odp_actions)
+{
+ int frag = base->tos_frag & FLOW_FRAG_MASK;
+ struct ovs_key_ipv4 ipv4_key;
- if (base->nw_tos != flow->nw_tos) {
- nl_msg_put_u8(odp_actions, OVS_ACTION_ATTR_SET_NW_TOS, flow->nw_tos);
- base->nw_tos = flow->nw_tos;
+ if (base->dl_type != htons(ETH_TYPE_IP) ||
+ !base->nw_src || !base->nw_dst) {
+ return;
}
- if (base->vlan_tci != flow->vlan_tci) {
- if (!(flow->vlan_tci & htons(VLAN_CFI))) {
- nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
- } else {
- if (base->vlan_tci != htons(0)) {
- nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
- }
- nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_PUSH_VLAN,
- flow->vlan_tci & ~htons(VLAN_CFI));
- }
- base->vlan_tci = flow->vlan_tci;
+ if (base->nw_src == flow->nw_src &&
+ base->nw_dst == flow->nw_dst &&
+ base->tos_frag == flow->tos_frag) {
+ return;
}
- if (base->tp_src != flow->tp_src) {
- nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_SET_TP_SRC, flow->tp_src);
- base->tp_src = flow->tp_src;
+
+ memset(&ipv4_key, 0, sizeof(ipv4_key));
+ ipv4_key.ipv4_src = base->nw_src = flow->nw_src;
+ ipv4_key.ipv4_dst = base->nw_dst = flow->nw_dst;
+ ipv4_key.ipv4_proto = base->nw_proto;
+ ipv4_key.ipv4_tos = flow->tos_frag & IP_DSCP_MASK;
+ ipv4_key.ipv4_frag = (frag == 0 ? OVS_FRAG_TYPE_NONE
+ : frag == FLOW_FRAG_ANY ? OVS_FRAG_TYPE_FIRST
+ : OVS_FRAG_TYPE_LATER);
+
+ commit_action__(odp_actions, OVS_ACTION_ATTR_SET,
+ OVS_KEY_ATTR_IPV4, &ipv4_key, sizeof(ipv4_key));
+}
+
+static void
+commit_set_port_action(const struct flow *flow, struct flow *base,
+ struct ofpbuf *odp_actions)
+{
+ if (!base->tp_src || !base->tp_dst) {
+ return;
}
- if (base->tp_dst != flow->tp_dst) {
- nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_SET_TP_DST, flow->tp_dst);
- base->tp_dst = flow->tp_dst;
+ if (base->tp_src == flow->tp_src &&
+ base->tp_dst == flow->tp_dst) {
+ return;
}
- if (!eth_addr_equals(base->dl_src, flow->dl_src)) {
- nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_SET_DL_SRC,
- flow->dl_src, ETH_ADDR_LEN);
- memcpy(base->dl_src, flow->dl_src, ETH_ADDR_LEN);
+ if (flow->nw_proto == IPPROTO_TCP) {
+ struct ovs_key_tcp port_key;
+
+ port_key.tcp_src = base->tp_src = flow->tp_src;
+ port_key.tcp_dst = base->tp_dst = flow->tp_dst;
+
+ commit_action__(odp_actions, OVS_ACTION_ATTR_SET,
+ OVS_KEY_ATTR_TCP, &port_key, sizeof(port_key));
+
+ } else if (flow->nw_proto == IPPROTO_UDP) {
+ struct ovs_key_udp port_key;
+
+ port_key.udp_src = base->tp_src = flow->tp_src;
+ port_key.udp_dst = base->tp_dst = flow->tp_dst;
+
+ commit_action__(odp_actions, OVS_ACTION_ATTR_SET,
+ OVS_KEY_ATTR_UDP, &port_key, sizeof(port_key));
}
+}
- if (!eth_addr_equals(base->dl_dst, flow->dl_dst)) {
- nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_SET_DL_DST,
- flow->dl_dst, ETH_ADDR_LEN);
- memcpy(base->dl_dst, flow->dl_dst, ETH_ADDR_LEN);
+static void
+commit_priority_action(struct action_xlate_ctx *ctx)
+{
+ if (ctx->base_priority == ctx->priority) {
+ return;
}
- if (ctx->base_priority != ctx->priority) {
- if (ctx->priority) {
- nl_msg_put_u32(odp_actions, OVS_ACTION_ATTR_SET_PRIORITY,
- ctx->priority);
- } else {
- nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_PRIORITY);
- }
- ctx->base_priority = ctx->priority;
+ if (ctx->priority) {
+ nl_msg_put_u32(ctx->odp_actions,
+ OVS_ACTION_ATTR_SET_PRIORITY, ctx->priority);
+ } else {
+ nl_msg_put_flag(ctx->odp_actions, OVS_ACTION_ATTR_POP_PRIORITY);
}
+ ctx->base_priority = ctx->priority;
+}
+
+static void
+commit_odp_actions(struct action_xlate_ctx *ctx)
+{
+ const struct flow *flow = &ctx->flow;
+ struct flow *base = &ctx->base_flow;
+ struct ofpbuf *odp_actions = ctx->odp_actions;
+
+ commit_set_tun_id_action(flow, base, odp_actions);
+ commit_set_ether_addr_action(flow, base, odp_actions);
+ commit_vlan_action(ctx, flow->vlan_tci);
+ commit_set_nw_action(flow, base, odp_actions);
+ commit_set_port_action(flow, base, odp_actions);
+ commit_priority_action(ctx);
+}
+
+static void
+compose_output_action(struct action_xlate_ctx *ctx, uint16_t odp_port)
+{
+ nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
+ ctx->sflow_odp_port = odp_port;
+ ctx->sflow_n_outputs++;
}
static void
uint16_t odp_port = ofp_port_to_odp_port(ofp_port);
if (ofport) {
- if (ofport->up.opp.config & htonl(OFPPC_NO_FWD)) {
+ if (ofport->up.opp.config & htonl(OFPPC_NO_FWD)
+ || !stp_forward_in_state(ofport->stp_state)) {
/* Forwarding disabled on port. */
return;
}
}
commit_odp_actions(ctx);
- nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
+ compose_output_action(ctx, odp_port);
ctx->nf_output_iface = ofp_port;
}
commit_odp_actions(ctx);
HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
uint16_t ofp_port = ofport->up.ofp_port;
- if (ofp_port != ctx->flow.in_port && !(ofport->up.opp.config & mask)) {
- nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT,
- ofport->odp_port);
+ if (ofp_port != ctx->flow.in_port
+ && !(ofport->up.opp.config & mask)
+ && stp_forward_in_state(ofport->stp_state)) {
+ compose_output_action(ctx, ofport->odp_port);
}
}
ctx->nf_output_iface = NF_OUT_FLOOD;
}
+static void
+compose_controller_action(struct action_xlate_ctx *ctx, int len)
+{
+ struct user_action_cookie cookie;
+
+ cookie.type = USER_ACTION_COOKIE_CONTROLLER;
+ cookie.data = len;
+ cookie.n_output = 0;
+ cookie.vlan_tci = 0;
+ put_userspace_action(ctx->ofproto, ctx->odp_actions, &ctx->flow, &cookie);
+}
+
static void
xlate_output_action__(struct action_xlate_ctx *ctx,
uint16_t port, uint16_t max_len)
break;
case OFPP_CONTROLLER:
commit_odp_actions(ctx);
- nl_msg_put_u64(ctx->odp_actions, OVS_ACTION_ATTR_USERSPACE, max_len);
- break;
- case OFPP_LOCAL:
- add_output_action(ctx, OFPP_LOCAL);
+ compose_controller_action(ctx, max_len);
break;
case OFPP_NONE:
break;
+ case OFPP_LOCAL:
default:
if (port != ctx->flow.in_port) {
add_output_action(ctx, port);
ofp_port = ntohs(oae->port);
if (ofp_port == OFPP_IN_PORT) {
ofp_port = ctx->flow.in_port;
+ } else if (ofp_port == ctx->flow.in_port) {
+ return;
}
odp_port = ofp_port_to_odp_port(ofp_port);
} else if (port->bundle->bond) {
/* Autopath does not support VLAN hashing. */
struct ofport_dpif *slave = bond_choose_output_slave(
- port->bundle->bond, &ctx->flow, OFP_VLAN_NONE, &ctx->tags);
+ port->bundle->bond, &ctx->flow, 0, &ctx->tags);
if (slave) {
ofp_port = slave->up.ofp_port;
}
free(fm.actions);
}
+static bool
+may_receive(const struct ofport_dpif *port, struct action_xlate_ctx *ctx)
+{
+ if (port->up.opp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
+ ? htonl(OFPPC_NO_RECV_STP)
+ : htonl(OFPPC_NO_RECV))) {
+ return false;
+ }
+
+ /* Only drop packets here if both forwarding and learning are
+ * disabled. If just learning is enabled, we need to let
+ * OFPP_NORMAL and the learning action have a look at the packet
+ * before we can drop it. */
+ if (!stp_forward_in_state(port->stp_state)
+ && !stp_learn_in_state(port->stp_state)) {
+ return false;
+ }
+
+ return true;
+}
+
static void
do_xlate_actions(const union ofp_action *in, size_t n_in,
struct action_xlate_ctx *ctx)
size_t left;
port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
- if (port
- && port->up.opp.config & htonl(OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
- port->up.opp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
- ? htonl(OFPPC_NO_RECV_STP)
- : htonl(OFPPC_NO_RECV))) {
+ if (port && !may_receive(port, ctx)) {
/* Drop this flow. */
return;
}
break;
case OFPUTIL_OFPAT_SET_NW_TOS:
- ctx->flow.nw_tos = ia->nw_tos.nw_tos & IP_DSCP_MASK;
+ ctx->flow.tos_frag &= ~IP_DSCP_MASK;
+ ctx->flow.tos_frag |= ia->nw_tos.nw_tos & IP_DSCP_MASK;
break;
case OFPUTIL_OFPAT_SET_TP_SRC:
break;
}
}
+
+ /* We've let OFPP_NORMAL and the learning action look at the packet,
+ * so drop it now if forwarding is disabled. */
+ if (port && !stp_forward_in_state(port->stp_state)) {
+ ofpbuf_clear(ctx->odp_actions);
+ add_sflow_action(ctx);
+ }
}
static void
COVERAGE_INC(ofproto_dpif_xlate);
ctx->odp_actions = ofpbuf_new(512);
+ ofpbuf_reserve(ctx->odp_actions, NL_A_U32_SIZE);
ctx->tags = 0;
ctx->may_set_up_flow = true;
ctx->has_learn = false;
ctx->base_flow.tun_id = 0;
ctx->table_id = 0;
+ if (ctx->flow.tos_frag & FLOW_FRAG_ANY) {
+ switch (ctx->ofproto->up.frag_handling) {
+ case OFPC_FRAG_NORMAL:
+ /* We must pretend that transport ports are unavailable. */
+ ctx->flow.tp_src = ctx->base_flow.tp_src = htons(0);
+ ctx->flow.tp_dst = ctx->base_flow.tp_dst = htons(0);
+ break;
+
+ case OFPC_FRAG_DROP:
+ return ctx->odp_actions;
+
+ case OFPC_FRAG_REASM:
+ NOT_REACHED();
+
+ case OFPC_FRAG_NX_MATCH:
+ /* Nothing to do. */
+ break;
+ }
+ }
+
if (process_special(ctx->ofproto, &ctx->flow, ctx->packet)) {
ctx->may_set_up_flow = false;
+ return ctx->odp_actions;
} else {
+ add_sflow_action(ctx);
do_xlate_actions(in, n_in, ctx);
- }
- /* Check with in-band control to see if we're allowed to set up this
- * flow. */
- if (!connmgr_may_set_up_flow(ctx->ofproto->up.connmgr, &ctx->flow,
- ctx->odp_actions->data,
- ctx->odp_actions->size)) {
- ctx->may_set_up_flow = false;
+ if (!connmgr_may_set_up_flow(ctx->ofproto->up.connmgr, &ctx->flow,
+ ctx->odp_actions->data,
+ ctx->odp_actions->size)) {
+ ctx->may_set_up_flow = false;
+ if (ctx->packet
+ && connmgr_msg_in_hook(ctx->ofproto->up.connmgr, &ctx->flow,
+ ctx->packet)) {
+ compose_output_action(ctx, OVSP_LOCAL);
+ }
+ }
+ fix_sflow_action(ctx);
}
return ctx->odp_actions;
struct dst {
struct ofport_dpif *port;
- uint16_t vlan;
+ uint16_t vid;
};
struct dst_set {
static struct ofport_dpif *ofbundle_get_a_port(const struct ofbundle *);
+/* Given 'vid', the VID obtained from the 802.1Q header that was received as
+ * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle',
+ * the bundle on which the packet was received, returns the VLAN to which the
+ * packet belongs.
+ *
+ * Both 'vid' and the return value are in the range 0...4095. */
+static uint16_t
+input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
+{
+ switch (in_bundle->vlan_mode) {
+ case PORT_VLAN_ACCESS:
+ return in_bundle->vlan;
+
+ case PORT_VLAN_TRUNK:
+ return vid;
+
+ case PORT_VLAN_NATIVE_UNTAGGED:
+ case PORT_VLAN_NATIVE_TAGGED:
+ return vid ? vid : in_bundle->vlan;
+
+ default:
+ NOT_REACHED();
+ }
+}
+
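+/* For example: on a PORT_VLAN_NATIVE_UNTAGGED bundle whose native VLAN is 10,
+ * an untagged packet (vid 0) belongs to VLAN 10, while a packet tagged with
+ * vid 7 belongs to VLAN 7. */
+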
+/* Given 'vlan', the VLAN that a packet belongs to, and
+ * 'out_bundle', a bundle on which the packet is to be output, returns the VID
+ * that should be included in the 802.1Q header. (If the return value is 0,
+ * then the 802.1Q header should only be included in the packet if there is a
+ * nonzero PCP.)
+ *
+ * Both 'vlan' and the return value are in the range 0...4095. */
+static uint16_t
+output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
+{
+ switch (out_bundle->vlan_mode) {
+ case PORT_VLAN_ACCESS:
+ return 0;
+
+ case PORT_VLAN_TRUNK:
+ case PORT_VLAN_NATIVE_TAGGED:
+ return vlan;
+
+ case PORT_VLAN_NATIVE_UNTAGGED:
+ return vlan == out_bundle->vlan ? 0 : vlan;
+
+ default:
+ NOT_REACHED();
+ }
+}
+
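+/* Continuing the example above: on output through that same native-VLAN
+ * bundle, a packet on VLAN 10 leaves untagged (vid 0), while packets on other
+ * VLANs keep their own vid. */
+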
static bool
set_dst(struct action_xlate_ctx *ctx, struct dst *dst,
const struct ofbundle *in_bundle, const struct ofbundle *out_bundle)
{
- dst->vlan = (out_bundle->vlan >= 0 ? OFP_VLAN_NONE
- : in_bundle->vlan >= 0 ? in_bundle->vlan
- : ctx->flow.vlan_tci == 0 ? OFP_VLAN_NONE
- : vlan_tci_to_vid(ctx->flow.vlan_tci));
+ uint16_t vlan;
+
+ vlan = input_vid_to_vlan(in_bundle, vlan_tci_to_vid(ctx->flow.vlan_tci));
+ dst->vid = output_vlan_to_vid(out_bundle, vlan);
dst->port = (!out_bundle->bond
? ofbundle_get_a_port(out_bundle)
: bond_choose_output_slave(out_bundle->bond, &ctx->flow,
- dst->vlan, &ctx->tags));
-
+ dst->vid, &ctx->tags));
return dst->port != NULL;
}
{
size_t i;
for (i = 0; i < set->n; i++) {
- if (set->dsts[i].vlan == test->vlan
+ if (set->dsts[i].vid == test->vid
&& set->dsts[i].port == test->port) {
return true;
}
static bool
ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
{
- return (bundle->vlan < 0
+ return (bundle->vlan_mode != PORT_VLAN_ACCESS
&& (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
}
{
struct ofproto_dpif *ofproto = ctx->ofproto;
mirror_mask_t mirrors;
- int flow_vlan;
+ uint16_t flow_vid;
size_t i;
mirrors = in_bundle->src_mirrors;
return;
}
- flow_vlan = vlan_tci_to_vid(ctx->flow.vlan_tci);
- if (flow_vlan == 0) {
- flow_vlan = OFP_VLAN_NONE;
- }
-
+ flow_vid = vlan_tci_to_vid(ctx->flow.vlan_tci);
while (mirrors) {
struct ofmirror *m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
if (vlan_is_mirrored(m, vlan)) {
if (ofbundle_includes_vlan(bundle, m->out_vlan)
&& set_dst(ctx, &dst, in_bundle, bundle))
{
- if (bundle->vlan < 0) {
- dst.vlan = m->out_vlan;
- }
+ /* set_dst() got dst->vid from the input packet's VLAN,
+ * not from m->out_vlan, so recompute it. */
+ dst.vid = output_vlan_to_vid(bundle, m->out_vlan);
+
if (dst_is_duplicate(set, &dst)) {
continue;
}
- /* Use the vlan tag on the original flow instead of
- * the one passed in the vlan parameter. This ensures
- * that we compare the vlan from before any implicit
- * tagging tags place. This is necessary because
- * dst->vlan is the final vlan, after removing implicit
- * tags. */
- if (bundle == in_bundle && dst.vlan == flow_vlan) {
+ if (bundle == in_bundle && dst.vid == flow_vid) {
/* Don't send out input port on same VLAN. */
continue;
}
}
}
+static void
+compose_dst_output_action(struct action_xlate_ctx *ctx, const struct dst *dst)
+{
+ ovs_be16 tci;
+
+ tci = htons(dst->vid);
+ if (tci) {
+ tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK);
+ tci |= htons(VLAN_CFI);
+ }
+ commit_vlan_action(ctx, tci);
+
+ compose_output_action(ctx, dst->port->odp_port);
+}
+
static void
compose_actions(struct action_xlate_ctx *ctx, uint16_t vlan,
const struct ofbundle *in_bundle,
const struct ofbundle *out_bundle)
{
- uint16_t initial_vlan, cur_vlan;
+ uint16_t initial_vid;
const struct dst *dst;
struct dst_set set;
dst_set_init(&set);
compose_dsts(ctx, vlan, in_bundle, out_bundle, &set);
compose_mirror_dsts(ctx, vlan, in_bundle, &set);
+ if (!set.n) {
+ dst_set_free(&set);
+ return;
+ }
/* Output all the packets we can without having to change the VLAN. */
- initial_vlan = vlan_tci_to_vid(ctx->flow.vlan_tci);
- if (initial_vlan == 0) {
- initial_vlan = OFP_VLAN_NONE;
- }
+ commit_odp_actions(ctx);
+ initial_vid = vlan_tci_to_vid(ctx->flow.vlan_tci);
for (dst = set.dsts; dst < &set.dsts[set.n]; dst++) {
- if (dst->vlan != initial_vlan) {
- continue;
+ if (dst->vid == initial_vid) {
+ compose_dst_output_action(ctx, dst);
}
- nl_msg_put_u32(ctx->odp_actions,
- OVS_ACTION_ATTR_OUTPUT, dst->port->odp_port);
}
/* Then output the rest. */
- cur_vlan = initial_vlan;
for (dst = set.dsts; dst < &set.dsts[set.n]; dst++) {
- if (dst->vlan == initial_vlan) {
- continue;
- }
- if (dst->vlan != cur_vlan) {
- if (dst->vlan == OFP_VLAN_NONE) {
- nl_msg_put_flag(ctx->odp_actions, OVS_ACTION_ATTR_POP_VLAN);
- } else {
- ovs_be16 tci;
-
- if (cur_vlan != OFP_VLAN_NONE) {
- nl_msg_put_flag(ctx->odp_actions, OVS_ACTION_ATTR_POP_VLAN);
- }
- tci = htons(dst->vlan & VLAN_VID_MASK);
- tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK);
- nl_msg_put_be16(ctx->odp_actions,
- OVS_ACTION_ATTR_PUSH_VLAN, tci);
- }
- cur_vlan = dst->vlan;
+ if (dst->vid != initial_vid) {
+ compose_dst_output_action(ctx, dst);
}
- nl_msg_put_u32(ctx->odp_actions,
- OVS_ACTION_ATTR_OUTPUT, dst->port->odp_port);
}
dst_set_free(&set);
struct ofbundle *in_bundle, bool have_packet)
{
int vlan = vlan_tci_to_vid(flow->vlan_tci);
- if (in_bundle->vlan >= 0) {
- if (vlan) {
+ if (vlan) {
+ if (in_bundle->vlan_mode == PORT_VLAN_ACCESS) {
+ /* Drop tagged packets on access ports. */
if (have_packet) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %d tagged "
in_bundle->name, in_bundle->vlan);
}
return -1;
- }
- vlan = in_bundle->vlan;
- } else {
- if (!ofbundle_includes_vlan(in_bundle, vlan)) {
+ } else if (ofbundle_includes_vlan(in_bundle, vlan)) {
+ return vlan;
+ } else {
+ /* Drop packets from a VLAN that is not a member of the trunk. */
if (have_packet) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %d tagged "
}
return -1;
}
+ } else {
+ if (in_bundle->vlan_mode != PORT_VLAN_TRUNK) {
+ return in_bundle->vlan;
+ } else {
+ return ofbundle_includes_vlan(in_bundle, 0) ? 0 : -1;
+ }
}
-
- return vlan;
}
/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
return false;
}
- /* Drop frames for reserved multicast addresses
- * only if forward_bpdu option is absent. */
- if (eth_addr_is_reserved(flow->dl_dst) &&
- !ofproto->up.forward_bpdu) {
+ /* Drop frames for reserved multicast addresses only if forward_bpdu
+ * option is absent. */
+ if (eth_addr_is_reserved(flow->dl_dst) && !ofproto->up.forward_bpdu) {
return false;
}
}
\f
static bool
-get_drop_frags(struct ofproto *ofproto_)
-{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- bool drop_frags;
-
- dpif_get_drop_frags(ofproto->dpif, &drop_frags);
- return drop_frags;
-}
-
-static void
-set_drop_frags(struct ofproto *ofproto_, bool drop_frags)
+set_frag_handling(struct ofproto *ofproto_,
+ enum ofp_config_flags frag_handling)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- dpif_set_drop_frags(ofproto->dpif, drop_frags);
+ if (frag_handling != OFPC_FRAG_REASM) {
+ ofproto->need_revalidate = true;
+ return true;
+ } else {
+ return false;
+ }
}
static int
}
registered = true;
- unixctl_command_register("ofproto/trace", ofproto_unixctl_trace, NULL);
- unixctl_command_register("fdb/show", ofproto_unixctl_fdb_show, NULL);
-
- unixctl_command_register("ofproto/clog", ofproto_dpif_clog, NULL);
- unixctl_command_register("ofproto/unclog", ofproto_dpif_unclog, NULL);
+ unixctl_command_register("ofproto/trace",
+ "bridge {tun_id in_port packet | odp_flow [-generate]}",
+ ofproto_unixctl_trace, NULL);
+ unixctl_command_register("fdb/show", "bridge", ofproto_unixctl_fdb_show,
+ NULL);
+ unixctl_command_register("ofproto/clog", "", ofproto_dpif_clog, NULL);
+ unixctl_command_register("ofproto/unclog", "", ofproto_dpif_unclog, NULL);
}
\f
const struct ofproto_class ofproto_dpif_class = {
destruct,
dealloc,
run,
+ run_fast,
wait,
flush,
get_features,
rule_get_stats,
rule_execute,
rule_modify_actions,
- get_drop_frags,
- set_drop_frags,
+ set_frag_handling,
packet_out,
set_netflow,
get_netflow_ids,
set_cfm,
get_cfm_fault,
get_cfm_remote_mpids,
+ set_stp,
+ get_stp_status,
+ set_stp_port,
+ get_stp_port_status,
bundle_set,
bundle_remove,
mirror_set,