#include <stdlib.h>
#include "coverage.h"
-#include "dpif.h"
#include "fail-open.h"
#include "in-band.h"
#include "odp-util.h"
#include "pinsched.h"
#include "poll-loop.h"
#include "pktbuf.h"
+#include "private.h"
#include "rconn.h"
#include "shash.h"
#include "timeval.h"
struct rconn *rconn; /* OpenFlow connection. */
enum ofconn_type type; /* Type. */
enum nx_flow_format flow_format; /* Currently selected flow format. */
+ bool flow_mod_table_id; /* NXT_FLOW_MOD_TABLE_ID enabled? */
/* OFPT_PACKET_IN related data. */
struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */
const char *target);
static void update_fail_open(struct connmgr *);
static int set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
- const struct svec *);
+ const struct sset *);
/* Returns true if 'mgr' has any configured primary controllers.
*
const struct ofproto_controller *controllers,
size_t n_controllers)
{
+ bool had_controllers = connmgr_has_controllers(mgr);
struct shash new_controllers;
struct ofconn *ofconn, *next_ofconn;
struct ofservice *ofservice, *next_ofservice;
- bool ss_exists;
size_t i;
/* Create newly configured controllers and services.
/* Delete controllers that are no longer configured.
* Update configuration of all now-existing controllers. */
- ss_exists = false;
HMAP_FOR_EACH_SAFE (ofconn, next_ofconn, hmap_node, &mgr->controllers) {
struct ofproto_controller *c;
update_in_band_remotes(mgr);
update_fail_open(mgr);
+ if (had_controllers != connmgr_has_controllers(mgr)) {
+ ofproto_flush_flows(mgr->ofproto);
+ }
}
/* Drops the connections between 'mgr' and all of its primary and secondary
* A "snoop" is a pvconn to which every OpenFlow message to or from the most
* important controller on 'mgr' is mirrored. */
/* NOTE(review): returns set_pvconns()'s status -- presumably 0 on success or a
 * positive errno value on failure, per the OVS convention; confirm against
 * set_pvconns(). */
int
-connmgr_set_snoops(struct connmgr *mgr, const struct svec *snoops)
+connmgr_set_snoops(struct connmgr *mgr, const struct sset *snoops)
{
return set_pvconns(&mgr->snoops, &mgr->n_snoops, snoops);
}
/* Adds each of the snoops currently configured on 'mgr' to 'snoops'. */
/* NOTE(review): with the sset API, duplicate target names collapse into one
 * entry; the caller presumably owns and must destroy 'snoops' -- confirm. */
void
-connmgr_get_snoops(const struct connmgr *mgr, struct svec *snoops)
+connmgr_get_snoops(const struct connmgr *mgr, struct sset *snoops)
{
size_t i;
for (i = 0; i < mgr->n_snoops; i++) {
-        svec_add(snoops, pvconn_get_name(mgr->snoops[i]));
+        sset_add(snoops, pvconn_get_name(mgr->snoops[i]));
}
}
+/* Reports whether any snoop connections are configured on 'mgr'.
+ *
+ * Returns true when at least one snoop exists, otherwise false. */
+bool
+connmgr_has_snoops(const struct connmgr *mgr)
+{
+    return mgr->n_snoops != 0;
+}
+
/* Creates a new controller for 'target' in 'mgr'. update_controller() needs
* to be called later to finish the new ofconn's configuration. */
static void
static int
set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
- const struct svec *svec)
+ const struct sset *sset)
{
struct pvconn **pvconns = *pvconnsp;
size_t n_pvconns = *n_pvconnsp;
+ const char *name;
int retval = 0;
size_t i;
}
free(pvconns);
- pvconns = xmalloc(svec->n * sizeof *pvconns);
+ pvconns = xmalloc(sset_count(sset) * sizeof *pvconns);
n_pvconns = 0;
- for (i = 0; i < svec->n; i++) {
- const char *name = svec->names[i];
+ SSET_FOR_EACH (name, sset) {
struct pvconn *pvconn;
int error;
ofconn->flow_format = flow_format;
}
+/* Returns true if the NXT_FLOW_MOD_TABLE_ID extension is enabled, false
+ * otherwise.
+ *
+ * By default the extension is not enabled.
+ * (The flag is set via ofconn_set_flow_mod_table_id() and initialized to
+ * false when the ofconn is created.) */
+bool
+ofconn_get_flow_mod_table_id(const struct ofconn *ofconn)
+{
+    return ofconn->flow_mod_table_id;
+}
+
+/* Enables or disables (according to 'enable') the NXT_FLOW_MOD_TABLE_ID
+ * extension on 'ofconn'.
+ *
+ * NOTE(review): this only flips the cached flag read back by
+ * ofconn_get_flow_mod_table_id(); no message is sent to the controller. */
+void
+ofconn_set_flow_mod_table_id(struct ofconn *ofconn, bool enable)
+{
+    ofconn->flow_mod_table_id = enable;
+}
+
/* Returns the default miss send length for 'ofconn'. */
int
ofconn_get_miss_send_len(const struct ofconn *ofconn)
ofconn->rconn = rconn;
ofconn->type = type;
ofconn->flow_format = NXFF_OPENFLOW10;
+ ofconn->flow_mod_table_id = false;
ofconn->role = NX_ROLE_OTHER;
ofconn->packet_in_counter = rconn_packet_counter_create ();
ofconn->pktbuf = NULL;
\f
/* Sending asynchronous messages. */
-static void schedule_packet_in(struct ofconn *, const struct dpif_upcall *,
+static void schedule_packet_in(struct ofconn *, struct ofputil_packet_in,
const struct flow *, struct ofpbuf *rw_packet);
/* Sends an OFPT_PORT_STATUS message with 'opp' and 'reason' to appropriate
- * controllers managed by 'mgr'.
- *
- * 'opp' is in *HOST* byte order. */
+ * controllers managed by 'mgr'. */
void
connmgr_send_port_status(struct connmgr *mgr, const struct ofp_phy_port *opp,
uint8_t reason)
ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &b);
ops->reason = reason;
ops->desc = *opp;
- hton_ofp_phy_port(&ops->desc);
ofconn_send(ofconn, b, NULL);
}
}
}
}
-/* Given 'upcall', of type DPIF_UC_ACTION or DPIF_UC_MISS, sends an
- * OFPT_PACKET_IN message to each OpenFlow controller as necessary according to
- * their individual configurations.
+/* Given 'pin', sends an OFPT_PACKET_IN message to each OpenFlow controller as
+ * necessary according to their individual configurations.
*
* 'rw_packet' may be NULL. Otherwise, 'rw_packet' must contain the same data
- * as upcall->packet. (rw_packet == upcall->packet is also valid.) Ownership
- * of 'rw_packet' is transferred to this function. */
+ * as pin->packet. (rw_packet == pin->packet is also valid.) Ownership of
+ * 'rw_packet' is transferred to this function. */
void
-connmgr_send_packet_in(struct connmgr *mgr, const struct dpif_upcall *upcall,
+connmgr_send_packet_in(struct connmgr *mgr,
+                       const struct ofputil_packet_in *pin,
const struct flow *flow, struct ofpbuf *rw_packet)
{
struct ofconn *ofconn, *prev;
/* NOTE(review): 'prev' is presumably initialized to NULL in context elided
 * from this hunk -- confirm before applying. */
/* 'prev' trails the walk so that the LAST ofconn that accepts asynchronous
 * messages -- and only that one -- is handed 'rw_packet' itself (ownership
 * transfer); every earlier recipient is scheduled with rw_packet == NULL. */
LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
if (ofconn_receives_async_msgs(ofconn)) {
if (prev) {
-            schedule_packet_in(prev, upcall, flow, NULL);
+            schedule_packet_in(prev, *pin, flow, NULL);
}
prev = ofconn;
}
}
/* If no controller was eligible, nobody took ownership of 'rw_packet', so
 * it must be freed here. */
if (prev) {
-        schedule_packet_in(prev, upcall, flow, rw_packet);
+        schedule_packet_in(prev, *pin, flow, rw_packet);
} else {
ofpbuf_delete(rw_packet);
}
ofconn->packet_in_counter, 100);
}
-/* Takes 'upcall', whose packet has the flow specified by 'flow', composes an
+/* Takes 'pin', whose packet has the flow specified by 'flow', composes an
* OpenFlow packet-in message from it, and passes it to 'ofconn''s packet
* scheduler for sending.
*
* 'rw_packet' may be NULL. Otherwise, 'rw_packet' must contain the same data
- * as upcall->packet. (rw_packet == upcall->packet is also valid.) Ownership
- * of 'rw_packet' is transferred to this function. */
+ * as pin->packet. (rw_packet == pin->packet is also valid.) Ownership of
+ * 'rw_packet' is transferred to this function. */
static void
-schedule_packet_in(struct ofconn *ofconn, const struct dpif_upcall *upcall,
+schedule_packet_in(struct ofconn *ofconn, struct ofputil_packet_in pin,
const struct flow *flow, struct ofpbuf *rw_packet)
{
struct connmgr *mgr = ofconn->connmgr;
/* 'pin' is received by value, so the buffer_id/send_len adjustments below
 * are local to this call and do not affect the caller's copy. */
-    struct ofputil_packet_in pin;
-
-    /* Figure out the easy parts. */
-    pin.packet = upcall->packet;
-    pin.in_port = odp_port_to_ofp_port(flow->in_port);
-    pin.reason = upcall->type == DPIF_UC_MISS ? OFPR_NO_MATCH : OFPR_ACTION;
/* Get OpenFlow buffer_id. */
-    if (upcall->type == DPIF_UC_ACTION) {
+    if (pin.reason == OFPR_ACTION) {
pin.buffer_id = UINT32_MAX;
} else if (mgr->fail_open && fail_open_is_active(mgr->fail_open)) {
pin.buffer_id = pktbuf_get_null();
} else if (!ofconn->pktbuf) {
pin.buffer_id = UINT32_MAX;
} else {
-        pin.buffer_id = pktbuf_save(ofconn->pktbuf, upcall->packet,
-                                    flow->in_port);
+        pin.buffer_id = pktbuf_save(ofconn->pktbuf, pin.packet, flow->in_port);
}
/* Figure out how much of the packet to send. */
-    pin.send_len = upcall->packet->size;
+    if (pin.reason == OFPR_NO_MATCH) {
+        pin.send_len = pin.packet->size;
+    } else {
+        /* Caller should have initialized 'send_len' to 'max_len' specified in
+         * struct ofp_action_output. */
+    }
/* A buffered packet (buffer_id != UINT32_MAX) is additionally clamped to
 * this connection's configured miss send length. */
if (pin.buffer_id != UINT32_MAX) {
pin.send_len = MIN(pin.send_len, ofconn->miss_send_len);
}
-    if (upcall->type == DPIF_UC_ACTION) {
-        pin.send_len = MIN(pin.send_len, upcall->userdata);
-    }
/* Make OFPT_PACKET_IN and hand over to packet scheduler. It might
* immediately call into do_send_packet_in() or it might buffer it for a
* while (until a later call to pinsched_run()). */
-    pinsched_send(ofconn->schedulers[upcall->type == DPIF_UC_MISS ? 0 : 1],
+    pinsched_send(ofconn->schedulers[pin.reason == OFPR_NO_MATCH ? 0 : 1],
flow->in_port, ofputil_encode_packet_in(&pin, rw_packet),
do_send_packet_in, ofconn);
}
void
connmgr_set_fail_mode(struct connmgr *mgr, enum ofproto_fail_mode fail_mode)
{
-    mgr->fail_mode = fail_mode;
-    update_fail_open(mgr);
+    if (mgr->fail_mode != fail_mode) {
+        mgr->fail_mode = fail_mode;
+        update_fail_open(mgr);
+        /* NOTE(review): with no controllers configured, flushing the flow
+         * table lets the fail-open path reinstall (or omit) the standalone
+         * catch-all flow appropriate for the new mode -- confirm against the
+         * connmgr flush callback. */
+        if (!connmgr_has_controllers(mgr)) {
+            ofproto_flush_flows(mgr->ofproto);
+        }
+    }
}
\f
/* Fail-open implementation. */
if (mgr->fail_open) {
fail_open_flushed(mgr->fail_open);
}
+
+ /* If there are no controllers and we're in standalone mode, set up a flow
+ * that matches every packet and directs them to OFPP_NORMAL (which goes to
+ * us). Otherwise, the switch is in secure mode and we won't pass any
+ * traffic until a controller has been defined and it tells us to do so. */
+ if (!connmgr_has_controllers(mgr)
+ && mgr->fail_mode == OFPROTO_FAIL_STANDALONE) {
+ union ofp_action action;
+ struct cls_rule rule;
+
+ memset(&action, 0, sizeof action);
+ action.type = htons(OFPAT_OUTPUT);
+ action.output.len = htons(sizeof action);
+ action.output.port = htons(OFPP_NORMAL);
+ cls_rule_init_catchall(&rule, 0);
+ ofproto_add_flow(mgr->ofproto, &rule, &action, 1);
+ }
}
\f
/* Creates a new ofservice for 'target' in 'mgr'. Returns 0 if successful,