/*
- * Copyright (c) 2008, 2009, 2010, 2011, 2012 Nicira, Inc.
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
struct lswitch_port {
struct hmap_node hmap_node; /* Hash node for port number. */
- uint16_t port_no; /* OpenFlow port number, in host byte order. */
+ ofp_port_t port_no; /* OpenFlow port number. */
uint32_t queue_id; /* OpenFlow queue number. */
};
+enum lswitch_state {
+ S_CONNECTING, /* Waiting for connection to complete. */
+ S_FEATURES_REPLY, /* Waiting for features reply. */
+ S_SWITCHING, /* Switching flows. */
+};
+
struct lswitch {
+ struct rconn *rconn;
+ enum lswitch_state state;
+
/* If nonnegative, the switch sets up flows that expire after the given
* number of seconds (or never expire, if the value is OFP_FLOW_PERMANENT).
* Otherwise, the switch processes every packet. */
enum ofputil_protocol protocol;
unsigned long long int datapath_id;
- time_t last_features_request;
struct mac_learning *ml; /* NULL to act as hub instead of switch. */
struct flow_wildcards wc; /* Wildcards to apply to flows. */
bool action_normal; /* Use OFPP_NORMAL? */
/* Number of outgoing queued packets on the rconn. */
struct rconn_packet_counter *queued;
+
+ /* If true, do not reply to any messages from the switch (for debugging
+ * fail-open mode). */
+ bool mute;
+
+ /* Optional "flow mod" requests to send to the switch at connection time,
+ * to set up the flow table. */
+ const struct ofputil_flow_mod *default_flows;
+ size_t n_default_flows;
+ enum ofputil_protocol usable_protocols;
};
/* The log messages here could actually be useful in debugging, so keep the
* rate limit relatively high. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
-static void queue_tx(struct lswitch *, struct rconn *, struct ofpbuf *);
-static void send_features_request(struct lswitch *, struct rconn *);
+static void queue_tx(struct lswitch *, struct ofpbuf *);
+static void send_features_request(struct lswitch *);
+static void lswitch_process_packet(struct lswitch *, const struct ofpbuf *);
static enum ofperr process_switch_features(struct lswitch *,
struct ofp_header *);
-static void process_packet_in(struct lswitch *, struct rconn *,
- const struct ofp_header *);
-static void process_echo_request(struct lswitch *, struct rconn *,
- const struct ofp_header *);
+static void process_packet_in(struct lswitch *, const struct ofp_header *);
+static void process_echo_request(struct lswitch *, const struct ofp_header *);
/* Creates and returns a new learning switch whose configuration is given by
* 'cfg'.
struct lswitch *
lswitch_create(struct rconn *rconn, const struct lswitch_config *cfg)
{
- enum ofputil_protocol protocol;
struct lswitch *sw;
+ uint32_t ofpfw;
sw = xzalloc(sizeof *sw);
+ sw->rconn = rconn;
+ sw->state = S_CONNECTING;
sw->max_idle = cfg->max_idle;
sw->datapath_id = 0;
- sw->last_features_request = time_now() - 1;
sw->ml = (cfg->mode == LSW_LEARN
? mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME)
: NULL);
sw->action_normal = cfg->mode == LSW_NORMAL;
- flow_wildcards_init_exact(&sw->wc);
- if (cfg->wildcards) {
- uint32_t ofpfw;
-
- if (cfg->wildcards == UINT32_MAX) {
- /* Try to wildcard as many fields as possible, but we cannot
- * wildcard all fields. We need in_port to detect moves. We need
- * Ethernet source and dest and VLAN VID to do L2 learning. */
- ofpfw = (OFPFW10_DL_TYPE | OFPFW10_DL_VLAN_PCP
- | OFPFW10_NW_SRC_ALL | OFPFW10_NW_DST_ALL
- | OFPFW10_NW_TOS | OFPFW10_NW_PROTO
- | OFPFW10_TP_SRC | OFPFW10_TP_DST);
- } else {
- ofpfw = cfg->wildcards;
- }
+ switch (cfg->wildcards) {
+ case 0:
+ ofpfw = 0;
+ break;
+
+ case UINT32_MAX:
+ /* Try to wildcard as many fields as possible, but we cannot
+ * wildcard all fields. We need in_port to detect moves. We need
+ * Ethernet source and dest and VLAN VID to do L2 learning. */
+ ofpfw = (OFPFW10_DL_TYPE | OFPFW10_DL_VLAN_PCP
+ | OFPFW10_NW_SRC_ALL | OFPFW10_NW_DST_ALL
+ | OFPFW10_NW_TOS | OFPFW10_NW_PROTO
+ | OFPFW10_TP_SRC | OFPFW10_TP_DST);
+ break;
- ofputil_wildcard_from_ofpfw10(ofpfw, &sw->wc);
+ default:
+ ofpfw = cfg->wildcards;
+ break;
}
+ ofputil_wildcard_from_ofpfw10(ofpfw, &sw->wc);
sw->default_queue = cfg->default_queue;
hmap_init(&sw->queue_numbers);
}
}
+ sw->default_flows = cfg->default_flows;
+ sw->n_default_flows = cfg->n_default_flows;
+ sw->usable_protocols = cfg->usable_protocols;
+
sw->queued = rconn_packet_counter_create();
- send_features_request(sw, rconn);
- protocol = ofputil_protocol_from_ofp_version(rconn_get_version(rconn));
- if (cfg->default_flows) {
- enum ofputil_protocol usable_protocols;
+ return sw;
+}
+
+static void
+lswitch_handshake(struct lswitch *sw)
+{
+ enum ofputil_protocol protocol;
+
+ send_features_request(sw);
+
+ protocol = ofputil_protocol_from_ofp_version(rconn_get_version(sw->rconn));
+ if (sw->default_flows) {
struct ofpbuf *msg = NULL;
int error = 0;
size_t i;
* This could be improved by actually negotiating a mutually acceptable
* flow format with the switch, but that would require an asynchronous
* state machine. This version ought to work fine in practice. */
- usable_protocols = ofputil_flow_mod_usable_protocols(
- cfg->default_flows, cfg->n_default_flows);
- if (!(protocol & usable_protocols)) {
- enum ofputil_protocol want = rightmost_1bit(usable_protocols);
+ if (!(protocol & sw->usable_protocols)) {
+ enum ofputil_protocol want = rightmost_1bit(sw->usable_protocols);
while (!error) {
msg = ofputil_encode_set_protocol(protocol, want, &protocol);
if (!msg) {
break;
}
- error = rconn_send(rconn, msg, NULL);
+ error = rconn_send(sw->rconn, msg, NULL);
}
}
+ if (protocol & sw->usable_protocols) {
+ for (i = 0; !error && i < sw->n_default_flows; i++) {
+ msg = ofputil_encode_flow_mod(&sw->default_flows[i], protocol);
+ error = rconn_send(sw->rconn, msg, NULL);
+ }
- for (i = 0; !error && i < cfg->n_default_flows; i++) {
- msg = ofputil_encode_flow_mod(&cfg->default_flows[i], protocol);
- error = rconn_send(rconn, msg, NULL);
- }
-
- if (error) {
- VLOG_INFO_RL(&rl, "%s: failed to queue default flows (%s)",
- rconn_get_name(rconn), strerror(error));
+ if (error) {
+ VLOG_INFO_RL(&rl, "%s: failed to queue default flows (%s)",
+ rconn_get_name(sw->rconn), ovs_strerror(error));
+ }
+ } else {
+ VLOG_INFO_RL(&rl, "%s: failed to set usable protocol",
+ rconn_get_name(sw->rconn));
}
}
sw->protocol = protocol;
+}
- return sw;
+/* Returns true as long as 'sw''s rconn is alive (that is, rconn_is_alive()
+ * holds), so callers can poll for when the switch should be destroyed. */
+bool
+lswitch_is_alive(const struct lswitch *sw)
+{
+ return rconn_is_alive(sw->rconn);
}
/* Destroys 'sw'. */
if (sw) {
struct lswitch_port *node, *next;
+ rconn_destroy(sw->rconn);
HMAP_FOR_EACH_SAFE (node, next, hmap_node, &sw->queue_numbers) {
hmap_remove(&sw->queue_numbers, &node->hmap_node);
free(node);
}
shash_destroy(&sw->queue_names);
- mac_learning_destroy(sw->ml);
+ mac_learning_unref(sw->ml);
rconn_packet_counter_destroy(sw->queued);
free(sw);
}
void
lswitch_run(struct lswitch *sw)
{
+ int i;
+
if (sw->ml) {
- mac_learning_run(sw->ml, NULL);
+ /* mac_learning_run() requires the write lock on the MAC table. */
+ ovs_rwlock_wrlock(&sw->ml->rwlock);
+ mac_learning_run(sw->ml);
+ ovs_rwlock_unlock(&sw->ml->rwlock);
+ }
+
+ rconn_run(sw->rconn);
+
+ if (sw->state == S_CONNECTING) {
+ /* Once rconn reports a negotiated OpenFlow version (!= -1), perform
+ * the handshake and start waiting for the features reply.  Do not
+ * try to receive messages until then. */
+ if (rconn_get_version(sw->rconn) != -1) {
+ lswitch_handshake(sw);
+ sw->state = S_FEATURES_REPLY;
+ }
+ return;
+ }
+
+ /* Process at most 50 received messages per call to bound the work done
+ * in a single invocation. */
+ for (i = 0; i < 50; i++) {
+ struct ofpbuf *msg;
+
+ msg = rconn_recv(sw->rconn);
+ if (!msg) {
+ break;
+ }
+
+ if (!sw->mute) {
+ lswitch_process_packet(sw, msg);
+ }
+ ofpbuf_delete(msg);
}
}
lswitch_wait(struct lswitch *sw)
{
if (sw->ml) {
+ ovs_rwlock_rdlock(&sw->ml->rwlock);
mac_learning_wait(sw->ml);
+ ovs_rwlock_unlock(&sw->ml->rwlock);
}
+ rconn_run_wait(sw->rconn);
+ rconn_recv_wait(sw->rconn);
}
/* Processes 'msg', which should be an OpenFlow received on 'rconn', according
* to the learning switch state in 'sw'. The most likely result of processing
* is that flow-setup and packet-out OpenFlow messages will be sent out on
* 'rconn'. */
-void
-lswitch_process_packet(struct lswitch *sw, struct rconn *rconn,
- const struct ofpbuf *msg)
+static void
+lswitch_process_packet(struct lswitch *sw, const struct ofpbuf *msg)
{
enum ofptype type;
struct ofpbuf b;
return;
}
- if (sw->datapath_id == 0
+ if (sw->state == S_FEATURES_REPLY
&& type != OFPTYPE_ECHO_REQUEST
&& type != OFPTYPE_FEATURES_REPLY) {
- send_features_request(sw, rconn);
return;
}
switch (type) {
case OFPTYPE_ECHO_REQUEST:
- process_echo_request(sw, rconn, msg->data);
+ process_echo_request(sw, msg->data);
break;
case OFPTYPE_FEATURES_REPLY:
- process_switch_features(sw, msg->data);
+ if (sw->state == S_FEATURES_REPLY) {
+ if (!process_switch_features(sw, msg->data)) {
+ sw->state = S_SWITCHING;
+ } else {
+ rconn_disconnect(sw->rconn);
+ }
+ }
break;
case OFPTYPE_PACKET_IN:
- process_packet_in(sw, rconn, msg->data);
+ process_packet_in(sw, msg->data);
break;
case OFPTYPE_FLOW_REMOVED:
case OFPTYPE_PORT_STATUS:
case OFPTYPE_PACKET_OUT:
case OFPTYPE_FLOW_MOD:
+ case OFPTYPE_GROUP_MOD:
case OFPTYPE_PORT_MOD:
+ case OFPTYPE_TABLE_MOD:
case OFPTYPE_BARRIER_REQUEST:
case OFPTYPE_BARRIER_REPLY:
+ case OFPTYPE_QUEUE_GET_CONFIG_REQUEST:
+ case OFPTYPE_QUEUE_GET_CONFIG_REPLY:
case OFPTYPE_DESC_STATS_REQUEST:
case OFPTYPE_DESC_STATS_REPLY:
case OFPTYPE_FLOW_STATS_REQUEST:
case OFPTYPE_PORT_DESC_STATS_REPLY:
case OFPTYPE_ROLE_REQUEST:
case OFPTYPE_ROLE_REPLY:
+ case OFPTYPE_ROLE_STATUS:
case OFPTYPE_SET_FLOW_FORMAT:
case OFPTYPE_FLOW_MOD_TABLE_ID:
case OFPTYPE_SET_PACKET_IN_FORMAT:
case OFPTYPE_FLOW_AGE:
- case OFPTYPE_SET_ASYNC_CONFIG:
case OFPTYPE_SET_CONTROLLER_ID:
case OFPTYPE_FLOW_MONITOR_STATS_REQUEST:
case OFPTYPE_FLOW_MONITOR_STATS_REPLY:
case OFPTYPE_FLOW_MONITOR_CANCEL:
case OFPTYPE_FLOW_MONITOR_PAUSED:
case OFPTYPE_FLOW_MONITOR_RESUMED:
+ case OFPTYPE_GET_ASYNC_REQUEST:
+ case OFPTYPE_GET_ASYNC_REPLY:
+ case OFPTYPE_SET_ASYNC_CONFIG:
+ case OFPTYPE_METER_MOD:
+ case OFPTYPE_GROUP_STATS_REQUEST:
+ case OFPTYPE_GROUP_STATS_REPLY:
+ case OFPTYPE_GROUP_DESC_STATS_REQUEST:
+ case OFPTYPE_GROUP_DESC_STATS_REPLY:
+ case OFPTYPE_GROUP_FEATURES_STATS_REQUEST:
+ case OFPTYPE_GROUP_FEATURES_STATS_REPLY:
+ case OFPTYPE_METER_STATS_REQUEST:
+ case OFPTYPE_METER_STATS_REPLY:
+ case OFPTYPE_METER_CONFIG_STATS_REQUEST:
+ case OFPTYPE_METER_CONFIG_STATS_REPLY:
+ case OFPTYPE_METER_FEATURES_STATS_REQUEST:
+ case OFPTYPE_METER_FEATURES_STATS_REPLY:
+ case OFPTYPE_TABLE_FEATURES_STATS_REQUEST:
+ case OFPTYPE_TABLE_FEATURES_STATS_REPLY:
default:
if (VLOG_IS_DBG_ENABLED()) {
char *s = ofp_to_string(msg->data, msg->size, 2);
}
\f
+/* Queues an OFPT_FEATURES_REQUEST followed by an OFPT_SET_CONFIG on 'sw''s
+ * rconn, encoded for the OpenFlow version that the rconn negotiated.  The
+ * caller must ensure the version has already been negotiated (see the
+ * assertion below). */
static void
-send_features_request(struct lswitch *sw, struct rconn *rconn)
+send_features_request(struct lswitch *sw)
{
- time_t now = time_now();
- if (now >= sw->last_features_request + 1) {
- struct ofpbuf *b;
- struct ofp_switch_config *osc;
-
- /* Send OFPT_FEATURES_REQUEST. */
- b = ofpraw_alloc(OFPRAW_OFPT_FEATURES_REQUEST, OFP10_VERSION, 0);
- queue_tx(sw, rconn, b);
-
- /* Send OFPT_SET_CONFIG. */
- b = ofpraw_alloc(OFPRAW_OFPT_SET_CONFIG, OFP10_VERSION, sizeof *osc);
- osc = ofpbuf_put_uninit(b, sizeof *osc);
- osc->miss_send_len = htons(OFP_DEFAULT_MISS_SEND_LEN);
- queue_tx(sw, rconn, b);
-
- sw->last_features_request = now;
- }
+ struct ofpbuf *b;
+ struct ofp_switch_config *osc;
+ int ofp_version = rconn_get_version(sw->rconn);
+
+ /* A valid negotiated version is required to encode the messages. */
+ ovs_assert(ofp_version > 0 && ofp_version < 0xff);
+
+ /* Send OFPT_FEATURES_REQUEST. */
+ b = ofpraw_alloc(OFPRAW_OFPT_FEATURES_REQUEST, ofp_version, 0);
+ queue_tx(sw, b);
+
+ /* Send OFPT_SET_CONFIG.  ofpbuf_put_zeros() (rather than _uninit) keeps
+ * the unused config fields zeroed. */
+ b = ofpraw_alloc(OFPRAW_OFPT_SET_CONFIG, ofp_version, sizeof *osc);
+ osc = ofpbuf_put_zeros(b, sizeof *osc);
+ osc->miss_send_len = htons(OFP_DEFAULT_MISS_SEND_LEN);
+ queue_tx(sw, b);
}
+/* Sends 'b' on 'sw''s rconn, limiting the number of queued packets (tracked
+ * in sw->queued) to 10.  Takes ownership of 'b'.  Logs, rate-limited, on
+ * queue overflow or any send error other than ENOTCONN. */
static void
-queue_tx(struct lswitch *sw, struct rconn *rconn, struct ofpbuf *b)
+queue_tx(struct lswitch *sw, struct ofpbuf *b)
{
- int retval = rconn_send_with_limit(rconn, b, sw->queued, 10);
+ int retval = rconn_send_with_limit(sw->rconn, b, sw->queued, 10);
if (retval && retval != ENOTCONN) {
if (retval == EAGAIN) {
VLOG_INFO_RL(&rl, "%016llx: %s: tx queue overflow",
- sw->datapath_id, rconn_get_name(rconn));
+ sw->datapath_id, rconn_get_name(sw->rconn));
} else {
VLOG_WARN_RL(&rl, "%016llx: %s: send: %s",
- sw->datapath_id, rconn_get_name(rconn),
- strerror(retval));
+ sw->datapath_id, rconn_get_name(sw->rconn),
+ ovs_strerror(retval));
}
}
}
if (lp && hmap_node_is_null(&lp->hmap_node)) {
lp->port_no = port.port_no;
hmap_insert(&sw->queue_numbers, &lp->hmap_node,
- hash_int(lp->port_no, 0));
+ hash_ofp_port(lp->port_no));
}
}
return 0;
}
-static uint16_t
+static ofp_port_t
lswitch_choose_destination(struct lswitch *sw, const struct flow *flow)
{
- uint16_t out_port;
+ ofp_port_t out_port;
/* Learn the source MAC. */
- if (mac_learning_may_learn(sw->ml, flow->dl_src, 0)) {
- struct mac_entry *mac = mac_learning_insert(sw->ml, flow->dl_src, 0);
- if (mac_entry_is_new(mac) || mac->port.i != flow->in_port) {
- VLOG_DBG_RL(&rl, "%016llx: learned that "ETH_ADDR_FMT" is on "
- "port %"PRIu16, sw->datapath_id,
- ETH_ADDR_ARGS(flow->dl_src), flow->in_port);
-
- mac->port.i = flow->in_port;
- mac_learning_changed(sw->ml, mac);
+ if (sw->ml) {
+ ovs_rwlock_wrlock(&sw->ml->rwlock);
+ if (mac_learning_may_learn(sw->ml, flow->dl_src, 0)) {
+ struct mac_entry *mac = mac_learning_insert(sw->ml, flow->dl_src,
+ 0);
+ if (mac->port.ofp_port != flow->in_port.ofp_port) {
+ VLOG_DBG_RL(&rl, "%016llx: learned that "ETH_ADDR_FMT" is on "
+ "port %"PRIu16, sw->datapath_id,
+ ETH_ADDR_ARGS(flow->dl_src),
+ flow->in_port.ofp_port);
+
+ mac->port.ofp_port = flow->in_port.ofp_port;
+ mac_learning_changed(sw->ml);
+ }
}
+ ovs_rwlock_unlock(&sw->ml->rwlock);
}
/* Drop frames for reserved multicast addresses. */
if (sw->ml) {
struct mac_entry *mac;
- mac = mac_learning_lookup(sw->ml, flow->dl_dst, 0, NULL);
+ ovs_rwlock_rdlock(&sw->ml->rwlock);
+ mac = mac_learning_lookup(sw->ml, flow->dl_dst, 0);
if (mac) {
- out_port = mac->port.i;
- if (out_port == flow->in_port) {
+ out_port = mac->port.ofp_port;
+ if (out_port == flow->in_port.ofp_port) {
/* Don't send a packet back out its input port. */
+ ovs_rwlock_unlock(&sw->ml->rwlock);
return OFPP_NONE;
}
}
+ ovs_rwlock_unlock(&sw->ml->rwlock);
}
/* Check if we need to use "NORMAL" action. */
}
static uint32_t
-get_queue_id(const struct lswitch *sw, uint16_t in_port)
+get_queue_id(const struct lswitch *sw, ofp_port_t in_port)
{
const struct lswitch_port *port;
- HMAP_FOR_EACH_WITH_HASH (port, hmap_node, hash_int(in_port, 0),
+ HMAP_FOR_EACH_WITH_HASH (port, hmap_node, hash_ofp_port(in_port),
&sw->queue_numbers) {
if (port->port_no == in_port) {
return port->queue_id;
}
static void
-process_packet_in(struct lswitch *sw, struct rconn *rconn,
- const struct ofp_header *oh)
+process_packet_in(struct lswitch *sw, const struct ofp_header *oh)
{
struct ofputil_packet_in pi;
uint32_t queue_id;
- uint16_t out_port;
+ ofp_port_t out_port;
uint64_t ofpacts_stub[64 / 8];
struct ofpbuf ofpacts;
struct ofpbuf pkt;
struct flow flow;
+ union flow_in_port in_port_;
error = ofputil_decode_packet_in(&pi, oh);
if (error) {
/* Extract flow data from 'opi' into 'flow'. */
ofpbuf_use_const(&pkt, pi.packet, pi.packet_len);
- flow_extract(&pkt, 0, pi.fmd.tun_id, pi.fmd.in_port, &flow);
+ in_port_.ofp_port = pi.fmd.in_port;
+ flow_extract(&pkt, 0, 0, NULL, &in_port_, &flow);
+ flow.tunnel.tun_id = pi.fmd.tun_id;
/* Choose output port. */
out_port = lswitch_choose_destination(sw, &flow);
ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
if (out_port == OFPP_NONE) {
/* No actions. */
- } else if (queue_id == UINT32_MAX || out_port >= OFPP_MAX) {
+ } else if (queue_id == UINT32_MAX
+ || ofp_to_u16(out_port) >= ofp_to_u16(OFPP_MAX)) {
ofpact_put_OUTPUT(&ofpacts)->port = out_port;
} else {
struct ofpact_enqueue *enqueue = ofpact_put_ENQUEUE(&ofpacts);
/* The output port is known, or we always flood everything, so add a
* new flow. */
memset(&fm, 0, sizeof fm);
- cls_rule_init(&flow, &sw->wc, 0, &fm.cr);
+ match_init(&fm.match, &flow, &sw->wc);
+ ofputil_normalize_match_quiet(&fm.match);
+ fm.priority = 0;
fm.table_id = 0xff;
fm.command = OFPFC_ADD;
fm.idle_timeout = sw->max_idle;
fm.ofpacts_len = ofpacts.size;
buffer = ofputil_encode_flow_mod(&fm, sw->protocol);
- queue_tx(sw, rconn, buffer);
+ queue_tx(sw, buffer);
/* If the switch didn't buffer the packet, we need to send a copy. */
if (pi.buffer_id == UINT32_MAX && out_port != OFPP_NONE) {
- queue_tx(sw, rconn, ofputil_encode_packet_out(&po));
+ queue_tx(sw, ofputil_encode_packet_out(&po, sw->protocol));
}
} else {
/* We don't know that MAC, or we don't set up flows. Send along the
* packet without setting up a flow. */
if (pi.buffer_id != UINT32_MAX || out_port != OFPP_NONE) {
- queue_tx(sw, rconn, ofputil_encode_packet_out(&po));
+ queue_tx(sw, ofputil_encode_packet_out(&po, sw->protocol));
}
}
}
+/* Replies to the OFPT_ECHO_REQUEST in 'rq' by queueing an echo reply on
+ * 'sw''s rconn. */
static void
-process_echo_request(struct lswitch *sw, struct rconn *rconn,
- const struct ofp_header *rq)
+process_echo_request(struct lswitch *sw, const struct ofp_header *rq)
{
- queue_tx(sw, rconn, make_echo_reply(rq));
+ queue_tx(sw, make_echo_reply(rq));
}