/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>
#include "learning-switch.h"

#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <netinet/in.h>

#include "byte-order.h"
#include "classifier.h"
#include "flow.h"
#include "hmap.h"
#include "mac-learning.h"
#include "ofpbuf.h"
#include "ofp-actions.h"
#include "ofp-errors.h"
#include "ofp-msgs.h"
#include "ofp-parse.h"
#include "ofp-print.h"
#include "ofp-util.h"
#include "openflow/openflow.h"
#include "poll-loop.h"
#include "rconn.h"
#include "shash.h"
#include "simap.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(learning_switch);

struct lswitch_port {
    struct hmap_node hmap_node; /* Hash node for port number. */
    uint16_t port_no;           /* OpenFlow port number, in host byte order. */
    uint32_t queue_id;          /* OpenFlow queue number. */
};

enum lswitch_state {
    S_CONNECTING,               /* Waiting for connection to complete. */
    S_FEATURES_REPLY,           /* Waiting for features reply. */
    S_SWITCHING,                /* Switching flows. */
};
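
/* A learning switch instance.  It owns one OpenFlow connection ('rconn') to a
 * datapath and, depending on configuration, acts as an L2 learning switch, a
 * plain hub, or a front end for the switch's own OFPP_NORMAL processing. */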
struct lswitch {
    struct rconn *rconn;
    enum lswitch_state state;

    /* If nonnegative, the switch sets up flows that expire after the given
     * number of seconds (or never expire, if the value is OFP_FLOW_PERMANENT).
     * Otherwise, the switch processes every packet. */
    int max_idle;

    enum ofputil_protocol protocol;
    unsigned long long int datapath_id;
    struct mac_learning *ml;    /* NULL to act as hub instead of switch. */
    struct flow_wildcards wc;   /* Wildcards to apply to flows. */
    bool action_normal;         /* Use OFPP_NORMAL? */

    /* Queue distribution. */
    uint32_t default_queue;     /* Default OpenFlow queue, or UINT32_MAX. */
    struct hmap queue_numbers;  /* Map from port number to lswitch_port. */
    struct shash queue_names;   /* Map from port name to lswitch_port. */

    /* Number of outgoing queued packets on the rconn. */
    struct rconn_packet_counter *queued;

    /* If true, do not reply to any messages from the switch (for debugging
     * fail-open mode). */
    bool mute;

    /* Optional "flow mod" requests to send to the switch at connection time,
     * to set up the flow table. */
    const struct ofputil_flow_mod *default_flows;
    size_t n_default_flows;
};

/* The log messages here could actually be useful in debugging, so keep the
 * rate limit relatively high. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);

static void queue_tx(struct lswitch *, struct ofpbuf *);
static void send_features_request(struct lswitch *);
static void lswitch_process_packet(struct lswitch *, const struct ofpbuf *);
static enum ofperr process_switch_features(struct lswitch *,
                                           struct ofp_header *);
static void process_packet_in(struct lswitch *, const struct ofp_header *);
static void process_echo_request(struct lswitch *, const struct ofp_header *);
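
/* Typical usage, as a rough sketch (the names 'rconn' and 'cfg' stand for a
 * connection and a configuration the caller has already set up; this is
 * illustrative, not code from this file):
 *
 *     struct lswitch *sw = lswitch_create(rconn, &cfg);
 *     while (lswitch_is_alive(sw)) {
 *         lswitch_run(sw);
 *         lswitch_wait(sw);
 *         poll_block();
 *     }
 *     lswitch_destroy(sw);
 */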

/* Creates and returns a new learning switch whose configuration is given by
 * 'cfg'.
 *
 * 'rconn' is used to send out an OpenFlow features request. */
struct lswitch *
lswitch_create(struct rconn *rconn, const struct lswitch_config *cfg)
{
    struct lswitch *sw;
    uint32_t ofpfw;

    sw = xzalloc(sizeof *sw);
    sw->rconn = rconn;
    sw->state = S_CONNECTING;
    sw->max_idle = cfg->max_idle;
    sw->datapath_id = 0;
    sw->ml = (cfg->mode == LSW_LEARN
              ? mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME)
              : NULL);
    sw->action_normal = cfg->mode == LSW_NORMAL;

    switch (cfg->wildcards) {
    case 0:
        ofpfw = 0;
        break;
    case UINT32_MAX:
        /* Try to wildcard as many fields as possible, but we cannot
         * wildcard all fields.  We need in_port to detect moves.  We need
         * Ethernet source and dest and VLAN VID to do L2 learning. */
        ofpfw = (OFPFW10_DL_TYPE | OFPFW10_DL_VLAN_PCP
                 | OFPFW10_NW_SRC_ALL | OFPFW10_NW_DST_ALL
                 | OFPFW10_NW_TOS | OFPFW10_NW_PROTO
                 | OFPFW10_TP_SRC | OFPFW10_TP_DST);
        break;
    default:
        ofpfw = cfg->wildcards;
        break;
    }
    ofputil_wildcard_from_ofpfw10(ofpfw, &sw->wc);

    sw->default_queue = cfg->default_queue;
    hmap_init(&sw->queue_numbers);
    shash_init(&sw->queue_names);
    if (cfg->port_queues) {
        struct simap_node *node;

        SIMAP_FOR_EACH (node, cfg->port_queues) {
            struct lswitch_port *port = xmalloc(sizeof *port);
            hmap_node_nullify(&port->hmap_node);
            port->queue_id = node->data;
            shash_add(&sw->queue_names, node->name, port);
        }
    }

    sw->default_flows = cfg->default_flows;
    sw->n_default_flows = cfg->n_default_flows;

    sw->queued = rconn_packet_counter_create();

    return sw;
}
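
/* Performs the initial handshake on 'sw''s connection: sends the features
 * request and switch configuration and, if the caller supplied default flows,
 * queues the messages needed to select a usable flow format and install
 * those flows. */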
static void
lswitch_handshake(struct lswitch *sw)
{
    enum ofputil_protocol protocol;

    send_features_request(sw);

    protocol = ofputil_protocol_from_ofp_version(rconn_get_version(sw->rconn));
    if (sw->default_flows) {
        enum ofputil_protocol usable_protocols;
        struct ofpbuf *msg = NULL;
        int error = 0;
        size_t i;

        /* If the initial protocol isn't good enough for default_flows, then
         * pick one that will work and encode messages to set up that
         * protocol.
         *
         * This could be improved by actually negotiating a mutually acceptable
         * flow format with the switch, but that would require an asynchronous
         * state machine.  This version ought to work fine in practice. */
        usable_protocols = ofputil_flow_mod_usable_protocols(
            sw->default_flows, sw->n_default_flows);
        if (!(protocol & usable_protocols)) {
            enum ofputil_protocol want = rightmost_1bit(usable_protocols);

            msg = ofputil_encode_set_protocol(protocol, want, &protocol);
        }
        if (msg) {
            error = rconn_send(sw->rconn, msg, NULL);
        }

        if (protocol & usable_protocols) {
            for (i = 0; !error && i < sw->n_default_flows; i++) {
                msg = ofputil_encode_flow_mod(&sw->default_flows[i], protocol);
                error = rconn_send(sw->rconn, msg, NULL);
            }

            if (error) {
                VLOG_INFO_RL(&rl, "%s: failed to queue default flows (%s)",
                             rconn_get_name(sw->rconn), strerror(error));
            }
        } else {
            VLOG_INFO_RL(&rl, "%s: failed to set usable protocol",
                         rconn_get_name(sw->rconn));
        }
    }
    sw->protocol = protocol;
}

bool
lswitch_is_alive(const struct lswitch *sw)
{
    return rconn_is_alive(sw->rconn);
}

/* Destroys 'sw'. */
void
lswitch_destroy(struct lswitch *sw)
{
    if (sw) {
        struct lswitch_port *node, *next;

        rconn_destroy(sw->rconn);
        HMAP_FOR_EACH_SAFE (node, next, hmap_node, &sw->queue_numbers) {
            hmap_remove(&sw->queue_numbers, &node->hmap_node);
            free(node);
        }
        shash_destroy(&sw->queue_names);
        mac_learning_destroy(sw->ml);
        rconn_packet_counter_destroy(sw->queued);
        free(sw);
    }
}

/* Takes care of necessary 'sw' activity, including receiving and processing
 * OpenFlow messages from the switch. */
void
lswitch_run(struct lswitch *sw)
{
    int i;

    if (sw->ml) {
        mac_learning_run(sw->ml, NULL);
    }
    rconn_run(sw->rconn);

    if (sw->state == S_CONNECTING) {
        if (rconn_get_version(sw->rconn) != -1) {
            lswitch_handshake(sw);
            sw->state = S_FEATURES_REPLY;
        }
        return;
    }

    for (i = 0; i < 50; i++) {
        struct ofpbuf *msg;

        msg = rconn_recv(sw->rconn);
        if (!msg) {
            break;
        }

        if (!sw->mute) {
            lswitch_process_packet(sw, msg);
        }
        ofpbuf_delete(msg);
    }
}
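
/* Arranges for poll_block() to wake up when lswitch_run() needs to be
 * called again. */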
void
lswitch_wait(struct lswitch *sw)
{
    if (sw->ml) {
        mac_learning_wait(sw->ml);
    }
    rconn_run_wait(sw->rconn);
    rconn_recv_wait(sw->rconn);
}

/* Processes 'msg', which should be an OpenFlow message received on 'rconn',
 * according to the learning switch state in 'sw'.  The most likely result of
 * processing is that flow-setup and packet-out OpenFlow messages will be sent
 * out on 'rconn'. */
static void
lswitch_process_packet(struct lswitch *sw, const struct ofpbuf *msg)
{
    enum ofptype type;
    struct ofpbuf b;

    b = *msg;
    if (ofptype_pull(&type, &b)) {
        return;
    }

    if (sw->state == S_FEATURES_REPLY
        && type != OFPTYPE_ECHO_REQUEST
        && type != OFPTYPE_FEATURES_REPLY) {
        return;
    }

    switch (type) {
    case OFPTYPE_ECHO_REQUEST:
        process_echo_request(sw, msg->data);
        break;

    case OFPTYPE_FEATURES_REPLY:
        if (sw->state == S_FEATURES_REPLY) {
            if (!process_switch_features(sw, msg->data)) {
                sw->state = S_SWITCHING;
            } else {
                rconn_disconnect(sw->rconn);
            }
        }
        break;

    case OFPTYPE_PACKET_IN:
        process_packet_in(sw, msg->data);
        break;

    case OFPTYPE_FLOW_REMOVED:
        /* Nothing to do. */
        break;

    case OFPTYPE_HELLO:
    case OFPTYPE_ERROR:
    case OFPTYPE_ECHO_REPLY:
    case OFPTYPE_FEATURES_REQUEST:
    case OFPTYPE_GET_CONFIG_REQUEST:
    case OFPTYPE_GET_CONFIG_REPLY:
    case OFPTYPE_SET_CONFIG:
    case OFPTYPE_PORT_STATUS:
    case OFPTYPE_PACKET_OUT:
    case OFPTYPE_FLOW_MOD:
    case OFPTYPE_PORT_MOD:
    case OFPTYPE_BARRIER_REQUEST:
    case OFPTYPE_BARRIER_REPLY:
    case OFPTYPE_QUEUE_GET_CONFIG_REQUEST:
    case OFPTYPE_QUEUE_GET_CONFIG_REPLY:
    case OFPTYPE_DESC_STATS_REQUEST:
    case OFPTYPE_DESC_STATS_REPLY:
    case OFPTYPE_FLOW_STATS_REQUEST:
    case OFPTYPE_FLOW_STATS_REPLY:
    case OFPTYPE_AGGREGATE_STATS_REQUEST:
    case OFPTYPE_AGGREGATE_STATS_REPLY:
    case OFPTYPE_TABLE_STATS_REQUEST:
    case OFPTYPE_TABLE_STATS_REPLY:
    case OFPTYPE_PORT_STATS_REQUEST:
    case OFPTYPE_PORT_STATS_REPLY:
    case OFPTYPE_QUEUE_STATS_REQUEST:
    case OFPTYPE_QUEUE_STATS_REPLY:
    case OFPTYPE_PORT_DESC_STATS_REQUEST:
    case OFPTYPE_PORT_DESC_STATS_REPLY:
    case OFPTYPE_ROLE_REQUEST:
    case OFPTYPE_ROLE_REPLY:
    case OFPTYPE_SET_FLOW_FORMAT:
    case OFPTYPE_FLOW_MOD_TABLE_ID:
    case OFPTYPE_SET_PACKET_IN_FORMAT:
    case OFPTYPE_FLOW_AGE:
    case OFPTYPE_SET_CONTROLLER_ID:
    case OFPTYPE_FLOW_MONITOR_STATS_REQUEST:
    case OFPTYPE_FLOW_MONITOR_STATS_REPLY:
    case OFPTYPE_FLOW_MONITOR_CANCEL:
    case OFPTYPE_FLOW_MONITOR_PAUSED:
    case OFPTYPE_FLOW_MONITOR_RESUMED:
    case OFPTYPE_GET_ASYNC_REQUEST:
    case OFPTYPE_GET_ASYNC_REPLY:
    case OFPTYPE_SET_ASYNC_CONFIG:
    case OFPTYPE_METER_MOD:
    case OFPTYPE_GROUP_REQUEST:
    case OFPTYPE_GROUP_REPLY:
    case OFPTYPE_GROUP_DESC_REQUEST:
    case OFPTYPE_GROUP_DESC_REPLY:
    case OFPTYPE_GROUP_FEATURES_REQUEST:
    case OFPTYPE_GROUP_FEATURES_REPLY:
    case OFPTYPE_METER_REQUEST:
    case OFPTYPE_METER_REPLY:
    case OFPTYPE_METER_CONFIG_REQUEST:
    case OFPTYPE_METER_CONFIG_REPLY:
    case OFPTYPE_METER_FEATURES_REQUEST:
    case OFPTYPE_METER_FEATURES_REPLY:
    case OFPTYPE_TABLE_FEATURES_REQUEST:
    case OFPTYPE_TABLE_FEATURES_REPLY:
    default:
        if (VLOG_IS_DBG_ENABLED()) {
            char *s = ofp_to_string(msg->data, msg->size, 2);
            VLOG_DBG_RL(&rl, "%016llx: OpenFlow packet ignored: %s",
                        sw->datapath_id, s);
            free(s);
        }
    }
}
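
/* Sends an OFPT_FEATURES_REQUEST to the switch, followed by an
 * OFPT_SET_CONFIG that asks it to send up to OFP_DEFAULT_MISS_SEND_LEN bytes
 * of each packet that misses the flow table to the controller. */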
static void
send_features_request(struct lswitch *sw)
{
    struct ofpbuf *b;
    struct ofp_switch_config *osc;
    int ofp_version = rconn_get_version(sw->rconn);

    ovs_assert(ofp_version > 0 && ofp_version < 0xff);

    /* Send OFPT_FEATURES_REQUEST. */
    b = ofpraw_alloc(OFPRAW_OFPT_FEATURES_REQUEST, ofp_version, 0);
    queue_tx(sw, b);

    /* Send OFPT_SET_CONFIG. */
    b = ofpraw_alloc(OFPRAW_OFPT_SET_CONFIG, ofp_version, sizeof *osc);
    osc = ofpbuf_put_zeros(b, sizeof *osc);
    osc->miss_send_len = htons(OFP_DEFAULT_MISS_SEND_LEN);
    queue_tx(sw, b);
}

static void
queue_tx(struct lswitch *sw, struct ofpbuf *b)
{
    int retval = rconn_send_with_limit(sw->rconn, b, sw->queued, 10);
    if (retval && retval != ENOTCONN) {
        if (retval == EAGAIN) {
            VLOG_INFO_RL(&rl, "%016llx: %s: tx queue overflow",
                         sw->datapath_id, rconn_get_name(sw->rconn));
        } else {
            VLOG_WARN_RL(&rl, "%016llx: %s: send: %s",
                         sw->datapath_id, rconn_get_name(sw->rconn),
                         strerror(retval));
        }
    }
}
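
/* Handles a features reply from the switch: records the datapath ID and
 * resolves each configured queue's port name to its OpenFlow port number.
 * Returns 0 if successful, otherwise an OpenFlow error code. */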
static enum ofperr
process_switch_features(struct lswitch *sw, struct ofp_header *oh)
{
    struct ofputil_switch_features features;
    struct ofputil_phy_port port;
    enum ofperr error;
    struct ofpbuf b;

    error = ofputil_decode_switch_features(oh, &features, &b);
    if (error) {
        VLOG_ERR("received invalid switch feature reply (%s)",
                 ofperr_to_string(error));
        return error;
    }

    sw->datapath_id = features.datapath_id;

    while (!ofputil_pull_phy_port(oh->version, &b, &port)) {
        struct lswitch_port *lp = shash_find_data(&sw->queue_names, port.name);
        if (lp && hmap_node_is_null(&lp->hmap_node)) {
            lp->port_no = port.port_no;
            hmap_insert(&sw->queue_numbers, &lp->hmap_node,
                        hash_int(lp->port_no, 0));
        }
    }

    return 0;
}
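
/* Learns the source MAC of 'flow' and chooses the port to which the packet
 * should be sent: the learned port for the destination MAC, OFPP_FLOOD if the
 * destination is unknown (or when acting as a hub), OFPP_NORMAL when the
 * switch's own L2 processing is requested, or OFPP_NONE to drop the packet. */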
static uint16_t
lswitch_choose_destination(struct lswitch *sw, const struct flow *flow)
{
    uint16_t out_port;

    /* Learn the source MAC. */
    if (mac_learning_may_learn(sw->ml, flow->dl_src, 0)) {
        struct mac_entry *mac = mac_learning_insert(sw->ml, flow->dl_src, 0);
        if (mac_entry_is_new(mac) || mac->port.i != flow->in_port) {
            VLOG_DBG_RL(&rl, "%016llx: learned that "ETH_ADDR_FMT" is on "
                        "port %"PRIu16, sw->datapath_id,
                        ETH_ADDR_ARGS(flow->dl_src), flow->in_port);

            mac->port.i = flow->in_port;
            mac_learning_changed(sw->ml, mac);
        }
    }

    /* Drop frames for reserved multicast addresses. */
    if (eth_addr_is_reserved(flow->dl_dst)) {
        return OFPP_NONE;
    }

    out_port = OFPP_FLOOD;
    if (sw->ml) {
        struct mac_entry *mac;

        mac = mac_learning_lookup(sw->ml, flow->dl_dst, 0, NULL);
        if (mac) {
            out_port = mac->port.i;
            if (out_port == flow->in_port) {
                /* Don't send a packet back out its input port. */
                return OFPP_NONE;
            }
        }
    }

    /* Check if we need to use "NORMAL" action. */
    if (sw->action_normal && out_port != OFPP_FLOOD) {
        return OFPP_NORMAL;
    }

    return out_port;
}
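
/* Returns the OpenFlow queue configured for 'in_port', or the switch's
 * default queue (which may be UINT32_MAX, meaning no queue). */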
static uint32_t
get_queue_id(const struct lswitch *sw, uint16_t in_port)
{
    const struct lswitch_port *port;

    HMAP_FOR_EACH_WITH_HASH (port, hmap_node, hash_int(in_port, 0),
                             &sw->queue_numbers) {
        if (port->port_no == in_port) {
            return port->queue_id;
        }
    }

    return sw->default_queue;
}
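
/* Handles a packet-in message from the switch: decodes it, learns the source
 * MAC and picks an output port, then either installs a flow (plus a
 * packet-out if the packet was not buffered) or just sends a one-off
 * packet-out. */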
static void
process_packet_in(struct lswitch *sw, const struct ofp_header *oh)
{
    struct ofputil_packet_in pi;
    uint32_t queue_id;
    uint16_t out_port;

    uint64_t ofpacts_stub[64 / 8];
    struct ofpbuf ofpacts;

    struct ofputil_packet_out po;
    enum ofperr error;

    struct ofpbuf pkt;
    struct flow flow;

    error = ofputil_decode_packet_in(&pi, oh);
    if (error) {
        VLOG_WARN_RL(&rl, "failed to decode packet-in: %s",
                     ofperr_to_string(error));
        return;
    }

    /* Ignore packets sent via output to OFPP_CONTROLLER.  This library never
     * uses such an action.  You never know what experiments might be going on,
     * though, and it seems best not to interfere with them. */
    if (pi.reason != OFPR_NO_MATCH) {
        return;
    }

    /* Extract flow data from 'pi' into 'flow'. */
    ofpbuf_use_const(&pkt, pi.packet, pi.packet_len);
    flow_extract(&pkt, 0, 0, NULL, pi.fmd.in_port, &flow);
    flow.tunnel.tun_id = pi.fmd.tun_id;

    /* Choose output port. */
    out_port = lswitch_choose_destination(sw, &flow);

    /* Make actions. */
    queue_id = get_queue_id(sw, pi.fmd.in_port);
    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    if (out_port == OFPP_NONE) {
        /* No actions. */
    } else if (queue_id == UINT32_MAX || out_port >= OFPP_MAX) {
        ofpact_put_OUTPUT(&ofpacts)->port = out_port;
    } else {
        struct ofpact_enqueue *enqueue = ofpact_put_ENQUEUE(&ofpacts);
        enqueue->port = out_port;
        enqueue->queue = queue_id;
    }
    ofpact_pad(&ofpacts);

    /* Prepare packet_out in case we need one. */
    po.buffer_id = pi.buffer_id;
    if (po.buffer_id == UINT32_MAX) {
        po.packet = pkt.data;
        po.packet_len = pkt.size;
    } else {
        po.packet = NULL;
        po.packet_len = 0;
    }
    po.in_port = pi.fmd.in_port;
    po.ofpacts = ofpacts.data;
    po.ofpacts_len = ofpacts.size;

    /* Send the packet, and possibly the whole flow, to the output port. */
    if (sw->max_idle >= 0 && (!sw->ml || out_port != OFPP_FLOOD)) {
        struct ofputil_flow_mod fm;
        struct ofpbuf *buffer;

        /* The output port is known, or we always flood everything, so add a
         * new flow. */
        memset(&fm, 0, sizeof fm);
        match_init(&fm.match, &flow, &sw->wc);
        ofputil_normalize_match_quiet(&fm.match);
        fm.priority = 0;
        fm.table_id = 0xff;
        fm.command = OFPFC_ADD;
        fm.idle_timeout = sw->max_idle;
        fm.buffer_id = pi.buffer_id;
        fm.out_port = OFPP_NONE;
        fm.ofpacts = ofpacts.data;
        fm.ofpacts_len = ofpacts.size;
        buffer = ofputil_encode_flow_mod(&fm, sw->protocol);

        queue_tx(sw, buffer);

        /* If the switch didn't buffer the packet, we need to send a copy. */
        if (pi.buffer_id == UINT32_MAX && out_port != OFPP_NONE) {
            queue_tx(sw, ofputil_encode_packet_out(&po, sw->protocol));
        }
    } else {
        /* We don't know that MAC, or we don't set up flows.  Send along the
         * packet without setting up a flow. */
        if (pi.buffer_id != UINT32_MAX || out_port != OFPP_NONE) {
            queue_tx(sw, ofputil_encode_packet_out(&po, sw->protocol));
        }
    }
}
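
/* Replies to the echo request 'rq' with an echo reply on 'sw''s connection. */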
static void
process_echo_request(struct lswitch *sw, const struct ofp_header *rq)
{
    queue_tx(sw, make_echo_reply(rq));
}