X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=ofproto%2Fconnmgr.c;h=887080acbbad9fc592af5c228d96c86f276116af;hb=f0fd1a1772665ea57662281d9cccadb0f0146196;hp=a76dc8e8084b790874e38dba40780e0b8bba9a77;hpb=3112cfa4084eb5d4bacbced1295c0e14c5f27743;p=sliver-openvswitch.git diff --git a/ofproto/connmgr.c b/ofproto/connmgr.c index a76dc8e80..887080acb 100644 --- a/ofproto/connmgr.c +++ b/ofproto/connmgr.c @@ -22,17 +22,18 @@ #include #include "coverage.h" -#include "dpif.h" #include "fail-open.h" #include "in-band.h" #include "odp-util.h" #include "ofp-util.h" #include "ofpbuf.h" +#include "ofproto-provider.h" #include "pinsched.h" #include "poll-loop.h" #include "pktbuf.h" #include "rconn.h" #include "shash.h" +#include "stream.h" #include "timeval.h" #include "vconn.h" #include "vlog.h" @@ -40,8 +41,6 @@ VLOG_DEFINE_THIS_MODULE(connmgr); static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); -COVERAGE_DEFINE(ofconn_stuck); - /* An OpenFlow connection. */ struct ofconn { struct connmgr *connmgr; /* Connection's manager. */ @@ -49,6 +48,13 @@ struct ofconn { struct rconn *rconn; /* OpenFlow connection. */ enum ofconn_type type; /* Type. */ enum nx_flow_format flow_format; /* Currently selected flow format. */ + enum nx_packet_in_format packet_in_format; /* OFPT_PACKET_IN format. */ + bool flow_mod_table_id; /* NXT_FLOW_MOD_TABLE_ID enabled? */ + + /* Asynchronous flow table operation support. */ + struct list opgroups; /* Contains pending "ofopgroups", if any. */ + struct ofpbuf *blocked; /* Postponed OpenFlow message, if any. */ + bool retry; /* True if 'blocked' is ready to try again. */ /* OFPT_PACKET_IN related data. */ struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */ @@ -65,6 +71,8 @@ struct ofconn { /* type == OFCONN_PRIMARY only. */ enum nx_role role; /* Role. */ + bool invalid_ttl_to_controller; /* Send packets with invalid TTL + to the controller. */ struct hmap_node hmap_node; /* In struct connmgr's "controllers" map. */ enum ofproto_band band; /* In-band or out-of-band? */ }; @@ -77,9 +85,9 @@ static void ofconn_reconfigure(struct ofconn *, const struct ofproto_controller *); static void ofconn_run(struct ofconn *, - void (*handle_openflow)(struct ofconn *, + bool (*handle_openflow)(struct ofconn *, struct ofpbuf *ofp_msg)); -static void ofconn_wait(struct ofconn *); +static void ofconn_wait(struct ofconn *, bool handling_openflow); static const char *ofconn_get_target(const struct ofconn *); static char *ofconn_make_name(const struct connmgr *, const char *target); @@ -133,7 +141,6 @@ struct connmgr { /* In-band control. */ struct in_band *in_band; - long long int next_in_band_update; struct sockaddr_in *extra_in_band_remotes; size_t n_extra_remotes; int in_band_queue; @@ -168,7 +175,6 @@ connmgr_create(struct ofproto *ofproto, mgr->fail_mode = OFPROTO_FAIL_SECURE; mgr->in_band = NULL; - mgr->next_in_band_update = LLONG_MAX; mgr->extra_in_band_remotes = NULL; mgr->n_extra_remotes = 0; mgr->in_band_queue = -1; @@ -215,23 +221,33 @@ connmgr_destroy(struct connmgr *mgr) free(mgr); } -/* Does all of the periodic maintenance required by 'mgr'. Calls - * 'handle_openflow' for each message received on an OpenFlow connection, - * passing along the OpenFlow connection itself and the message that was sent. - * The 'handle_openflow' callback must not free the message. */ +/* Does all of the periodic maintenance required by 'mgr'. 
+ * + * If 'handle_openflow' is nonnull, calls 'handle_openflow' for each message + * received on an OpenFlow connection, passing along the OpenFlow connection + * itself and the message that was sent. If 'handle_openflow' returns true, + * the message is considered to be fully processed. If 'handle_openflow' + * returns false, the message is considered not to have been processed at all; + * it will be stored and re-presented to 'handle_openflow' following the next + * call to connmgr_retry(). 'handle_openflow' must not modify or free the + * message. + * + * If 'handle_openflow' is NULL, no OpenFlow messages will be processed and + * other activities that could affect the flow table (in-band processing, + * fail-open processing) are suppressed too. */ void connmgr_run(struct connmgr *mgr, - void (*handle_openflow)(struct ofconn *, struct ofpbuf *ofp_msg)) + bool (*handle_openflow)(struct ofconn *, struct ofpbuf *ofp_msg)) { struct ofconn *ofconn, *next_ofconn; struct ofservice *ofservice; size_t i; - if (mgr->in_band) { - if (time_msec() >= mgr->next_in_band_update) { - update_in_band_remotes(mgr); + if (handle_openflow && mgr->in_band) { + if (!in_band_run(mgr->in_band)) { + in_band_destroy(mgr->in_band); + mgr->in_band = NULL; } - in_band_run(mgr->in_band); } LIST_FOR_EACH_SAFE (ofconn, next_ofconn, node, &mgr->all_conns) { @@ -240,7 +256,7 @@ connmgr_run(struct connmgr *mgr, /* Fail-open maintenance. Do this after processing the ofconns since * fail-open checks the status of the controller rconn. */ - if (mgr->fail_open) { + if (handle_openflow && mgr->fail_open) { fail_open_run(mgr->fail_open); } @@ -279,22 +295,25 @@ connmgr_run(struct connmgr *mgr, } } -/* Causes the poll loop to wake up when connmgr_run() needs to run. */ +/* Causes the poll loop to wake up when connmgr_run() needs to run. + * + * If 'handling_openflow' is true, arriving OpenFlow messages and other + * activities that affect the flow table will wake up the poll loop. If + * 'handling_openflow' is false, they will not. */ void -connmgr_wait(struct connmgr *mgr) +connmgr_wait(struct connmgr *mgr, bool handling_openflow) { struct ofservice *ofservice; struct ofconn *ofconn; size_t i; LIST_FOR_EACH (ofconn, node, &mgr->all_conns) { - ofconn_wait(ofconn); + ofconn_wait(ofconn, handling_openflow); } - if (mgr->in_band) { - poll_timer_wait_until(mgr->next_in_band_update); + if (handling_openflow && mgr->in_band) { in_band_wait(mgr->in_band); } - if (mgr->fail_open) { + if (handling_openflow && mgr->fail_open) { fail_open_wait(mgr->fail_open); } HMAP_FOR_EACH (ofservice, node, &mgr->services) { @@ -311,6 +330,19 @@ ofconn_get_ofproto(const struct ofconn *ofconn) { return ofconn->connmgr->ofproto; } + +/* If processing of OpenFlow messages was blocked on any 'mgr' ofconns by + * returning false to the 'handle_openflow' callback to connmgr_run(), this + * re-enables them. */ +void +connmgr_retry(struct connmgr *mgr) +{ + struct ofconn *ofconn; + + LIST_FOR_EACH (ofconn, node, &mgr->all_conns) { + ofconn->retry = true; + } +} /* OpenFlow configuration. 
*/ @@ -342,45 +374,62 @@ connmgr_get_controller_info(struct connmgr *mgr, struct shash *info) { const struct ofconn *ofconn; - shash_init(info); - HMAP_FOR_EACH (ofconn, hmap_node, &mgr->controllers) { const struct rconn *rconn = ofconn->rconn; - time_t now = time_now(); - time_t last_connection = rconn_get_last_connection(rconn); - time_t last_disconnect = rconn_get_last_disconnect(rconn); - int last_error = rconn_get_last_error(rconn); - struct ofproto_controller_info *cinfo = xmalloc(sizeof *cinfo); + const char *target = rconn_get_target(rconn); - shash_add(info, rconn_get_target(rconn), cinfo); + if (!shash_find(info, target)) { + struct ofproto_controller_info *cinfo = xmalloc(sizeof *cinfo); + time_t now = time_now(); + time_t last_connection = rconn_get_last_connection(rconn); + time_t last_disconnect = rconn_get_last_disconnect(rconn); + int last_error = rconn_get_last_error(rconn); - cinfo->is_connected = rconn_is_connected(rconn); - cinfo->role = ofconn->role; + shash_add(info, target, cinfo); - cinfo->pairs.n = 0; + cinfo->is_connected = rconn_is_connected(rconn); + cinfo->role = ofconn->role; - if (last_error) { - cinfo->pairs.keys[cinfo->pairs.n] = "last_error"; - cinfo->pairs.values[cinfo->pairs.n++] = - xstrdup(ovs_retval_to_string(last_error)); - } + cinfo->pairs.n = 0; - cinfo->pairs.keys[cinfo->pairs.n] = "state"; - cinfo->pairs.values[cinfo->pairs.n++] = - xstrdup(rconn_get_state(rconn)); + if (last_error) { + cinfo->pairs.keys[cinfo->pairs.n] = "last_error"; + cinfo->pairs.values[cinfo->pairs.n++] + = xstrdup(ovs_retval_to_string(last_error)); + } - if (last_connection != TIME_MIN) { - cinfo->pairs.keys[cinfo->pairs.n] = "sec_since_connect"; + cinfo->pairs.keys[cinfo->pairs.n] = "state"; cinfo->pairs.values[cinfo->pairs.n++] - = xasprintf("%ld", (long int) (now - last_connection)); + = xstrdup(rconn_get_state(rconn)); + + if (last_connection != TIME_MIN) { + cinfo->pairs.keys[cinfo->pairs.n] = "sec_since_connect"; + cinfo->pairs.values[cinfo->pairs.n++] + = xasprintf("%ld", (long int) (now - last_connection)); + } + + if (last_disconnect != TIME_MIN) { + cinfo->pairs.keys[cinfo->pairs.n] = "sec_since_disconnect"; + cinfo->pairs.values[cinfo->pairs.n++] + = xasprintf("%ld", (long int) (now - last_disconnect)); + } } + } +} - if (last_disconnect != TIME_MIN) { - cinfo->pairs.keys[cinfo->pairs.n] = "sec_since_disconnect"; - cinfo->pairs.values[cinfo->pairs.n++] - = xasprintf("%ld", (long int) (now - last_disconnect)); +void +connmgr_free_controller_info(struct shash *info) +{ + struct shash_node *node; + + SHASH_FOR_EACH (node, info) { + struct ofproto_controller_info *cinfo = node->data; + while (cinfo->pairs.n) { + free((char *) cinfo->pairs.values[--cinfo->pairs.n]); } + free(cinfo); } + shash_destroy(info); } /* Changes 'mgr''s set of controllers to the 'n_controllers' controllers in @@ -390,6 +439,7 @@ connmgr_set_controllers(struct connmgr *mgr, const struct ofproto_controller *controllers, size_t n_controllers) { + bool had_controllers = connmgr_has_controllers(mgr); struct shash new_controllers; struct ofconn *ofconn, *next_ofconn; struct ofservice *ofservice, *next_ofservice; @@ -449,6 +499,9 @@ connmgr_set_controllers(struct connmgr *mgr, update_in_band_remotes(mgr); update_fail_open(mgr); + if (had_controllers != connmgr_has_controllers(mgr)) { + ofproto_flush_flows(mgr->ofproto); + } } /* Drops the connections between 'mgr' and all of its primary and secondary @@ -538,14 +591,16 @@ update_in_band_remotes(struct connmgr *mgr) /* Add all the remotes. 
*/ HMAP_FOR_EACH (ofconn, hmap_node, &mgr->controllers) { struct sockaddr_in *sin = &addrs[n_addrs]; + const char *target = rconn_get_target(ofconn->rconn); if (ofconn->band == OFPROTO_OUT_OF_BAND) { continue; } - sin->sin_addr.s_addr = rconn_get_remote_ip(ofconn->rconn); - if (sin->sin_addr.s_addr) { - sin->sin_port = rconn_get_remote_port(ofconn->rconn); + if (stream_parse_target_with_default_ports(target, + OFP_TCP_PORT, + OFP_SSL_PORT, + sin)) { n_addrs++; } } @@ -558,14 +613,13 @@ update_in_band_remotes(struct connmgr *mgr) if (!mgr->in_band) { in_band_create(mgr->ofproto, mgr->local_port_name, &mgr->in_band); } - if (mgr->in_band) { - in_band_set_remotes(mgr->in_band, addrs, n_addrs); - } in_band_set_queue(mgr->in_band, mgr->in_band_queue); - mgr->next_in_band_update = time_msec() + 1000; } else { - in_band_destroy(mgr->in_band); - mgr->in_band = NULL; + /* in_band_run() needs a chance to delete any existing in-band flows. + * We will destroy mgr->in_band after it's done with that. */ + } + if (mgr->in_band) { + in_band_set_remotes(mgr->in_band, addrs, n_addrs); } /* Clean up. */ @@ -702,6 +756,18 @@ ofconn_set_role(struct ofconn *ofconn, enum nx_role role) ofconn->role = role; } +void +ofconn_set_invalid_ttl_to_controller(struct ofconn *ofconn, bool val) +{ + ofconn->invalid_ttl_to_controller = val; +} + +bool +ofconn_get_invalid_ttl_to_controller(struct ofconn *ofconn) +{ + return ofconn->invalid_ttl_to_controller; +} + /* Returns the currently configured flow format for 'ofconn', one of NXFF_*. * * The default, if no other format has been set, is NXFF_OPENFLOW10. */ @@ -718,6 +784,43 @@ ofconn_set_flow_format(struct ofconn *ofconn, enum nx_flow_format flow_format) ofconn->flow_format = flow_format; } +/* Returns the currently configured packet in format for 'ofconn', one of + * NXPIF_*. + * + * The default, if no other format has been set, is NXPIF_OPENFLOW10. */ +enum nx_packet_in_format +ofconn_get_packet_in_format(struct ofconn *ofconn) +{ + return ofconn->packet_in_format; +} + +/* Sets the packet in format for 'ofconn' to 'packet_in_format' (one of + * NXPIF_*). */ +void +ofconn_set_packet_in_format(struct ofconn *ofconn, + enum nx_packet_in_format packet_in_format) +{ + ofconn->packet_in_format = packet_in_format; +} + +/* Returns true if the NXT_FLOW_MOD_TABLE_ID extension is enabled, false + * otherwise. + * + * By default the extension is not enabled. */ +bool +ofconn_get_flow_mod_table_id(const struct ofconn *ofconn) +{ + return ofconn->flow_mod_table_id; +} + +/* Enables or disables (according to 'enable') the NXT_FLOW_MOD_TABLE_ID + * extension on 'ofconn'. */ +void +ofconn_set_flow_mod_table_id(struct ofconn *ofconn, bool enable) +{ + ofconn->flow_mod_table_id = enable; +} + /* Returns the default miss send length for 'ofconn'. */ int ofconn_get_miss_send_len(const struct ofconn *ofconn) @@ -742,13 +845,80 @@ ofconn_send_reply(const struct ofconn *ofconn, struct ofpbuf *msg) ofconn_send(ofconn, msg, ofconn->reply_counter); } +/* Sends each of the messages in list 'replies' on 'ofconn' in order, + * accounting them as replies. */ +void +ofconn_send_replies(const struct ofconn *ofconn, struct list *replies) +{ + struct ofpbuf *reply, *next; + + LIST_FOR_EACH_SAFE (reply, next, list_node, replies) { + list_remove(&reply->list_node); + ofconn_send_reply(ofconn, reply); + } +} + +/* Sends 'error' on 'ofconn', as a reply to 'request'. Only at most the + * first 64 bytes of 'request' are used. 
*/ +void +ofconn_send_error(const struct ofconn *ofconn, + const struct ofp_header *request, enum ofperr error) +{ + struct ofpbuf *reply; + + reply = ofperr_encode_reply(error, request); + if (reply) { + static struct vlog_rate_limit err_rl = VLOG_RATE_LIMIT_INIT(10, 10); + + if (!VLOG_DROP_INFO(&err_rl)) { + const struct ofputil_msg_type *type; + const char *type_name; + size_t request_len; + + request_len = ntohs(request->length); + type_name = (!ofputil_decode_msg_type_partial(request, + MIN(64, request_len), + &type) + ? ofputil_msg_type_name(type) + : "invalid"); + + VLOG_INFO("%s: sending %s error reply to %s message", + rconn_get_name(ofconn->rconn), ofperr_to_string(error), + type_name); + } + ofconn_send_reply(ofconn, reply); + } +} + /* Same as pktbuf_retrieve(), using the pktbuf owned by 'ofconn'. */ -int +enum ofperr ofconn_pktbuf_retrieve(struct ofconn *ofconn, uint32_t id, struct ofpbuf **bufferp, uint16_t *in_port) { return pktbuf_retrieve(ofconn->pktbuf, id, bufferp, in_port); } + +/* Returns true if 'ofconn' has any pending opgroups. */ +bool +ofconn_has_pending_opgroups(const struct ofconn *ofconn) +{ + return !list_is_empty(&ofconn->opgroups); +} + +/* Adds 'ofconn_node' to 'ofconn''s list of pending opgroups. + * + * If 'ofconn' is destroyed or its connection drops, then 'ofconn' will remove + * 'ofconn_node' from the list and re-initialize it with list_init(). The + * client may, therefore, use list_is_empty(ofconn_node) to determine whether + * 'ofconn_node' is still associated with an active ofconn. + * + * The client may also remove ofconn_node from the list itself, with + * list_remove(). */ +void +ofconn_add_opgroup(struct ofconn *ofconn, struct list *ofconn_node) +{ + list_push_back(&ofconn->opgroups, ofconn_node); +} /* Private ofconn functions. */ @@ -767,17 +937,39 @@ ofconn_create(struct connmgr *mgr, struct rconn *rconn, enum ofconn_type type) ofconn->rconn = rconn; ofconn->type = type; ofconn->flow_format = NXFF_OPENFLOW10; + ofconn->packet_in_format = NXPIF_OPENFLOW10; + ofconn->flow_mod_table_id = false; + list_init(&ofconn->opgroups); ofconn->role = NX_ROLE_OTHER; ofconn->packet_in_counter = rconn_packet_counter_create (); ofconn->pktbuf = NULL; ofconn->miss_send_len = 0; ofconn->reply_counter = rconn_packet_counter_create (); + ofconn->invalid_ttl_to_controller = false; return ofconn; } +/* Disassociates 'ofconn' from all of the ofopgroups that it initiated that + * have not yet completed. (Those ofopgroups will still run to completion in + * the usual way, but any errors that they run into will not be reported on any + * OpenFlow channel.) + * + * Also discards any blocked operation on 'ofconn'. */ +static void +ofconn_flush(struct ofconn *ofconn) +{ + while (!list_is_empty(&ofconn->opgroups)) { + list_init(list_pop_front(&ofconn->opgroups)); + } + ofpbuf_delete(ofconn->blocked); + ofconn->blocked = NULL; +} + static void ofconn_destroy(struct ofconn *ofconn) { + ofconn_flush(ofconn); + if (ofconn->type == OFCONN_PRIMARY) { hmap_remove(&ofconn->connmgr->controllers, &ofconn->hmap_node); } @@ -807,12 +999,20 @@ ofconn_reconfigure(struct ofconn *ofconn, const struct ofproto_controller *c) ofconn_set_rate_limit(ofconn, c->rate_limit, c->burst_limit); } +/* Returns true if it makes sense for 'ofconn' to receive and process OpenFlow + * messages. 
*/ +static bool +ofconn_may_recv(const struct ofconn *ofconn) +{ + int count = rconn_packet_counter_read (ofconn->reply_counter); + return (!ofconn->blocked || ofconn->retry) && count < OFCONN_REPLY_MAX; +} + static void ofconn_run(struct ofconn *ofconn, - void (*handle_openflow)(struct ofconn *, struct ofpbuf *ofp_msg)) + bool (*handle_openflow)(struct ofconn *, struct ofpbuf *ofp_msg)) { struct connmgr *mgr = ofconn->connmgr; - int iteration; size_t i; for (i = 0; i < N_SCHEDULERS; i++) { @@ -821,29 +1021,40 @@ ofconn_run(struct ofconn *ofconn, rconn_run(ofconn->rconn); - if (rconn_packet_counter_read (ofconn->reply_counter) < OFCONN_REPLY_MAX) { - /* Limit the number of iterations to prevent other tasks from - * starving. */ - for (iteration = 0; iteration < 50; iteration++) { - struct ofpbuf *of_msg = rconn_recv(ofconn->rconn); + if (handle_openflow) { + /* Limit the number of iterations to avoid starving other tasks. */ + for (i = 0; i < 50 && ofconn_may_recv(ofconn); i++) { + struct ofpbuf *of_msg; + + of_msg = (ofconn->blocked + ? ofconn->blocked + : rconn_recv(ofconn->rconn)); if (!of_msg) { break; } if (mgr->fail_open) { fail_open_maybe_recover(mgr->fail_open); } - handle_openflow(ofconn, of_msg); - ofpbuf_delete(of_msg); + + if (handle_openflow(ofconn, of_msg)) { + ofpbuf_delete(of_msg); + ofconn->blocked = NULL; + } else { + ofconn->blocked = of_msg; + ofconn->retry = false; + } } } if (!rconn_is_alive(ofconn->rconn)) { ofconn_destroy(ofconn); + } else if (!rconn_is_connected(ofconn->rconn)) { + ofconn_flush(ofconn); } } static void -ofconn_wait(struct ofconn *ofconn) +ofconn_wait(struct ofconn *ofconn, bool handling_openflow) { int i; @@ -851,20 +1062,16 @@ ofconn_wait(struct ofconn *ofconn) pinsched_wait(ofconn->schedulers[i]); } rconn_run_wait(ofconn->rconn); - if (rconn_packet_counter_read (ofconn->reply_counter) < OFCONN_REPLY_MAX) { + if (handling_openflow && ofconn_may_recv(ofconn)) { rconn_recv_wait(ofconn->rconn); - } else { - COVERAGE_INC(ofconn_stuck); } } /* Returns true if 'ofconn' should receive asynchronous messages. */ static bool -ofconn_receives_async_msgs(const struct ofconn *ofconn) +ofconn_receives_async_msgs__(const struct ofconn *ofconn) { - if (!rconn_is_connected(ofconn->rconn)) { - return false; - } else if (ofconn->type == OFCONN_PRIMARY) { + if (ofconn->type == OFCONN_PRIMARY) { /* Primary controllers always get asynchronous messages unless they * have configured themselves as "slaves". */ return ofconn->role != NX_ROLE_SLAVE; @@ -875,6 +1082,29 @@ ofconn_receives_async_msgs(const struct ofconn *ofconn) } } +static bool +ofconn_receives_async_msgs(const struct ofconn *ofconn) +{ + if (!rconn_is_connected(ofconn->rconn)) { + return false; + } else { + return ofconn_receives_async_msgs__(ofconn); + } +} + +static bool +ofconn_interested_in_packet(const struct ofconn *ofconn, + const struct ofputil_packet_in *pin) +{ + if (!rconn_is_connected(ofconn->rconn)) { + return false; + } else if (pin->reason == OFPR_INVALID_TTL) { + return ofconn->invalid_ttl_to_controller; + } else { + return ofconn_receives_async_msgs__(ofconn); + } +} + /* Returns a human-readable name for an OpenFlow connection between 'mgr' and * 'target', suitable for use in log messages for identifying the connection. * @@ -919,13 +1149,11 @@ ofconn_send(const struct ofconn *ofconn, struct ofpbuf *msg, /* Sending asynchronous messages. 
*/ -static void schedule_packet_in(struct ofconn *, const struct dpif_upcall *, - const struct flow *, struct ofpbuf *rw_packet); +static void schedule_packet_in(struct ofconn *, struct ofputil_packet_in, + const struct flow *); /* Sends an OFPT_PORT_STATUS message with 'opp' and 'reason' to appropriate - * controllers managed by 'mgr'. - * - * 'opp' is in *HOST* byte order. */ + * controllers managed by 'mgr'. */ void connmgr_send_port_status(struct connmgr *mgr, const struct ofp_phy_port *opp, uint8_t reason) @@ -947,7 +1175,6 @@ connmgr_send_port_status(struct connmgr *mgr, const struct ofp_phy_port *opp, ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &b); ops->reason = reason; ops->desc = *opp; - hton_ofp_phy_port(&ops->desc); ofconn_send(ofconn, b, NULL); } } @@ -977,33 +1204,20 @@ connmgr_send_flow_removed(struct connmgr *mgr, } } -/* Given 'upcall', of type DPIF_UC_ACTION or DPIF_UC_MISS, sends an - * OFPT_PACKET_IN message to each OpenFlow controller as necessary according to - * their individual configurations. - * - * 'rw_packet' may be NULL. Otherwise, 'rw_packet' must contain the same data - * as upcall->packet. (rw_packet == upcall->packet is also valid.) Ownership - * of 'rw_packet' is transferred to this function. */ +/* Given 'pin', sends an OFPT_PACKET_IN message to each OpenFlow controller as + * necessary according to their individual configurations. */ void -connmgr_send_packet_in(struct connmgr *mgr, const struct dpif_upcall *upcall, - const struct flow *flow, struct ofpbuf *rw_packet) +connmgr_send_packet_in(struct connmgr *mgr, + const struct ofputil_packet_in *pin, + const struct flow *flow) { - struct ofconn *ofconn, *prev; + struct ofconn *ofconn; - prev = NULL; LIST_FOR_EACH (ofconn, node, &mgr->all_conns) { - if (ofconn_receives_async_msgs(ofconn)) { - if (prev) { - schedule_packet_in(prev, upcall, flow, NULL); - } - prev = ofconn; + if (ofconn_interested_in_packet(ofconn, pin)) { + schedule_packet_in(ofconn, *pin, flow); } } - if (prev) { - schedule_packet_in(prev, upcall, flow, rw_packet); - } else { - ofpbuf_delete(rw_packet); - } } /* pinsched callback for sending 'ofp_packet_in' on 'ofconn'. */ @@ -1016,51 +1230,44 @@ do_send_packet_in(struct ofpbuf *ofp_packet_in, void *ofconn_) ofconn->packet_in_counter, 100); } -/* Takes 'upcall', whose packet has the flow specified by 'flow', composes an +/* Takes 'pin', whose packet has the flow specified by 'flow', composes an * OpenFlow packet-in message from it, and passes it to 'ofconn''s packet - * scheduler for sending. - * - * 'rw_packet' may be NULL. Otherwise, 'rw_packet' must contain the same data - * as upcall->packet. (rw_packet == upcall->packet is also valid.) Ownership - * of 'rw_packet' is transferred to this function. */ + * scheduler for sending. */ static void -schedule_packet_in(struct ofconn *ofconn, const struct dpif_upcall *upcall, - const struct flow *flow, struct ofpbuf *rw_packet) +schedule_packet_in(struct ofconn *ofconn, struct ofputil_packet_in pin, + const struct flow *flow) { struct connmgr *mgr = ofconn->connmgr; - struct ofputil_packet_in pin; - - /* Figure out the easy parts. */ - pin.packet = upcall->packet; - pin.in_port = odp_port_to_ofp_port(flow->in_port); - pin.reason = upcall->type == DPIF_UC_MISS ? OFPR_NO_MATCH : OFPR_ACTION; /* Get OpenFlow buffer_id. 
*/ - if (upcall->type == DPIF_UC_ACTION) { + if (pin.reason == OFPR_ACTION) { pin.buffer_id = UINT32_MAX; } else if (mgr->fail_open && fail_open_is_active(mgr->fail_open)) { pin.buffer_id = pktbuf_get_null(); } else if (!ofconn->pktbuf) { pin.buffer_id = UINT32_MAX; } else { - pin.buffer_id = pktbuf_save(ofconn->pktbuf, upcall->packet, + pin.buffer_id = pktbuf_save(ofconn->pktbuf, pin.packet, pin.packet_len, flow->in_port); } /* Figure out how much of the packet to send. */ - pin.send_len = upcall->packet->size; + if (pin.reason == OFPR_NO_MATCH) { + pin.send_len = pin.packet_len; + } else { + /* Caller should have initialized 'send_len' to 'max_len' specified in + * struct ofp_action_output. */ + } if (pin.buffer_id != UINT32_MAX) { pin.send_len = MIN(pin.send_len, ofconn->miss_send_len); } - if (upcall->type == DPIF_UC_ACTION) { - pin.send_len = MIN(pin.send_len, upcall->userdata); - } /* Make OFPT_PACKET_IN and hand over to packet scheduler. It might * immediately call into do_send_packet_in() or it might buffer it for a * while (until a later call to pinsched_run()). */ - pinsched_send(ofconn->schedulers[upcall->type == DPIF_UC_MISS ? 0 : 1], - flow->in_port, ofputil_encode_packet_in(&pin, rw_packet), + pinsched_send(ofconn->schedulers[pin.reason == OFPR_NO_MATCH ? 0 : 1], + flow->in_port, + ofputil_encode_packet_in(&pin, ofconn->packet_in_format), do_send_packet_in, ofconn); } @@ -1079,8 +1286,13 @@ connmgr_get_fail_mode(const struct connmgr *mgr) void connmgr_set_fail_mode(struct connmgr *mgr, enum ofproto_fail_mode fail_mode) { - mgr->fail_mode = fail_mode; - update_fail_open(mgr); + if (mgr->fail_mode != fail_mode) { + mgr->fail_mode = fail_mode; + update_fail_open(mgr); + if (!connmgr_has_controllers(mgr)) { + ofproto_flush_flows(mgr->ofproto); + } + } } /* Fail-open implementation. */ @@ -1253,16 +1465,32 @@ connmgr_may_set_up_flow(struct connmgr *mgr, const struct flow *flow, /* Fail-open and in-band implementation. */ /* Called by 'ofproto' after all flows have been flushed, to allow fail-open - * and in-band control to re-create their flows. */ + * and standalone mode to re-create their flows. + * + * In-band control has more sophisticated code that manages flows itself. */ void connmgr_flushed(struct connmgr *mgr) { - if (mgr->in_band) { - in_band_flushed(mgr->in_band); - } if (mgr->fail_open) { fail_open_flushed(mgr->fail_open); } + + /* If there are no controllers and we're in standalone mode, set up a flow + * that matches every packet and directs them to OFPP_NORMAL (which goes to + * us). Otherwise, the switch is in secure mode and we won't pass any + * traffic until a controller has been defined and it tells us to do so. */ + if (!connmgr_has_controllers(mgr) + && mgr->fail_mode == OFPROTO_FAIL_STANDALONE) { + union ofp_action action; + struct cls_rule rule; + + memset(&action, 0, sizeof action); + action.type = htons(OFPAT_OUTPUT); + action.output.len = htons(sizeof action); + action.output.port = htons(OFPP_NORMAL); + cls_rule_init_catchall(&rule, 0); + ofproto_add_flow(mgr->ofproto, &rule, &action, 1); + } } /* Creates a new ofservice for 'target' in 'mgr'. Returns 0 if successful,
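
The central interface change in this patch is that connmgr_run()'s 'handle_openflow' callback now returns a bool: returning false leaves the message parked in ofconn->blocked until the client calls connmgr_retry(), and connmgr_wait() gains a 'handling_openflow' flag so a client that cannot currently accept flow-table changes does not wake up on pending OpenFlow input. A minimal usage sketch follows; it is not part of the patch, and the 'busy' flag and the example_run()/example_wait() wrappers are assumed names used only to illustrate the new contract:

static bool busy;   /* Assumed flag: true while a pending flow-table
                     * operation means new OpenFlow input must wait. */

static bool
handle_openflow(struct ofconn *ofconn, struct ofpbuf *msg)
{
    if (busy) {
        return false;   /* Not processed: connmgr keeps 'msg' and re-presents
                         * it after the next connmgr_retry(). */
    }
    /* ...decode and execute 'msg' without modifying or freeing it... */
    return true;        /* Fully processed: connmgr frees 'msg'. */
}

void
example_run(struct connmgr *mgr)
{
    connmgr_run(mgr, handle_openflow);
    if (!busy) {
        connmgr_retry(mgr);        /* Re-enable ofconns that blocked. */
    }
}

void
example_wait(struct connmgr *mgr)
{
    connmgr_wait(mgr, !busy);
}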