+put_features(struct ds *ds, const char *name, uint32_t features)
+{
+ if (features & (OFPPF_10MB_HD | OFPPF_10MB_FD
+ | OFPPF_100MB_HD | OFPPF_100MB_FD
+ | OFPPF_1GB_HD | OFPPF_1GB_FD | OFPPF_10GB_FD)) {
+ ds_put_cstr(ds, name);
+ put_duplexes(ds, "10M", features, OFPPF_10MB_HD, OFPPF_10MB_FD);
+ put_duplexes(ds, "100M", features,
+ OFPPF_100MB_HD, OFPPF_100MB_FD);
+ put_duplexes(ds, "1G", features, OFPPF_100MB_HD, OFPPF_100MB_FD);
+ if (features & OFPPF_10GB_FD) {
+ ds_put_cstr(ds, " 10G");
+ }
+ if (features & OFPPF_AUTONEG) {
+ ds_put_cstr(ds, " AUTO_NEG");
+ }
+ if (features & OFPPF_PAUSE) {
+ ds_put_cstr(ds, " PAUSE");
+ }
+ if (features & OFPPF_PAUSE_ASYM) {
+ ds_put_cstr(ds, " PAUSE_ASYM");
+ }
+ }
+}
+
static void
log_port_status(uint16_t port_no,
                const struct ofp_phy_port *old,
                const struct ofp_phy_port *new,
                void *aux)
{
    /* Port-change callback that logs port additions, deletions, and other
     * modifications at DBG level.  'old' and 'new' are the previous and
     * current descriptions of port 'port_no'; a description whose port_no is
     * OFPP_NONE marks a nonexistent port.  'aux' is unused. */
    if (VLOG_IS_DBG_ENABLED()) {
        bool was_enabled = old->port_no != htons(OFPP_NONE);
        bool now_enabled = new->port_no != htons(OFPP_NONE);
        uint32_t curr = ntohl(new->curr);
        uint32_t supported = ntohl(new->supported);
        struct ds ds;

        /* Suppress the message when the config/state flag words are the only
         * field that differs.  NOTE(review): this assumes opp_differs()
         * returns the number of differing fields -- confirm against its
         * definition. */
        if (((old->config != new->config) || (old->state != new->state))
            && opp_differs(old, new) == 1) {
            /* Don't care if only flags changed. */
            return;
        }

        ds_init(&ds);
        /* NOTE(review): ofp_phy_port.name is a fixed-size array; confirm it
         * is always NUL-terminated before formatting it with %s. */
        ds_put_format(&ds, "\"%s\", "ETH_ADDR_FMT, new->name,
                      ETH_ADDR_ARGS(new->hw_addr));
        if (curr) {
            put_features(&ds, ", current", curr);
        }
        if (supported) {
            put_features(&ds, ", supports", supported);
        }
        if (was_enabled != now_enabled) {
            if (now_enabled) {
                VLOG_DBG("Port %d added: %s", port_no, ds_cstr(&ds));
            } else {
                VLOG_DBG("Port %d deleted", port_no);
            }
        } else {
            VLOG_DBG("Port %d changed: %s", port_no, ds_cstr(&ds));
        }
        ds_destroy(&ds);
    }
}
+
+static void
+port_watcher_register_callback(struct port_watcher *pw,
+ port_changed_cb_func *port_changed,
+ void *aux)
+{
+ assert(pw->n_cbs < ARRAY_SIZE(pw->cbs));
+ pw->cbs[pw->n_cbs].port_changed = port_changed;
+ pw->cbs[pw->n_cbs].aux = aux;
+ pw->n_cbs++;
+}
+
+static void
+port_watcher_register_local_port_callback(struct port_watcher *pw,
+ local_port_changed_cb_func *cb,
+ void *aux)
+{
+ assert(pw->n_local_cbs < ARRAY_SIZE(pw->local_cbs));
+ pw->local_cbs[pw->n_local_cbs].local_port_changed = cb;
+ pw->local_cbs[pw->n_local_cbs].aux = aux;
+ pw->n_local_cbs++;
+}
+
+static uint32_t
+port_watcher_get_config(const struct port_watcher *pw, int port_no)
+{
+ int idx = port_no_to_pw_idx(port_no);
+ return idx >= 0 ? ntohl(pw->ports[idx].config) : 0;
+}
+
static void
port_watcher_set_flags(struct port_watcher *pw, int port_no,
                       uint32_t config, uint32_t c_mask,
                       uint32_t state, uint32_t s_mask)
{
    /* Sets the OFPPC_* config bits selected by 'c_mask' to 'config' and the
     * OFPPS_* state bits selected by 's_mask' to 'state' on port 'port_no'.
     * All flag arguments are in host byte order.
     *
     * On any actual change this updates the cached port description, runs
     * the registered port-change callbacks, sends an OFPT_PORT_MOD to the
     * switch, and sends an OFPT_PORT_STATUS notification to the
     * controller. */
    struct ofp_phy_port old;
    struct ofp_phy_port *p;
    struct ofp_port_mod *opm;
    struct ofp_port_status *ops;
    struct ofpbuf *b;
    int idx;

    idx = port_no_to_pw_idx(port_no);
    if (idx < 0) {
        return;
    }

    /* Nothing to do if none of the masked bits would actually change. */
    p = &pw->ports[idx];
    if (!((ntohl(p->state) ^ state) & s_mask)
        && (!((ntohl(p->config) ^ config) & c_mask))) {
        return;
    }
    old = *p;

    /* Update our idea of the flags. */
    p->config = htonl((ntohl(p->config) & ~c_mask) | (config & c_mask));
    p->state = htonl((ntohl(p->state) & ~s_mask) | (state & s_mask));
    call_port_changed_callbacks(pw, port_no, &old, p);

    /* Change the flags in the datapath. */
    opm = make_openflow(sizeof *opm, OFPT_PORT_MOD, &b);
    opm->port_no = p->port_no;
    memcpy(opm->hw_addr, p->hw_addr, OFP_ETH_ALEN);
    opm->config = p->config;
    opm->mask = htonl(c_mask);
    opm->advertise = htonl(0);       /* Do not change advertised features. */
    rconn_send(pw->local_rconn, b, NULL);

    /* Notify the controller that the flags changed. */
    ops = make_openflow(sizeof *ops, OFPT_PORT_STATUS, &b);
    ops->reason = OFPPR_MODIFY;
    ops->desc = *p;
    rconn_send(pw->remote_rconn, b, NULL);
}
+
static bool
port_watcher_is_ready(const struct port_watcher *pw)
{
    /* Returns true once the port watcher has seen a features reply from the
     * switch, i.e. once its port table can be trusted. */
    return pw->got_feature_reply;
}
+
static struct hook
port_watcher_create(struct rconn *local_rconn, struct rconn *remote_rconn,
                    struct port_watcher **pwp)
{
    /* Allocates and initializes a port watcher that tracks the switch's
     * physical ports via 'local_rconn' (switch side) and 'remote_rconn'
     * (controller side).  Stores the new watcher in '*pwp' and returns the
     * hook structure through which the relay drives it. */
    struct port_watcher *pw;
    int i;

    pw = *pwp = xcalloc(1, sizeof *pw);
    pw->local_rconn = local_rconn;
    pw->remote_rconn = remote_rconn;
    /* TIME_MIN presumably makes the first periodic pass issue a features
     * request immediately -- see the periodic callback (not shown). */
    pw->last_feature_request = TIME_MIN;
    /* Mark every slot empty: OFPP_NONE doubles as "no port here". */
    for (i = 0; i < OFPP_MAX; i++) {
        pw->ports[i].port_no = htons(OFPP_NONE);
    }
    pw->local_port_name[0] = '\0';
    port_watcher_register_callback(pw, log_port_status, NULL);
    return make_hook(port_watcher_local_packet_cb,
                     port_watcher_remote_packet_cb,
                     port_watcher_periodic_cb,
                     port_watcher_wait_cb, pw);
}
+\f
+#ifdef SUPPORT_SNAT
/* One SNAT configuration entry, kept in snat_data's port_list. */
struct snat_port_conf {
    struct list node;             /* Element in snat_data's port_list. */
    struct nx_snat_config config; /* The configuration as received. */
};

/* State for the source-NAT configuration hook. */
struct snat_data {
    struct port_watcher *pw;      /* For mapping port numbers to devices. */
    struct list port_list;        /* Contains "struct snat_port_conf"s. */
};
+
+
/* Source-NAT configuration monitor. */
#define SNAT_CMD_LEN 1024       /* Max length of a built iptables command. */

/* Commands to configure iptables.  There is no programmatic interface
 * to iptables from the kernel, so we're stuck making command-line calls
 * in user-space.  Each %s below is filled in with the (unquoted) network
 * device name, so these run through the shell via system(). */
#define SNAT_FLUSH_ALL_CMD "/sbin/iptables -t nat -F"
#define SNAT_FLUSH_CHAIN_CMD "/sbin/iptables -t nat -F of-snat-%s"

#define SNAT_ADD_CHAIN_CMD "/sbin/iptables -t nat -N of-snat-%s"
#define SNAT_CONF_CHAIN_CMD "/sbin/iptables -t nat -A POSTROUTING -o %s -j of-snat-%s"

#define SNAT_ADD_IP_CMD "/sbin/iptables -t nat -A of-snat-%s -j SNAT --to %s-%s"
#define SNAT_ADD_TCP_CMD "/sbin/iptables -t nat -A of-snat-%s -j SNAT -p TCP --to %s-%s:%d-%d"
#define SNAT_ADD_UDP_CMD "/sbin/iptables -t nat -A of-snat-%s -j SNAT -p UDP --to %s-%s:%d-%d"

#define SNAT_UNSET_CHAIN_CMD "/sbin/iptables -t nat -D POSTROUTING -o %s -j of-snat-%s"
#define SNAT_DEL_CHAIN_CMD "/sbin/iptables -t nat -X of-snat-%s"
+
static void
snat_add_rules(const struct nx_snat_config *sc, const uint8_t *dev_name)
{
    /* Installs the iptables rules implementing SNAT configuration 'sc' on
     * the device named 'dev_name': a per-device chain hooked into
     * POSTROUTING, with optional TCP/UDP source-port-range rules plus a
     * catch-all IP rule mapping to the address range in 'sc'.
     *
     * NOTE(review): commands are run through system(), i.e. via the shell,
     * and 'dev_name' is interpolated unquoted -- confirm that device names
     * can never contain shell metacharacters. */
    char command[SNAT_CMD_LEN];
    char ip_str_start[16];      /* Fits "255.255.255.255" plus NUL. */
    char ip_str_end[16];


    snprintf(ip_str_start, sizeof ip_str_start, IP_FMT,
             IP_ARGS(&sc->ip_addr_start));
    snprintf(ip_str_end, sizeof ip_str_end, IP_FMT,
             IP_ARGS(&sc->ip_addr_end));

    /* We always attempt to remove existing entries, so that we know
     * there's a pristine state for SNAT on the interface.  We just ignore
     * the results of these calls, since iptables will complain about
     * any non-existent entries. */

    /* Flush the chain that does the SNAT. */
    snprintf(command, sizeof(command), SNAT_FLUSH_CHAIN_CMD, dev_name);
    system(command);

    /* We always try to create a new chain. */
    snprintf(command, sizeof(command), SNAT_ADD_CHAIN_CMD, dev_name);
    system(command);

    /* Disassociate any old SNAT chain from the POSTROUTING chain. */
    snprintf(command, sizeof(command), SNAT_UNSET_CHAIN_CMD, dev_name,
             dev_name);
    system(command);

    /* Associate the new chain with the POSTROUTING hook. */
    snprintf(command, sizeof(command), SNAT_CONF_CHAIN_CMD, dev_name,
             dev_name);
    if (system(command) != 0) {
        /* NOTE(review): message says "flushing", but the failing step here
         * associates the chain with POSTROUTING. */
        VLOG_ERR("SNAT: problem flushing chain for add");
        return;
    }

    /* If configured, restrict TCP source port ranges. */
    if ((sc->tcp_start != 0) && (sc->tcp_end != 0)) {
        snprintf(command, sizeof(command), SNAT_ADD_TCP_CMD,
                 dev_name, ip_str_start, ip_str_end,
                 ntohs(sc->tcp_start), ntohs(sc->tcp_end));
        if (system(command) != 0) {
            VLOG_ERR("SNAT: problem adding TCP rule");
            return;
        }
    }

    /* If configured, restrict UDP source port ranges. */
    if ((sc->udp_start != 0) && (sc->udp_end != 0)) {
        snprintf(command, sizeof(command), SNAT_ADD_UDP_CMD,
                 dev_name, ip_str_start, ip_str_end,
                 ntohs(sc->udp_start), ntohs(sc->udp_end));
        if (system(command) != 0) {
            VLOG_ERR("SNAT: problem adding UDP rule");
            return;
        }
    }

    /* Add a rule that covers all IP traffic that would not be covered
     * by the prior TCP or UDP ranges. */
    snprintf(command, sizeof(command), SNAT_ADD_IP_CMD,
             dev_name, ip_str_start, ip_str_end);
    if (system(command) != 0) {
        VLOG_ERR("SNAT: problem adding base rule");
        return;
    }
}
+
static void
snat_del_rules(const uint8_t *dev_name)
{
    /* Removes the iptables SNAT chain for device 'dev_name': flushes it,
     * detaches it from POSTROUTING, then deletes it.  Stops (with an error
     * log) at the first step that fails, since later steps depend on the
     * earlier ones. */
    char command[SNAT_CMD_LEN];

    /* Flush the chain that does the SNAT. */
    snprintf(command, sizeof(command), SNAT_FLUSH_CHAIN_CMD, dev_name);
    if (system(command) != 0) {
        VLOG_ERR("SNAT: problem flushing chain for deletion");
        return;
    }

    /* Disassociate the SNAT chain from the POSTROUTING chain. */
    snprintf(command, sizeof(command), SNAT_UNSET_CHAIN_CMD, dev_name,
             dev_name);
    if (system(command) != 0) {
        VLOG_ERR("SNAT: problem unsetting chain");
        return;
    }

    /* Now we can finally delete our SNAT chain. */
    snprintf(command, sizeof(command), SNAT_DEL_CHAIN_CMD, dev_name);
    if (system(command) != 0) {
        VLOG_ERR("SNAT: problem deleting chain");
        return;
    }
}
+
+static void
+snat_config(const struct nx_snat_config *sc, struct snat_data *snat)
+{
+ int idx;
+ struct port_watcher *pw = snat->pw;
+ struct ofp_phy_port *pw_opp;
+ struct snat_port_conf *c, *spc=NULL;
+ uint16_t port_no;
+
+ port_no = ntohs(sc->port);
+ idx = port_no_to_pw_idx(port_no);
+ if (idx < 0) {
+ return;
+ }
+
+ pw_opp = &pw->ports[idx];
+ if (htons(pw_opp->port_no) != port_no) {
+ return;
+ }
+
+ LIST_FOR_EACH(c, struct snat_port_conf, node, &snat->port_list) {
+ if (c->config.port == sc->port) {
+ spc = c;
+ break;
+ }
+ }
+
+ if (sc->command == NXSC_ADD) {
+ if (!spc) {
+ spc = xmalloc(sizeof(*c));
+ if (!spc) {
+ VLOG_ERR("SNAT: no memory for new entry");
+ return;
+ }
+ list_push_back(&snat->port_list, &spc->node);
+ }
+ memcpy(&spc->config, sc, sizeof(spc->config));
+ snat_add_rules(sc, pw_opp->name);
+ } else if (spc) {
+ snat_del_rules(pw_opp->name);
+ list_remove(&spc->node);
+ }
+}
+
static bool
snat_remote_packet_cb(struct relay *r, void *snat_)
{
    /* Controller-to-switch packet hook: watches for Nicira
     * NXT_ACT_SET_CONFIG vendor messages that carry SNAT configuration and
     * applies each embedded entry.  Always returns false so the message is
     * still forwarded to the switch. */
    struct snat_data *snat = snat_;
    struct ofpbuf *msg = r->halves[HALF_REMOTE].rxbuf;
    struct nicira_header *request = msg->data;
    struct nx_act_config *nac = msg->data;
    int n_configs, i;


    /* Too short to even hold the fixed part of an nx_act_config (which
     * presumably begins with the nicira_header -- confirm). */
    if (msg->size < sizeof(struct nx_act_config)) {
        return false;
    }
    request = msg->data;   /* Redundant: already initialized above. */
    if (request->header.type != OFPT_VENDOR
        || request->vendor != htonl(NX_VENDOR_ID)
        || request->subtype != htonl(NXT_ACT_SET_CONFIG)) {
        return false;
    }

    /* We're only interested in attempts to configure SNAT */
    if (nac->type != htons(NXAST_SNAT)) {
        return false;
    }

    /* Apply every complete nx_snat_config entry after the fixed header;
     * any trailing partial entry is ignored. */
    n_configs = (msg->size - sizeof *nac) / sizeof *nac->snat;
    for (i=0; i<n_configs; i++) {
        snat_config(&nac->snat[i], snat);
    }

    return false;
}
+
+static void
+snat_port_changed_cb(uint16_t port_no,
+ const struct ofp_phy_port *old,
+ const struct ofp_phy_port *new,
+ void *snat_)
+{
+ struct snat_data *snat = snat_;
+ struct snat_port_conf *c;
+
+ /* We're only interested in ports that went away */
+ if (new->port_no != htons(OFPP_NONE)) {
+ return;
+ }
+
+ LIST_FOR_EACH(c, struct snat_port_conf, node, &snat->port_list) {
+ if (c->config.port == old->port_no) {
+ snat_del_rules(old->name);
+ list_remove(&c->node);
+ return;
+ }
+ }
+}
+
static struct hook
snat_hook_create(struct port_watcher *pw)
{
    /* Creates the SNAT hook: flushes any stale NAT rules left over from a
     * previous run, allocates the hook state, and registers for port-change
     * notifications so rules can be torn down when ports vanish. */
    int ret;
    struct snat_data *snat;

    ret = system(SNAT_FLUSH_ALL_CMD);
    if (ret != 0) {
        /* Non-fatal: we proceed, but stale rules may remain. */
        VLOG_ERR("SNAT: problem flushing tables");
    }

    snat = xcalloc(1, sizeof *snat);
    snat->pw = pw;
    list_init(&snat->port_list);

    port_watcher_register_callback(pw, snat_port_changed_cb, snat);
    /* Only the remote (controller-to-switch) packet callback is needed. */
    return make_hook(NULL, snat_remote_packet_cb, NULL, NULL, snat);
}
+#endif /* SUPPORT_SNAT */
+\f
+/* Spanning tree protocol. */
+
/* Extra time, in seconds, at boot before going into fail-open, to give the
 * spanning tree protocol time to figure out the network layout. */
#define STP_EXTRA_BOOT_TIME 30

/* State for the spanning tree protocol hook. */
struct stp_data {
    struct stp *stp;                 /* Core STP state machine. */
    struct port_watcher *pw;         /* For reading/updating port flags. */
    struct rconn *local_rconn;       /* Connection to switch (BPDU tx). */
    struct rconn *remote_rconn;      /* Connection to controller. */
    long long int last_tick_256ths;  /* Time of last stp_tick() call. */
    int n_txq;                       /* # of queued BPDUs awaiting tx. */
};
+
static bool
stp_local_packet_cb(struct relay *r, void *stp_)
{
    /* Switch-to-controller packet hook for STP.  Advertises the OFPC_STP
     * capability in features replies, and intercepts packet-in messages
     * addressed to the STP multicast address, feeding valid BPDUs into the
     * STP state machine.  Returns true when the message was consumed. */
    struct ofpbuf *msg = r->halves[HALF_LOCAL].rxbuf;
    struct ofp_header *oh;
    struct stp_data *stp = stp_;
    struct ofp_packet_in *opi;
    struct eth_header *eth;
    struct llc_header *llc;
    struct ofpbuf payload;
    uint16_t port_no;
    struct flow flow;

    /* Patch features replies so the controller sees STP support. */
    oh = msg->data;
    if (oh->type == OFPT_FEATURES_REPLY
        && msg->size >= offsetof(struct ofp_switch_features, ports)) {
        struct ofp_switch_features *osf = msg->data;
        osf->capabilities |= htonl(OFPC_STP);
        return false;
    }

    if (!get_ofp_packet_eth_header(r, &opi, &eth)
        || !eth_addr_equals(eth->eth_dst, stp_eth_addr)) {
        return false;
    }

    port_no = ntohs(opi->in_port);
    if (port_no >= STP_MAX_PORTS) {
        /* STP only supports 255 ports. */
        return false;
    }
    if (port_watcher_get_config(stp->pw, port_no) & OFPPC_NO_STP) {
        /* We're not doing STP on this port. */
        return false;
    }

    if (opi->reason == OFPR_ACTION) {
        /* The controller set up a flow for this, so we won't intercept it. */
        return false;
    }

    /* Frames to the STP multicast address are not Ethernet II, so
     * flow_extract() marks them with OFP_DL_TYPE_NOT_ETH_TYPE. */
    get_ofp_packet_payload(opi, &payload);
    flow_extract(&payload, port_no, &flow);
    if (flow.dl_type != htons(OFP_DL_TYPE_NOT_ETH_TYPE)) {
        VLOG_DBG("non-LLC frame received on STP multicast address");
        return false;
    }
    llc = ofpbuf_at_assert(&payload, sizeof *eth, sizeof *llc);
    if (llc->llc_dsap != STP_LLC_DSAP) {
        VLOG_DBG("bad DSAP 0x%02"PRIx8" received on STP multicast address",
                 llc->llc_dsap);
        return false;
    }

    /* Trim off padding on payload: for 802.3 frames eth_type holds the
     * frame length, not an Ethertype. */
    if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
    }
    /* Strip the Ethernet and LLC headers and hand the BPDU to STP. */
    if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        struct stp_port *p = stp_get_port(stp->stp, port_no);
        stp_received_bpdu(p, payload.data, payload.size);
    }

    return true;
}
+
static long long int
time_256ths(void)
{
    /* Returns the current time expressed in 1/256ths of a second, the unit
     * used by the STP timer machinery. */
    long long int msec = time_msec();
    return msec * 256 / 1000;
}
+
static void
stp_periodic_cb(void *stp_)
{
    /* Periodic STP driver: advances the STP state machine by the elapsed
     * time and pushes any resulting port state changes (forwarding,
     * blocking, etc.) into the port watcher, which propagates them to the
     * switch and controller. */
    struct stp_data *stp = stp_;
    long long int now_256ths = time_256ths();
    long long int elapsed_256ths = now_256ths - stp->last_tick_256ths;
    struct stp_port *p;

    if (!port_watcher_is_ready(stp->pw)) {
        /* Can't start STP until we know port flags, because port flags can
         * disable STP. */
        return;
    }
    if (elapsed_256ths <= 0) {
        return;
    }

    stp_tick(stp->stp, MIN(INT_MAX, elapsed_256ths));
    stp->last_tick_256ths = now_256ths;

    while (stp_get_changed_port(stp->stp, &p)) {
        int port_no = stp_port_no(p);
        enum stp_state s_state = stp_port_get_state(p);

        if (s_state != STP_DISABLED) {
            VLOG_WARN("STP: Port %d entered %s state",
                      port_no, stp_state_name(s_state));
        }
        if (!(port_watcher_get_config(stp->pw, port_no) & OFPPC_NO_STP)) {
            uint32_t p_config = 0;
            uint32_t p_state;
            /* Map the STP state to the OpenFlow OFPPS_STP_* encoding.
             * Disabled ports are reported as forwarding -- presumably
             * because a port STP ignores should still pass traffic. */
            switch (s_state) {
            case STP_LISTENING:
                p_state = OFPPS_STP_LISTEN;
                break;
            case STP_LEARNING:
                p_state = OFPPS_STP_LEARN;
                break;
            case STP_DISABLED:
            case STP_FORWARDING:
                p_state = OFPPS_STP_FORWARD;
                break;
            case STP_BLOCKING:
                p_state = OFPPS_STP_BLOCK;
                break;
            default:
                VLOG_DBG_RL(&vrl, "STP: Port %d has bad state %x",
                            port_no, s_state);
                p_state = OFPPS_STP_FORWARD;
                break;
            }
            /* Non-forwarding ports must also not be flooded to. */
            if (!stp_forward_in_state(s_state)) {
                p_config = OFPPC_NO_FLOOD;
            }
            port_watcher_set_flags(stp->pw, port_no,
                                   p_config, OFPPC_NO_FLOOD,
                                   p_state, OFPPS_STP_MASK);
        } else {
            /* We don't own those flags. */
        }
    }
}
+
static void
stp_wait_cb(void *stp_ UNUSED)
{
    /* Ensures the main poll loop wakes within a second so that
     * stp_periodic_cb() keeps the STP timers ticking. */
    poll_timer_wait(1000);
}
+
static void
send_bpdu(const void *bpdu, size_t bpdu_size, int port_no, void *stp_)
{
    /* Output callback for the STP library: wraps 'bpdu' in an 802.2
     * (Ethernet + LLC) frame sourced from 'port_no's MAC address and sends
     * it to the switch as a packet-out on that port. */
    struct stp_data *stp = stp_;
    struct eth_header *eth;
    struct llc_header *llc;
    struct ofpbuf pkt, *opo;

    /* Packet skeleton. */
    ofpbuf_init(&pkt, ETH_HEADER_LEN + LLC_HEADER_LEN + bpdu_size);
    eth = ofpbuf_put_uninit(&pkt, sizeof *eth);
    llc = ofpbuf_put_uninit(&pkt, sizeof *llc);
    ofpbuf_put(&pkt, bpdu, bpdu_size);

    /* 802.2 header. */
    memcpy(eth->eth_dst, stp_eth_addr, ETH_ADDR_LEN);
    memcpy(eth->eth_src, stp->pw->ports[port_no].hw_addr, ETH_ADDR_LEN);
    /* 802.3 frames carry the payload length, not an Ethertype. */
    eth->eth_type = htons(pkt.size - ETH_HEADER_LEN);

    /* LLC header. */
    llc->llc_dsap = STP_LLC_DSAP;
    llc->llc_ssap = STP_LLC_SSAP;
    llc->llc_cntl = STP_LLC_CNTL;

    /* OFPP_NONE as in_port: the frame originates here, not from a port. */
    opo = make_unbuffered_packet_out(&pkt, OFPP_NONE, port_no);
    ofpbuf_uninit(&pkt);
    rconn_send_with_limit(stp->local_rconn, opo, &stp->n_txq, OFPP_MAX);
}
+
+static bool
+stp_is_port_supported(uint16_t port_no)
+{
+ /* We should be able to support STP on all possible OpenFlow physical
+ * ports. (But we don't support STP on OFPP_LOCAL.) */
+ BUILD_ASSERT_DECL(STP_MAX_PORTS >= OFPP_MAX);
+ return port_no < STP_MAX_PORTS;
+}
+
+static void
+stp_port_changed_cb(uint16_t port_no,
+ const struct ofp_phy_port *old,
+ const struct ofp_phy_port *new,
+ void *stp_)
+{
+ struct stp_data *stp = stp_;
+ struct stp_port *p;
+
+ if (!stp_is_port_supported(port_no)) {
+ return;
+ }
+
+ p = stp_get_port(stp->stp, port_no);
+ if (new->port_no == htons(OFPP_NONE)
+ || new->config & htonl(OFPPC_NO_STP | OFPPC_PORT_DOWN)
+ || new->state & htonl(OFPPS_LINK_DOWN)) {
+ stp_port_disable(p);
+ } else {
+ int speed = 0;
+ stp_port_enable(p);
+ if (new->curr & (OFPPF_10MB_HD | OFPPF_10MB_FD)) {
+ speed = 10;
+ } else if (new->curr & (OFPPF_100MB_HD | OFPPF_100MB_FD)) {
+ speed = 100;
+ } else if (new->curr & (OFPPF_1GB_HD | OFPPF_1GB_FD)) {
+ speed = 1000;
+ } else if (new->curr & OFPPF_100MB_FD) {
+ speed = 10000;
+ }
+ stp_port_set_speed(p, speed);
+ }
+}
+
+static void
+stp_local_port_changed_cb(const struct ofp_phy_port *port, void *stp_)
+{
+ struct stp_data *stp = stp_;
+ if (port) {
+ stp_set_bridge_id(stp->stp, eth_addr_to_uint64(port->hw_addr));
+ }
+}
+
static struct hook
stp_hook_create(const struct settings *s, struct port_watcher *pw,
                struct rconn *local, struct rconn *remote)
{
    /* Creates the STP hook: allocates its state, seeds the bridge with a
     * random MAC-derived ID (replaced by the real local-port MAC via
     * stp_local_port_changed_cb once known), and registers the port
     * callbacks.  's' is currently unused here. */
    uint8_t dpid[ETH_ADDR_LEN];
    struct stp_data *stp;

    stp = xcalloc(1, sizeof *stp);
    eth_addr_random(dpid);
    stp->stp = stp_create("stp", eth_addr_to_uint64(dpid), send_bpdu, stp);
    stp->pw = pw;
    stp->local_rconn = local;
    stp->remote_rconn = remote;
    stp->last_tick_256ths = time_256ths();

    port_watcher_register_callback(pw, stp_port_changed_cb, stp);
    port_watcher_register_local_port_callback(pw, stp_local_port_changed_cb,
                                              stp);
    return make_hook(stp_local_packet_cb, NULL,
                     stp_periodic_cb, stp_wait_cb, stp);
}
+\f
+/* In-band control. */
+
/* State for the in-band control hook. */
struct in_band_data {
    const struct settings *s;    /* Relay settings (max_idle, etc.). */
    struct mac_learning *ml;     /* MAC learning table for hand switching. */
    struct netdev *of_device;    /* Local network device, or NULL. */
    struct rconn *controller;    /* Connection to the controller. */
    int n_queued;                /* # of our messages queued in the rconn. */
};
+
+static void
+queue_tx(struct rconn *rc, struct in_band_data *in_band, struct ofpbuf *b)
+{
+ rconn_send_with_limit(rc, b, &in_band->n_queued, 10);
+}
+
static const uint8_t *
get_controller_mac(struct in_band_data *in_band)
{
    /* Returns the controller's MAC address (looked up via ARP on the local
     * device), or NULL if it is not currently known.  The result is cached
     * in static storage and refreshed when the controller IP changes or the
     * refresh deadline passes, so this function is not reentrant. */
    static uint32_t ip, last_nonzero_ip;
    static uint8_t mac[ETH_ADDR_LEN], last_nonzero_mac[ETH_ADDR_LEN];
    static time_t next_refresh = 0;

    uint32_t last_ip = ip;

    time_t now = time_now();

    ip = rconn_get_ip(in_band->controller);
    if (last_ip != ip || !next_refresh || now >= next_refresh) {
        bool have_mac;

        /* Look up MAC address. */
        memset(mac, 0, sizeof mac);
        if (ip && in_band->of_device) {
            int retval = netdev_arp_lookup(in_band->of_device, ip, mac);
            if (retval) {
                VLOG_DBG_RL(&vrl, "cannot look up controller hw address "
                            "("IP_FMT"): %s", IP_ARGS(&ip), strerror(retval));
            }
        }
        have_mac = !eth_addr_is_zero(mac);

        /* Log changes in IP, MAC addresses. */
        if (ip && ip != last_nonzero_ip) {
            VLOG_DBG("controller IP address changed from "IP_FMT
                     " to "IP_FMT, IP_ARGS(&last_nonzero_ip), IP_ARGS(&ip));
            last_nonzero_ip = ip;
        }
        if (have_mac && memcmp(last_nonzero_mac, mac, ETH_ADDR_LEN)) {
            VLOG_DBG("controller MAC address changed from "ETH_ADDR_FMT" to "
                     ETH_ADDR_FMT,
                     ETH_ADDR_ARGS(last_nonzero_mac), ETH_ADDR_ARGS(mac));
            memcpy(last_nonzero_mac, mac, ETH_ADDR_LEN);
        }

        /* Schedule next refresh.
         *
         * If we have an IP address but not a MAC address, then refresh
         * quickly, since we probably will get a MAC address soon (via ARP).
         * Otherwise, we can afford to wait a little while. */
        next_refresh = now + (!ip || have_mac ? 10 : 1);
    }
    return !eth_addr_is_zero(mac) ? mac : NULL;
}
+
+static bool
+is_controller_mac(const uint8_t dl_addr[ETH_ADDR_LEN],
+ struct in_band_data *in_band)
+{
+ const uint8_t *mac = get_controller_mac(in_band);
+ return mac && eth_addr_equals(mac, dl_addr);
+}
+
+static void
+in_band_learn_mac(struct in_band_data *in_band,
+ uint16_t in_port, const uint8_t src_mac[ETH_ADDR_LEN])
+{
+ if (mac_learning_learn(in_band->ml, src_mac, in_port)) {
+ VLOG_DBG_RL(&vrl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16,
+ ETH_ADDR_ARGS(src_mac), in_port);
+ }
+}
+
static bool
in_band_local_packet_cb(struct relay *r, void *in_band_)
{
    /* Switch-to-controller packet hook implementing in-band control: hand
     * switches traffic between the local port, the secure channel, and the
     * controller so that control traffic flows even without controller-
     * installed rules.  Returns true when the packet was handled here. */
    struct in_band_data *in_band = in_band_;
    struct rconn *rc = r->halves[HALF_LOCAL].rconn;
    struct ofp_packet_in *opi;
    struct eth_header *eth;
    struct ofpbuf payload;
    struct flow flow;
    uint16_t in_port;
    int out_port;

    if (!get_ofp_packet_eth_header(r, &opi, &eth) || !in_band->of_device) {
        return false;
    }
    in_port = ntohs(opi->in_port);

    /* Deal with local stuff. */
    if (in_port == OFPP_LOCAL) {
        /* Sent by secure channel. */
        out_port = mac_learning_lookup(in_band->ml, eth->eth_dst);
    } else if (eth_addr_equals(eth->eth_dst,
                               netdev_get_etheraddr(in_band->of_device))) {
        /* Sent to secure channel. */
        out_port = OFPP_LOCAL;
        in_band_learn_mac(in_band, in_port, eth->eth_src);
    } else if (eth->eth_type == htons(ETH_TYPE_ARP)
               && eth_addr_is_broadcast(eth->eth_dst)
               && is_controller_mac(eth->eth_src, in_band)) {
        /* ARP sent by controller. */
        out_port = OFPP_FLOOD;
    } else if (is_controller_mac(eth->eth_dst, in_band)
               || is_controller_mac(eth->eth_src, in_band)) {
        /* Traffic to or from controller.  Switch it by hand. */
        in_band_learn_mac(in_band, in_port, eth->eth_src);
        out_port = mac_learning_lookup(in_band->ml, eth->eth_dst);
    } else {
        const uint8_t *controller_mac;
        controller_mac = get_controller_mac(in_band);
        /* NOTE(review): this ARP-from-controller test duplicates the branch
         * above and can never be true here (the identical condition already
         * failed), so this arm appears to be dead code. */
        if (eth->eth_type == htons(ETH_TYPE_ARP)
            && eth_addr_is_broadcast(eth->eth_dst)
            && is_controller_mac(eth->eth_src, in_band)) {
            /* ARP sent by controller. */
            out_port = OFPP_FLOOD;
        } else if (is_controller_mac(eth->eth_dst, in_band)
                   && in_port == mac_learning_lookup(in_band->ml,
                                                     controller_mac)) {
            /* Drop controller traffic that arrives on the controller port. */
            out_port = -1;
        } else {
            return false;
        }
    }

    get_ofp_packet_payload(opi, &payload);
    flow_extract(&payload, in_port, &flow);
    if (in_port == out_port) {
        /* The input and output port match.  Set up a flow to drop packets. */
        queue_tx(rc, in_band, make_add_flow(&flow, ntohl(opi->buffer_id),
                                            in_band->s->max_idle, 0));
    } else if (out_port != OFPP_FLOOD) {
        /* The output port is known, so add a new flow. */
        queue_tx(rc, in_band,
                 make_add_simple_flow(&flow, ntohl(opi->buffer_id),
                                      out_port, in_band->s->max_idle));

        /* If the switch didn't buffer the packet, we need to send a copy. */
        if (ntohl(opi->buffer_id) == UINT32_MAX) {
            queue_tx(rc, in_band,
                     make_unbuffered_packet_out(&payload, in_port, out_port));
        }
    } else {
        /* We don't know that MAC.  Send along the packet without setting up a
         * flow. */
        struct ofpbuf *b;
        if (ntohl(opi->buffer_id) == UINT32_MAX) {
            b = make_unbuffered_packet_out(&payload, in_port, out_port);
        } else {
            b = make_buffered_packet_out(ntohl(opi->buffer_id),
                                         in_port, out_port);
        }
        queue_tx(rc, in_band, b);
    }
    return true;
}
+
static void
in_band_status_cb(struct status_reply *sr, void *in_band_)
{
    /* Fills in the "in-band" category of a switch status reply: local IP
     * and MAC, plus controller IP and MAC when known.  Reports nothing if
     * the local device is not open. */
    struct in_band_data *in_band = in_band_;
    struct in_addr local_ip;
    uint32_t controller_ip;
    const uint8_t *controller_mac;

    if (in_band->of_device) {
        const uint8_t *mac = netdev_get_etheraddr(in_band->of_device);
        if (netdev_get_in4(in_band->of_device, &local_ip)) {
            status_reply_put(sr, "local-ip="IP_FMT, IP_ARGS(&local_ip.s_addr));
        }
        status_reply_put(sr, "local-mac="ETH_ADDR_FMT, ETH_ADDR_ARGS(mac));

        controller_ip = rconn_get_ip(in_band->controller);
        if (controller_ip) {
            status_reply_put(sr, "controller-ip="IP_FMT,
                             IP_ARGS(&controller_ip));
        }
        controller_mac = get_controller_mac(in_band);
        if (controller_mac) {
            status_reply_put(sr, "controller-mac="ETH_ADDR_FMT,
                             ETH_ADDR_ARGS(controller_mac));
        }
    }
}
+
+static void
+get_ofp_packet_payload(struct ofp_packet_in *opi, struct ofpbuf *payload)
+{
+ payload->data = opi->data;
+ payload->size = ntohs(opi->header.length) - offsetof(struct ofp_packet_in,
+ data);
+}
+
+static void
+in_band_local_port_cb(const struct ofp_phy_port *port, void *in_band_)
+{
+ struct in_band_data *in_band = in_band_;
+ if (port) {
+ char name[sizeof port->name + 1];
+ get_port_name(port, name, sizeof name);
+
+ if (!in_band->of_device
+ || strcmp(netdev_get_name(in_band->of_device), name))
+ {
+ int error;
+ netdev_close(in_band->of_device);
+ error = netdev_open(name, NETDEV_ETH_TYPE_NONE,
+ &in_band->of_device);
+ if (error) {
+ VLOG_ERR("failed to open in-band control network device "
+ "\"%s\": %s", name, strerror(errno));
+ }
+ }
+ } else {
+ netdev_close(in_band->of_device);
+ in_band->of_device = NULL;
+ }
+}
+
static struct hook
in_band_hook_create(const struct settings *s, struct switch_status *ss,
                    struct port_watcher *pw, struct rconn *remote)
{
    /* Creates the in-band control hook: allocates its state, registers the
     * "in-band" status category, and subscribes to local-port changes so
     * the backing network device stays in sync. */
    struct in_band_data *in_band;

    in_band = xcalloc(1, sizeof *in_band);
    in_band->s = s;
    in_band->ml = mac_learning_create();
    in_band->of_device = NULL;       /* Opened by in_band_local_port_cb(). */
    in_band->controller = remote;
    switch_status_register_category(ss, "in-band", in_band_status_cb, in_band);
    port_watcher_register_local_port_callback(pw, in_band_local_port_cb,
                                              in_band);
    return make_hook(in_band_local_packet_cb, NULL, NULL, NULL, in_band);
}
+\f
+/* Fail open support. */
+
/* State for the fail-open hook. */
struct fail_open_data {
    const struct settings *s;     /* Relay settings (probe_interval, etc.). */
    struct rconn *local_rconn;    /* Connection to the switch. */
    struct rconn *remote_rconn;   /* Connection to the controller. */
    struct lswitch *lswitch;      /* Learning switch; non-NULL only while
                                   * fail-open mode is active. */
    int last_disconn_secs;        /* Disconnect duration last reported. */
    time_t boot_deadline;         /* Earliest time fail-open may trigger. */
};
+
/* Periodic callback: enters or leaves fail-open mode, as appropriate.
 * Fail-open triggers after the controller has been unreachable for three
 * probe intervals (and not before boot_deadline), at which point a local
 * learning switch takes over forwarding until the controller returns. */
static void
fail_open_periodic_cb(void *fail_open_)
{
    struct fail_open_data *fail_open = fail_open_;
    int disconn_secs;
    bool open;

    if (time_now() < fail_open->boot_deadline) {
        return;
    }
    disconn_secs = rconn_failure_duration(fail_open->remote_rconn);
    open = disconn_secs >= fail_open->s->probe_interval * 3;
    if (open != (fail_open->lswitch != NULL)) {
        if (!open) {
            VLOG_WARN("No longer in fail-open mode");
            lswitch_destroy(fail_open->lswitch);
            fail_open->lswitch = NULL;
        } else {
            VLOG_WARN("Could not connect to controller for %d seconds, "
                      "failing open", disconn_secs);
            fail_open->lswitch = lswitch_create(fail_open->local_rconn, true,
                                                fail_open->s->max_idle);
            fail_open->last_disconn_secs = disconn_secs;
        }
    } else if (open && disconn_secs > fail_open->last_disconn_secs + 60) {
        /* Remind the operator at most once a minute while still failed open. */
        VLOG_WARN("Still in fail-open mode after %d seconds disconnected "
                  "from controller", disconn_secs);
        fail_open->last_disconn_secs = disconn_secs;
    }
}
+
+static bool
+fail_open_local_packet_cb(struct relay *r, void *fail_open_)
+{
+ struct fail_open_data *fail_open = fail_open_;
+ if (!fail_open->lswitch) {
+ return false;
+ } else {
+ lswitch_process_packet(fail_open->lswitch, fail_open->local_rconn,
+ r->halves[HALF_LOCAL].rxbuf);
+ rconn_run(fail_open->local_rconn);
+ return true;
+ }
+}
+
+static void
+fail_open_status_cb(struct status_reply *sr, void *fail_open_)
+{
+ struct fail_open_data *fail_open = fail_open_;
+ const struct settings *s = fail_open->s;
+ int trigger_duration = s->probe_interval * 3;
+ int cur_duration = rconn_failure_duration(fail_open->remote_rconn);
+
+ status_reply_put(sr, "trigger-duration=%d", trigger_duration);
+ status_reply_put(sr, "current-duration=%d", cur_duration);
+ status_reply_put(sr, "triggered=%s",
+ cur_duration >= trigger_duration ? "true" : "false");
+ status_reply_put(sr, "max-idle=%d", s->max_idle);
+}
+
static struct hook
fail_open_hook_create(const struct settings *s, struct switch_status *ss,
                      struct rconn *local_rconn, struct rconn *remote_rconn)
{
    /* Creates the fail-open hook.  The boot deadline delays the first
     * possible fail-open so the controller gets a chance to connect (plus
     * extra time for STP convergence when STP is enabled). */
    struct fail_open_data *fail_open = xmalloc(sizeof *fail_open);
    fail_open->s = s;
    fail_open->local_rconn = local_rconn;
    fail_open->remote_rconn = remote_rconn;
    fail_open->lswitch = NULL;
    fail_open->boot_deadline = time_now() + s->probe_interval * 3;
    if (s->enable_stp) {
        fail_open->boot_deadline += STP_EXTRA_BOOT_TIME;
    }
    switch_status_register_category(ss, "fail-open",
                                    fail_open_status_cb, fail_open);
    return make_hook(fail_open_local_packet_cb, NULL,
                     fail_open_periodic_cb, NULL, fail_open);
}
+\f
/* State for the packet-in rate-limiting hook. */
struct rate_limiter {
    const struct settings *s;      /* Supplies rate_limit and burst_limit. */
    struct rconn *remote_rconn;    /* Connection to the controller. */

    /* One queue per physical port. */
    struct ofp_queue queues[OFPP_MAX];
    int n_queued;                  /* Sum over queues[*].n. */
    int next_tx_port;              /* Next port to check in round-robin. */

    /* Token bucket.
     *
     * It costs 1000 tokens to send a single packet_in message.  A single token
     * per message would be more straightforward, but this choice lets us avoid
     * round-off error in refill_bucket()'s calculation of how many tokens to
     * add to the bucket, since no division step is needed. */
    long long int last_fill;       /* Time at which we last added tokens. */
    int tokens;                    /* Current number of tokens. */

    /* Transmission queue. */
    int n_txq;                     /* No. of packets waiting in rconn for tx. */

    /* Statistics reporting. */
    unsigned long long n_normal;   /* # txed w/o rate limit queuing. */
    unsigned long long n_limited;  /* # queued for rate limiting. */
    unsigned long long n_queue_dropped; /* # dropped due to queue overflow. */
    unsigned long long n_tx_dropped;    /* # dropped due to tx overflow. */
};
+
+/* Drop a packet from the longest queue in 'rl'. */
+static void
+drop_packet(struct rate_limiter *rl)
+{
+ struct ofp_queue *longest; /* Queue currently selected as longest. */
+ int n_longest; /* # of queues of same length as 'longest'. */
+ struct ofp_queue *q;
+
+ longest = &rl->queues[0];
+ n_longest = 1;
+ for (q = &rl->queues[0]; q < &rl->queues[OFPP_MAX]; q++) {
+ if (longest->n < q->n) {
+ longest = q;
+ n_longest = 1;
+ } else if (longest->n == q->n) {
+ n_longest++;
+
+ /* Randomly select one of the longest queues, with a uniform
+ * distribution (Knuth algorithm 3.4.2R). */
+ if (!random_range(n_longest)) {
+ longest = q;
+ }
+ }
+ }
+
+ /* FIXME: do we want to pop the tail instead? */
+ ofpbuf_delete(queue_pop_head(longest));
+ rl->n_queued--;
+}
+
+/* Remove and return the next packet to transmit (in round-robin order). */
+static struct ofpbuf *
+dequeue_packet(struct rate_limiter *rl)
+{
+ unsigned int i;
+
+ for (i = 0; i < OFPP_MAX; i++) {
+ unsigned int port = (rl->next_tx_port + i) % OFPP_MAX;
+ struct ofp_queue *q = &rl->queues[port];
+ if (q->n) {
+ rl->next_tx_port = (port + 1) % OFPP_MAX;
+ rl->n_queued--;
+ return queue_pop_head(q);
+ }
+ }
+ NOT_REACHED();
+}
+
/* Add tokens to the bucket based on elapsed time. */
static void
refill_bucket(struct rate_limiter *rl)
{
    const struct settings *s = rl->s;
    long long int now = time_msec();
    long long int tokens = (now - rl->last_fill) * s->rate_limit + rl->tokens;
    /* Only commit the refill once at least one packet's worth (1000 tokens)
     * has accumulated; otherwise last_fill stays put so that fractional
     * progress keeps accruing.  The bucket is capped at burst_limit
     * packets. */
    if (tokens >= 1000) {
        rl->last_fill = now;
        rl->tokens = MIN(tokens, s->burst_limit * 1000);
    }
}
+
+/* Attempts to remove enough tokens from 'rl' to transmit a packet. Returns
+ * true if successful, false otherwise. (In the latter case no tokens are
+ * removed.) */
+static bool
+get_token(struct rate_limiter *rl)
+{
+ if (rl->tokens >= 1000) {
+ rl->tokens -= 1000;
+ return true;
+ } else {
+ return false;
+ }
+}
+
static bool
rate_limit_local_packet_cb(struct relay *r, void *rl_)
{
    /* Switch-to-controller packet hook: passes packet_in messages straight
     * through while tokens are available, otherwise copies them into the
     * per-input-port queues for rate_limit_periodic_cb() to drain.  Returns
     * true when the message was queued (consumed) here. */
    struct rate_limiter *rl = rl_;
    const struct settings *s = rl->s;
    struct ofp_packet_in *opi;

    opi = get_ofp_packet_in(r);
    if (!opi) {
        return false;
    }

    if (!rl->n_queued && get_token(rl)) {
        /* In the common case where we are not constrained by the rate limit,
         * let the packet take the normal path. */
        rl->n_normal++;
        return false;
    } else {
        /* Otherwise queue it up for the periodic callback to drain out. */
        struct ofpbuf *msg = r->halves[HALF_LOCAL].rxbuf;
        /* % OFPP_MAX folds out-of-range ports (e.g. OFPP_LOCAL) into the
         * fixed-size queue array. */
        int port = ntohs(opi->in_port) % OFPP_MAX;
        if (rl->n_queued >= s->burst_limit) {
            drop_packet(rl);
        }
        queue_push_tail(&rl->queues[port], ofpbuf_clone(msg));
        rl->n_queued++;
        rl->n_limited++;
        return true;
    }
}
+
+/* Emits the rate limiter's counters for the "rate-limit" status category. */
+static void
+rate_limit_status_cb(struct status_reply *sr, void *rl_)
+{
+    const struct rate_limiter *limiter = rl_;
+
+    status_reply_put(sr, "normal=%llu", limiter->n_normal);
+    status_reply_put(sr, "limited=%llu", limiter->n_limited);
+    status_reply_put(sr, "queue-dropped=%llu", limiter->n_queue_dropped);
+    status_reply_put(sr, "tx-dropped=%llu", limiter->n_tx_dropped);
+}
+
+/* Periodic hook: refills the token bucket and transmits queued packets to
+ * the controller as tokens permit. */
+static void
+rate_limit_periodic_cb(void *rl_)
+{
+    struct rate_limiter *rl = rl_;
+    int iterations = 0;
+
+    /* Drain some packets out of the bucket if possible, but limit the number
+     * of iterations to allow other code to get work done too.  (Note that
+     * get_token() must stay the second operand: it consumes a token as a
+     * side effect.) */
+    refill_bucket(rl);
+    while (rl->n_queued && get_token(rl) && iterations < 50) {
+        /* Use a small, arbitrary limit for the amount of queuing to do here,
+         * because the TCP connection is responsible for buffering and there is
+         * no point in trying to transmit faster than the TCP connection can
+         * handle. */
+        struct ofpbuf *b = dequeue_packet(rl);
+        if (rconn_send_with_limit(rl->remote_rconn, b, &rl->n_txq, 10)) {
+            rl->n_tx_dropped++;
+        }
+        iterations++;
+    }
+}
+
+/* Wait hook: arranges for the poll loop to wake when queued packets can be
+ * transmitted. */
+static void
+rate_limit_wait_cb(void *rl_)
+{
+    struct rate_limiter *rl = rl_;
+
+    if (!rl->n_queued) {
+        return;                 /* Nothing queued, so no wakeup needed. */
+    }
+    if (rl->tokens >= 1000) {
+        /* We can transmit more packets as soon as we're called again. */
+        poll_immediate_wake();
+    } else {
+        /* We have to wait for the bucket to re-fill.  We could calculate
+         * the exact amount of time here for increased smoothness. */
+        poll_timer_wait(TIME_UPDATE_INTERVAL / 2);
+    }
+}
+
+/* Creates and returns the packet-in rate-limiting hook, registering its
+ * statistics under the "rate-limit" status category.  'local' is unused. */
+static struct hook
+rate_limit_hook_create(const struct settings *s, struct switch_status *ss,
+                       struct rconn *local, struct rconn *remote)
+{
+    struct rate_limiter *rl = xcalloc(1, sizeof *rl);
+    size_t port;
+
+    rl->s = s;
+    rl->remote_rconn = remote;
+    for (port = 0; port < ARRAY_SIZE(rl->queues); port++) {
+        queue_init(&rl->queues[port]);
+    }
+    rl->last_fill = time_msec();
+    rl->tokens = s->rate_limit * 100;   /* Seed the bucket with some credit. */
+    switch_status_register_category(ss, "rate-limit",
+                                    rate_limit_status_cb, rl);
+    return make_hook(rate_limit_local_packet_cb, NULL, rate_limit_periodic_cb,
+                     rate_limit_wait_cb, rl);
+}
+\f
+/* Switch status replies (Nicira NXT_STATUS_REQUEST vendor extension). */
+
+/* One named status category and the callback that produces its lines of a
+ * status reply. */
+struct switch_status_category {
+    char *name;                 /* Category name, e.g. "rate-limit". */
+    void (*cb)(struct status_reply *, void *aux); /* Appends status lines. */
+    void *aux;                  /* Opaque argument passed through to 'cb'. */
+};
+
+/* Registry of status categories plus the state the built-in categories
+ * report on. */
+struct switch_status {
+    const struct settings *s;   /* Program settings (for "config" category). */
+    time_t booted;              /* Start time, used to compute uptime. */
+    struct switch_status_category categories[8];
+    int n_categories;           /* Number of 'categories' slots in use. */
+};
+
+/* Transient state threaded through category callbacks while composing the
+ * reply to one status request. */
+struct status_reply {
+    struct switch_status_category *category; /* Category being generated. */
+    struct ds request;          /* Request text; used as a prefix filter. */
+    struct ds output;           /* Accumulated reply text. */
+};
+
+/* Remote-packet hook: intercepts Nicira NXT_STATUS_REQUEST vendor messages
+ * from the controller, runs every matching status category callback, and
+ * sends back an NXT_STATUS_REPLY.  Returns true when the message was
+ * consumed, false to let the relay forward it normally. */
+static bool
+switch_status_remote_packet_cb(struct relay *r, void *ss_)
+{
+    struct switch_status *ss = ss_;
+    struct rconn *rc = r->halves[HALF_REMOTE].rconn;
+    struct ofpbuf *msg = r->halves[HALF_REMOTE].rxbuf;
+    struct switch_status_category *c;
+    struct nicira_header *request;
+    struct nicira_header *reply;
+    struct status_reply sr;
+    struct ofpbuf *b;
+    int retval;
+
+    /* Only handle well-formed NXT_STATUS_REQUEST vendor messages. */
+    if (msg->size < sizeof(struct nicira_header)) {
+        return false;
+    }
+    request = msg->data;
+    if (request->header.type != OFPT_VENDOR
+        || request->vendor != htonl(NX_VENDOR_ID)
+        || request->subtype != htonl(NXT_STATUS_REQUEST)) {
+        return false;
+    }
+
+    /* The request's filter string follows the Nicira header; it is not
+     * null-terminated, so track its length explicitly. */
+    sr.request.string = (void *) (request + 1);
+    sr.request.length = msg->size - sizeof *request;
+    ds_init(&sr.output);
+    /* Run every category whose name matches the request prefix; each
+     * callback appends its lines via status_reply_put(). */
+    for (c = ss->categories; c < &ss->categories[ss->n_categories]; c++) {
+        if (!memcmp(c->name, sr.request.string,
+                    MIN(strlen(c->name), sr.request.length))) {
+            sr.category = c;
+            c->cb(&sr, c->aux);
+        }
+    }
+    /* Wrap the accumulated text in an NXT_STATUS_REPLY echoing the
+     * request's transaction id. */
+    reply = make_openflow_xid(sizeof *reply + sr.output.length,
+                              OFPT_VENDOR, request->header.xid, &b);
+    reply->vendor = htonl(NX_VENDOR_ID);
+    reply->subtype = htonl(NXT_STATUS_REPLY);
+    memcpy(reply + 1, sr.output.string, sr.output.length);
+    retval = rconn_send(rc, b, NULL);
+    if (retval && retval != EAGAIN) {
+        VLOG_WARN("send failed (%s)", strerror(retval));
+    }
+    ds_destroy(&sr.output);
+    return true;
+}
+
+/* Status callback reporting connection statistics for the rconn 'rconn_'. */
+static void
+rconn_status_cb(struct status_reply *sr, void *rconn_)
+{
+    struct rconn *rc = rconn_;
+    time_t now = time_now();
+
+    status_reply_put(sr, "name=%s", rconn_get_name(rc));
+    status_reply_put(sr, "state=%s", rconn_get_state(rc));
+    status_reply_put(sr, "backoff=%d", rconn_get_backoff(rc));
+    status_reply_put(sr, "is-connected=%s",
+                     rconn_is_connected(rc) ? "true" : "false");
+    status_reply_put(sr, "sent-msgs=%u", rconn_packets_sent(rc));
+    status_reply_put(sr, "received-msgs=%u", rconn_packets_received(rc));
+    status_reply_put(sr, "attempted-connections=%u",
+                     rconn_get_attempted_connections(rc));
+    status_reply_put(sr, "successful-connections=%u",
+                     rconn_get_successful_connections(rc));
+    status_reply_put(sr, "last-connection=%ld",
+                     (long int) (now - rconn_get_last_connection(rc)));
+    status_reply_put(sr, "time-connected=%lu",
+                     rconn_get_total_time_connected(rc));
+    status_reply_put(sr, "state-elapsed=%u", rconn_get_state_elapsed(rc));
+}
+
+/* Status callback for the "config" category: reports the management
+ * listeners and the nonzero timing settings. */
+static void
+config_status_cb(struct status_reply *sr, void *s_)
+{
+    const struct settings *cfg = s_;
+    size_t idx;
+
+    for (idx = 0; idx < cfg->n_listeners; idx++) {
+        status_reply_put(sr, "management%zu=%s", idx, cfg->listener_names[idx]);
+    }
+    if (cfg->probe_interval) {
+        status_reply_put(sr, "probe-interval=%d", cfg->probe_interval);
+    }
+    if (cfg->max_backoff) {
+        status_reply_put(sr, "max-backoff=%d", cfg->max_backoff);
+    }
+}
+
+/* Status callback for the "switch" category: current time, uptime, pid. */
+static void
+switch_status_cb(struct status_reply *sr, void *ss_)
+{
+    struct switch_status *status = ss_;
+    time_t current = time_now();
+
+    status_reply_put(sr, "now=%ld", (long int) current);
+    status_reply_put(sr, "uptime=%ld", (long int) (current - status->booted));
+    status_reply_put(sr, "pid=%ld", (long int) getpid());
+}
+
+/* Creates the switch-status hook, pre-registering the built-in "config" and
+ * "switch" categories, and stores the new switch_status in '*ssp'. */
+static struct hook
+switch_status_hook_create(const struct settings *s, struct switch_status **ssp)
+{
+    struct switch_status *status = xcalloc(1, sizeof *status);
+
+    status->s = s;
+    status->booted = time_now();
+    switch_status_register_category(status, "config",
+                                    config_status_cb, (void *) s);
+    switch_status_register_category(status, "switch", switch_status_cb, status);
+    *ssp = status;
+    return make_hook(NULL, switch_status_remote_packet_cb, NULL, NULL, status);
+}
+
+/* Registers 'cb' (with argument 'aux') to report status lines under the name
+ * 'category'.  Aborts if the fixed-size category table is full. */
+static void
+switch_status_register_category(struct switch_status *ss,
+                                const char *category,
+                                void (*cb)(struct status_reply *,
+                                           void *aux),
+                                void *aux)
+{
+    struct switch_status_category *slot;
+
+    assert(ss->n_categories < ARRAY_SIZE(ss->categories));
+    slot = &ss->categories[ss->n_categories++];
+    slot->name = xstrdup(category);
+    slot->cb = cb;
+    slot->aux = aux;
+}
+
+/* Formats one "category.key=value" status line into 'sr->output', then
+ * discards it again unless the line begins with the request's filter text.
+ * 'content' is a printf-style format for the "key=value" part. */
+static void
+status_reply_put(struct status_reply *sr, const char *content, ...)
+{
+    size_t old_length = sr->output.length;
+    size_t added;
+    va_list args;
+
+    /* Append the status reply to the output. */
+    ds_put_format(&sr->output, "%s.", sr->category->name);
+    va_start(args, content);
+    ds_put_format_valist(&sr->output, content, args);
+    va_end(args);
+    /* Ensure every status line is newline-terminated. */
+    if (ds_last(&sr->output) != '\n') {
+        ds_put_char(&sr->output, '\n');
+    }
+
+    /* Drop what we just added if it doesn't match the request. */
+    added = sr->output.length - old_length;
+    if (added < sr->request.length
+        || memcmp(&sr->output.string[old_length],
+                  sr->request.string, sr->request.length)) {
+        ds_truncate(&sr->output, old_length);
+    }
+}
+
+\f
+/* Controller discovery. */
+
+/* State for in-band controller discovery via DHCP. */
+struct discovery
+{
+    const struct settings *s;   /* Program settings. */
+    struct dhclient *dhcp;      /* DHCP client, or null when inactive. */
+    int n_changes;              /* Number of controller changes observed. */
+};
+
+/* Status callback for the "discovery" category: reports discovery
+ * configuration, DHCP client state and, when a lease is bound, the
+ * network parameters obtained from it. */
+static void
+discovery_status_cb(struct status_reply *sr, void *d_)
+{
+    struct discovery *d = d_;
+
+    status_reply_put(sr, "accept-remote=%s", d->s->accept_controller_re);
+    status_reply_put(sr, "n-changes=%d", d->n_changes);
+    if (d->dhcp) {
+        status_reply_put(sr, "state=%s", dhclient_get_state(d->dhcp));
+        status_reply_put(sr, "state-elapsed=%u",
+                         dhclient_get_state_elapsed(d->dhcp));
+        if (dhclient_is_bound(d->dhcp)) {
+            uint32_t ip = dhclient_get_ip(d->dhcp);
+            uint32_t netmask = dhclient_get_netmask(d->dhcp);
+            uint32_t router = dhclient_get_router(d->dhcp);
+
+            const struct dhcp_msg *cfg = dhclient_get_config(d->dhcp);
+            uint32_t dns_server;
+            char *domain_name;
+            int i;
+
+            status_reply_put(sr, "ip="IP_FMT, IP_ARGS(&ip));
+            status_reply_put(sr, "netmask="IP_FMT, IP_ARGS(&netmask));
+            /* The router option is optional; 0 means none was supplied. */
+            if (router) {
+                status_reply_put(sr, "router="IP_FMT, IP_ARGS(&router));
+            }
+
+            /* Report every DNS server listed in the lease. */
+            for (i = 0; dhcp_msg_get_ip(cfg, DHCP_CODE_DNS_SERVER, i,
+                                        &dns_server);
+                 i++) {
+                status_reply_put(sr, "dns%d="IP_FMT, i, IP_ARGS(&dns_server));
+            }
+
+            /* dhcp_msg_get_string() allocates; free after use. */
+            domain_name = dhcp_msg_get_string(cfg, DHCP_CODE_DOMAIN_NAME);
+            if (domain_name) {
+                status_reply_put(sr, "domain=%s", domain_name);
+                free(domain_name);
+            }
+
+            status_reply_put(sr, "lease-remaining=%u",
+                             dhclient_get_lease_remaining(d->dhcp));
+        }
+    }
+}
+
+/* Port-watcher callback invoked when the local port changes.  When a local
+ * port exists, (re)starts the DHCP client on its network device; when 'port'
+ * is null, shuts discovery down.
+ *
+ * Fix: the original leaked the open netdev handle when
+ * netdev_turn_flags_on() failed; the handle is now closed on both the
+ * success and failure paths. */
+static void
+discovery_local_port_cb(const struct ofp_phy_port *port, void *d_)
+{
+    struct discovery *d = d_;
+    if (port) {
+        char name[OFP_MAX_PORT_NAME_LEN + 1];
+        struct netdev *netdev;
+        int retval;
+
+        /* Check that this was really a change. */
+        get_port_name(port, name, sizeof name);
+        if (d->dhcp && !strcmp(netdev_get_name(dhclient_get_netdev(d->dhcp)),
+                               name)) {
+            return;
+        }
+
+        /* Destroy current DHCP client. */
+        dhclient_destroy(d->dhcp);
+        d->dhcp = NULL;
+
+        /* Bring local network device up. */
+        retval = netdev_open(name, NETDEV_ETH_TYPE_NONE, &netdev);
+        if (retval) {
+            VLOG_ERR("Could not open %s device, discovery disabled: %s",
+                     name, strerror(retval));
+            return;
+        }
+        retval = netdev_turn_flags_on(netdev, NETDEV_UP, true);
+        /* The handle is only needed to flip the flags; close it regardless
+         * of whether that succeeded, to avoid leaking it on failure. */
+        netdev_close(netdev);
+        if (retval) {
+            VLOG_ERR("Could not bring %s device up, discovery disabled: %s",
+                     name, strerror(retval));
+            return;
+        }
+
+        /* Initialize DHCP client. */
+        retval = dhclient_create(name, modify_dhcp_request,
+                                 validate_dhcp_offer, (void *) d->s, &d->dhcp);
+        if (retval) {
+            VLOG_ERR("Failed to initialize DHCP client, "
+                     "discovery disabled: %s", strerror(retval));
+            return;
+        }
+        dhclient_init(d->dhcp, 0);
+    } else {
+        dhclient_destroy(d->dhcp);
+        d->dhcp = NULL;
+    }
+}
+
+
+/* Allocates and returns discovery state, wiring it into the status registry
+ * ("discovery" category) and the port watcher's local-port callback. */
+static struct discovery *
+discovery_init(const struct settings *s, struct port_watcher *pw,
+               struct switch_status *ss)
+{
+    struct discovery *d = xmalloc(sizeof *d);
+
+    d->s = s;
+    d->dhcp = NULL;             /* Started lazily by the port callback. */
+    d->n_changes = 0;
+
+    switch_status_register_category(ss, "discovery", discovery_status_cb, d);
+    port_watcher_register_local_port_callback(pw, discovery_local_port_cb, d);
+
+    return d;
+}
+
+/* Called when controller connectivity looks doubtful: forces the DHCP
+ * client to renew its lease soon (within 15 seconds), if one is running. */
+static void
+discovery_question_connectivity(struct discovery *d)
+{
+    if (d->dhcp != NULL) {
+        dhclient_force_renew(d->dhcp, 15);
+    }
+}
+
+/* Runs the discovery state machine.  Returns true and sets
+ * '*controller_name' (to a malloc'd vconn name, or null if no controller is
+ * known) when the caller should reconsider its controller; returns false
+ * when nothing changed. */
+static bool
+discovery_run(struct discovery *d, char **controller_name)
+{
+    struct dhclient *cli = d->dhcp;
+
+    if (!cli) {
+        /* Discovery inactive: report "no controller" immediately. */
+        *controller_name = NULL;
+        return true;
+    }
+
+    dhclient_run(cli);
+    if (!dhclient_changed(cli)) {
+        return false;
+    }
+
+    dhclient_configure_netdev(cli);
+    if (d->s->update_resolv_conf) {
+        dhclient_update_resolv_conf(cli);
+    }
+
+    if (!dhclient_is_bound(cli)) {
+        *controller_name = NULL;
+        if (d->n_changes) {
+            VLOG_WARN("discovered controller no longer available");
+            d->n_changes++;
+        }
+        return true;
+    }
+
+    *controller_name = dhcp_msg_get_string(dhclient_get_config(cli),
+                                           DHCP_CODE_OFP_CONTROLLER_VCONN);
+    VLOG_WARN("%s: discovered controller", *controller_name);
+    d->n_changes++;
+    return true;
+}
+
+/* Registers the DHCP client's wakeup events with the poll loop, if one is
+ * running. */
+static void
+discovery_wait(struct discovery *d)
+{
+    if (d->dhcp != NULL) {
+        dhclient_wait(d->dhcp);
+    }
+}
+
+/* DHCP request hook: tags outgoing requests with an "OpenFlow" vendor
+ * class so servers can recognize us.  'aux' is unused. */
+static void
+modify_dhcp_request(struct dhcp_msg *msg, void *aux)
+{
+    dhcp_msg_put_string(msg, DHCP_CODE_VENDOR_CLASS, "OpenFlow");
+}
+
+/* DHCP offer hook: accepts an offer only if it carries a controller vconn
+ * name matching the configured accept regex.  Returns true to accept. */
+static bool
+validate_dhcp_offer(const struct dhcp_msg *msg, void *s_)
+{
+    const struct settings *s = s_;
+    char *vconn_name;
+    bool ok;
+
+    vconn_name = dhcp_msg_get_string(msg, DHCP_CODE_OFP_CONTROLLER_VCONN);
+    if (!vconn_name) {
+        VLOG_WARN_RL(&vrl, "rejecting DHCP offer missing controller vconn");
+        return false;
+    }
+    ok = regexec(&s->accept_controller_regex, vconn_name, 0, NULL, 0) == 0;
+    if (!ok) {
+        VLOG_WARN_RL(&vrl, "rejecting controller vconn that fails to match %s",
+                     s->accept_controller_re);
+    }
+    free(vconn_name);
+    return ok;
+}
+\f
+/* User interface. */
+
+static void
+parse_options(int argc, char *argv[], struct settings *s)