+\f
+/* Per-relay rate limiter for OFPT_PACKET_IN messages: when the token bucket
+ * runs dry, packet_ins are queued per input port and drained later by the
+ * periodic callback. */
+struct rate_limiter {
+    const struct settings *s;   /* Settings ('rate_limit', 'burst_limit'). */
+    struct rconn *remote_rconn; /* Queued packet_ins are transmitted here. */
+
+    /* One queue per physical port. */
+    struct queue queues[OFPP_MAX];
+    int n_queued;               /* Sum over queues[*].n. */
+    int next_tx_port;           /* Next port to check in round-robin. */
+
+    /* Token bucket.
+     *
+     * It costs 1000 tokens to send a single packet_in message.  A single token
+     * per message would be more straightforward, but this choice lets us avoid
+     * round-off error in refill_bucket()'s calculation of how many tokens to
+     * add to the bucket, since no division step is needed. */
+    long long int last_fill;    /* Time at which we last added tokens. */
+    int tokens;                 /* Current number of tokens. */
+
+    /* Transmission queue. */
+    int n_txq;                  /* No. of packets waiting in rconn for tx. */
+
+    /* Statistics reporting. */
+    unsigned long long n_normal;        /* # txed w/o rate limit queuing. */
+    unsigned long long n_limited;       /* # queued for rate limiting. */
+    unsigned long long n_queue_dropped; /* # dropped due to queue overflow. */
+    unsigned long long n_tx_dropped;    /* # dropped due to tx overflow. */
+};
+
+/* Drops one packet, chosen uniformly at random among the longest queues in
+ * 'rl'.  The only caller invokes this when the total number of queued
+ * packets has reached the burst limit, so the drop is accounted as a queue
+ * overflow. */
+static void
+drop_packet(struct rate_limiter *rl)
+{
+    struct queue *longest;      /* Queue currently selected as longest. */
+    int n_longest;              /* # of queues of same length as 'longest'. */
+    struct queue *q;
+
+    longest = &rl->queues[0];
+    n_longest = 1;
+    /* Start the scan at queues[1]: queues[0] is already the candidate, and
+     * comparing it against itself would bump 'n_longest' to 2 for a single
+     * queue seen, biasing the random choice below toward queues[0]. */
+    for (q = &rl->queues[1]; q < &rl->queues[OFPP_MAX]; q++) {
+        if (longest->n < q->n) {
+            longest = q;
+            n_longest = 1;
+        } else if (longest->n == q->n) {
+            n_longest++;
+
+            /* Randomly select one of the longest queues, with a uniform
+             * distribution (Knuth algorithm 3.4.2R). */
+            if (!random_range(n_longest)) {
+                longest = q;
+            }
+        }
+    }
+
+    /* FIXME: do we want to pop the tail instead? */
+    ofpbuf_delete(queue_pop_head(longest));
+    rl->n_queued--;
+    rl->n_queue_dropped++;      /* Reported by rate_limit_status_cb(); was
+                                 * previously never incremented anywhere. */
+}
+
+/* Removes and returns the next packet to transmit, cycling through the
+ * per-port queues in round-robin order starting at 'next_tx_port'.  The
+ * caller must guarantee that at least one packet is queued. */
+static struct ofpbuf *
+dequeue_packet(struct rate_limiter *rl)
+{
+    unsigned int offset;
+
+    for (offset = 0; offset < OFPP_MAX; offset++) {
+        unsigned int port = (rl->next_tx_port + offset) % OFPP_MAX;
+        struct queue *q = &rl->queues[port];
+
+        if (q->n > 0) {
+            /* Resume the round-robin scan after this port next time. */
+            rl->next_tx_port = (port + 1) % OFPP_MAX;
+            rl->n_queued--;
+            return queue_pop_head(q);
+        }
+    }
+    NOT_REACHED();
+}
+
+/* Adds tokens to 'rl''s bucket based on the time elapsed since tokens were
+ * last added: 's->rate_limit' tokens per elapsed millisecond, i.e. one
+ * packet's worth (1000 tokens) per packet allowed per second, capped at
+ * 'burst_limit' packets' worth of tokens. */
+static void
+refill_bucket(struct rate_limiter *rl)
+{
+    const struct settings *s = rl->s;
+    long long int now = time_msec();
+    long long int tokens = (now - rl->last_fill) * s->rate_limit + rl->tokens;
+    if (tokens >= 1000) {
+        /* Only advance 'last_fill' once at least a whole packet's worth of
+         * tokens has accrued; otherwise frequent calls would repeatedly
+         * discard sub-packet accruals.
+         *
+         * NOTE(review): 's->burst_limit * 1000' is evaluated in the type of
+         * 'burst_limit'; assumes burst_limit stays well below INT_MAX/1000 --
+         * confirm the configured range. */
+        rl->last_fill = now;
+        rl->tokens = MIN(tokens, s->burst_limit * 1000);
+    }
+}
+
+/* Tries to take one packet's worth of tokens (1000) out of 'rl''s bucket.
+ * Returns true and deducts the tokens on success; returns false and leaves
+ * the bucket untouched if there are not enough tokens. */
+static bool
+get_token(struct rate_limiter *rl)
+{
+    if (rl->tokens < 1000) {
+        return false;
+    }
+    rl->tokens -= 1000;
+    return true;
+}
+
+/* Message hook: rate-limits OFPT_PACKET_IN messages received from the local
+ * (switch) half of relay 'r'.  Returns false to let the message continue
+ * along its normal path, or true when the message has instead been queued
+ * here for later transmission by rate_limit_periodic_cb(). */
+static bool
+rate_limit_packet_cb(struct relay *r, int half, void *rl_)
+{
+    struct rate_limiter *rl = rl_;
+    const struct settings *s = rl->s;
+    struct ofpbuf *msg = r->halves[HALF_LOCAL].rxbuf;
+    struct ofp_header *oh;
+
+    /* Only traffic arriving from the switch side is rate-limited. */
+    if (half == HALF_REMOTE) {
+        return false;
+    }
+
+    oh = msg->data;
+    if (oh->type != OFPT_PACKET_IN) {
+        return false;
+    }
+    if (msg->size < offsetof(struct ofp_packet_in, data)) {
+        VLOG_WARN_RL(&vrl, "packet too short (%zu bytes) for packet_in",
+                     msg->size);
+        return false;
+    }
+
+    /* Checking 'n_queued' before taking a token preserves ordering: a new
+     * packet may not jump ahead of packets already queued (and the token is
+     * only consumed when it will actually be used, thanks to &&'s
+     * short-circuit). */
+    if (!rl->n_queued && get_token(rl)) {
+        /* In the common case where we are not constrained by the rate limit,
+         * let the packet take the normal path. */
+        rl->n_normal++;
+        return false;
+    } else {
+        /* Otherwise queue it up for the periodic callback to drain out. */
+        struct ofp_packet_in *opi = msg->data;
+        int port = ntohs(opi->in_port) % OFPP_MAX;
+        /* Bound the total number of queued packets by evicting one from the
+         * longest queue before inserting. */
+        if (rl->n_queued >= s->burst_limit) {
+            drop_packet(rl);
+        }
+        queue_push_tail(&rl->queues[port], ofpbuf_clone(msg));
+        rl->n_queued++;
+        rl->n_limited++;
+        return true;
+    }
+}
+
+/* Status callback for the "rate-limit" category: reports the rate limiter's
+ * packet counters (see the struct rate_limiter statistics members). */
+static void
+rate_limit_status_cb(struct status_reply *sr, void *rl_)
+{
+    struct rate_limiter *rl = rl_;
+
+    status_reply_put(sr, "normal=%llu", rl->n_normal);
+    status_reply_put(sr, "limited=%llu", rl->n_limited);
+    status_reply_put(sr, "queue-dropped=%llu", rl->n_queue_dropped);
+    status_reply_put(sr, "tx-dropped=%llu", rl->n_tx_dropped);
+}
+
+/* Periodic hook: refills the token bucket and transmits queued packet_in
+ * messages to the remote side for as long as tokens (and the per-call
+ * iteration cap) allow. */
+static void
+rate_limit_periodic_cb(void *rl_)
+{
+    struct rate_limiter *rl = rl_;
+    int i;
+
+    /* Drain some packets out of the bucket if possible, but limit the number
+     * of iterations to allow other code to get work done too.
+     *
+     * 'i < 50' must be tested before get_token(): the previous ordering
+     * consumed one token on the final (failing) test of the condition,
+     * wasting a packet's worth of tokens every time the cap was hit. */
+    refill_bucket(rl);
+    for (i = 0; i < 50 && rl->n_queued && get_token(rl); i++) {
+        /* Use a small, arbitrary limit for the amount of queuing to do here,
+         * because the TCP connection is responsible for buffering and there is
+         * no point in trying to transmit faster than the TCP connection can
+         * handle. */
+        struct ofpbuf *b = dequeue_packet(rl);
+        if (rconn_send_with_limit(rl->remote_rconn, b, &rl->n_txq, 10)) {
+            rl->n_tx_dropped++;
+        }
+    }
+}
+
+/* Wait hook: arranges for the poll loop to wake up when the rate limiter has
+ * queued packets that can (or will soon be able to) be transmitted. */
+static void
+rate_limit_wait_cb(void *rl_)
+{
+    struct rate_limiter *rl = rl_;
+
+    if (!rl->n_queued) {
+        /* Nothing queued: no need to wake up for the rate limiter. */
+        return;
+    }
+    if (rl->tokens >= 1000) {
+        /* We can transmit more packets as soon as we're called again. */
+        poll_immediate_wake();
+    } else {
+        /* We have to wait for the bucket to re-fill.  We could calculate
+         * the exact amount of time here for increased smoothness. */
+        poll_timer_wait(TIME_UPDATE_INTERVAL / 2);
+    }
+}
+
+/* Creates and returns a hook that rate-limits OFPT_PACKET_IN messages
+ * relayed from the switch toward the controller connection 'remote',
+ * according to the rate and burst limits in 's'.  Also registers a
+ * "rate-limit" status category with 'ss'.  ('local' is currently unused.) */
+static struct hook
+rate_limit_hook_create(const struct settings *s, struct switch_status *ss,
+                       struct rconn *local, struct rconn *remote)
+{
+    struct rate_limiter *rl;
+    size_t i;
+
+    rl = xcalloc(1, sizeof *rl);
+    rl->s = s;
+    rl->remote_rconn = remote;
+    for (i = 0; i < ARRAY_SIZE(rl->queues); i++) {
+        queue_init(&rl->queues[i]);
+    }
+    rl->last_fill = time_msec();
+    /* Tokens accrue at 'rate_limit' per ms (see refill_bucket()), so this
+     * seeds the bucket with 100 ms worth of tokens. */
+    rl->tokens = s->rate_limit * 100;
+    switch_status_register_category(ss, "rate-limit",
+                                    rate_limit_status_cb, rl);
+    return make_hook(rate_limit_packet_cb, rate_limit_periodic_cb,
+                     rate_limit_wait_cb, rl);
+}
+\f
+/* OFPST_SWITCH statistics. */
+
+/* One registered status category: a name prefix plus the callback that
+ * produces that category's "key=value" lines. */
+struct switch_status_category {
+    char *name;                 /* Category name, e.g. "rate-limit". */
+    void (*cb)(struct status_reply *, void *aux); /* Emits status lines. */
+    void *aux;                  /* Passed through to 'cb'. */
+};
+
+/* State for answering OFPST_SWITCH statistics requests. */
+struct switch_status {
+    const struct settings *s;   /* Program settings (for "config" category). */
+    time_t booted;              /* Startup time; baseline for "uptime". */
+    struct switch_status_category categories[8]; /* Fixed capacity, enforced
+                                 * by assert in register_category(). */
+    int n_categories;           /* Number of 'categories' in use. */
+};
+
+/* Per-request state threaded through the category callbacks while building
+ * one OFPST_SWITCH reply. */
+struct status_reply {
+    struct switch_status_category *category; /* Category currently running. */
+    struct ds request;          /* Requested prefix; borrows the request
+                                 * buffer (not null-terminated, not owned). */
+    struct ds output;           /* Accumulated reply body. */
+};
+
+/* Message hook: intercepts OFPST_SWITCH statistics requests arriving from
+ * the remote (controller) half of relay 'r' and answers them directly,
+ * instead of forwarding them on.  Returns true if the message was handled
+ * here, false to let it continue along its normal path. */
+static bool
+switch_status_packet_cb(struct relay *r, int half, void *ss_)
+{
+    struct switch_status *ss = ss_;
+    struct rconn *rc = r->halves[HALF_REMOTE].rconn;
+    struct ofpbuf *msg = r->halves[HALF_REMOTE].rxbuf;
+    struct switch_status_category *c;
+    struct ofp_stats_request *osr;
+    struct ofp_stats_reply *reply;
+    struct status_reply sr;
+    struct ofp_header *oh;
+    struct ofpbuf *b;
+    int retval;
+
+    /* Only requests from the controller side are answered here. */
+    if (half == HALF_LOCAL) {
+        return false;
+    }
+
+    oh = msg->data;
+    if (oh->type != OFPT_STATS_REQUEST) {
+        return false;
+    }
+    if (msg->size < sizeof(struct ofp_stats_request)) {
+        VLOG_WARN_RL(&vrl, "packet too short (%zu bytes) for stats_request",
+                     msg->size);
+        return false;
+    }
+
+    osr = msg->data;
+    if (osr->type != htons(OFPST_SWITCH)) {
+        return false;
+    }
+
+    /* The request body (everything after the stats_request header) is a
+     * prefix that selects which status lines appear in the reply; an empty
+     * body selects everything.  'sr.request' borrows the request buffer. */
+    sr.request.string = (void *) (osr + 1);
+    sr.request.length = msg->size - sizeof *osr;
+    ds_init(&sr.output);
+    /* Run every category whose name prefix-matches the request; individual
+     * lines are then filtered against the full prefix by status_reply_put().
+     * A zero-length request matches every category (memcmp of 0 bytes). */
+    for (c = ss->categories; c < &ss->categories[ss->n_categories]; c++) {
+        if (!memcmp(c->name, sr.request.string,
+                    MIN(strlen(c->name), sr.request.length))) {
+            sr.category = c;
+            c->cb(&sr, c->aux);
+        }
+    }
+    reply = make_openflow_xid((offsetof(struct ofp_stats_reply, body)
+                               + sr.output.length),
+                              OFPT_STATS_REPLY, osr->header.xid, &b);
+    reply->type = htons(OFPST_SWITCH);
+    reply->flags = 0;
+    memcpy(reply->body, sr.output.string, sr.output.length);
+    retval = rconn_send(rc, b, NULL);
+    if (retval && retval != EAGAIN) {
+        VLOG_WARN("send failed (%s)", strerror(retval));
+    }
+    ds_destroy(&sr.output);
+    return true;
+}
+
+/* Status callback that reports connection statistics for the rconn passed as
+ * 'rconn_'.  (Its registration is not visible in this chunk -- presumably it
+ * is registered once per relayed connection; confirm against the caller.) */
+static void
+rconn_status_cb(struct status_reply *sr, void *rconn_)
+{
+    struct rconn *rconn = rconn_;
+    time_t now = time_now();
+
+    status_reply_put(sr, "name=%s", rconn_get_name(rconn));
+    status_reply_put(sr, "state=%s", rconn_get_state(rconn));
+    status_reply_put(sr, "backoff=%d", rconn_get_backoff(rconn));
+    status_reply_put(sr, "is-connected=%s",
+                     rconn_is_connected(rconn) ? "true" : "false");
+    status_reply_put(sr, "sent-msgs=%u", rconn_packets_sent(rconn));
+    status_reply_put(sr, "received-msgs=%u", rconn_packets_received(rconn));
+    status_reply_put(sr, "attempted-connections=%u",
+                     rconn_get_attempted_connections(rconn));
+    status_reply_put(sr, "successful-connections=%u",
+                     rconn_get_successful_connections(rconn));
+    status_reply_put(sr, "last-connection=%ld",
+                     (long int) (now - rconn_get_last_connection(rconn)));
+    status_reply_put(sr, "time-connected=%lu",
+                     rconn_get_total_time_connected(rconn));
+    status_reply_put(sr, "state-elapsed=%u", rconn_get_state_elapsed(rconn));
+}
+
+/* Status callback for the "config" category: reports the configured
+ * management listeners, probe interval, and maximum backoff from the
+ * settings passed as 's_'.  Zero-valued intervals/backoffs are omitted. */
+static void
+config_status_cb(struct status_reply *sr, void *s_)
+{
+    const struct settings *s = s_;
+    size_t i;
+
+    for (i = 0; i < s->n_listeners; i++) {
+        status_reply_put(sr, "management%zu=%s", i, s->listener_names[i]);
+    }
+    if (s->probe_interval) {
+        status_reply_put(sr, "probe-interval=%d", s->probe_interval);
+    }
+    if (s->max_backoff) {
+        status_reply_put(sr, "max-backoff=%d", s->max_backoff);
+    }
+}
+
+/* Status callback for the "switch" category: reports the current time, the
+ * process uptime (relative to 'booted'), and the PID. */
+static void
+switch_status_cb(struct status_reply *sr, void *ss_)
+{
+    struct switch_status *ss = ss_;
+    time_t now = time_now();
+
+    status_reply_put(sr, "now=%ld", (long int) now);
+    status_reply_put(sr, "uptime=%ld", (long int) (now - ss->booted));
+    status_reply_put(sr, "pid=%ld", (long int) getpid());
+}
+
+/* Creates the switch-status hook, which answers OFPST_SWITCH statistics
+ * requests.  Registers the built-in "config" and "switch" categories and
+ * stores the new switch_status in '*ssp' so that other modules can register
+ * further categories. */
+static struct hook
+switch_status_hook_create(const struct settings *s, struct switch_status **ssp)
+{
+    struct switch_status *ss = xcalloc(1, sizeof *ss);
+    ss->s = s;
+    ss->booted = time_now();
+    /* The cast drops 'const'; config_status_cb only reads from 's'. */
+    switch_status_register_category(ss, "config",
+                                    config_status_cb, (void *) s);
+    switch_status_register_category(ss, "switch", switch_status_cb, ss);
+    *ssp = ss;
+    return make_hook(switch_status_packet_cb, NULL, NULL, ss);
+}
+
+/* Registers 'cb' under the name 'category' so that it is invoked (with
+ * 'aux') to produce status lines when a matching OFPST_SWITCH request
+ * arrives.  The category table has a fixed capacity; exceeding it is a
+ * programming error caught by the assertion. */
+static void
+switch_status_register_category(struct switch_status *ss,
+                                const char *category,
+                                void (*cb)(struct status_reply *,
+                                           void *aux),
+                                void *aux)
+{
+    struct switch_status_category *c;
+
+    assert(ss->n_categories < ARRAY_SIZE(ss->categories));
+    c = &ss->categories[ss->n_categories++];
+    c->name = xstrdup(category);
+    c->cb = cb;
+    c->aux = aux;
+}
+
+/* Formats 'content' into a status line of the form "<category>.<content>\n"
+ * and appends it to 'sr''s output, then removes the line again if it does
+ * not begin with the prefix carried in the original request.  Formatting
+ * first and filtering afterward keeps the category callbacks oblivious to
+ * the request prefix. */
+static void
+status_reply_put(struct status_reply *sr, const char *content, ...)
+{
+    size_t old_length = sr->output.length;
+    size_t added;
+    va_list args;
+
+    /* Append the status reply to the output. */
+    ds_put_format(&sr->output, "%s.", sr->category->name);
+    va_start(args, content);
+    ds_put_format_valist(&sr->output, content, args);
+    va_end(args);
+    if (ds_last(&sr->output) != '\n') {
+        ds_put_char(&sr->output, '\n');
+    }
+
+    /* Drop what we just added if it doesn't match the request.  A line
+     * shorter than the request prefix cannot possibly match it, hence the
+     * length test before the memcmp. */
+    added = sr->output.length - old_length;
+    if (added < sr->request.length
+        || memcmp(&sr->output.string[old_length],
+                  sr->request.string, sr->request.length)) {
+        ds_truncate(&sr->output, old_length);
+    }
+}
+