+fail_open_packet_cb(struct relay *r, int half, void *fail_open_)
+{
+ struct fail_open_data *fail_open = fail_open_;
+ if (half != HALF_LOCAL || r->is_mgmt_conn || !fail_open->lswitch) {
+ return false;
+ } else {
+ lswitch_process_packet(fail_open->lswitch, fail_open->local_rconn,
+ r->halves[HALF_LOCAL].rxbuf);
+ rconn_run(fail_open->local_rconn);
+ return true;
+ }
+}
+
+/* Creates and returns a hook implementing fail-open behavior.  Allocates the
+ * per-hook state, wiring in 's' and the two connections; the learning switch
+ * ('lswitch') starts out disabled (NULL) and is presumably brought up later
+ * by the periodic callback.  The returned hook owns the allocated state. */
+static struct hook
+fail_open_hook_create(const struct settings *s, struct rconn *local_rconn,
+                      struct rconn *remote_rconn)
+{
+    struct fail_open_data *data = xmalloc(sizeof *data);
+
+    data->lswitch = NULL;
+    data->s = s;
+    data->remote_rconn = remote_rconn;
+    data->local_rconn = local_rconn;
+
+    return make_hook(fail_open_packet_cb, fail_open_periodic_cb, NULL, data);
+}
+\f
+/* State for rate-limiting packet_in traffic sent over 'remote_rconn', with
+ * per-port fairness (one queue per physical port, drained round-robin) and a
+ * token bucket to enforce the configured rate and burst limits. */
+struct rate_limiter {
+    const struct settings *s;       /* Configuration (rate_limit, burst_limit). */
+    struct rconn *remote_rconn;     /* Remote connection that queued packets
+                                     * are ultimately transmitted on. */
+
+    /* One queue per physical port. */
+    struct queue queues[OFPP_MAX];
+    int n_queued;               /* Sum over queues[*].n. */
+    int next_tx_port;           /* Next port to check in round-robin. */
+
+    /* Token bucket.
+     *
+     * It costs 1000 tokens to send a single packet_in message.  A single
+     * token per message would be more straightforward, but this choice lets
+     * us avoid round-off error in refill_bucket()'s calculation of how many
+     * tokens to add to the bucket, since no division step is needed. */
+    long long int last_fill;    /* Time at which we last added tokens
+                                 * (milliseconds; see refill_bucket()). */
+    int tokens;                 /* Current number of tokens. */
+
+    /* Transmission queue. */
+    int n_txq;                  /* No. of packets waiting in rconn for tx. */
+};
+
+/* Drops a packet from the longest queue in 'rl'.  Ties among equally long
+ * queues are broken uniformly at random.  The caller must ensure that at
+ * least one packet is queued ('rl->n_queued' > 0). */
+static void
+drop_packet(struct rate_limiter *rl)
+{
+    struct queue *longest;      /* Queue currently selected as longest. */
+    int n_longest;              /* # of queues of same length as 'longest'. */
+    struct queue *q;
+
+    /* queues[0] is accounted for by the initializers, so the scan below must
+     * start at queues[1].  Starting at queues[0] would count it twice among
+     * ties and skew the random tie-break (e.g. with two tied queues the
+     * first would be chosen with probability 2/3 instead of 1/2). */
+    longest = &rl->queues[0];
+    n_longest = 1;
+    for (q = &rl->queues[1]; q < &rl->queues[OFPP_MAX]; q++) {
+        if (longest->n < q->n) {
+            longest = q;
+            n_longest = 1;
+        } else if (longest->n == q->n) {
+            n_longest++;
+
+            /* Randomly select one of the longest queues, with a uniform
+             * distribution (Knuth algorithm 3.4.2R). */
+            if (!random_range(n_longest)) {
+                longest = q;
+            }
+        }
+    }
+
+    /* FIXME: do we want to pop the tail instead? */
+    buffer_delete(queue_pop_head(longest));
+    rl->n_queued--;
+}
+
+/* Removes and returns the next packet to transmit, rotating fairly among the
+ * per-port queues in round-robin order starting from 'rl->next_tx_port'.
+ * The caller must ensure that at least one packet is queued. */
+static struct buffer *
+dequeue_packet(struct rate_limiter *rl)
+{
+    unsigned int ofs;
+
+    for (ofs = 0; ofs < OFPP_MAX; ofs++) {
+        unsigned int port = (rl->next_tx_port + ofs) % OFPP_MAX;
+        struct queue *q = &rl->queues[port];
+        if (q->n > 0) {
+            rl->n_queued--;
+            rl->next_tx_port = (port + 1) % OFPP_MAX;
+            return queue_pop_head(q);
+        }
+    }
+    NOT_REACHED();
+}
+
+/* Adds tokens to 'rl''s bucket based on the time elapsed since the last
+ * refill.
+ *
+ * Each elapsed millisecond earns 's->rate_limit' tokens, and a packet costs
+ * 1000 tokens (see struct rate_limiter), so this yields 's->rate_limit'
+ * packets per second with no division step — assuming 's->rate_limit' is in
+ * packets per second (confirm against struct settings).  The bucket is
+ * capped at 's->burst_limit' packets' worth of tokens. */
+static void
+refill_bucket(struct rate_limiter *rl)
+{
+    const struct settings *s = rl->s;
+    long long int now = time_msec();
+    long long int tokens = (now - rl->last_fill) * s->rate_limit + rl->tokens;
+    if (tokens >= 1000) {
+        /* Commit only once at least one packet's worth has accrued.  When
+         * less than 1000 tokens have accumulated, 'last_fill' is left alone
+         * on purpose so that the fractional accrual from a short interval is
+         * recounted (not discarded) on the next call. */
+        rl->last_fill = now;
+        rl->tokens = MIN(tokens, s->burst_limit * 1000);
+    }
+}
+
+/* Attempts to remove enough tokens from 'rl' to transmit a packet. Returns
+ * true if successful, false otherwise. (In the latter case no tokens are
+ * removed.) */
+static bool
+get_token(struct rate_limiter *rl)