/* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */
#include <config.h>

#include "ofproto-dpif-upcall.h"

#include <errno.h>

#include "coverage.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "latch.h"
#include "list.h"
#include "netlink.h"
#include "ofpbuf.h"
#include "ofproto-dpif.h"
#include "ofproto-dpif-xlate.h"
#include "packets.h"
#include "poll-loop.h"
#include "seq.h"
#include "util.h"
#include "vlog.h"
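
/* Upper bound on the number of entries in each of the queues managed here: a
 * handler's unprocessed miss upcalls and the udpif's drop keys, special
 * upcalls, and flow miss batches.  Once a queue is full, new entries are
 * dropped and the matching *_queue_overflow coverage counter is bumped. */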
#define MAX_QUEUE_LENGTH 512

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);

COVERAGE_DEFINE(upcall_queue_overflow);
COVERAGE_DEFINE(drop_queue_overflow);
COVERAGE_DEFINE(miss_queue_overflow);
COVERAGE_DEFINE(fmb_queue_overflow);
/* A thread that processes each upcall handed to it by the dispatcher thread,
 * forwards the upcall's packet, and then queues it to the main ofproto_dpif
 * to possibly set up a kernel flow as a cache. */
struct handler {
    struct udpif *udpif;               /* Parent udpif. */
    pthread_t thread;                  /* Thread ID. */

    struct ovs_mutex mutex;            /* Mutex guarding the following. */

    /* Atomic queue of unprocessed miss upcalls. */
    struct list upcalls OVS_GUARDED;
    size_t n_upcalls OVS_GUARDED;

    pthread_cond_t wake_cond;          /* Wakes 'thread' while holding
                                          'mutex'. */
};
/* An upcall handler for ofproto_dpif.
 *
 * udpif is implemented as a "dispatcher" thread that reads upcalls from the
 * kernel.  It processes each upcall just enough to figure out its next
 * destination.  For a "miss" upcall (MISS_UPCALL), this is one of several
 * "handler" threads (see struct handler).  Other upcalls are queued to the
 * main ofproto_dpif. */
struct udpif {
    struct dpif *dpif;                 /* Datapath handle. */
    struct dpif_backer *backer;        /* Opaque dpif_backer pointer. */

    uint32_t secret;                   /* Random seed for upcall hash. */

    pthread_t dispatcher;              /* Dispatcher thread ID. */

    struct handler *handlers;          /* Miss handlers. */
    size_t n_handlers;

    /* Atomic queue of unprocessed drop keys. */
    struct ovs_mutex drop_key_mutex;
    struct list drop_keys OVS_GUARDED;
    size_t n_drop_keys OVS_GUARDED;

    /* Atomic queue of special upcalls for ofproto-dpif to process. */
    struct ovs_mutex upcall_mutex;
    struct list upcalls OVS_GUARDED;
    size_t n_upcalls OVS_GUARDED;

    /* Atomic queue of flow_miss_batches. */
    struct ovs_mutex fmb_mutex;
    struct list fmbs OVS_GUARDED;
    size_t n_fmbs OVS_GUARDED;

    /* Number of times udpif_revalidate() has been called. */
    atomic_uint reval_seq;

    struct seq *wait_seq;
    uint64_t last_seq;

    struct latch exit_latch;           /* Tells child threads to exit. */
};
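
/* The dispatcher and handler threads hand work back to the main ofproto-dpif
 * thread through the queues above: a producer pushes an entry under the
 * corresponding mutex and then calls seq_change() on 'wait_seq', while
 * udpif_wait() passes the value that udpif_run() last stored in 'last_seq' to
 * seq_wait(), so the main thread's poll loop wakes only when there is new
 * work to pick up. */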
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

static void recv_upcalls(struct udpif *);
static void handle_miss_upcalls(struct udpif *, struct list *upcalls);
static void miss_destroy(struct flow_miss *);
static void *udpif_dispatcher(void *);
static void *udpif_miss_handler(void *);

struct udpif *
udpif_create(struct dpif_backer *backer, struct dpif *dpif)
{
    struct udpif *udpif = xzalloc(sizeof *udpif);

    udpif->dpif = dpif;
    udpif->backer = backer;
    udpif->secret = random_uint32();
    udpif->wait_seq = seq_create();
    latch_init(&udpif->exit_latch);
    list_init(&udpif->drop_keys);
    list_init(&udpif->upcalls);
    list_init(&udpif->fmbs);
    atomic_init(&udpif->reval_seq, 0);

    return udpif;
}

void
udpif_destroy(struct udpif *udpif)
{
    struct flow_miss_batch *fmb;
    struct drop_key *drop_key;
    struct upcall *upcall;

    udpif_recv_set(udpif, 0, false);

    while ((drop_key = drop_key_next(udpif))) {
        drop_key_destroy(drop_key);
    }

    while ((upcall = upcall_next(udpif))) {
        upcall_destroy(upcall);
    }

    while ((fmb = flow_miss_batch_next(udpif))) {
        flow_miss_batch_destroy(fmb);
    }

    ovs_mutex_destroy(&udpif->drop_key_mutex);
    ovs_mutex_destroy(&udpif->upcall_mutex);
    ovs_mutex_destroy(&udpif->fmb_mutex);
    latch_destroy(&udpif->exit_latch);
    seq_destroy(udpif->wait_seq);
    free(udpif);
}

/* Tells 'udpif' to begin or stop handling flow misses depending on the value
 * of 'enable'.  'n_handlers' is the number of miss_handler threads to create.
 * Passing 'n_handlers' as zero is equivalent to passing 'enable' as false. */
void
udpif_recv_set(struct udpif *udpif, size_t n_handlers, bool enable)
{
    n_handlers = enable ? n_handlers : 0;
    n_handlers = MIN(n_handlers, 64);

    /* Stop the old threads (if any). */
    if (udpif->handlers && udpif->n_handlers != n_handlers) {
        size_t i;

        latch_set(&udpif->exit_latch);

        /* Wake the handlers so they can exit. */
        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            ovs_mutex_lock(&handler->mutex);
            xpthread_cond_signal(&handler->wake_cond);
            ovs_mutex_unlock(&handler->mutex);
        }

        xpthread_join(udpif->dispatcher, NULL);
        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];
            struct upcall *miss, *next;

            xpthread_join(handler->thread, NULL);

            ovs_mutex_lock(&handler->mutex);
            LIST_FOR_EACH_SAFE (miss, next, list_node, &handler->upcalls) {
                list_remove(&miss->list_node);
                upcall_destroy(miss);
            }
            ovs_mutex_unlock(&handler->mutex);
            ovs_mutex_destroy(&handler->mutex);

            xpthread_cond_destroy(&handler->wake_cond);
        }
        latch_poll(&udpif->exit_latch);

        free(udpif->handlers);
        udpif->handlers = NULL;
        udpif->n_handlers = 0;
    }

    /* Start new threads (if necessary). */
    if (!udpif->handlers && n_handlers) {
        size_t i;

        udpif->n_handlers = n_handlers;
        udpif->handlers = xzalloc(udpif->n_handlers * sizeof *udpif->handlers);
        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            handler->udpif = udpif;
            list_init(&handler->upcalls);
            xpthread_cond_init(&handler->wake_cond, NULL);
            ovs_mutex_init(&handler->mutex, PTHREAD_MUTEX_NORMAL);
            xpthread_create(&handler->thread, NULL, udpif_miss_handler, handler);
        }
        xpthread_create(&udpif->dispatcher, NULL, udpif_dispatcher, udpif);
    }
}
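
/* A minimal usage sketch (illustrative only; the thread count here is
 * arbitrary, not a value this module prescribes):
 *
 *     struct udpif *udpif = udpif_create(backer, dpif);
 *     udpif_recv_set(udpif, 16, true);     (start the dispatcher and handlers)
 *     ...
 *     udpif_recv_set(udpif, 0, false);     (stop and join all of the threads)
 *     udpif_destroy(udpif);
 */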

void
udpif_run(struct udpif *udpif)
{
    udpif->last_seq = seq_read(udpif->wait_seq);
}

void
udpif_wait(struct udpif *udpif)
{
    ovs_mutex_lock(&udpif->drop_key_mutex);
    if (udpif->n_drop_keys) {
        poll_immediate_wake();
    }
    ovs_mutex_unlock(&udpif->drop_key_mutex);

    ovs_mutex_lock(&udpif->upcall_mutex);
    if (udpif->n_upcalls) {
        poll_immediate_wake();
    }
    ovs_mutex_unlock(&udpif->upcall_mutex);

    ovs_mutex_lock(&udpif->fmb_mutex);
    if (udpif->n_fmbs) {
        poll_immediate_wake();
    }
    ovs_mutex_unlock(&udpif->fmb_mutex);

    seq_wait(udpif->wait_seq, udpif->last_seq);
}

/* Notifies 'udpif' that something changed which may render previous
 * xlate_actions() results invalid. */
void
udpif_revalidate(struct udpif *udpif)
{
    struct flow_miss_batch *fmb, *next_fmb;
    unsigned int junk;

    /* Since we remove each miss on revalidation, their statistics won't be
     * accounted to the appropriate 'facet's in the upper layer.  In most
     * cases, this is alright because we've already pushed the stats to the
     * relevant rules.  However, NetFlow requires absolute packet counts on
     * 'facet's which could now be incorrect. */
    ovs_mutex_lock(&udpif->fmb_mutex);
    atomic_add(&udpif->reval_seq, 1, &junk);
    LIST_FOR_EACH_SAFE (fmb, next_fmb, list_node, &udpif->fmbs) {
        list_remove(&fmb->list_node);
        flow_miss_batch_destroy(fmb);
        udpif->n_fmbs--;
    }
    ovs_mutex_unlock(&udpif->fmb_mutex);
    udpif_drop_key_clear(udpif);
}
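
/* Note that bumping 'reval_seq' also invalidates any batch that a handler
 * thread is still building: handle_miss_upcalls() re-reads the sequence
 * number after translating its misses and discards the batch if the value
 * changed in the meantime. */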

/* Retrieves the next upcall which ofproto-dpif is responsible for handling.
 * The caller is responsible for destroying the returned upcall with
 * upcall_destroy(). */
struct upcall *
upcall_next(struct udpif *udpif)
{
    struct upcall *next = NULL;

    ovs_mutex_lock(&udpif->upcall_mutex);
    if (udpif->n_upcalls) {
        udpif->n_upcalls--;
        next = CONTAINER_OF(list_pop_front(&udpif->upcalls), struct upcall,
                            list_node);
    }
    ovs_mutex_unlock(&udpif->upcall_mutex);

    return next;
}

/* Destroys and deallocates 'upcall'. */
void
upcall_destroy(struct upcall *upcall)
{
    if (upcall) {
        ofpbuf_uninit(&upcall->upcall_buf);
        free(upcall);
    }
}

/* Retrieves the next batch of processed flow misses for 'udpif' to install.
 * The caller is responsible for destroying it with
 * flow_miss_batch_destroy(). */
struct flow_miss_batch *
flow_miss_batch_next(struct udpif *udpif)
{
    struct flow_miss_batch *next = NULL;

    ovs_mutex_lock(&udpif->fmb_mutex);
    if (udpif->n_fmbs) {
        udpif->n_fmbs--;
        next = CONTAINER_OF(list_pop_front(&udpif->fmbs),
                            struct flow_miss_batch, list_node);
    }
    ovs_mutex_unlock(&udpif->fmb_mutex);

    return next;
}

/* Destroys and deallocates 'fmb'. */
void
flow_miss_batch_destroy(struct flow_miss_batch *fmb)
{
    struct flow_miss *miss, *next;

    if (!fmb) {
        return;
    }

    HMAP_FOR_EACH_SAFE (miss, next, hmap_node, &fmb->misses) {
        hmap_remove(&fmb->misses, &miss->hmap_node);
        miss_destroy(miss);
    }

    hmap_destroy(&fmb->misses);
    free(fmb);
}

/* Retrieves the next drop key which ofproto-dpif needs to process.  The
 * caller is responsible for destroying it with drop_key_destroy(). */
struct drop_key *
drop_key_next(struct udpif *udpif)
{
    struct drop_key *next = NULL;

    ovs_mutex_lock(&udpif->drop_key_mutex);
    if (udpif->n_drop_keys) {
        udpif->n_drop_keys--;
        next = CONTAINER_OF(list_pop_front(&udpif->drop_keys), struct drop_key,
                            list_node);
    }
    ovs_mutex_unlock(&udpif->drop_key_mutex);

    return next;
}

/* Destroys and deallocates 'drop_key'. */
void
drop_key_destroy(struct drop_key *drop_key)
{
    if (drop_key) {
        free(drop_key->key);
        free(drop_key);
    }
}

/* Clears all drop keys waiting to be processed by drop_key_next(). */
void
udpif_drop_key_clear(struct udpif *udpif)
{
    struct drop_key *drop_key, *next;

    ovs_mutex_lock(&udpif->drop_key_mutex);
    LIST_FOR_EACH_SAFE (drop_key, next, list_node, &udpif->drop_keys) {
        list_remove(&drop_key->list_node);
        drop_key_destroy(drop_key);
        udpif->n_drop_keys--;
    }
    ovs_mutex_unlock(&udpif->drop_key_mutex);
}

/* The dispatcher thread is responsible for receiving upcalls from the kernel,
 * assigning the miss upcalls to a miss_handler thread, and assigning the more
 * complex ones to ofproto-dpif directly. */
static void *
udpif_dispatcher(void *arg)
{
    struct udpif *udpif = arg;

    set_subprogram_name("dispatcher");
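    /* Each pass drains the upcalls the datapath has queued, then blocks in
     * the poll loop until either dpif_recv_wait() reports more traffic or
     * latch_wait() reports that the exit latch has been set. */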
    while (!latch_is_set(&udpif->exit_latch)) {
        recv_upcalls(udpif);
        dpif_recv_wait(udpif->dpif);
        latch_wait(&udpif->exit_latch);
        poll_block();
    }

    return NULL;
}

/* The miss handler thread is responsible for processing miss upcalls
 * retrieved by the dispatcher thread.  Once finished it passes the processed
 * miss upcalls to ofproto-dpif where they're installed in the datapath. */
static void *
udpif_miss_handler(void *arg)
{
    struct list misses = LIST_INITIALIZER(&misses);
    struct handler *handler = arg;

    set_subprogram_name("miss_handler");
    for (;;) {
        size_t i;

        ovs_mutex_lock(&handler->mutex);

        if (latch_is_set(&handler->udpif->exit_latch)) {
            ovs_mutex_unlock(&handler->mutex);
            return NULL;
        }

        if (!handler->n_upcalls) {
            ovs_mutex_cond_wait(&handler->wake_cond, &handler->mutex);
        }

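        /* Pull at most FLOW_MISS_MAX_BATCH upcalls off the queue at a time so
         * that each handle_miss_upcalls() call, and therefore each
         * dpif_operate() batch, stays bounded even when the queue is long. */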
        for (i = 0; i < FLOW_MISS_MAX_BATCH; i++) {
            if (handler->n_upcalls) {
                handler->n_upcalls--;
                list_push_back(&misses, list_pop_front(&handler->upcalls));
            } else {
                break;
            }
        }
        ovs_mutex_unlock(&handler->mutex);

        handle_miss_upcalls(handler->udpif, &misses);
    }
}

static void
miss_destroy(struct flow_miss *miss)
{
    struct upcall *upcall, *next;

    LIST_FOR_EACH_SAFE (upcall, next, list_node, &miss->upcalls) {
        list_remove(&upcall->list_node);
        upcall_destroy(upcall);
    }
    xlate_out_uninit(&miss->xout);
}

static enum upcall_type
classify_upcall(const struct upcall *upcall)
{
    const struct dpif_upcall *dpif_upcall = &upcall->dpif_upcall;
    union user_action_cookie cookie;
    size_t userdata_len;

    /* First look at the upcall type. */
    switch (dpif_upcall->type) {
    case DPIF_UC_ACTION:
        break;

    case DPIF_UC_MISS:
        return MISS_UPCALL;

    case DPIF_N_UC_TYPES:
    default:
        VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32,
                     dpif_upcall->type);
        return BAD_UPCALL;
    }

    /* "action" upcalls need a closer look. */
    if (!dpif_upcall->userdata) {
        VLOG_WARN_RL(&rl, "action upcall missing cookie");
        return BAD_UPCALL;
    }
    userdata_len = nl_attr_get_size(dpif_upcall->userdata);
    if (userdata_len < sizeof cookie.type
        || userdata_len > sizeof cookie) {
        VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %zu",
                     userdata_len);
        return BAD_UPCALL;
    }
    memset(&cookie, 0, sizeof cookie);
    memcpy(&cookie, nl_attr_get(dpif_upcall->userdata), userdata_len);
    if (userdata_len == sizeof cookie.sflow
        && cookie.type == USER_ACTION_COOKIE_SFLOW) {
        return SFLOW_UPCALL;
    } else if (userdata_len == sizeof cookie.slow_path
               && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
        return MISS_UPCALL;
    } else if (userdata_len == sizeof cookie.flow_sample
               && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
        return FLOW_SAMPLE_UPCALL;
    } else if (userdata_len == sizeof cookie.ipfix
               && cookie.type == USER_ACTION_COOKIE_IPFIX) {
        return IPFIX_UPCALL;
    } else {
        VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16
                     " and size %zu", cookie.type, userdata_len);
        return BAD_UPCALL;
    }
}

static void
recv_upcalls(struct udpif *udpif)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 60);

    for (;;) {
        struct upcall *upcall;
        int error;

        upcall = xmalloc(sizeof *upcall);
        ofpbuf_use_stub(&upcall->upcall_buf, upcall->upcall_stub,
                        sizeof upcall->upcall_stub);
        error = dpif_recv(udpif->dpif, &upcall->dpif_upcall,
                          &upcall->upcall_buf);
        if (error) {
            upcall_destroy(upcall);
            break;
        }

        upcall->type = classify_upcall(upcall);
        if (upcall->type == BAD_UPCALL) {
            upcall_destroy(upcall);
        } else if (upcall->type == MISS_UPCALL) {
            struct dpif_upcall *dupcall = &upcall->dpif_upcall;
            uint32_t hash = udpif->secret;
            struct handler *handler;
            const struct nlattr *nla;
            size_t n_bytes, left;

            n_bytes = 0;
            NL_ATTR_FOR_EACH (nla, left, dupcall->key, dupcall->key_len) {
                enum ovs_key_attr type = nl_attr_type(nla);
                if (type == OVS_KEY_ATTR_IN_PORT
                    || type == OVS_KEY_ATTR_TCP
                    || type == OVS_KEY_ATTR_UDP) {
                    if (nl_attr_get_size(nla) == 4) {
                        ovs_be32 attr = nl_attr_get_be32(nla);
                        hash = mhash_add(hash, (uint32_t) attr);
                        n_bytes += 4;
                    } else {
                        VLOG_WARN("Netlink attribute with incorrect size.");
                    }
                }
            }
            hash = mhash_finish(hash, n_bytes);

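            /* The hash covers only the input port and TCP/UDP attributes of
             * the flow key and is seeded with 'udpif->secret', so every
             * packet of a given flow lands on the same handler thread while
             * distinct flows spread across all of the handlers. */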
            handler = &udpif->handlers[hash % udpif->n_handlers];

            ovs_mutex_lock(&handler->mutex);
            if (handler->n_upcalls < MAX_QUEUE_LENGTH) {
                list_push_back(&handler->upcalls, &upcall->list_node);
                handler->n_upcalls++;
                xpthread_cond_signal(&handler->wake_cond);
                ovs_mutex_unlock(&handler->mutex);
                if (!VLOG_DROP_DBG(&rl)) {
                    struct ds ds = DS_EMPTY_INITIALIZER;

                    odp_flow_key_format(upcall->dpif_upcall.key,
                                        upcall->dpif_upcall.key_len,
                                        &ds);
                    VLOG_DBG("dispatcher: miss enqueue (%s)", ds_cstr(&ds));
                    ds_destroy(&ds);
                }
            } else {
                ovs_mutex_unlock(&handler->mutex);
                COVERAGE_INC(miss_queue_overflow);
                upcall_destroy(upcall);
            }
        } else {
            ovs_mutex_lock(&udpif->upcall_mutex);
            if (udpif->n_upcalls < MAX_QUEUE_LENGTH) {
                udpif->n_upcalls++;
                list_push_back(&udpif->upcalls, &upcall->list_node);
                ovs_mutex_unlock(&udpif->upcall_mutex);
                seq_change(udpif->wait_seq);
            } else {
                ovs_mutex_unlock(&udpif->upcall_mutex);
                COVERAGE_INC(upcall_queue_overflow);
                upcall_destroy(upcall);
            }
        }
    }
}

static struct flow_miss *
flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto,
               const struct flow *flow, uint32_t hash)
{
    struct flow_miss *miss;

    HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
        if (miss->ofproto == ofproto && flow_equal(&miss->flow, flow)) {
            return miss;
        }
    }

    return NULL;
}

/* Executes flow miss 'miss'.  May add any required datapath operations
 * to 'ops', incrementing '*n_ops' for each new op. */
static void
execute_flow_miss(struct flow_miss *miss, struct dpif_op *ops, size_t *n_ops)
{
    struct ofproto_dpif *ofproto = miss->ofproto;
    struct flow_wildcards wc;
    struct rule_dpif *rule;
    struct ofpbuf *packet;
    struct xlate_in xin;

    memset(&miss->stats, 0, sizeof miss->stats);
    miss->stats.used = time_msec();
    LIST_FOR_EACH (packet, list_node, &miss->packets) {
        miss->stats.tcp_flags |= packet_get_tcp_flags(packet, &miss->flow);
        miss->stats.n_bytes += packet->size;
        miss->stats.n_packets++;
    }

    flow_wildcards_init_catchall(&wc);
    rule_dpif_lookup(ofproto, &miss->flow, &wc, &rule);
    rule_credit_stats(rule, &miss->stats);
    xlate_in_init(&xin, ofproto, &miss->flow, rule, miss->stats.tcp_flags,
                  NULL);
    xin.may_learn = true;
    xin.resubmit_stats = &miss->stats;
    xlate_actions(&xin, &miss->xout);
    flow_wildcards_or(&miss->xout.wc, &miss->xout.wc, &wc);

    if (rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
        struct ofputil_packet_in pin;

        /* Extra-special case for fail-open mode.
         *
         * We are in fail-open mode and the packet matched the fail-open
         * rule, but we are connected to a controller too.  We should send
         * the packet up to the controller in the hope that it will try to
         * set up a flow and thereby allow us to exit fail-open.
         *
         * See the top-level comment in fail-open.c for more information. */
        pin.packet = packet->data;
        pin.packet_len = packet->size;
        pin.reason = OFPR_NO_MATCH;
        pin.controller_id = 0;
        pin.send_len = 0; /* Not used for flow table misses. */
        flow_get_metadata(&miss->flow, &pin.fmd);
        ofproto_dpif_send_packet_in(ofproto, &pin);
    }

    if (miss->xout.slow) {
        LIST_FOR_EACH (packet, list_node, &miss->packets) {
            struct xlate_in xin;

            xlate_in_init(&xin, miss->ofproto, &miss->flow, rule, 0, packet);
            xlate_actions_for_side_effects(&xin);
        }
    }

    if (miss->xout.odp_actions.size) {
        LIST_FOR_EACH (packet, list_node, &miss->packets) {
            struct dpif_op *op = &ops[*n_ops];
            struct dpif_execute *execute = &op->u.execute;

            if (miss->flow.in_port.ofp_port
                != vsp_realdev_to_vlandev(miss->ofproto,
                                          miss->flow.in_port.ofp_port,
                                          miss->flow.vlan_tci)) {
                /* This packet was received on a VLAN splinter port.  We
                 * added a VLAN to the packet to make the packet resemble
                 * the flow, but the actions were composed assuming that
                 * the packet contained no VLAN.  So, we must remove the
                 * VLAN header from the packet before trying to execute the
                 * actions. */
                eth_pop_vlan(packet);
            }

            op->type = DPIF_OP_EXECUTE;
            execute->key = miss->key;
            execute->key_len = miss->key_len;
            execute->packet = packet;
            execute->actions = miss->xout.odp_actions.data;
            execute->actions_len = miss->xout.odp_actions.size;

            (*n_ops)++;
        }
    }
}

static void
handle_miss_upcalls(struct udpif *udpif, struct list *upcalls)
{
    struct dpif_op *opsp[FLOW_MISS_MAX_BATCH];
    struct dpif_op ops[FLOW_MISS_MAX_BATCH];
    unsigned int old_reval_seq, new_reval_seq;
    struct upcall *upcall, *next;
    struct flow_miss_batch *fmb;
    size_t n_upcalls, n_ops, i;
    struct flow_miss *miss;

    atomic_read(&udpif->reval_seq, &old_reval_seq);

    /* Construct the to-do list.
     *
     * This just amounts to extracting the flow from each packet and sticking
     * the packets that have the same flow in the same "flow_miss" structure so
     * that we can process them together. */
    fmb = xmalloc(sizeof *fmb);
    hmap_init(&fmb->misses);
    n_upcalls = 0;
    LIST_FOR_EACH_SAFE (upcall, next, list_node, upcalls) {
        struct dpif_upcall *dupcall = &upcall->dpif_upcall;
        struct flow_miss *miss = &fmb->miss_buf[n_upcalls];
        struct flow_miss *existing_miss;
        struct ofproto_dpif *ofproto;
        odp_port_t odp_in_port;
        struct flow flow;
        uint32_t hash;
        int error;

        error = xlate_receive(udpif->backer, dupcall->packet, dupcall->key,
                              dupcall->key_len, &flow, &miss->key_fitness,
                              &ofproto, &odp_in_port);

        if (error == ENODEV) {
            struct drop_key *drop_key;

            /* Received packet on datapath port for which we couldn't
             * associate an ofproto.  This can happen if a port is removed
             * while traffic is being received.  Print a rate-limited message
             * in case it happens frequently.  Install a drop flow so
             * that future packets of the flow are inexpensively dropped
             * in the future. */
            VLOG_INFO_RL(&rl, "received packet on unassociated datapath port "
                         "%"PRIu32, odp_in_port);

            drop_key = xmalloc(sizeof *drop_key);
            drop_key->key = xmemdup(dupcall->key, dupcall->key_len);
            drop_key->key_len = dupcall->key_len;

            ovs_mutex_lock(&udpif->drop_key_mutex);
            if (udpif->n_drop_keys < MAX_QUEUE_LENGTH) {
                udpif->n_drop_keys++;
                list_push_back(&udpif->drop_keys, &drop_key->list_node);
                ovs_mutex_unlock(&udpif->drop_key_mutex);
                seq_change(udpif->wait_seq);
            } else {
                ovs_mutex_unlock(&udpif->drop_key_mutex);
                COVERAGE_INC(drop_queue_overflow);
                drop_key_destroy(drop_key);
            }
            continue;
        } else if (error) {
            continue;
        }

        flow_extract(dupcall->packet, flow.skb_priority, flow.skb_mark,
                     &flow.tunnel, &flow.in_port, &miss->flow);

        /* Add other packets to a to-do list. */
        hash = flow_hash(&miss->flow, 0);
        existing_miss = flow_miss_find(&fmb->misses, ofproto, &miss->flow, hash);
        if (!existing_miss) {
            hmap_insert(&fmb->misses, &miss->hmap_node, hash);
            miss->ofproto = ofproto;
            miss->key = dupcall->key;
            miss->key_len = dupcall->key_len;
            miss->upcall_type = dupcall->type;
            list_init(&miss->packets);
            list_init(&miss->upcalls);

            n_upcalls++;
        } else {
            miss = existing_miss;
        }
        list_push_back(&miss->packets, &dupcall->packet->list_node);

        list_remove(&upcall->list_node);
        list_push_back(&miss->upcalls, &upcall->list_node);
    }

    LIST_FOR_EACH_SAFE (upcall, next, list_node, upcalls) {
        list_remove(&upcall->list_node);
        upcall_destroy(upcall);
    }

    /* Process each element in the to-do list, constructing the set of
     * operations to batch. */
    n_ops = 0;
    HMAP_FOR_EACH (miss, hmap_node, &fmb->misses) {
        execute_flow_miss(miss, ops, &n_ops);
    }
    ovs_assert(n_ops <= ARRAY_SIZE(ops));

    for (i = 0; i < n_ops; i++) {
        opsp[i] = &ops[i];
    }
    dpif_operate(udpif->dpif, opsp, n_ops);

    ovs_mutex_lock(&udpif->fmb_mutex);
    atomic_read(&udpif->reval_seq, &new_reval_seq);
    if (old_reval_seq != new_reval_seq) {
        /* udpif_revalidate() was called as we were calculating the actions.
         * To be safe, we need to assume all the misses need revalidation. */
        ovs_mutex_unlock(&udpif->fmb_mutex);
        flow_miss_batch_destroy(fmb);
    } else if (udpif->n_fmbs < MAX_QUEUE_LENGTH) {
        udpif->n_fmbs++;
        list_push_back(&udpif->fmbs, &fmb->list_node);
        ovs_mutex_unlock(&udpif->fmb_mutex);
        seq_change(udpif->wait_seq);
    } else {
        COVERAGE_INC(fmb_queue_overflow);
        ovs_mutex_unlock(&udpif->fmb_mutex);
        flow_miss_batch_destroy(fmb);
    }
}