2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include "poll-loop.h"
25 #include "dynamic-string.h"
26 #include "fatal-signal.h"
28 #include "ovs-thread.h"
30 #include "socket-util.h"
/* Tag log messages from this file with the "poll_loop" module name. */
VLOG_DEFINE_THIS_MODULE(poll_loop);
/* Coverage counters: bumped when an fd is registered for waiting and when
 * poll_block() runs with an already-expired (zero) timeout. */
COVERAGE_DEFINE(poll_fd_wait);
COVERAGE_DEFINE(poll_zero_timeout);
42 struct hmap_node hmap_node;
43 struct pollfd pollfd; /* Events to pass to time_poll(). */
44 HANDLE wevent; /* Events for WaitForMultipleObjects(). */
45 const char *where; /* Where poll_node was created. */
49 /* All active poll waiters. */
50 struct hmap poll_nodes;
52 /* Time at which to wake up the next call to poll_block(), LLONG_MIN to
53 * wake up immediately, or LLONG_MAX to wait forever. */
54 long long int timeout_when; /* In msecs as returned by time_msec(). */
55 const char *timeout_where; /* Where 'timeout_when' was set. */
58 static struct poll_loop *poll_loop(void);
60 /* Look up the node with same fd and wevent. */
61 static struct poll_node *
62 find_poll_node(struct poll_loop *loop, int fd, uint32_t wevent)
64 struct poll_node *node;
66 HMAP_FOR_EACH_WITH_HASH (node, hmap_node, hash_2words(fd, wevent),
68 if (node->pollfd.fd == fd && node->wevent == wevent) {
/* On Unix based systems:
 *
 * Registers 'fd' as waiting for the specified 'events' (which should be
 * POLLIN or POLLOUT or POLLIN | POLLOUT). The following call to
 * poll_block() will wake up when 'fd' becomes ready for one or more of the
 * requested events. The 'fd's are given to the poll() function later.
 *
 * On Windows systems:
 *
 * If both 'wevent' handle and 'fd' is specified, associate the 'fd'
 * with that 'wevent' for 'events' (implemented in poll_block()).
 * In case of no 'fd' specified, wake up on any event on that 'wevent'.
 * These wevents are given to the WaitForMultipleObjects() to be polled.
 *
 * The event registration is one-shot: only the following call to
 * poll_block() is affected. The event will need to be re-registered after
 * poll_block() is called if it is to persist.
 *
 * ('where' is used in debug logging. Commonly one would use poll_fd_wait() to
 * automatically provide the caller's source file and line number for
 * 'where'.) */
/* NOTE(review): extraction dropped lines from this function (the return-type
 * line, braces, platform #ifdef sections, the error-path return after
 * VLOG_ERR, the if/else around the duplicate check, and presumably a
 * "node->where = where;" assignment).  Code below is left exactly as found;
 * only comments were added. */
poll_fd_wait_at(int fd, HANDLE wevent, short int events, const char *where)
    struct poll_loop *loop = poll_loop();
    struct poll_node *node;

    COVERAGE_INC(poll_fd_wait);

    /* Null event cannot be polled. */
        /* Nothing to wait on: log and (presumably) bail out early. */
        VLOG_ERR("No event to wait fd %d", fd);

    /* Check for duplicate. If found, "or" the event. */
    node = find_poll_node(loop, fd, wevent);
        /* Existing registration for this (fd, wevent): merge the new
         * interest bits into it. */
        node->pollfd.events |= events;
        /* First registration: allocate a node, keyed by hash_2words so
         * find_poll_node() can locate it later. */
        node = xzalloc(sizeof *node);
        hmap_insert(&loop->poll_nodes, &node->hmap_node,
                    hash_2words(fd, wevent));
        node->pollfd.fd = fd;
        node->pollfd.events = events;
        node->wevent = wevent;
/* Causes the following call to poll_block() to block for no more than 'msec'
 * milliseconds. If 'msec' is nonpositive, the following call to poll_block()
 * will not block at all.
 *
 * The timer registration is one-shot: only the following call to poll_block()
 * is affected. The timer will need to be re-registered after poll_block() is
 * called if it is to persist.
 *
 * ('where' is used in debug logging. Commonly one would use poll_timer_wait()
 * to automatically provide the caller's source file and line number for
 * 'where'.)
 *
 * NOTE(review): the 'when' declaration, branch bodies, and braces were lost
 * in extraction; restored to match the visible conditions and comments. */
void
poll_timer_wait_at(long long int msec, const char *where)
{
    long long int now = time_msec();
    long long int when;

    if (msec <= 0) {
        /* Wake up immediately. */
        when = LLONG_MIN;
    } else if ((unsigned long long int) now + msec <= LLONG_MAX) {
        /* Normal case: the absolute deadline fits in a long long.  (The
         * unsigned addition cannot itself overflow for in-range inputs.) */
        when = now + msec;
    } else {
        /* now + msec would overflow. */
        when = LLONG_MAX;
    }

    poll_timer_wait_until_at(when, where);
}
157 /* Causes the following call to poll_block() to wake up when the current time,
158 * as returned by time_msec(), reaches 'when' or later. If 'when' is earlier
159 * than the current time, the following call to poll_block() will not block at
162 * The timer registration is one-shot: only the following call to poll_block()
163 * is affected. The timer will need to be re-registered after poll_block() is
164 * called if it is to persist.
166 * ('where' is used in debug logging. Commonly one would use
167 * poll_timer_wait_until() to automatically provide the caller's source file
168 * and line number for 'where'.) */
170 poll_timer_wait_until_at(long long int when, const char *where)
172 struct poll_loop *loop = poll_loop();
173 if (when < loop->timeout_when) {
174 loop->timeout_when = when;
175 loop->timeout_where = where;
/* Causes the following call to poll_block() to wake up immediately, without
 * blocking.
 *
 * ('where' is used in debug logging. Commonly one would use
 * poll_immediate_wake() to automatically provide the caller's source file and
 * line number for 'where'.)
 *
 * NOTE(review): the return-type line and braces were lost in extraction;
 * restored. */
void
poll_immediate_wake_at(const char *where)
{
    /* A nonpositive timeout makes the next poll_block() return at once. */
    poll_timer_wait_at(0, where);
}
/* Logs, if appropriate, that the poll loop was awakened by an event
 * registered at 'where' (typically a source file and line number). The other
 * arguments have two possible interpretations:
 *
 * - If 'pollfd' is nonnull then it should be the "struct pollfd" that caused
 * the wakeup. 'timeout' is ignored.
 *
 * - If 'pollfd' is NULL then 'timeout' is the number of milliseconds after
 * which the poll loop woke up. */
/* NOTE(review): extraction dropped lines from this function (the return-type
 * line, braces, declarations of 'cpu_usage' and the dynamic string 's', the
 * vlog-level assignments and the quiet early return, the NULL check on
 * 'pollfd', the free of 'description', and ds_destroy(&s)).  Code below is
 * left exactly as found; only comments were added. */
log_wakeup(const char *where, const struct pollfd *pollfd, int timeout)
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
    enum vlog_level level;

    cpu_usage = get_cpu_usage();
    /* Pick verbosity: always log at debug level; otherwise only when CPU
     * usage is high, rate-limited at INFO.  (Level assignments were lost in
     * extraction -- presumably VLL_DBG / VLL_INFO; confirm upstream.) */
    if (VLOG_IS_DBG_ENABLED()) {
    } else if (cpu_usage > 50 && !VLOG_DROP_INFO(&rl)) {

    ds_put_cstr(&s, "wakeup due to ");
    /* fd path: name the fd and list each ready condition from revents. */
    char *description = describe_fd(pollfd->fd);
    if (pollfd->revents & POLLIN) {
        ds_put_cstr(&s, "[POLLIN]");
    if (pollfd->revents & POLLOUT) {
        ds_put_cstr(&s, "[POLLOUT]");
    if (pollfd->revents & POLLERR) {
        ds_put_cstr(&s, "[POLLERR]");
    if (pollfd->revents & POLLHUP) {
        ds_put_cstr(&s, "[POLLHUP]");
    if (pollfd->revents & POLLNVAL) {
        ds_put_cstr(&s, "[POLLNVAL]");
    ds_put_format(&s, " on fd %d (%s)", pollfd->fd, description);
    /* Timeout path: no fd caused the wakeup. */
    ds_put_format(&s, "%d-ms timeout", timeout);
    ds_put_format(&s, " at %s", where);
    /* get_cpu_usage() can apparently report a negative (unknown) value;
     * only include the percentage when it is meaningful. */
    if (cpu_usage >= 0) {
        ds_put_format(&s, " (%d%% CPU usage)", cpu_usage);
    VLOG(level, "%s", ds_cstr(&s));
253 free_poll_nodes(struct poll_loop *loop)
255 struct poll_node *node, *next;
257 HMAP_FOR_EACH_SAFE (node, next, hmap_node, &loop->poll_nodes) {
258 hmap_remove(&loop->poll_nodes, &node->hmap_node);
/* Blocks until one or more of the events registered with poll_fd_wait()
 * occurs, or until the minimum duration registered with poll_timer_wait()
 * elapses, or not at all if poll_immediate_wake() has been called. */
/* NOTE(review): extraction dropped lines from this function (the
 * "void poll_block(void)" line and braces, declarations of 'i', 'retval'
 * and 'elapsed', the fatal-signal wait call referenced by the comment
 * below, loop-index increments, Windows #ifdef markers, the frees of
 * 'pollfds'/'wevents', and the trailing fatal-signal dispatch).  Code below
 * is left exactly as found; only comments were added. */
    struct poll_loop *loop = poll_loop();
    struct poll_node *node;
    struct pollfd *pollfds;
    HANDLE *wevents = NULL;     /* Windows-only wait handles. */

    /* Register fatal signal events before actually doing any real work for
    if (loop->timeout_when == LLONG_MIN) {
        /* poll_immediate_wake() was called: count the zero-timeout poll. */
        COVERAGE_INC(poll_zero_timeout);

    /* One pollfd slot (and, on Windows, one wevent slot) per waiter. */
    pollfds = xmalloc(hmap_count(&loop->poll_nodes) * sizeof *pollfds);
    wevents = xmalloc(hmap_count(&loop->poll_nodes) * sizeof *wevents);

    /* Populate with all the fds and events. */
    HMAP_FOR_EACH (node, hmap_node, &loop->poll_nodes) {
        pollfds[i] = node->pollfd;
        wevents[i] = node->wevent;
        if (node->pollfd.fd && node->wevent) {
            /* Both fd and wevent given: translate the poll()-style interest
             * bits to WSA network events so the wevent is signaled when the
             * socket becomes ready. */
            short int wsa_events = 0;
            if (node->pollfd.events & POLLIN) {
                wsa_events |= FD_READ | FD_ACCEPT | FD_CLOSE;
            if (node->pollfd.events & POLLOUT) {
                wsa_events |= FD_WRITE | FD_CONNECT | FD_CLOSE;
            WSAEventSelect(node->pollfd.fd, node->wevent, wsa_events);

    /* Wait until an fd/wevent is ready or 'timeout_when' passes. */
    retval = time_poll(pollfds, hmap_count(&loop->poll_nodes), wevents,
                       loop->timeout_when, &elapsed);
    /* Negative retval carries a -errno from the poll; zero means the
     * timeout expired; positive means some fds are ready. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_ERR_RL(&rl, "poll: %s", ovs_strerror(-retval));
    } else if (!retval) {
        log_wakeup(loop->timeout_where, NULL, elapsed);
    } else if (get_cpu_usage() > 50 || VLOG_IS_DBG_ENABLED()) {
        /* Under load (or at debug level), report which fd woke us and why. */
        HMAP_FOR_EACH (node, hmap_node, &loop->poll_nodes) {
            if (pollfds[i].revents) {
                log_wakeup(node->where, &pollfds[i], 0);

    /* All registrations are one-shot: reset state for the next round. */
    free_poll_nodes(loop);
    loop->timeout_when = LLONG_MAX;
    loop->timeout_where = NULL;

    /* Handle any pending signals before doing anything else. */
342 free_poll_loop(void *loop_)
344 struct poll_loop *loop = loop_;
346 free_poll_nodes(loop);
347 hmap_destroy(&loop->poll_nodes);
351 static struct poll_loop *
354 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
355 static pthread_key_t key;
356 struct poll_loop *loop;
358 if (ovsthread_once_start(&once)) {
359 xpthread_key_create(&key, free_poll_loop);
360 ovsthread_once_done(&once);
363 loop = pthread_getspecific(key);
365 loop = xzalloc(sizeof *loop);
366 hmap_init(&loop->poll_nodes);
367 xpthread_setspecific(key, loop);