--- /dev/null
+/*
+ * Copyright (c) 2013 Nicira, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#include "latch.h"
+#include <errno.h>
+#include <poll.h>
+#include <unistd.h>
+#include "poll-loop.h"
+#include "socket-util.h"
+
+/* Initializes 'latch' as initially unset.
+ *
+ * The Windows implementation pairs the 'is_set' flag with an event handle so
+ * that poll_block() can wait on the latch via WaitForMultipleObjects(). */
+void
+latch_init(struct latch *latch)
+{
+ latch->is_set = FALSE;
+ /* Manual-reset event (2nd arg TRUE), created unsignaled (3rd arg FALSE);
+ * explicitly reset again in latch_poll(). */
+ latch->wevent = CreateEvent(NULL, TRUE, FALSE, NULL);
+}
+
+/* Destroys 'latch', releasing its event handle.  The caller must not use
+ * 'latch' afterward. */
+void
+latch_destroy(struct latch *latch)
+{
+ latch->is_set = FALSE;
+ CloseHandle(latch->wevent);
+}
+
+/* Resets 'latch' to the unset state. Returns true if 'latch' was previously
+ * set, false otherwise. */
+bool
+latch_poll(struct latch *latch)
+{
+ bool is_set;
+
+ is_set = latch->is_set;
+ latch->is_set = FALSE;
+ /* Unsignal the manual-reset event so that subsequent waits on 'wevent'
+ * block until the next latch_set(). */
+ ResetEvent(latch->wevent);
+ return is_set;
+}
+
+/* Sets 'latch'.
+ *
+ * Calls are not additive: a single latch_poll() clears out any number of
+ * latch_set(). */
+void
+latch_set(struct latch *latch)
+{
+ latch->is_set = TRUE;
+ /* Signal the event to wake anything waiting on 'wevent' (see
+ * latch_wait_at() / poll_block()). */
+ SetEvent(latch->wevent);
+}
+
+/* Returns true if 'latch' is set, false otherwise. Does not reset 'latch'
+ * to the unset state; use latch_poll() to both read and consume the state. */
+bool
+latch_is_set(const struct latch *latch)
+{
+ return latch->is_set;
+}
+
+/* Causes the next poll_block() to wake up when 'latch' is set.
+ *
+ * ('where' is used in debug logging. Commonly one would use latch_wait() to
+ * automatically provide the caller's source file and line number for
+ * 'where'.) */
+void
+latch_wait_at(const struct latch *latch, const char *where)
+{
+ /* NOTE(review): fd 0 appears to be a placeholder here -- on Windows the
+ * actual wakeup is driven by 'wevent'; confirm fd 0 is never polled. */
+ poll_fd_wait_at(0, latch->wevent, POLLIN, where);
+}
/*
- * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "socket-util.h"
#include "timeval.h"
#include "vlog.h"
+#include "hmap.h"
+#include "hash.h"
VLOG_DEFINE_THIS_MODULE(poll_loop);
COVERAGE_DEFINE(poll_fd_wait);
COVERAGE_DEFINE(poll_zero_timeout);
+struct poll_node {
+ struct hmap_node hmap_node;
+ struct pollfd pollfd; /* Events to pass to time_poll(). */
+ HANDLE wevent; /* Events for WaitForMultipleObjects(). */
+ const char *where; /* Where poll_node was created. */
+};
+
struct poll_loop {
/* All active poll waiters. */
- struct pollfd *pollfds; /* Events to pass to poll(). */
- const char **where; /* Where each pollfd was created. */
- size_t n_waiters; /* Number of elems in 'where' and 'pollfds'. */
- size_t allocated_waiters; /* Allocated elems in 'where' and 'pollfds'. */
+ struct hmap poll_nodes;
/* Time at which to wake up the next call to poll_block(), LLONG_MIN to
* wake up immediately, or LLONG_MAX to wait forever. */
static struct poll_loop *poll_loop(void);
-/* Registers 'fd' as waiting for the specified 'events' (which should be POLLIN
- * or POLLOUT or POLLIN | POLLOUT). The following call to poll_block() will
- * wake up when 'fd' becomes ready for one or more of the requested events.
+/* Looks up and returns the poll_node in 'loop' whose fd and wevent both
+ * match, or NULL if there is none.
+ *
+ * NOTE(review): 'wevent' is taken as uint32_t here, but struct poll_node
+ * stores it as a HANDLE, which is pointer-sized -- confirm this cannot
+ * truncate on 64-bit Windows. */
+static struct poll_node *
+find_poll_node(struct poll_loop *loop, int fd, uint32_t wevent)
+{
+ struct poll_node *node;
+
+ /* Nodes are hashed on the (fd, wevent) pair; see poll_fd_wait_at(). */
+ HMAP_FOR_EACH_WITH_HASH (node, hmap_node, hash_2words(fd, wevent),
+ &loop->poll_nodes) {
+ if (node->pollfd.fd == fd && node->wevent == wevent) {
+ return node;
+ }
+ }
+ return NULL;
+}
+
+/* On Unix based systems:
*
- * The event registration is one-shot: only the following call to poll_block()
- * is affected. The event will need to be re-registered after poll_block() is
- * called if it is to persist.
+ * Registers 'fd' as waiting for the specified 'events' (which should be
+ * POLLIN or POLLOUT or POLLIN | POLLOUT). The following call to
+ * poll_block() will wake up when 'fd' becomes ready for one or more of the
+ * requested events. The 'fd's are later passed to the poll() function.
+ *
+ * On Windows system:
+ *
+ * Registers the 'wevent' handle for the specified 'events'. These wevents
+ * are given to WaitForMultipleObjects() to be polled. The event
+ * registration is one-shot: only the following call to poll_block() is
+ * affected. The event will need to be re-registered after poll_block() is
+ * called if it is to persist.
*
* ('where' is used in debug logging. Commonly one would use poll_fd_wait() to
* automatically provide the caller's source file and line number for
* 'where'.) */
void
-poll_fd_wait_at(int fd, short int events, const char *where)
+poll_fd_wait_at(int fd, HANDLE wevent, short int events, const char *where)
{
struct poll_loop *loop = poll_loop();
+ struct poll_node *node;
COVERAGE_INC(poll_fd_wait);
- if (loop->n_waiters >= loop->allocated_waiters) {
- loop->where = x2nrealloc(loop->where, &loop->allocated_waiters,
- sizeof *loop->where);
- loop->pollfds = xrealloc(loop->pollfds,
- (loop->allocated_waiters
- * sizeof *loop->pollfds));
+
+#ifdef _WIN32
+ /* Null event cannot be polled. */
+ if (wevent == 0) {
+ VLOG_ERR("No event to wait fd %d", fd);
+ return;
}
+#endif
- loop->where[loop->n_waiters] = where;
- loop->pollfds[loop->n_waiters].fd = fd;
- loop->pollfds[loop->n_waiters].events = events;
- loop->n_waiters++;
+ /* Check for duplicate. If found, "or" the event. */
+ node = find_poll_node(loop, fd, wevent);
+ if (node) {
+ node->pollfd.events |= events;
+ } else {
+ node = xzalloc(sizeof *node);
+ hmap_insert(&loop->poll_nodes, &node->hmap_node,
+ hash_2words(fd, wevent));
+ node->pollfd.fd = fd;
+ node->pollfd.events = events;
+ node->wevent = wevent;
+ node->where = where;
+ }
}
/* Causes the following call to poll_block() to block for no more than 'msec'
ds_destroy(&s);
}
+/* Removes and frees every poll_node registered in 'loop', leaving
+ * 'loop->poll_nodes' empty.  Called after each poll_block() (registrations
+ * are one-shot) and on thread exit. */
+static void
+free_poll_nodes(struct poll_loop *loop)
+{
+ struct poll_node *node, *next;
+
+ HMAP_FOR_EACH_SAFE (node, next, hmap_node, &loop->poll_nodes) {
+ hmap_remove(&loop->poll_nodes, &node->hmap_node);
+ free(node);
+ }
+}
+
/* Blocks until one or more of the events registered with poll_fd_wait()
* occurs, or until the minimum duration registered with poll_timer_wait()
* elapses, or not at all if poll_immediate_wake() has been called. */
poll_block(void)
{
struct poll_loop *loop = poll_loop();
+ struct poll_node *node;
+ struct pollfd *pollfds;
+ HANDLE *wevents = NULL;
int elapsed;
int retval;
+ int i;
/* Register fatal signal events before actually doing any real work for
* poll_block. */
}
timewarp_wait();
- retval = time_poll(loop->pollfds, loop->n_waiters,
+ pollfds = xmalloc(hmap_count(&loop->poll_nodes) * sizeof *pollfds);
+
+#ifdef _WIN32
+ wevents = xmalloc(hmap_count(&loop->poll_nodes) * sizeof *wevents);
+#endif
+
+ /* Populate with all the fds and events. */
+ i = 0;
+ HMAP_FOR_EACH (node, hmap_node, &loop->poll_nodes) {
+ pollfds[i] = node->pollfd;
+#ifdef _WIN32
+ wevents[i] = node->wevent;
+#endif
+ i++;
+ }
+
+ retval = time_poll(pollfds, hmap_count(&loop->poll_nodes), wevents,
loop->timeout_when, &elapsed);
if (retval < 0) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
} else if (!retval) {
log_wakeup(loop->timeout_where, NULL, elapsed);
} else if (get_cpu_usage() > 50 || VLOG_IS_DBG_ENABLED()) {
- size_t i;
-
- for (i = 0; i < loop->n_waiters; i++) {
- if (loop->pollfds[i].revents) {
- log_wakeup(loop->where[i], &loop->pollfds[i], 0);
+ i = 0;
+ HMAP_FOR_EACH (node, hmap_node, &loop->poll_nodes) {
+ if (pollfds[i].revents) {
+ log_wakeup(node->where, &pollfds[i], 0);
}
+ i++;
}
}
+ free_poll_nodes(loop);
loop->timeout_when = LLONG_MAX;
loop->timeout_where = NULL;
- loop->n_waiters = 0;
+ free(pollfds);
+ free(wevents);
/* Handle any pending signals before doing anything else. */
fatal_signal_run();
{
struct poll_loop *loop = loop_;
- free(loop->pollfds);
- free(loop->where);
+ free_poll_nodes(loop);
+ hmap_destroy(&loop->poll_nodes);
free(loop);
}
loop = pthread_getspecific(key);
if (!loop) {
loop = xzalloc(sizeof *loop);
+ hmap_init(&loop->poll_nodes);
xpthread_setspecific(key, loop);
}
return loop;
/*
- * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* Stores the number of milliseconds elapsed during poll in '*elapsed'. */
int
-time_poll(struct pollfd *pollfds, int n_pollfds, long long int timeout_when,
- int *elapsed)
+time_poll(struct pollfd *pollfds, int n_pollfds, HANDLE *handles OVS_UNUSED,
+ long long int timeout_when, int *elapsed)
{
long long int *last_wakeup = last_wakeup_get();
long long int start;
- int retval;
+ int retval = 0;
time_init();
coverage_clear();
time_left = timeout_when - now;
}
+#ifndef _WIN32
retval = poll(pollfds, n_pollfds, time_left);
if (retval < 0) {
retval = -errno;
}
+#else
+ if (n_pollfds > MAXIMUM_WAIT_OBJECTS) {
+ VLOG_ERR("Cannot handle more than maximum wait objects\n");
+ } else if (n_pollfds != 0) {
+ retval = WaitForMultipleObjects(n_pollfds, handles, FALSE,
+ time_left);
+ }
+ if (retval < 0) {
+ /* XXX This will be replaced by a Windows-error-to-errno
+ conversion function. */
+ retval = -WSAGetLastError();
+ retval = -EINVAL;
+ }
+#endif
if (deadline <= time_msec()) {
fatal_signal_handler(SIGALRM);