/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>
#include "ovs-rcu.h"
#include "guarded-list.h"
#include "list.h"
#include "ovs-thread.h"
#include "poll-loop.h"
#include "seq.h"
#include "util.h"
26 void (*function)(void *aux);
31 struct list list_node;
32 struct ovsrcu_cb cbs[16];
36 struct ovsrcu_perthread {
37 struct list list_node; /* In global list. */
39 struct ovs_mutex mutex;
41 struct ovsrcu_cbset *cbset;
44 static struct seq *global_seqno;
46 static pthread_key_t perthread_key;
47 static struct list ovsrcu_threads;
48 static struct ovs_mutex ovsrcu_threads_mutex;
50 static struct guarded_list flushed_cbsets;
51 static struct seq *flushed_cbsets_seq;
53 static void ovsrcu_init(void);
54 static void ovsrcu_flush_cbset(struct ovsrcu_perthread *);
55 static void ovsrcu_unregister__(struct ovsrcu_perthread *);
56 static bool ovsrcu_call_postponed(void);
57 static void *ovsrcu_postpone_thread(void *arg OVS_UNUSED);
58 static void ovsrcu_synchronize(void);
60 static struct ovsrcu_perthread *
61 ovsrcu_perthread_get(void)
63 struct ovsrcu_perthread *perthread;
67 perthread = pthread_getspecific(perthread_key);
69 perthread = xmalloc(sizeof *perthread);
70 ovs_mutex_init(&perthread->mutex);
71 perthread->seqno = seq_read(global_seqno);
72 perthread->cbset = NULL;
74 ovs_mutex_lock(&ovsrcu_threads_mutex);
75 list_push_back(&ovsrcu_threads, &perthread->list_node);
76 ovs_mutex_unlock(&ovsrcu_threads_mutex);
78 pthread_setspecific(perthread_key, perthread);
/* Indicates the end of a quiescent state.  See "Details" near the top of
 * ovs-rcu.h.
 *
 * Quiescent states don't stack or nest, so this always ends a quiescent state
 * even if ovsrcu_quiesce_start() was called multiple times in a row. */
void
ovsrcu_quiesce_end(void)
{
    /* Merely fetching the per-thread structure re-registers the thread. */
    ovsrcu_perthread_get();
}
97 if (single_threaded()) {
98 ovsrcu_call_postponed();
100 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
101 if (ovsthread_once_start(&once)) {
102 ovs_thread_create("urcu", ovsrcu_postpone_thread, NULL);
103 ovsthread_once_done(&once);
108 /* Indicates the beginning of a quiescent state. See "Details" near the top of
111 ovsrcu_quiesce_start(void)
113 struct ovsrcu_perthread *perthread;
116 perthread = pthread_getspecific(perthread_key);
118 pthread_setspecific(perthread_key, NULL);
119 ovsrcu_unregister__(perthread);
125 /* Indicates a momentary quiescent state. See "Details" near the top of
131 ovsrcu_perthread_get()->seqno = seq_read(global_seqno);
132 seq_change(global_seqno);
138 ovsrcu_is_quiescent(void)
141 return pthread_getspecific(perthread_key) == NULL;
145 ovsrcu_synchronize(void)
147 uint64_t target_seqno;
149 if (single_threaded()) {
153 target_seqno = seq_read(global_seqno);
154 ovsrcu_quiesce_start();
157 uint64_t cur_seqno = seq_read(global_seqno);
158 struct ovsrcu_perthread *perthread;
161 ovs_mutex_lock(&ovsrcu_threads_mutex);
162 LIST_FOR_EACH (perthread, list_node, &ovsrcu_threads) {
163 if (perthread->seqno <= target_seqno) {
168 ovs_mutex_unlock(&ovsrcu_threads_mutex);
174 seq_wait(global_seqno, cur_seqno);
177 ovsrcu_quiesce_end();
180 /* Registers 'function' to be called, passing 'aux' as argument, after the
183 * This function is more conveniently called through the ovsrcu_postpone()
184 * macro, which provides a type-safe way to allow 'function''s parameter to be
185 * any pointer type. */
187 ovsrcu_postpone__(void (*function)(void *aux), void *aux)
189 struct ovsrcu_perthread *perthread = ovsrcu_perthread_get();
190 struct ovsrcu_cbset *cbset;
191 struct ovsrcu_cb *cb;
193 cbset = perthread->cbset;
195 cbset = perthread->cbset = xmalloc(sizeof *perthread->cbset);
199 cb = &cbset->cbs[cbset->n_cbs++];
200 cb->function = function;
203 if (cbset->n_cbs >= ARRAY_SIZE(cbset->cbs)) {
204 ovsrcu_flush_cbset(perthread);
209 ovsrcu_call_postponed(void)
211 struct ovsrcu_cbset *cbset, *next_cbset;
214 guarded_list_pop_all(&flushed_cbsets, &cbsets);
215 if (list_is_empty(&cbsets)) {
219 ovsrcu_synchronize();
221 LIST_FOR_EACH_SAFE (cbset, next_cbset, list_node, &cbsets) {
222 struct ovsrcu_cb *cb;
224 for (cb = cbset->cbs; cb < &cbset->cbs[cbset->n_cbs]; cb++) {
225 cb->function(cb->aux);
227 list_remove(&cbset->list_node);
235 ovsrcu_postpone_thread(void *arg OVS_UNUSED)
237 pthread_detach(pthread_self());
240 uint64_t seqno = seq_read(flushed_cbsets_seq);
241 if (!ovsrcu_call_postponed()) {
242 seq_wait(flushed_cbsets_seq, seqno);
251 ovsrcu_flush_cbset(struct ovsrcu_perthread *perthread)
253 struct ovsrcu_cbset *cbset = perthread->cbset;
256 guarded_list_push_back(&flushed_cbsets, &cbset->list_node, SIZE_MAX);
257 perthread->cbset = NULL;
259 seq_change(flushed_cbsets_seq);
264 ovsrcu_unregister__(struct ovsrcu_perthread *perthread)
266 if (perthread->cbset) {
267 ovsrcu_flush_cbset(perthread);
270 ovs_mutex_lock(&ovsrcu_threads_mutex);
271 list_remove(&perthread->list_node);
272 ovs_mutex_unlock(&ovsrcu_threads_mutex);
274 ovs_mutex_destroy(&perthread->mutex);
277 seq_change(global_seqno);
/* pthread key destructor: unregisters an exiting thread that never entered a
 * quiescent state. */
static void
ovsrcu_thread_exit_cb(void *perthread)
{
    ovsrcu_unregister__(perthread);
}
289 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
290 if (ovsthread_once_start(&once)) {
291 global_seqno = seq_create();
292 xpthread_key_create(&perthread_key, ovsrcu_thread_exit_cb);
293 list_init(&ovsrcu_threads);
294 ovs_mutex_init(&ovsrcu_threads_mutex);
296 guarded_list_init(&flushed_cbsets);
297 flushed_cbsets_seq = seq_create();
299 ovsthread_once_done(&once);