2 * Copyright (c) 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
#include "fat-rwlock.h"

#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdlib.h>

#include "ovs-thread.h"
28 /* This system's cache line size, in bytes.
29 * Being wrong hurts performance but not correctness. */
30 #define CACHE_LINE_SIZE 64 /* Correct for most CPUs. */
31 BUILD_ASSERT_DECL(IS_POW2(CACHE_LINE_SIZE));
33 struct fat_rwlock_slot {
34 /* Membership in rwlock's list of "struct fat_rwlock_slot"s.
36 * fat_rwlock_destroy() sets 'rwlock' to NULL to indicate that this
37 * slot may be destroyed. */
38 struct list list_node; /* In struct rwlock's 'threads' list. */
39 struct fat_rwlock *rwlock; /* Owner. */
43 * A thread holding the read-lock holds its own mutex.
45 * A thread holding the write-lock holds every thread's mutex, plus
47 struct ovs_mutex mutex;
49 /* This thread's locking status for 'rwlock':
51 * - 0: This thread does not have any lock on 'rwlock'. This thread
52 * does not have 'mutex' locked.
54 * - 1: This thread has a read-lock on 'rwlock' and holds 'mutex'.
56 * - 2...UINT_MAX-1: This thread has recursively taken the read-lock on
57 * 'rwlock' to the level of 'depth'. This thread holds 'mutex'.
59 * - UINT_MAX: This thread has the write-lock on 'rwlock' and holds
60 * 'mutex' (plus the 'mutex' of all of 'rwlock''s other slots).
62 * Accessed only by the slot's own thread, so no synchronization is
66 /* To prevent two of these structures from accidentally occupying the same
67 * cache line (causing "false sharing"), we cache-align each of these data
68 * structures. That requires malloc()ing extra space and throwing away
69 * some space at the beginning, which means that the pointer to this struct
70 * isn't necessarily the pointer to the beginning of the block, and so we
71 * need to retain the original pointer to free later.
73 * Accessed only by a single thread, so no synchronization is needed. */
74 void *base; /* Pointer to pass to free() for this block. */
78 free_slot(struct fat_rwlock_slot *slot)
84 list_remove(&slot->list_node);
89 slot_destructor(void *slot_)
91 struct fat_rwlock_slot *slot = slot_;
92 struct fat_rwlock *rwlock = slot->rwlock;
94 ovs_mutex_lock(&rwlock->mutex);
96 ovs_mutex_unlock(&rwlock->mutex);
99 /* Initialize 'rwlock' as a new fat_rwlock. */
101 fat_rwlock_init(struct fat_rwlock *rwlock)
103 ovsthread_key_create(&rwlock->key, slot_destructor);
104 ovs_mutex_init(&rwlock->mutex);
105 ovs_mutex_lock(&rwlock->mutex);
106 list_init(&rwlock->threads);
107 ovs_mutex_unlock(&rwlock->mutex);
110 /* Destroys 'rwlock', which must not be locked or otherwise in use by any
113 fat_rwlock_destroy(struct fat_rwlock *rwlock)
115 struct fat_rwlock_slot *slot, *next;
117 /* Order is important here. By destroying the thread-specific data first,
118 * before we destroy the slots, we ensure that the thread-specific
119 * data destructor can't race with our loop below. */
120 ovsthread_key_delete(rwlock->key);
122 LIST_FOR_EACH_SAFE (slot, next, list_node, &rwlock->threads) {
125 ovs_mutex_destroy(&rwlock->mutex);
128 static struct fat_rwlock_slot *
129 fat_rwlock_get_slot__(struct fat_rwlock *rwlock)
131 struct fat_rwlock_slot *slot;
135 slot = ovsthread_getspecific(rwlock->key);
140 /* Slow path: create a new slot for 'rwlock' in this thread. */
142 /* Allocate room for:
144 * - Up to CACHE_LINE_SIZE - 1 bytes before the per-thread, so that
145 * the start of the slot doesn't potentially share a cache line.
149 * - Space following the slot up to the end of the cache line, so
150 * that the end of the slot doesn't potentially share a cache
152 base = xmalloc((CACHE_LINE_SIZE - 1)
153 + ROUND_UP(sizeof *slot, CACHE_LINE_SIZE));
154 slot = (void *) ROUND_UP((uintptr_t) base, CACHE_LINE_SIZE);
157 slot->rwlock = rwlock;
158 ovs_mutex_init(&slot->mutex);
161 ovs_mutex_lock(&rwlock->mutex);
162 list_push_back(&rwlock->threads, &slot->list_node);
163 ovs_mutex_unlock(&rwlock->mutex);
165 ovsthread_setspecific(rwlock->key, slot);
170 /* Locks 'rwlock' for reading. The read-lock is recursive: it may be acquired
171 * any number of times by a single thread (which must then release it the same
172 * number of times for it to truly be released). */
174 fat_rwlock_rdlock(const struct fat_rwlock *rwlock_)
175 OVS_ACQ_RDLOCK(rwlock_)
176 OVS_NO_THREAD_SAFETY_ANALYSIS
178 struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_);
179 struct fat_rwlock_slot *this = fat_rwlock_get_slot__(rwlock);
181 switch (this->depth) {
183 /* This thread already holds the write-lock. */
187 ovs_mutex_lock(&this->mutex);
195 /* Tries to lock 'rwlock' for reading. If successful, returns 0. If taking
196 * the lock would require blocking, returns EBUSY (without blocking). */
198 fat_rwlock_tryrdlock(const struct fat_rwlock *rwlock_)
199 OVS_TRY_RDLOCK(0, rwlock_)
200 OVS_NO_THREAD_SAFETY_ANALYSIS
202 struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_);
203 struct fat_rwlock_slot *this = fat_rwlock_get_slot__(rwlock);
206 switch (this->depth) {
211 error = ovs_mutex_trylock(&this->mutex);
224 /* Locks 'rwlock' for writing.
226 * The write lock is not recursive. */
228 fat_rwlock_wrlock(const struct fat_rwlock *rwlock_)
229 OVS_ACQ_WRLOCK(rwlock_)
230 OVS_NO_THREAD_SAFETY_ANALYSIS
232 struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_);
233 struct fat_rwlock_slot *this = fat_rwlock_get_slot__(rwlock);
234 struct fat_rwlock_slot *slot;
236 ovs_assert(!this->depth);
237 this->depth = UINT_MAX;
239 ovs_mutex_lock(&rwlock->mutex);
240 LIST_FOR_EACH (slot, list_node, &rwlock->threads) {
241 ovs_mutex_lock(&slot->mutex);
245 /* Unlocks 'rwlock', which the current thread must have locked for reading or
246 * for writing. If the read lock has been taken recursively, it must be
247 * released the same number of times to be truly released. */
249 fat_rwlock_unlock(const struct fat_rwlock *rwlock_)
250 OVS_RELEASES(rwlock_)
251 OVS_NO_THREAD_SAFETY_ANALYSIS
253 struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_);
254 struct fat_rwlock_slot *this = fat_rwlock_get_slot__(rwlock);
255 struct fat_rwlock_slot *slot;
257 switch (this->depth) {
259 LIST_FOR_EACH (slot, list_node, &rwlock->threads) {
260 ovs_mutex_unlock(&slot->mutex);
262 ovs_mutex_unlock(&rwlock->mutex);
267 /* This thread doesn't hold any lock. */
271 ovs_mutex_unlock(&this->mutex);