ovs-atomic: Use raw types, not structs, when locks are required.
[sliver-openvswitch.git] lib/ovs-atomic-gcc4+.h
/*
 * Copyright (c) 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This header implements atomic operation primitives on GCC 4.x. */
#ifndef IN_OVS_ATOMIC_H
#error "This header should only be included indirectly via ovs-atomic.h."
#endif

#include "ovs-atomic-locked.h"
#define OVS_ATOMIC_GCC4P_IMPL 1

#define ATOMIC(TYPE) TYPE
#include "ovs-atomic-types.h"

#define ATOMIC_BOOL_LOCK_FREE 2
#define ATOMIC_CHAR_LOCK_FREE 2
#define ATOMIC_SHORT_LOCK_FREE 2
#define ATOMIC_INT_LOCK_FREE 2
#define ATOMIC_LONG_LOCK_FREE (ULONG_MAX <= UINTPTR_MAX ? 2 : 0)
#define ATOMIC_LLONG_LOCK_FREE (ULLONG_MAX <= UINTPTR_MAX ? 2 : 0)
#define ATOMIC_POINTER_LOCK_FREE 2
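
/* Following the C11 convention for the *_LOCK_FREE macros, 2 means the type
 * is always lock-free in this implementation and 0 means it never is.
 * "long" and "long long" count as lock-free only when they are no wider than
 * a pointer, matching the IS_LOCKLESS_ATOMIC() test below; anything wider
 * falls back to the mutex-based helpers from ovs-atomic-locked.h. */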

typedef enum {
    memory_order_relaxed,
    memory_order_consume,
    memory_order_acquire,
    memory_order_release,
    memory_order_acq_rel,
    memory_order_seq_cst
} memory_order;
\f
#define IS_LOCKLESS_ATOMIC(OBJECT) (sizeof(OBJECT) <= sizeof(void *))
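
/* For illustration: under this test, an ATOMIC(int) or ATOMIC(void *) is
 * small enough to be handled directly with the GCC __sync builtins used
 * below, while an atomic object wider than a pointer (for example, a 64-bit
 * counter on a 32-bit build) takes the locked fallback from
 * ovs-atomic-locked.h instead. */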
\f
#define ATOMIC_VAR_INIT(VALUE) VALUE
#define atomic_init(OBJECT, VALUE) (*(OBJECT) = (VALUE), (void) 0)
#define atomic_destroy(OBJECT) ((void) (OBJECT))

static inline void
atomic_thread_fence(memory_order order)
{
    if (order != memory_order_relaxed) {
        __sync_synchronize();
    }
}

static inline void
atomic_thread_fence_if_seq_cst(memory_order order)
{
    if (order == memory_order_seq_cst) {
        __sync_synchronize();
    }
}

static inline void
atomic_signal_fence(memory_order order OVS_UNUSED)
{
    if (order != memory_order_relaxed) {
        asm volatile("" : : : "memory");
    }
}
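
/* Note: every ordering stronger than memory_order_relaxed is implemented
 * here with a full barrier: __sync_synchronize() for thread fences and a
 * compiler-only barrier for signal fences.  The *_if_seq_cst variant lets
 * the store and read macros below emit their extra barrier only for
 * memory_order_seq_cst. */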

#define atomic_is_lock_free(OBJ)                \
    ((void) *(OBJ),                             \
     IS_LOCKLESS_ATOMIC(*(OBJ)))

#define atomic_store(DST, SRC) \
    atomic_store_explicit(DST, SRC, memory_order_seq_cst)
#define atomic_store_explicit(DST, SRC, ORDER)          \
    ({                                                  \
        typeof(DST) dst__ = (DST);                      \
        typeof(SRC) src__ = (SRC);                      \
        memory_order order__ = (ORDER);                 \
                                                        \
        if (IS_LOCKLESS_ATOMIC(*dst__)) {               \
            atomic_thread_fence(order__);               \
            *dst__ = src__;                             \
            atomic_thread_fence_if_seq_cst(order__);    \
        } else {                                        \
            atomic_store_locked(dst__, src__);          \
        }                                               \
        (void) 0;                                       \
    })
#define atomic_read(SRC, DST) \
    atomic_read_explicit(SRC, DST, memory_order_seq_cst)
#define atomic_read_explicit(SRC, DST, ORDER)           \
    ({                                                  \
        typeof(DST) dst__ = (DST);                      \
        typeof(SRC) src__ = (SRC);                      \
        memory_order order__ = (ORDER);                 \
                                                        \
        if (IS_LOCKLESS_ATOMIC(*src__)) {               \
            atomic_thread_fence_if_seq_cst(order__);    \
            *dst__ = *src__;                            \
        } else {                                        \
            atomic_read_locked(src__, dst__);           \
        }                                               \
        (void) 0;                                       \
    })
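
/* Usage sketch (the variable names here are illustrative, not part of this
 * header):
 *
 *     static ATOMIC(int) counter;
 *     int value;
 *
 *     atomic_init(&counter, 0);
 *     atomic_store(&counter, 42);
 *     atomic_read_explicit(&counter, &value, memory_order_relaxed);
 *
 * Because sizeof(int) <= sizeof(void *), both the store and the read take
 * the lockless branch above; a wider type would transparently use
 * atomic_store_locked() and atomic_read_locked() instead. */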

#define atomic_op__(RMW, OP, ARG, ORIG)                     \
    ({                                                      \
        typeof(RMW) rmw__ = (RMW);                          \
        typeof(ARG) arg__ = (ARG);                          \
        typeof(ORIG) orig__ = (ORIG);                       \
                                                            \
        if (IS_LOCKLESS_ATOMIC(*rmw__)) {                   \
            *orig__ = __sync_fetch_and_##OP(rmw__, arg__);  \
        } else {                                            \
            atomic_op_locked(rmw__, OP, arg__, orig__);     \
        }                                                   \
    })

#define atomic_add(RMW, ARG, ORIG) atomic_op__(RMW, add, ARG, ORIG)
#define atomic_sub(RMW, ARG, ORIG) atomic_op__(RMW, sub, ARG, ORIG)
#define atomic_or( RMW, ARG, ORIG) atomic_op__(RMW, or,  ARG, ORIG)
#define atomic_xor(RMW, ARG, ORIG) atomic_op__(RMW, xor, ARG, ORIG)
#define atomic_and(RMW, ARG, ORIG) atomic_op__(RMW, and, ARG, ORIG)

#define atomic_add_explicit(RMW, OPERAND, ORIG, ORDER)  \
    ((void) (ORDER), atomic_add(RMW, OPERAND, ORIG))
#define atomic_sub_explicit(RMW, OPERAND, ORIG, ORDER)  \
    ((void) (ORDER), atomic_sub(RMW, OPERAND, ORIG))
#define atomic_or_explicit(RMW, OPERAND, ORIG, ORDER)   \
    ((void) (ORDER), atomic_or(RMW, OPERAND, ORIG))
#define atomic_xor_explicit(RMW, OPERAND, ORIG, ORDER)  \
    ((void) (ORDER), atomic_xor(RMW, OPERAND, ORIG))
#define atomic_and_explicit(RMW, OPERAND, ORIG, ORDER)  \
    ((void) (ORDER), atomic_and(RMW, OPERAND, ORIG))
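
/* Usage sketch (illustrative names): each read-modify-write macro hands back
 * the value the atomic held before the operation through ORIG, mirroring
 * C11's atomic_fetch_add() family:
 *
 *     static ATOMIC(unsigned int) refcount;
 *     unsigned int orig;
 *
 *     atomic_add(&refcount, 1, &orig);
 *     atomic_sub_explicit(&refcount, 1, &orig, memory_order_relaxed);
 */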
\f
/* atomic_flag */

typedef struct {
    int b;
} atomic_flag;
#define ATOMIC_FLAG_INIT { false }

static inline void
atomic_flag_init(volatile atomic_flag *object OVS_UNUSED)
{
    /* Nothing to do. */
}

static inline void
atomic_flag_destroy(volatile atomic_flag *object OVS_UNUSED)
{
    /* Nothing to do. */
}

static inline bool
atomic_flag_test_and_set(volatile atomic_flag *object)
{
    return __sync_lock_test_and_set(&object->b, 1);
}

static inline bool
atomic_flag_test_and_set_explicit(volatile atomic_flag *object,
                                  memory_order order OVS_UNUSED)
{
    return atomic_flag_test_and_set(object);
}

static inline void
atomic_flag_clear(volatile atomic_flag *object)
{
    __sync_lock_release(&object->b);
}

static inline void
atomic_flag_clear_explicit(volatile atomic_flag *object,
                           memory_order order OVS_UNUSED)
{
    atomic_flag_clear(object);
}
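
/* Usage sketch (illustrative): __sync_lock_test_and_set() acts as an acquire
 * barrier and __sync_lock_release() as a release barrier, so atomic_flag can
 * serve as a simple spin-style guard:
 *
 *     static volatile atomic_flag guard = ATOMIC_FLAG_INIT;
 *
 *     while (atomic_flag_test_and_set(&guard)) {
 *         continue;
 *     }
 *     ...critical section...
 *     atomic_flag_clear(&guard);
 */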