/*
 * Simple interface for atomic operations.
 *
 * Copyright (C) 2013 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * See docs/devel/atomics.rst for discussion about the guarantees each
 * atomic primitive is meant to provide.
 */

#ifndef QEMU_ATOMIC_H
#define QEMU_ATOMIC_H

#include "compiler.h"

/* Compiler barrier */
#define barrier() ({ asm volatile("" ::: "memory"); (void)0; })

#ifndef __ATOMIC_RELAXED
#error "Expecting C11 atomic ops"
#endif

/* Manual memory barriers
 *
 * __atomic_thread_fence does not include a compiler barrier; instead,
 * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
 * semantics. If smp_wmb() is a no-op, absence of the barrier means that
 * the compiler is free to reorder stores on each side of the barrier.
 * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
 */

#define smp_mb()         ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); })
#define smp_mb_release() ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); })
#define smp_mb_acquire() ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); })
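
/*
 * Illustrative sketch of how the release/acquire fences pair up (the
 * variables "data" and "ready" are hypothetical, not part of this API):
 *
 *     producer:                       consumer:
 *         data = compute();               while (!qatomic_read(&ready)) {
 *         smp_mb_release();                   continue;
 *         qatomic_set(&ready, 1);         }
 *                                         smp_mb_acquire();
 *                                         use(data);
 *
 * Once the consumer observes ready != 0, the paired fences guarantee
 * that the earlier store to "data" is visible to it as well.
 */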

/* Most compilers currently treat consume and acquire the same, but really
 * no processors except Alpha need a barrier here. Leave it in if
 * using Thread Sanitizer to avoid warnings, otherwise optimize it away.
 */
#ifdef QEMU_SANITIZE_THREAD
#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); })
#elif defined(__alpha__)
#define smp_read_barrier_depends() asm volatile("mb":::"memory")
#else
#define smp_read_barrier_depends() barrier()
#endif

/*
 * A signal barrier forces all pending local memory ops to be observed before
 * a SIGSEGV is delivered to the *same* thread. In practice this is exactly
 * the same as barrier(), but since we have the correct builtin, use it.
 */
#define signal_barrier() __atomic_signal_fence(__ATOMIC_SEQ_CST)

/* Sanity check that the size of an atomic operation isn't "overly large".
 * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
 * want to use them because we ought not need them, and this lets us do a
 * bit of sanity checking on code that other 32-bit hosts must also build.
 *
 * That said, we have a problem on 64-bit ILP32 hosts in that in order to
 * sync with TCG_OVERSIZED_GUEST, this must match TCG_TARGET_REG_BITS.
 * We'd prefer not to pull in everything else TCG related, so handle
 * those few cases by hand.
 *
 * Note that x32 is fully detected with __x86_64__ + _ILP32, and that for
 * Sparc we always force the use of sparcv9 in configure. MIPS n32 (ILP32) &
 * n64 (LP64) ABIs are both detected using __mips64.
 */
#if defined(__x86_64__) || defined(__sparc__) || defined(__mips64)
# define ATOMIC_REG_SIZE  8
#else
# define ATOMIC_REG_SIZE  sizeof(void *)
#endif

/* Weak atomic operations prevent the compiler moving other
 * loads/stores past the atomic operation load/store. However there is
 * no explicit memory barrier for the processor.
 *
 * The C11 memory model says that variables that are accessed from
 * different threads should at least be done with __ATOMIC_RELAXED
 * primitives or the result is undefined. Generally this has little to
 * no effect on the generated code but not using the atomic primitives
 * will get flagged by sanitizers as a violation.
 */
#define qatomic_read__nocheck(ptr) \
    __atomic_load_n(ptr, __ATOMIC_RELAXED)

#define qatomic_read(ptr)                                   \
    ({                                                      \
    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE);     \
    qatomic_read__nocheck(ptr);                             \
    })

#define qatomic_set__nocheck(ptr, i) \
    __atomic_store_n(ptr, i, __ATOMIC_RELAXED)

#define qatomic_set(ptr, i) do {                            \
    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE);     \
    qatomic_set__nocheck(ptr, i);                           \
} while (0)
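
/*
 * Usage sketch (the "running" flag is hypothetical): these macros give
 * relaxed, single-copy-atomic accesses. They impose no ordering, but
 * they stop the compiler from tearing, fusing or caching the access,
 * and they keep sanitizers quiet:
 *
 *     static int running = 1;
 *
 *     void request_stop(void) { qatomic_set(&running, 0); }
 *     int keep_going(void)    { return qatomic_read(&running); }
 */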

/* See above: most compilers currently treat consume and acquire the
 * same, but this slows down qatomic_rcu_read unnecessarily.
 */
#ifdef QEMU_SANITIZE_THREAD
#define qatomic_rcu_read__nocheck(ptr, valptr)      \
    __atomic_load(ptr, valptr, __ATOMIC_CONSUME);
#else
#define qatomic_rcu_read__nocheck(ptr, valptr)      \
    __atomic_load(ptr, valptr, __ATOMIC_RELAXED);   \
    smp_read_barrier_depends();
#endif

/*
 * Preprocessor sorcery ahead: use a different identifier for the
 * local variable in each expansion, so we can nest macro calls
 * without shadowing variables.
 */
#define qatomic_rcu_read_internal(ptr, _val)            \
    ({                                                  \
    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
    typeof_strip_qual(*ptr) _val;                       \
    qatomic_rcu_read__nocheck(ptr, &_val);              \
    _val;                                               \
    })
#define qatomic_rcu_read(ptr) \
    qatomic_rcu_read_internal((ptr), MAKE_IDENTIFIER(_val))

#define qatomic_rcu_set(ptr, i) do {                    \
    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);         \
} while (0)
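
/*
 * Sketch of the publish/subscribe pattern these macros implement;
 * "struct cfg" and "global_cfg" are hypothetical names:
 *
 *     writer:
 *         struct cfg *c = g_new0(struct cfg, 1);
 *         c->val = 42;
 *         qatomic_rcu_set(&global_cfg, c);    (init ordered before publish)
 *
 *     reader:
 *         struct cfg *c = qatomic_rcu_read(&global_cfg);
 *         use(c->val);                        (dependency-ordered load)
 *
 * The release store orders the initialization of *c before the pointer
 * becomes visible; the consume load orders the pointer load before the
 * dereference, which is free on every host except Alpha.
 */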

#define qatomic_load_acquire(ptr)                       \
    ({                                                  \
    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
    typeof_strip_qual(*ptr) _val;                       \
    __atomic_load(ptr, &_val, __ATOMIC_ACQUIRE);        \
    _val;                                               \
    })

#define qatomic_store_release(ptr, i) do {              \
    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);         \
} while (0)
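
/*
 * Sketch of acquire/release message passing with the two macros above
 * ("msg" and "flag" are hypothetical):
 *
 *     sender:
 *         msg = 42;
 *         qatomic_store_release(&flag, 1);    (publishes msg as well)
 *
 *     receiver:
 *         if (qatomic_load_acquire(&flag)) {
 *             assert(msg == 42);              (guaranteed to hold)
 *         }
 */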

/* All the remaining operations are fully sequentially consistent */

#define qatomic_xchg__nocheck(ptr, i) ({                \
    __atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST);    \
})

#define qatomic_xchg(ptr, i) ({                         \
    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
    qatomic_xchg__nocheck(ptr, i);                      \
})

/* Returns the old value of '*ptr' (whether the cmpxchg failed or not) */
#define qatomic_cmpxchg__nocheck(ptr, old, new) ({                 \
    typeof_strip_qual(*ptr) _old = (old);                          \
    (void)__atomic_compare_exchange_n(ptr, &_old, new, false,      \
                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
    _old;                                                          \
})

#define qatomic_cmpxchg(ptr, old, new) ({               \
    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
    qatomic_cmpxchg__nocheck(ptr, old, new);            \
})
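
/*
 * Typical compare-and-swap loop, shown here as a hypothetical
 * saturating increment. Because qatomic_cmpxchg() returns the old
 * value whether or not the exchange happened, a failed attempt hands
 * the loop a fresh value for free:
 *
 *     static int saturating_inc(int *ptr, int ceiling)
 *     {
 *         int old = qatomic_read(ptr), cur;
 *
 *         while (old < ceiling &&
 *                (cur = qatomic_cmpxchg(ptr, old, old + 1)) != old) {
 *             old = cur;
 *         }
 *         return old;
 *     }
 */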

/* Provide shorter names for GCC atomic builtins, return old value */
#define qatomic_fetch_inc(ptr)  __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
#define qatomic_fetch_dec(ptr)  __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)

#define qatomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_fetch_or(ptr, n)  __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)

#define qatomic_inc_fetch(ptr)    __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define qatomic_dec_fetch(ptr)    __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define qatomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_or_fetch(ptr, n)  __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)

/* And even shorter names that return void. */
#define qatomic_inc(ptr) \
    ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
#define qatomic_dec(ptr) \
    ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
#define qatomic_add(ptr, n) \
    ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
#define qatomic_sub(ptr, n) \
    ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
#define qatomic_and(ptr, n) \
    ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
#define qatomic_or(ptr, n) \
    ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
#define qatomic_xor(ptr, n) \
    ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))

#define smp_wmb()   smp_mb_release()
#define smp_rmb()   smp_mb_acquire()

/*
 * SEQ_CST is weaker than the older __sync_* builtins and Linux
 * kernel read-modify-write atomics. Provide a macro to obtain
 * the same semantics.
 */
#if !defined(QEMU_SANITIZE_THREAD) && \
    (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
# define smp_mb__before_rmw() signal_barrier()
# define smp_mb__after_rmw()  signal_barrier()
#else
# define smp_mb__before_rmw() smp_mb()
# define smp_mb__after_rmw()  smp_mb()
#endif
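
/*
 * Usage sketch: when a read-modify-write operation must also act as a
 * full barrier for the surrounding plain accesses (the guarantee Linux
 * kernel atomics provide), bracket it explicitly; "ctr" is a
 * hypothetical counter:
 *
 *     smp_mb__before_rmw();
 *     if (qatomic_fetch_inc(&ctr) == 0) {
 *         ...
 *     }
 *     smp_mb__after_rmw();
 *
 * On x86 and s390x the RMW instruction is itself a full barrier, so
 * the macros collapse to compiler-only barriers there.
 */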

/*
 * On some architectures, qatomic_set_mb is more efficient than a store
 * plus a fence.
 */

#if !defined(QEMU_SANITIZE_THREAD) && \
    (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
# define qatomic_set_mb(ptr, i) \
    ({ (void)qatomic_xchg(ptr, i); smp_mb__after_rmw(); })
#else
# define qatomic_set_mb(ptr, i) \
    ({ qatomic_store_release(ptr, i); smp_mb(); })
#endif
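
/*
 * qatomic_set_mb() is the store half of a Dekker-style "store A, then
 * read B" pattern, where each side must observe the other's store.
 * Illustrative sketch with hypothetical flags "a" and "b":
 *
 *     thread 1:                       thread 2:
 *         qatomic_set_mb(&a, 1);          qatomic_set_mb(&b, 1);
 *         if (qatomic_read(&b)) ...       if (qatomic_read(&a)) ...
 *
 * The full barrier after each store ensures that at least one of the
 * two reads observes 1.
 */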

#define qatomic_fetch_inc_nonzero(ptr) ({                              \
    typeof_strip_qual(*ptr) _oldn = qatomic_read(ptr);                 \
    while (_oldn && qatomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) { \
        _oldn = qatomic_read(ptr);                                     \
    }                                                                  \
    _oldn;                                                             \
})
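
/*
 * This is the classic "take a reference only if the object is still
 * alive" operation. Hypothetical sketch:
 *
 *     struct obj {
 *         int refcnt;    (0 means the object is being torn down)
 *     };
 *
 *     static bool obj_try_get(struct obj *o)
 *     {
 *         return qatomic_fetch_inc_nonzero(&o->refcnt) != 0;
 *     }
 */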

/*
 * Abstractions to access atomically (i.e. "once") i64/u64 variables.
 *
 * The i386 ABI is odd in that by default members are only aligned to
 * 4 bytes, which means that 8-byte types can wind up mis-aligned.
 * Clang will then warn about this, and emit a call into libatomic.
 *
 * Using these types in structures that will be accessed with atomic
 * operations avoids this.
 */
typedef int64_t  aligned_int64_t  __attribute__((aligned(8)));
typedef uint64_t aligned_uint64_t __attribute__((aligned(8)));

#ifdef CONFIG_ATOMIC64
/* Use __nocheck because sizeof(void *) might be < sizeof(u64) */
#define qatomic_read_i64(P) \
    _Generic(*(P), int64_t: qatomic_read__nocheck(P))
#define qatomic_read_u64(P) \
    _Generic(*(P), uint64_t: qatomic_read__nocheck(P))
#define qatomic_set_i64(P, V) \
    _Generic(*(P), int64_t: qatomic_set__nocheck(P, V))
#define qatomic_set_u64(P, V) \
    _Generic(*(P), uint64_t: qatomic_set__nocheck(P, V))

static inline void qatomic64_init(void)
{
}
#else /* !CONFIG_ATOMIC64 */
int64_t qatomic_read_i64(const int64_t *ptr);
uint64_t qatomic_read_u64(const uint64_t *ptr);
void qatomic_set_i64(int64_t *ptr, int64_t val);
void qatomic_set_u64(uint64_t *ptr, uint64_t val);
void qatomic64_init(void);
#endif /* !CONFIG_ATOMIC64 */
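
/*
 * Usage sketch for the 64-bit accessors; "struct stats" is a
 * hypothetical example. The aligned typedef guarantees natural
 * alignment even on i386, so no libatomic call is emitted:
 *
 *     struct stats {
 *         aligned_uint64_t bytes;
 *     };
 *
 *     void add_bytes(struct stats *s, uint64_t n)
 *     {
 *         qatomic_set_u64(&s->bytes, qatomic_read_u64(&s->bytes) + n);
 *     }
 *
 * Note that the read-modify-write above is not atomic as a whole; only
 * the individual load and store are guaranteed to happen "once".
 */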

#endif /* QEMU_ATOMIC_H */