xref: /openbmc/qemu/accel/tcg/atomic_template.h (revision 65e728a2)
1 /*
2  * Atomic helper templates
3  * Included from tcg-runtime.c and cputlb.c.
4  *
5  * Copyright (c) 2016 Red Hat, Inc
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/plugin.h"
22 #include "trace/mem.h"
23 
/*
 * Per-size glue for this instantiation of the template:
 *   SUFFIX     - letter appended to the helper name (b/w/l/q/o)
 *   DATA_TYPE  - unsigned host type matching the memory operand
 *   SDATA_TYPE - signed counterpart, used by the smin/smax helpers
 *                (deliberately absent for DATA_SIZE == 16)
 *   BSWAP      - byte-swap function for the reverse-endian helpers
 *                (empty for one byte, which has no other endianness)
 *   SHIFT      - log2(DATA_SIZE)
 */
#if DATA_SIZE == 16
# define SUFFIX     o
# define DATA_TYPE  Int128
# define BSWAP      bswap128
# define SHIFT      4
#elif DATA_SIZE == 8
# define SUFFIX     q
# define DATA_TYPE  aligned_uint64_t
# define SDATA_TYPE aligned_int64_t
# define BSWAP      bswap64
# define SHIFT      3
#elif DATA_SIZE == 4
# define SUFFIX     l
# define DATA_TYPE  uint32_t
# define SDATA_TYPE int32_t
# define BSWAP      bswap32
# define SHIFT      2
#elif DATA_SIZE == 2
# define SUFFIX     w
# define DATA_TYPE  uint16_t
# define SDATA_TYPE int16_t
# define BSWAP      bswap16
# define SHIFT      1
#elif DATA_SIZE == 1
# define SUFFIX     b
# define DATA_TYPE  uint8_t
# define SDATA_TYPE int8_t
# define BSWAP
# define SHIFT      0
#else
# error unsupported data size
#endif

/* Sub-word values are widened to uint32_t at the helper ABI boundary;
   word-sized and larger operands pass through unchanged.  */
#if DATA_SIZE >= 4
# define ABI_TYPE  DATA_TYPE
#else
# define ABI_TYPE  uint32_t
#endif

/* Define host-endian atomic operations.  Note that END is used within
   the ATOMIC_NAME macro, and redefined below.  */
#if DATA_SIZE == 1
# define END
#elif defined(HOST_WORDS_BIGENDIAN)
# define END  _be
#else
# define END  _le
#endif
72 
/*
 * Atomic compare-and-swap, host memory order.
 *
 * Atomically compare the guest memory at @addr with @cmpv and, if equal,
 * store @newv; the value previously in memory is returned either way.
 * atomic_mmu_lookup validates a readable+writable mapping and returns the
 * host address (on failure it does not return here — unwinds via @retaddr).
 */
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                              ABI_TYPE cmpv, ABI_TYPE newv,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                         PAGE_READ | PAGE_WRITE, retaddr);
    DATA_TYPE ret;
    /* Trace/plugin hook: announce the RMW before touching memory.  */
    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);

#if DATA_SIZE == 16
    /* Int128 is not covered by the generic qatomic_* macros.  */
    ret = atomic16_cmpxchg(haddr, cmpv, newv);
#else
    ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
#endif
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, info);
    return ret;
}
91 
92 #if DATA_SIZE >= 16
93 #if HAVE_ATOMIC128
/*
 * Atomic 16-byte load, host memory order.  Only compiled when the host
 * provides a true 128-bit atomic read (guarded by HAVE_ATOMIC128 above).
 */
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
                         TCGMemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                         PAGE_READ, retaddr);
    DATA_TYPE val;
    uint16_t info = atomic_trace_ld_pre(env, addr, oi);

    val = atomic16_read(haddr);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_ld_post(env, addr, info);
    return val;
}
107 
/*
 * Atomic 16-byte store, host memory order.  Only compiled when the host
 * provides a true 128-bit atomic write (guarded by HAVE_ATOMIC128 above).
 */
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                         PAGE_WRITE, retaddr);
    uint16_t info = atomic_trace_st_pre(env, addr, oi);

    atomic16_set(haddr, val);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_st_post(env, addr, info);
}
119 #endif
120 #else
/*
 * Atomic exchange, host memory order: store @val to guest memory at
 * @addr and return the value previously held there.
 */
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                         PAGE_READ | PAGE_WRITE, retaddr);
    DATA_TYPE ret;
    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);

    ret = qatomic_xchg__nocheck(haddr, val);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, info);
    return ret;
}
134 
/*
 * Expand to an atomic read-modify-write helper for operation X, mapping
 * directly onto the matching qatomic_##X host primitive.  By qatomic
 * naming, fetch_<op> returns the value before the operation and
 * <op>_fetch the value after it.
 */
#define GEN_ATOMIC_HELPER(X)                                        \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
{                                                                   \
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,  \
                                         PAGE_READ | PAGE_WRITE, retaddr); \
    DATA_TYPE ret;                                                  \
    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);            \
    ret = qatomic_##X(haddr, val);                                  \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, info);                         \
    return ret;                                                     \
}

GEN_ATOMIC_HELPER(fetch_add)
GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(add_fetch)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER
159 
/*
 * These helpers are, as a whole, full barriers.  Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * cmpxchg primitive.
 *
 * Trace this load + RMW loop as a single RMW op. This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 *
 * FN is applied in XDATA_TYPE, so instantiating with SDATA_TYPE yields
 * the signed (smin/smax) variants and DATA_TYPE the unsigned ones.
 * RET selects which value the helper returns: "old" (fetch_<op>) or
 * "new" (<op>_fetch).
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
{                                                                   \
    XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
                                          PAGE_READ | PAGE_WRITE, retaddr); \
    XDATA_TYPE cmp, old, new, val = xval;                           \
    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);            \
    smp_mb();                                                       \
    cmp = qatomic_read__nocheck(haddr);                             \
    do {                                                            \
        old = cmp; new = FN(old, val);                              \
        cmp = qatomic_cmpxchg__nocheck(haddr, old, new);            \
    } while (cmp != old);                                           \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, info);                         \
    return RET;                                                     \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN,  DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX,  DATA_TYPE, old)

GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN,  DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new)

#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE >= 16 */
199 
#undef END

#if DATA_SIZE > 1

/* Define reverse-host-endian atomic operations.  Note that END is used
   within the ATOMIC_NAME macro.  The suffix is inverted relative to the
   host: a big-endian host emits the _le helpers here, and vice versa.  */
#ifdef HOST_WORDS_BIGENDIAN
# define END  _le
#else
# define END  _be
#endif
211 
/*
 * Atomic compare-and-swap, reverse host memory order.
 *
 * The guest-order operands are byte-swapped into host order before the
 * compare/store, and the host-order result is swapped back before
 * returning, so callers see guest-order values throughout.
 */
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                              ABI_TYPE cmpv, ABI_TYPE newv,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                         PAGE_READ | PAGE_WRITE, retaddr);
    DATA_TYPE ret;
    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);

#if DATA_SIZE == 16
    /* Int128 is not covered by the generic qatomic_* macros.  */
    ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
#else
    ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
#endif
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, info);
    return BSWAP(ret);
}
230 
231 #if DATA_SIZE >= 16
232 #if HAVE_ATOMIC128
/*
 * Atomic 16-byte load, reverse host memory order: the in-memory value
 * is byte-swapped before being returned.  Requires HAVE_ATOMIC128.
 */
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
                         TCGMemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                         PAGE_READ, retaddr);
    DATA_TYPE val;
    uint16_t info = atomic_trace_ld_pre(env, addr, oi);

    val = atomic16_read(haddr);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_ld_post(env, addr, info);
    return BSWAP(val);
}
246 
/*
 * Atomic 16-byte store, reverse host memory order: @val is byte-swapped
 * before being written.  Requires HAVE_ATOMIC128.
 */
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                         PAGE_WRITE, retaddr);
    uint16_t info = atomic_trace_st_pre(env, addr, oi);

    val = BSWAP(val);
    atomic16_set(haddr, val);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_st_post(env, addr, info);
}
259 #endif
260 #else
/*
 * Atomic exchange, reverse host memory order: the stored value is
 * byte-swapped on the way in, the returned old value on the way out.
 */
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                         PAGE_READ | PAGE_WRITE, retaddr);
    ABI_TYPE ret;
    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);

    ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, info);
    return BSWAP(ret);
}
274 
/*
 * Reverse-endian RMW helpers built on the qatomic_##X host primitives.
 * Only the bitwise operations are generated this way: AND/OR/XOR commute
 * with byte swapping, so operating on swapped operands is equivalent.
 * Arithmetic and min/max do not commute and use the cmpxchg loop below.
 */
#define GEN_ATOMIC_HELPER(X)                                        \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
{                                                                   \
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,  \
                                         PAGE_READ | PAGE_WRITE, retaddr); \
    DATA_TYPE ret;                                                  \
    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);            \
    ret = qatomic_##X(haddr, BSWAP(val));                           \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, info);                         \
    return BSWAP(ret);                                              \
}

GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER
297 
/* These helpers are, as a whole, full barriers.  Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * cmpxchg primitive.
 *
 * Trace this load + RMW loop as a single RMW op. This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 *
 * The cmpxchg loop keeps two views of the value: ldo/ldn are in memory
 * (reverse-host) order and are what the cmpxchg compares, while old/new
 * are byte-swapped into host order so FN can be applied meaningfully.
 * RET returns "old" or "new" — both already in host (guest ABI) order.
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
{                                                                   \
    XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
                                          PAGE_READ | PAGE_WRITE, retaddr); \
    XDATA_TYPE ldo, ldn, old, new, val = xval;                      \
    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);            \
    smp_mb();                                                       \
    ldn = qatomic_read__nocheck(haddr);                             \
    do {                                                            \
        ldo = ldn; old = BSWAP(ldo); new = FN(old, val);            \
        ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new));     \
    } while (ldo != ldn);                                           \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, info);                         \
    return RET;                                                     \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN,  DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX,  DATA_TYPE, old)

GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN,  DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new)

/* Note that for addition, we need to use a separate cmpxchg loop instead
   of bswaps for the reverse-host-endian helpers.  */
#define ADD(X, Y)   (X + Y)
GEN_ATOMIC_HELPER_FN(fetch_add, ADD, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD

#undef GEN_ATOMIC_HELPER_FN
342 #endif /* DATA_SIZE >= 16 */
343 
344 #undef END
345 #endif /* DATA_SIZE > 1 */
346 
347 #undef BSWAP
348 #undef ABI_TYPE
349 #undef DATA_TYPE
350 #undef SDATA_TYPE
351 #undef SUFFIX
352 #undef DATA_SIZE
353 #undef SHIFT
354