/*
 * Atomic helper templates
 * Included from tcg-runtime.c and cputlb.c.
 *
 * Copyright (c) 2016 Red Hat, Inc
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/plugin.h"

#if DATA_SIZE == 16
# define SUFFIX     o
# define DATA_TYPE  Int128
# define BSWAP      bswap128
# define SHIFT      4
#elif DATA_SIZE == 8
# define SUFFIX     q
# define DATA_TYPE  aligned_uint64_t
# define SDATA_TYPE aligned_int64_t
# define BSWAP      bswap64
# define SHIFT      3
#elif DATA_SIZE == 4
# define SUFFIX     l
# define DATA_TYPE  uint32_t
# define SDATA_TYPE int32_t
# define BSWAP      bswap32
# define SHIFT      2
#elif DATA_SIZE == 2
# define SUFFIX     w
# define DATA_TYPE  uint16_t
# define SDATA_TYPE int16_t
# define BSWAP      bswap16
# define SHIFT      1
#elif DATA_SIZE == 1
# define SUFFIX     b
# define DATA_TYPE  uint8_t
# define SDATA_TYPE int8_t
# define BSWAP
# define SHIFT      0
#else
# error unsupported data size
#endif

#if DATA_SIZE >= 4
# define ABI_TYPE  DATA_TYPE
#else
# define ABI_TYPE  uint32_t
#endif
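
/*
 * Example resolution (read off the dispatch above): with DATA_SIZE == 4,
 * SUFFIX is l, DATA_TYPE is uint32_t and ABI_TYPE is uint32_t; with
 * DATA_SIZE == 1, DATA_TYPE is uint8_t but ABI_TYPE widens to uint32_t,
 * so sub-word values travel to and from the helpers in a full 32-bit slot.
 */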

/* Define host-endian atomic operations.  Note that END is used within
   the ATOMIC_NAME macro, and redefined below.  */
#if DATA_SIZE == 1
# define END
#elif defined(HOST_WORDS_BIGENDIAN)
# define END  _be
#else
# define END  _le
#endif
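
/*
 * Worked example, assuming the usual ATOMIC_NAME definition in the
 * including file (which glues "atomic_", the operation name, SUFFIX and
 * END together): with DATA_SIZE == 4 on a little-endian host,
 * ATOMIC_NAME(cmpxchg) names a helper along the lines of
 * helper_atomic_cmpxchgl_le.
 */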

ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                              ABI_TYPE cmpv, ABI_TYPE newv,
                              MemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                         PAGE_READ | PAGE_WRITE, retaddr);
    DATA_TYPE ret;

#if DATA_SIZE == 16
    ret = atomic16_cmpxchg(haddr, cmpv, newv);
#else
    ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
#endif
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, oi);
    return ret;
}
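
/*
 * Note on the helper above: the return value is the old memory
 * contents, so a caller can test for success by comparing it with cmpv.
 * Only the 16-byte case needs the atomic16_* primitives; everything
 * smaller goes through qatomic_cmpxchg__nocheck().
 */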

#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
                         MemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                         PAGE_READ, retaddr);
    DATA_TYPE val;

    val = atomic16_read(haddr);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_ld_post(env, addr, oi);
    return val;
}

void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
                     MemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                         PAGE_WRITE, retaddr);

    atomic16_set(haddr, val);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_st_post(env, addr, oi);
}
#endif
#else
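/*
 * Below 16 bytes the qatomic_* primitives are available, so xchg and
 * the arithmetic/logical read-modify-write helpers are generated only
 * on this branch.
 */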
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
                           MemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                         PAGE_READ | PAGE_WRITE, retaddr);
    DATA_TYPE ret;

    ret = qatomic_xchg__nocheck(haddr, val);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, oi);
    return ret;
}

#define GEN_ATOMIC_HELPER(X)                                        \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{                                                                   \
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,  \
                                         PAGE_READ | PAGE_WRITE, retaddr); \
    DATA_TYPE ret;                                                  \
    ret = qatomic_##X(haddr, val);                                  \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, oi);                           \
    return ret;                                                     \
}

GEN_ATOMIC_HELPER(fetch_add)
GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(add_fetch)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER
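
/*
 * Expansion sketch (hypothetical name, DATA_SIZE == 4 on a little-endian
 * host): GEN_ATOMIC_HELPER(fetch_add) above emits the equivalent of
 *
 *   uint32_t helper_atomic_fetch_addl_le(CPUArchState *env,
 *                                        target_ulong addr, uint32_t val,
 *                                        MemOpIdx oi, uintptr_t retaddr);
 *
 * which atomically adds val to the guest word and returns the value the
 * word held before the addition.
 */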

/*
 * These helpers are, as a whole, full barriers.  Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * the cmpxchg primitive.
 *
 * Trace this load + RMW loop as a single RMW op. This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{                                                                   \
    XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
                                          PAGE_READ | PAGE_WRITE, retaddr); \
    XDATA_TYPE cmp, old, new, val = xval;                           \
    smp_mb();                                                       \
    cmp = qatomic_read__nocheck(haddr);                             \
    do {                                                            \
        old = cmp; new = FN(old, val);                              \
        cmp = qatomic_cmpxchg__nocheck(haddr, old, new);            \
    } while (cmp != old);                                           \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, oi);                           \
    return RET;                                                     \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN,  DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX,  DATA_TYPE, old)

GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN,  DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new)
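
/*
 * The signed/unsigned split above comes from XDATA_TYPE: instantiating
 * with SDATA_TYPE makes the MIN/MAX comparison in FN(old, val) signed,
 * while DATA_TYPE keeps it unsigned.  RET picks whether the pre-op
 * value (old) or the post-op value (new) is returned.
 */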

#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE >= 16 */

#undef END

#if DATA_SIZE > 1

/* Define reverse-host-endian atomic operations.  Note that END is used
   within the ATOMIC_NAME macro.  */
#ifdef HOST_WORDS_BIGENDIAN
# define END  _le
#else
# define END  _be
#endif
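
/*
 * Example, under the same ATOMIC_NAME assumption as above: with
 * DATA_SIZE == 4 on a little-endian host, END is now _be, so this
 * section provides the opposite-endian helpers such as
 * helper_atomic_cmpxchgl_be.
 */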

ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                              ABI_TYPE cmpv, ABI_TYPE newv,
                              MemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                         PAGE_READ | PAGE_WRITE, retaddr);
    DATA_TYPE ret;

#if DATA_SIZE == 16
    ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
#else
    ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
#endif
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, oi);
    return BSWAP(ret);
}
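
/*
 * Note on the helper above: cmpv and newv arrive in host-endian form
 * and are byte-swapped into the guest's memory layout before the
 * compare-and-swap; the old memory contents are swapped back to host
 * order before being returned.
 */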

#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
                         MemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                         PAGE_READ, retaddr);
    DATA_TYPE val;

    val = atomic16_read(haddr);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_ld_post(env, addr, oi);
    return BSWAP(val);
}

void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
                     MemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                         PAGE_WRITE, retaddr);

    val = BSWAP(val);
    atomic16_set(haddr, val);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_st_post(env, addr, oi);
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
                           MemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                         PAGE_READ | PAGE_WRITE, retaddr);
    ABI_TYPE ret;

    ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, oi);
    return BSWAP(ret);
}

#define GEN_ATOMIC_HELPER(X)                                        \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{                                                                   \
    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,  \
                                         PAGE_READ | PAGE_WRITE, retaddr); \
    DATA_TYPE ret;                                                  \
    ret = qatomic_##X(haddr, BSWAP(val));                           \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, oi);                           \
    return BSWAP(ret);                                              \
}

GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER
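
/*
 * fetch_add and add_fetch are deliberately absent from the list above:
 * AND, OR and XOR operate bytewise and therefore commute with a byte
 * swap, but addition does not, so the reverse-endian add helpers are
 * built from the cmpxchg loop further below.
 */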

/* These helpers are, as a whole, full barriers.  Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * the cmpxchg primitive.
 *
 * Trace this load + RMW loop as a single RMW op. This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{                                                                   \
    XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
                                          PAGE_READ | PAGE_WRITE, retaddr); \
    XDATA_TYPE ldo, ldn, old, new, val = xval;                      \
    smp_mb();                                                       \
    ldn = qatomic_read__nocheck(haddr);                             \
    do {                                                            \
        ldo = ldn; old = BSWAP(ldo); new = FN(old, val);            \
        ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new));     \
    } while (ldo != ldn);                                           \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, oi);                           \
    return RET;                                                     \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN,  DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX,  DATA_TYPE, old)

GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN,  DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new)

/* Note that for addition, we need to use a separate cmpxchg loop instead
   of bswaps for the reverse-host-endian helpers.  */
#define ADD(X, Y)   (X + Y)
GEN_ATOMIC_HELPER_FN(fetch_add, ADD, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD
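
/*
 * Concrete illustration (hypothetical 16-bit values) of why bswap does
 * not commute with addition: 0x00ff + 0x0001 == 0x0100, yet
 * bswap16(0x00ff) + bswap16(0x0001) == 0xff00 + 0x0100 == 0x0000,
 * while bswap16(0x0100) == 0x0001.  The carry crosses a byte boundary,
 * so the loop above swaps to host order, adds, and swaps back.
 */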

#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE >= 16 */

#undef END
#endif /* DATA_SIZE > 1 */

#undef BSWAP
#undef ABI_TYPE
#undef DATA_TYPE
#undef SDATA_TYPE
#undef SUFFIX
#undef DATA_SIZE
#undef SHIFT
341