/*
 * Atomic helper templates
 * Included from tcg-runtime.c and cputlb.c.
 *
 * Copyright (c) 2016 Red Hat, Inc
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "trace/mem.h"

#if DATA_SIZE == 16
# define SUFFIX     o
# define DATA_TYPE  Int128
# define BSWAP      bswap128
# define SHIFT      4
#elif DATA_SIZE == 8
# define SUFFIX     q
# define DATA_TYPE  uint64_t
# define SDATA_TYPE int64_t
# define BSWAP      bswap64
# define SHIFT      3
#elif DATA_SIZE == 4
# define SUFFIX     l
# define DATA_TYPE  uint32_t
# define SDATA_TYPE int32_t
# define BSWAP      bswap32
# define SHIFT      2
#elif DATA_SIZE == 2
# define SUFFIX     w
# define DATA_TYPE  uint16_t
# define SDATA_TYPE int16_t
# define BSWAP      bswap16
# define SHIFT      1
#elif DATA_SIZE == 1
# define SUFFIX     b
# define DATA_TYPE  uint8_t
# define SDATA_TYPE int8_t
# define BSWAP
# define SHIFT      0
#else
# error unsupported data size
#endif

#if DATA_SIZE >= 4
# define ABI_TYPE  DATA_TYPE
#else
# define ABI_TYPE  uint32_t
#endif

#define ATOMIC_TRACE_RMW do {                                           \
        uint16_t info = glue(trace_mem_build_info_no_se, MEND)          \
            (SHIFT, false, ATOMIC_MMU_IDX);                             \
                                                                        \
        trace_guest_mem_before_exec(env_cpu(env), addr, info);          \
        trace_guest_mem_before_exec(env_cpu(env), addr,                 \
                                    info | TRACE_MEM_ST);               \
    } while (0)

#define ATOMIC_TRACE_LD do {                                            \
        uint16_t info = glue(trace_mem_build_info_no_se, MEND)          \
            (SHIFT, false, ATOMIC_MMU_IDX);                             \
                                                                        \
        trace_guest_mem_before_exec(env_cpu(env), addr, info);          \
    } while (0)

#define ATOMIC_TRACE_ST do {                                            \
        uint16_t info = glue(trace_mem_build_info_no_se, MEND)          \
            (SHIFT, true, ATOMIC_MMU_IDX);                              \
                                                                        \
        trace_guest_mem_before_exec(env_cpu(env), addr, info);          \
    } while (0)

/* Define host-endian atomic operations.  Note that END is used within
   the ATOMIC_NAME macro, and redefined below.  */
#if DATA_SIZE == 1
# define END
# define MEND _be /* either le or be would be fine */
#elif defined(HOST_WORDS_BIGENDIAN)
# define END  _be
# define MEND _be
#else
# define END  _le
# define MEND _le
#endif
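/*
 * Illustrative sketch only, not part of the template: ATOMIC_NAME and
 * EXTRA_ARGS are supplied by the including file, so the exact helper
 * names and signatures are decided there.  Assuming the conventional
 * includers, a DATA_SIZE == 4 instantiation on a little-endian host
 * produces helpers shaped roughly like
 *
 *     uint32_t helper_atomic_cmpxchgl_le(CPUArchState *env,
 *                                        target_ulong addr,
 *                                        uint32_t cmpv, uint32_t newv, ...);
 *
 * with SUFFIX ("l") and END ("_le") pasted into the name and any
 * EXTRA_ARGS appended by the includer.  For the sub-word sizes,
 * ABI_TYPE stays uint32_t even though DATA_TYPE is narrower, so
 * values cross the helper ABI widened to 32 bits.
 */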
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                              ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ret;

    ATOMIC_TRACE_RMW;
#if DATA_SIZE == 16
    ret = atomic16_cmpxchg(haddr, cmpv, newv);
#else
    ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
#endif
    ATOMIC_MMU_CLEANUP;
    return ret;
}

#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;

    ATOMIC_TRACE_LD;
    val = atomic16_read(haddr);
    ATOMIC_MMU_CLEANUP;
    return val;
}

void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
                     ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;

    ATOMIC_TRACE_ST;
    atomic16_set(haddr, val);
    ATOMIC_MMU_CLEANUP;
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                           ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ret;

    ATOMIC_TRACE_RMW;
    ret = atomic_xchg__nocheck(haddr, val);
    ATOMIC_MMU_CLEANUP;
    return ret;
}

#define GEN_ATOMIC_HELPER(X)                                        \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE val EXTRA_ARGS)                    \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
    DATA_TYPE ret;                                                  \
                                                                    \
    ATOMIC_TRACE_RMW;                                               \
    ret = atomic_##X(haddr, val);                                   \
    ATOMIC_MMU_CLEANUP;                                             \
    return ret;                                                     \
}

GEN_ATOMIC_HELPER(fetch_add)
GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(add_fetch)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER
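/*
 * Illustrative note, not generated code: the fetch_<op> helpers above
 * return the value that was in memory before the operation, while the
 * <op>_fetch helpers return the value after it.  For example, with a
 * location holding 5, a hypothetical call into the fetch_add helper
 * with val == 3 returns 5 and leaves 8 in memory, whereas the
 * add_fetch variant performs the same store but returns 8.
 */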
/* These helpers are, as a whole, full barriers.  Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * the cmpxchg primitive.
 *
 * Trace this load + RMW loop as a single RMW op.  This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE xval EXTRA_ARGS)                   \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
    XDATA_TYPE cmp, old, new, val = xval;                           \
                                                                    \
    ATOMIC_TRACE_RMW;                                               \
    smp_mb();                                                       \
    cmp = atomic_read__nocheck(haddr);                              \
    do {                                                            \
        old = cmp; new = FN(old, val);                              \
        cmp = atomic_cmpxchg__nocheck(haddr, old, new);             \
    } while (cmp != old);                                           \
    ATOMIC_MMU_CLEANUP;                                             \
    return RET;                                                     \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN,  DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX,  DATA_TYPE, old)

GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN,  DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new)

#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE >= 16 */

#undef END
#undef MEND

#if DATA_SIZE > 1

/* Define reverse-host-endian atomic operations.  Note that END is used
   within the ATOMIC_NAME macro.  */
#ifdef HOST_WORDS_BIGENDIAN
# define END  _le
# define MEND _le
#else
# define END  _be
# define MEND _be
#endif

ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                              ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ret;

    ATOMIC_TRACE_RMW;
#if DATA_SIZE == 16
    ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
#else
    ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
#endif
    ATOMIC_MMU_CLEANUP;
    return BSWAP(ret);
}

#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;

    ATOMIC_TRACE_LD;
    val = atomic16_read(haddr);
    ATOMIC_MMU_CLEANUP;
    return BSWAP(val);
}

void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
                     ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;

    ATOMIC_TRACE_ST;
    val = BSWAP(val);
    atomic16_set(haddr, val);
    ATOMIC_MMU_CLEANUP;
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                           ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    ABI_TYPE ret;

    ATOMIC_TRACE_RMW;
    ret = atomic_xchg__nocheck(haddr, BSWAP(val));
    ATOMIC_MMU_CLEANUP;
    return BSWAP(ret);
}

#define GEN_ATOMIC_HELPER(X)                                        \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE val EXTRA_ARGS)                    \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
    DATA_TYPE ret;                                                  \
                                                                    \
    ATOMIC_TRACE_RMW;                                               \
    ret = atomic_##X(haddr, BSWAP(val));                            \
    ATOMIC_MMU_CLEANUP;                                             \
    return BSWAP(ret);                                              \
}

GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER
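/*
 * Illustration, comment only: the single-swap treatment above works
 * because the bitwise operations commute with a byte swap, e.g.
 *
 *     BSWAP(a) & BSWAP(b) == BSWAP(a & b)
 *
 * so swapping the operand on the way in and the result on the way out
 * reproduces the guest-endian value.  Addition does not commute with
 * BSWAP, since carries cross byte boundaries: for 16-bit values,
 * 0x00ff + 0x0001 == 0x0100, but bswap16(0x00ff) + bswap16(0x0001)
 * == 0xff00 + 0x0100 == 0x0000 (mod 2^16), and bswap16(0x0000) is not
 * 0x0100.  Hence add (and min/max, which must compare values in their
 * natural order) are handled below by a cmpxchg loop that swaps the
 * loaded value, applies the operation, and swaps the result back
 * before storing.
 */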
/* These helpers are, as a whole, full barriers.  Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * the cmpxchg primitive.
 *
 * Trace this load + RMW loop as a single RMW op.  This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE xval EXTRA_ARGS)                   \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
    XDATA_TYPE ldo, ldn, old, new, val = xval;                      \
                                                                    \
    ATOMIC_TRACE_RMW;                                               \
    smp_mb();                                                       \
    ldn = atomic_read__nocheck(haddr);                              \
    do {                                                            \
        ldo = ldn; old = BSWAP(ldo); new = FN(old, val);            \
        ldn = atomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new));      \
    } while (ldo != ldn);                                           \
    ATOMIC_MMU_CLEANUP;                                             \
    return RET;                                                     \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN,  DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX,  DATA_TYPE, old)

GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN,  DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new)

/* Note that for addition, we need to use a separate cmpxchg loop instead
   of bswaps for the reverse-host-endian helpers.  */
#define ADD(X, Y)   (X + Y)
GEN_ATOMIC_HELPER_FN(fetch_add, ADD, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD

#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE >= 16 */

#undef END
#undef MEND
#endif /* DATA_SIZE > 1 */

#undef ATOMIC_TRACE_ST
#undef ATOMIC_TRACE_LD
#undef ATOMIC_TRACE_RMW

#undef BSWAP
#undef ABI_TYPE
#undef DATA_TYPE
#undef SDATA_TYPE
#undef SUFFIX
#undef DATA_SIZE
#undef SHIFT
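/*
 * Includer sketch (illustrative only): the per-size macros are undefined
 * above so that the template can be instantiated once per access size.
 * The including file -- tcg-runtime.c or cputlb.c, which also provides
 * ATOMIC_NAME, EXTRA_ARGS and the ATOMIC_MMU_* hooks -- would do something
 * along the lines of
 *
 *     #define DATA_SIZE 1
 *     #include "atomic_template.h"
 *
 *     #define DATA_SIZE 2
 *     #include "atomic_template.h"
 *
 * and so on for the larger sizes (the header name is assumed here; use
 * whatever path this file has in the tree).
 */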