/*
 * Atomic helper templates
 * Included from tcg-runtime.c and cputlb.c.
 *
 * Copyright (c) 2016 Red Hat, Inc
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "trace/mem.h"

#if DATA_SIZE == 16
# define SUFFIX     o
# define DATA_TYPE  Int128
# define BSWAP      bswap128
# define SHIFT      4
#elif DATA_SIZE == 8
# define SUFFIX     q
# define DATA_TYPE  uint64_t
# define SDATA_TYPE int64_t
# define BSWAP      bswap64
# define SHIFT      3
#elif DATA_SIZE == 4
# define SUFFIX     l
# define DATA_TYPE  uint32_t
# define SDATA_TYPE int32_t
# define BSWAP      bswap32
# define SHIFT      2
#elif DATA_SIZE == 2
# define SUFFIX     w
# define DATA_TYPE  uint16_t
# define SDATA_TYPE int16_t
# define BSWAP      bswap16
# define SHIFT      1
#elif DATA_SIZE == 1
# define SUFFIX     b
# define DATA_TYPE  uint8_t
# define SDATA_TYPE int8_t
# define BSWAP
# define SHIFT      0
#else
# error unsupported data size
#endif

#if DATA_SIZE >= 4
# define ABI_TYPE  DATA_TYPE
#else
# define ABI_TYPE  uint32_t
#endif

#define ATOMIC_TRACE_RMW do {                                                 \
        uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false);  \
                                                                              \
        trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info);            \
        trace_guest_mem_before_exec(ENV_GET_CPU(env), addr,                   \
                                    info | TRACE_MEM_ST);                     \
    } while (0)

#define ATOMIC_TRACE_LD do {                                                  \
        uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false);  \
                                                                              \
        trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info);            \
    } while (0)

#define ATOMIC_TRACE_ST do {                                                  \
        uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, true);   \
                                                                              \
        trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info);            \
    } while (0)

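/*
 * Note that ATOMIC_TRACE_RMW reports a read-modify-write access as one
 * guest load followed by one guest store (the second call ORs in
 * TRACE_MEM_ST), so each helper below traces exactly one read and one
 * write.  MEND picks the _le or _be flavour of trace_mem_build_info_no_se
 * and is defined together with END for each block of helpers below.
 */
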
/* Define host-endian atomic operations.  Note that END is used within
   the ATOMIC_NAME macro, and redefined below.  */
#if DATA_SIZE == 1
# define END
# define MEND _be      /* either le or be would be fine */
#elif defined(HOST_WORDS_BIGENDIAN)
# define END  _be
# define MEND _be
#else
# define END  _le
# define MEND _le
#endif

ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                              ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ret;

    ATOMIC_TRACE_RMW;
#if DATA_SIZE == 16
    ret = atomic16_cmpxchg(haddr, cmpv, newv);
#else
    ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
#endif
    ATOMIC_MMU_CLEANUP;
    return ret;
}

#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;

    ATOMIC_TRACE_LD;
    val = atomic16_read(haddr);
    ATOMIC_MMU_CLEANUP;
    return val;
}

void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
                     ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;

    ATOMIC_TRACE_ST;
    atomic16_set(haddr, val);
    ATOMIC_MMU_CLEANUP;
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                           ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ret;

    ATOMIC_TRACE_RMW;
    ret = atomic_xchg__nocheck(haddr, val);
    ATOMIC_MMU_CLEANUP;
    return ret;
}

#define GEN_ATOMIC_HELPER(X)                                        \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE val EXTRA_ARGS)                    \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
    DATA_TYPE ret;                                                  \
                                                                    \
    ATOMIC_TRACE_RMW;                                               \
    ret = atomic_##X(haddr, val);                                   \
    ATOMIC_MMU_CLEANUP;                                             \
    return ret;                                                     \
}

GEN_ATOMIC_HELPER(fetch_add)
GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(add_fetch)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER

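/*
 * For illustration only: assuming DATA_SIZE == 4, so that DATA_TYPE and
 * ABI_TYPE are both uint32_t, GEN_ATOMIC_HELPER(fetch_or) above expands to
 * roughly
 *
 *     uint32_t ATOMIC_NAME(fetch_or)(CPUArchState *env, target_ulong addr,
 *                                    uint32_t val EXTRA_ARGS)
 *     {
 *         ATOMIC_MMU_DECLS;
 *         uint32_t *haddr = ATOMIC_MMU_LOOKUP;
 *         uint32_t ret;
 *
 *         ATOMIC_TRACE_RMW;
 *         ret = atomic_fetch_or(haddr, val);
 *         ATOMIC_MMU_CLEANUP;
 *         return ret;
 *     }
 *
 * with ATOMIC_NAME, EXTRA_ARGS and the ATOMIC_MMU_* macros still supplied
 * by the including file.
 */
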
/* These helpers are, as a whole, full barriers.  Within the helper,
 * the leading barrier is explicit and the trailing barrier is within the
 * cmpxchg primitive.
 *
 * Trace this load + RMW loop as a single RMW op.  This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE xval EXTRA_ARGS)                   \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
    XDATA_TYPE cmp, old, new, val = xval;                           \
                                                                    \
    ATOMIC_TRACE_RMW;                                               \
    smp_mb();                                                       \
    cmp = atomic_read__nocheck(haddr);                              \
    do {                                                            \
        old = cmp; new = FN(old, val);                              \
        cmp = atomic_cmpxchg__nocheck(haddr, old, new);             \
    } while (cmp != old);                                           \
    ATOMIC_MMU_CLEANUP;                                             \
    return RET;                                                     \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)

GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)

#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE >= 16 */

#undef END
#undef MEND

#if DATA_SIZE > 1

/* Define reverse-host-endian atomic operations.  Note that END is used
   within the ATOMIC_NAME macro.  */
#ifdef HOST_WORDS_BIGENDIAN
# define END  _le
# define MEND _le
#else
# define END  _be
# define MEND _be
#endif

ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                              ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ret;

    ATOMIC_TRACE_RMW;
#if DATA_SIZE == 16
    ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
#else
    ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
#endif
    ATOMIC_MMU_CLEANUP;
    return BSWAP(ret);
}

#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;

    ATOMIC_TRACE_LD;
    val = atomic16_read(haddr);
    ATOMIC_MMU_CLEANUP;
    return BSWAP(val);
}

void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
                     ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;

    ATOMIC_TRACE_ST;
    val = BSWAP(val);
    atomic16_set(haddr, val);
    ATOMIC_MMU_CLEANUP;
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                           ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    ABI_TYPE ret;

    ATOMIC_TRACE_RMW;
    ret = atomic_xchg__nocheck(haddr, BSWAP(val));
    ATOMIC_MMU_CLEANUP;
    return BSWAP(ret);
}

#define GEN_ATOMIC_HELPER(X)                                        \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE val EXTRA_ARGS)                    \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
    DATA_TYPE ret;                                                  \
                                                                    \
    ATOMIC_TRACE_RMW;                                               \
    ret = atomic_##X(haddr, BSWAP(val));                            \
    ATOMIC_MMU_CLEANUP;                                             \
    return BSWAP(ret);                                              \
}

GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER

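/*
 * The bitwise helpers above can simply byte-swap their operands because
 * BSWAP distributes over AND, OR and XOR: BSWAP(a) op BSWAP(b) equals
 * BSWAP(a op b).  The same is not true of addition, since carries cross
 * byte boundaries: bswap32(0xff) + bswap32(0x01) is 0 modulo 2^32, while
 * bswap32(0xff + 0x01) is 0x00010000.  That is why fetch_add and add_fetch
 * below use the cmpxchg-loop form instead (see the note above ADD).
 */
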
/* These helpers are, as a whole, full barriers.  Within the helper,
 * the leading barrier is explicit and the trailing barrier is within the
 * cmpxchg primitive.
 *
 * Trace this load + RMW loop as a single RMW op.  This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE xval EXTRA_ARGS)                   \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
    XDATA_TYPE ldo, ldn, old, new, val = xval;                      \
                                                                    \
    ATOMIC_TRACE_RMW;                                               \
    smp_mb();                                                       \
    ldn = atomic_read__nocheck(haddr);                              \
    do {                                                            \
        ldo = ldn; old = BSWAP(ldo); new = FN(old, val);            \
        ldn = atomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new));      \
    } while (ldo != ldn);                                           \
    ATOMIC_MMU_CLEANUP;                                             \
    return RET;                                                     \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)

GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)

/* Note that for addition, we need to use a separate cmpxchg loop instead
   of bswaps for the reverse-host-endian helpers.  */
#define ADD(X, Y)   (X + Y)
GEN_ATOMIC_HELPER_FN(fetch_add, ADD, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD

#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE >= 16 */

#undef END
#undef MEND
#endif /* DATA_SIZE > 1 */

#undef ATOMIC_TRACE_ST
#undef ATOMIC_TRACE_LD
#undef ATOMIC_TRACE_RMW

#undef BSWAP
#undef ABI_TYPE
#undef DATA_TYPE
#undef SDATA_TYPE
#undef SUFFIX
#undef DATA_SIZE
#undef SHIFT
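
/*
 * Usage sketch (illustrative only, not copied from an actual includer):
 * per the header comment this template is included from tcg-runtime.c and
 * cputlb.c, which define ATOMIC_NAME, EXTRA_ARGS and the ATOMIC_MMU_*
 * macros for their lookup strategy and then instantiate the template once
 * per operand size, along the lines of (assuming this header is named
 * "atomic_template.h"):
 *
 *     #define DATA_SIZE 1
 *     #include "atomic_template.h"
 *
 *     #define DATA_SIZE 2
 *     #include "atomic_template.h"
 *
 *     #define DATA_SIZE 4
 *     #include "atomic_template.h"
 *
 * All of the per-size macros, including DATA_SIZE itself, are #undef'd
 * above, so each inclusion starts from a clean slate.
 */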