/*
 * User emulator execution
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "internal.h"

/*
 * Some host system headers define these as macros for signal-context
 * register access; undefine them so they cannot clash with identifiers
 * used by QEMU's own code.
 */
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#ifdef __linux__
#include <sys/ucontext.h>
#endif

/*
 * Per-thread state consulted by the SIGSEGV path (handle_cpu_signal):
 * 0 while executing generated code, 1 while reading guest memory for
 * translation, otherwise the host return address of the helper that is
 * performing a guest memory access.
 */
__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
static void QEMU_NORETURN cpu_exit_tb_from_sighandler(CPUState *cpu,
                                                      sigset_t *old_set)
{
    /* XXX: use siglongjmp ? */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit_noexc(cpu);
}

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0'.
   'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
                                    int is_write, sigset_t *old_set)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc;
    unsigned long address = (unsigned long)info->si_addr;
    MMUAccessType access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;

    /* Decode the per-thread helper_retaddr sentinel; see its definition. */
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, a unrelated bug within qemu, but we can't tell from here).
         *
         * We take the host pc from the signal frame. However, we cannot
         * use that value directly. Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn. However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated. If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here). Therefore, do not
         * trigger the unwinder.
         *
         * Like tb_gen_code, release the memory lock before cpu_loop_exit.
         */
        pc = 0;
        access_type = MMU_INST_FETCH;
        mmap_unlock();
        break;
    }

    /* For synchronous signals we expect to be coming from the vCPU
     * thread (so current_cpu should be valid) and either from running
     * code or during translation which can fault as we cross pages.
     *
     * If neither is true then something has gone wrong and we should
     * abort rather than try and restart the vCPU execution.
     */
    if (!cpu || !cpu->running) {
        printf("qemu:%s received signal outside vCPU context @ pc=0x%"
               PRIxPTR "\n", __func__, pc);
        abort();
    }

#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* Note that it is important that we don't call page_unprotect() unless
     * this is really a "write to nonwriteable page" fault, because
     * page_unprotect() assumes that if it is called for an access to
     * a page that's writeable this means we had two threads racing and
     * another thread got there first and already made the page writeable;
     * so we will retry the access. If we were to call page_unprotect()
     * for some other kind of fault that should really be passed to the
     * guest, we'd end up in an infinite loop of retrying the faulting
     * access.
     */
    if (is_write && info->si_signo == SIGSEGV && info->si_code == SEGV_ACCERR &&
        h2g_valid(address)) {
        switch (page_unprotect(h2g(address), pc)) {
        case 0:
            /* Fault not caused by a page marked unwritable to protect
             * cached translations, must be the guest binary's problem.
             */
            break;
        case 1:
            /* Fault caused by protection of cached translation; TBs
             * invalidated, so resume execution. Retain helper_retaddr
             * for a possible second fault.
             */
            return 1;
        case 2:
            /* Fault caused by protection of cached translation, and the
             * currently executing TB was modified and must be exited
             * immediately. Clear helper_retaddr for next execution.
             */
            clear_helper_retaddr();
            cpu_exit_tb_from_sighandler(cpu, old_set);
            /* NORETURN */

        default:
            g_assert_not_reached();
        }
    }

    /* Convert forcefully to guest address space, invalid addresses
       are still valid segv ones */
    address = h2g_nocheck(address);

    /*
     * There is no way the target can handle this other than raising
     * an exception. Undo signal and retaddr state prior to longjmp.
     */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    clear_helper_retaddr();

    cc = CPU_GET_CLASS(cpu);
    cc->tcg_ops->tlb_fill(cpu, address, 0, access_type,
                          MMU_USER_IDX, false, pc);
    g_assert_not_reached();
}

/*
 * Validate that @addr may be accessed with @access_type.
 * Returns 0 on success, TLB_INVALID_MASK if the access would fault and
 * @nonfault is set; otherwise delivers the fault via tlb_fill and does
 * not return.
 */
static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int flags;

    switch (access_type) {
    case MMU_DATA_STORE:
        flags = PAGE_WRITE;
        break;
    case MMU_DATA_LOAD:
        flags = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        flags = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (!guest_addr_valid_untagged(addr) ||
        page_check_range(addr, 1, flags) < 0) {
        if (nonfault) {
            return TLB_INVALID_MASK;
        } else {
            CPUState *cpu = env_cpu(env);
            CPUClass *cc = CPU_GET_CLASS(cpu);
            cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
                                  MMU_USER_IDX, false, ra);
            g_assert_not_reached();
        }
    }
    return 0;
}

int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
    *phost = flags ?
             NULL : g2h(env_cpu(env), addr);
    return flags;
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}

#if defined(__i386__)

#if defined(__NetBSD__)
#include <ucontext.h>
#include <machine/trap.h>

#define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP])
#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context) ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP T_PAGEFLT
#elif defined(__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>
#include <machine/trap.h>

#define EIP_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_eip))
#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
#define MASK_sig(context) ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP T_PAGEFLT
#elif defined(__OpenBSD__)
#include <machine/trap.h>
#define EIP_sig(context) ((context)->sc_eip)
#define TRAP_sig(context) ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context) ((context)->sc_mask)
#define PAGE_FAULT_TRAP T_PAGEFLT
#else
#define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context) ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP 0xe
#endif

/*
 * Host SIGSEGV handler for i386: pull pc and the page-fault write bit
 * (bit 1 of the error code, per the trapno/error-code test below) out
 * of the signal context.
 */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    ucontext_t *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP EIP
#define REG_ERR ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, info,
                             trapno == PAGE_FAULT_TRAP ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc));
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#include <machine/trap.h>
#define PC_sig(context) _UC_MACHINE_PC(context)
#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context) ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP T_PAGEFLT
#elif defined(__OpenBSD__)
#include <machine/trap.h>
#define PC_sig(context) ((context)->sc_rip)
#define TRAP_sig(context) ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context) ((context)->sc_mask)
#define PAGE_FAULT_TRAP T_PAGEFLT
#elif defined(__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>
#include <machine/trap.h>

#define PC_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
#define MASK_sig(context) ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP T_PAGEFLT
#else
#define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context) ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP 0xe
#endif

/* Host SIGSEGV handler for x86_64; same error-code decode as i386. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    ucontext_t *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, info,
                             TRAP_sig(uc) == PAGE_FAULT_TRAP ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc));
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
#define REG_sig(reg_name, context) \
    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
#define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
/* Program counter */
#define IAR_sig(context) REG_sig(nip, context)
/* Machine State Register (Supervisor) */
#define MSR_sig(context) REG_sig(msr, context)
/* Count register */
#define CTR_sig(context) REG_sig(ctr, context)
/* User's integer exception register */
#define XER_sig(context) REG_sig(xer, context)
/* Link register */
#define LR_sig(context) REG_sig(link, context)
/* Condition register */
#define CR_sig(context) REG_sig(ccr, context)

/* Float Registers access */
#define FLOAT_sig(reg_num, context) \
    (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
#define FPSCR_sig(context) \
    (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
/* Exception Registers access */
#define DAR_sig(context) REG_sig(dar, context)
#define DSISR_sig(context) REG_sig(dsisr, context)
#define TRAP_sig(context) REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
#define IAR_sig(context) ((context)->uc_mcontext.mc_srr0)
#define MSR_sig(context) ((context)->uc_mcontext.mc_srr1)
#define CTR_sig(context) ((context)->uc_mcontext.mc_ctr)
#define XER_sig(context) ((context)->uc_mcontext.mc_xer)
#define LR_sig(context) ((context)->uc_mcontext.mc_lr)
#define CR_sig(context) ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
#define DAR_sig(context) ((context)->uc_mcontext.mc_dar)
#define DSISR_sig(context) ((context)->uc_mcontext.mc_dsisr)
#define TRAP_sig(context) ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

/*
 * Host SIGSEGV handler for ppc: the DSISR store bit (0x02000000) marks
 * a write fault, except when the trap is 0x400 (see the test below).
 */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    ucontext_t *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000) {
        is_write = 1;
    }
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000)) {
        is_write = 1;
    }
#endif
    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#elif defined(__alpha__)

/*
 * Host SIGSEGV handler for alpha: classify the faulting instruction's
 * major opcode (insn bits 31:26) to decide whether it was a store.
 */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: /* stw */
    case 0x0e: /* stb */
    case 0x0f: /* stq_u */
    case 0x24: /* stf */
    case 0x25: /* stg */
    case 0x26: /* sts */
    case 0x27: /* stt */
    case 0x2c: /* stl */
    case 0x2d: /* stq */
    case 0x2e: /* stl_c */
    case 0x2f: /* stq_c */
        is_write = 1;
    }

    return handle_cpu_signal(pc, info,
                             is_write, &uc->uc_sigmask);
}
#elif defined(__sparc__)

/*
 * Host SIGSEGV handler for sparc: decode the faulting instruction
 * (format 3, op3 field in bits 24:19) to detect stores.
 */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#elif defined(__NetBSD__)
    ucontext_t *uc = puc;
    unsigned long pc = _UC_MACHINE_PC(uc);
    void *sigmask = (void *)&uc->uc_sigmask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: /* stb */
        case 0x15: /* stba */
        case 0x06: /* sth */
        case 0x16: /* stha */
        case 0x04: /* st */
        case 0x14: /* sta */
        case 0x07: /* std */
        case 0x17: /* stda */
        case 0x0e: /* stx */
        case 0x1e: /* stxa */
        case 0x24: /* stf */
        case 0x34: /* stfa */
        case 0x27: /* stdf */
        case 0x37: /* stdfa */
        case 0x26: /* stqf */
        case 0x36: /* stqfa */
        case 0x25: /* stfsr */
        case 0x3c: /* casa */
        case 0x3e: /* casxa */
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, info, is_write, sigmask);
}

#elif defined(__arm__)

#if defined(__NetBSD__)
#include <ucontext.h>
#include <sys/siginfo.h>
#endif

/* Host SIGSEGV handler for 32-bit arm: the write bit comes from the FSR. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__)
    ucontext_t *uc = puc;
    siginfo_t *si = pinfo;
#else
    ucontext_t *uc = puc;
#endif
    unsigned long pc;
    uint32_t fsr;
    int is_write;

#if defined(__NetBSD__)
    pc = uc->uc_mcontext.__gregs[_REG_R15];
#elif defined(__GLIBC__) && (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif

#ifdef __NetBSD__
    fsr = si->si_trap;
#else
    fsr = uc->uc_mcontext.error_code;
#endif
    /*
     * In the FSR, bit 11 is WnR, assuming a v6 or
     * later processor. On v5 we will always report
     * this as a read, which will fail later.
     */
    is_write = extract32(fsr, 11, 1);
    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#elif defined(__aarch64__)

#if defined(__NetBSD__)

#include <ucontext.h>
#include <sys/siginfo.h>

/* Host SIGSEGV handler for aarch64/NetBSD: WnR comes from the ESR. */
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    ucontext_t *uc = puc;
    siginfo_t *si = pinfo;
    unsigned long pc;
    int is_write;
    uint32_t esr;

    pc = uc->uc_mcontext.__gregs[_REG_PC];
    esr = si->si_trap;

    /*
     * siginfo_t::si_trap is the ESR value, for data aborts ESR.EC
     * is 0b10010x: then bit 6 is the WnR bit
     */
    is_write = extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1;
    return handle_cpu_signal(pc, si, is_write, &uc->uc_sigmask);
}

#else

#ifndef ESR_MAGIC
/* Pre-3.16 kernel headers don't have these, so provide fallback definitions */
#define ESR_MAGIC 0x45535201
struct esr_context {
    struct _aarch64_ctx head;
    uint64_t esr;
};
#endif

/* First record of the kernel's __reserved signal-context chain. */
static inline struct _aarch64_ctx *first_ctx(ucontext_t *uc)
{
    return (struct _aarch64_ctx *)&uc->uc_mcontext.__reserved;
}

/* Advance to the next record; each record carries its own size. */
static inline struct _aarch64_ctx *next_ctx(struct _aarch64_ctx *hdr)
{
    return (struct _aarch64_ctx *)((char *)hdr + hdr->size);
}

int cpu_signal_handler(int host_signum, void *pinfo, void
                       *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    uintptr_t pc = uc->uc_mcontext.pc;
    bool is_write;
    struct _aarch64_ctx *hdr;
    struct esr_context const *esrctx = NULL;

    /* Find the esr_context, which has the WnR bit in it */
    for (hdr = first_ctx(uc); hdr->magic; hdr = next_ctx(hdr)) {
        if (hdr->magic == ESR_MAGIC) {
            esrctx = (struct esr_context const *)hdr;
            break;
        }
    }

    if (esrctx) {
        /* For data aborts ESR.EC is 0b10010x: then bit 6 is the WnR bit */
        uint64_t esr = esrctx->esr;
        is_write = extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1;
    } else {
        /*
         * Fall back to parsing instructions; will only be needed
         * for really ancient (pre-3.16) kernels.
         */
        uint32_t insn = *(uint32_t *)pc;

        is_write = ((insn & 0xbfff0000) == 0x0c000000 /* C3.3.1 */
                    || (insn & 0xbfe00000) == 0x0c800000 /* C3.3.2 */
                    || (insn & 0xbfdf0000) == 0x0d000000 /* C3.3.3 */
                    || (insn & 0xbfc00000) == 0x0d800000 /* C3.3.4 */
                    || (insn & 0x3f400000) == 0x08000000 /* C3.3.6 */
                    || (insn & 0x3bc00000) == 0x39000000 /* C3.3.13 */
                    || (insn & 0x3fc00000) == 0x3d800000 /* ... 128bit */
                    /* Ignore bits 10, 11 & 21, controlling indexing. */
                    || (insn & 0x3bc00000) == 0x38000000 /* C3.3.8-12 */
                    || (insn & 0x3fe00000) == 0x3c800000 /* ... 128bit */
                    /* Ignore bits 23 & 24, controlling indexing. */
                    || (insn & 0x3a400000) == 0x28000000); /* C3.3.7,14-16 */
    }
    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}
#endif

#elif defined(__s390__)

/* Host SIGSEGV handler for s390: classify the insn at the PSW address. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /*
     * ??? On linux, the non-rt signal handler has 4 (!) arguments instead
     * of the normal 2 arguments. The 4th argument contains the "Translation-
     * Exception Identification for DAT Exceptions" from the hardware (aka
     * "int_parm_long"), which does in fact contain the is_write value.
     * The rt signal handler, as far as I can tell, does not give this value
     * at all. Not that we could get to it from here even if it were.
     * So fall back to parsing instructions. Treat read-modify-write ones as
     * writes, which is not fully correct, but for tracking self-modifying code
     * this is better than treating them as reads. Checking si_addr page flags
     * might be a viable improvement, albeit a racy one.
     */
    /* ??? This is not even close to complete. */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
    case 0xba: /* CS */
    case 0xbb: /* CDS */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xc8: /* SSF format insns */
        switch (pinsn[0] & 0xf) {
        case 0x2: /* CSST */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    case 0xeb: /* RSY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x14: /* CSY */
        case 0x30: /* CSG */
        case 0x31: /* CDSY */
        case 0x3e: /* CDSG */
        case 0xe4: /* LANG */
        case 0xe6: /* LAOG */
        case 0xe7: /* LAXG */
        case 0xe8: /* LAAG */
        case 0xea: /* LAALG */
        case 0xf4: /* LAN */
        case 0xf6: /* LAO */
        case 0xf7: /* LAX */
        case 0xfa: /* LAAL */
        case 0xf8: /* LAA */
            is_write = 1;
        }
        break;
    }

    return
handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); 754 } 755 756 #elif defined(__mips__) 757 758 #if defined(__misp16) || defined(__mips_micromips) 759 #error "Unsupported encoding" 760 #endif 761 762 int cpu_signal_handler(int host_signum, void *pinfo, 763 void *puc) 764 { 765 siginfo_t *info = pinfo; 766 ucontext_t *uc = puc; 767 uintptr_t pc = uc->uc_mcontext.pc; 768 uint32_t insn = *(uint32_t *)pc; 769 int is_write = 0; 770 771 /* Detect all store instructions at program counter. */ 772 switch((insn >> 26) & 077) { 773 case 050: /* SB */ 774 case 051: /* SH */ 775 case 052: /* SWL */ 776 case 053: /* SW */ 777 case 054: /* SDL */ 778 case 055: /* SDR */ 779 case 056: /* SWR */ 780 case 070: /* SC */ 781 case 071: /* SWC1 */ 782 case 074: /* SCD */ 783 case 075: /* SDC1 */ 784 case 077: /* SD */ 785 #if !defined(__mips_isa_rev) || __mips_isa_rev < 6 786 case 072: /* SWC2 */ 787 case 076: /* SDC2 */ 788 #endif 789 is_write = 1; 790 break; 791 case 023: /* COP1X */ 792 /* Required in all versions of MIPS64 since 793 MIPS64r1 and subsequent versions of MIPS32r2. */ 794 switch (insn & 077) { 795 case 010: /* SWXC1 */ 796 case 011: /* SDXC1 */ 797 case 015: /* SUXC1 */ 798 is_write = 1; 799 } 800 break; 801 } 802 803 return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); 804 } 805 806 #elif defined(__riscv) 807 808 int cpu_signal_handler(int host_signum, void *pinfo, 809 void *puc) 810 { 811 siginfo_t *info = pinfo; 812 ucontext_t *uc = puc; 813 greg_t pc = uc->uc_mcontext.__gregs[REG_PC]; 814 uint32_t insn = *(uint32_t *)pc; 815 int is_write = 0; 816 817 /* Detect store by reading the instruction at the program 818 counter. 
       Note: we currently only generate 32-bit
       instructions so we thus only detect 32-bit stores */
    switch (((insn >> 0) & 0b11)) {
    case 3:
        switch (((insn >> 2) & 0b11111)) {
        case 8:
            switch (((insn >> 12) & 0b111)) {
            case 0: /* sb */
            case 1: /* sh */
            case 2: /* sw */
            case 3: /* sd */
            case 4: /* sq */
                is_write = 1;
                break;
            default:
                break;
            }
            break;
        case 9:
            switch (((insn >> 12) & 0b111)) {
            case 2: /* fsw */
            case 3: /* fsd */
            case 4: /* fsq */
                is_write = 1;
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }
    }

    /* Check for compressed instructions */
    switch (((insn >> 13) & 0b111)) {
    case 7:
        switch (insn & 0b11) {
        case 0: /*c.sd */
        case 2: /* c.sdsp */
            is_write = 1;
            break;
        default:
            break;
        }
        break;
    case 6:
        switch (insn & 0b11) {
        case 0: /* c.sw */
        case 3: /* c.swsp */
            is_write = 1;
            break;
        default:
            break;
        }
        break;
    default:
        break;
    }

    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#else

#error host CPU specific signal handler needed

#endif

/* The softmmu versions of these helpers are in cputlb.c.
 */

/*
 * Guest memory load helpers.  Each translates the guest address with
 * g2h(), performs the host access, and surrounds it with the tracing
 * and plugin callbacks.
 */
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
{
    MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX);
    uint32_t ret;

    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
    ret = ldub_p(g2h(env_cpu(env), ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr)
{
    return (int8_t)cpu_ldub_data(env, ptr);
}

uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr)
{
    MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX);
    uint32_t ret;

    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
    ret = lduw_be_p(g2h(env_cpu(env), ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr)
{
    return (int16_t)cpu_lduw_be_data(env, ptr);
}

uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
{
    MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);
    uint32_t ret;

    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
    ret = ldl_be_p(g2h(env_cpu(env), ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr)
{
    MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX);
    uint64_t ret;

    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
    ret = ldq_be_p(g2h(env_cpu(env), ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr)
{
    MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX);
    uint32_t ret;

    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
    ret = lduw_le_p(g2h(env_cpu(env), ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr)
{
    return (int16_t)cpu_lduw_le_data(env, ptr);
}

uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
    uint32_t ret;

    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
    ret = ldl_le_p(g2h(env_cpu(env), ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr)
{
    MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);
    uint64_t ret;

    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
    ret = ldq_le_p(g2h(env_cpu(env), ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

/*
 * The _ra variants record the caller's return address in
 * helper_retaddr around the access, so that a SIGSEGV taken during
 * the host access can unwind to the correct guest pc.
 */
uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldub_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    return (int8_t)cpu_ldub_data_ra(env, ptr, retaddr);
}

uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_lduw_be_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    return (int16_t)cpu_lduw_be_data_ra(env, ptr, retaddr);
}

uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldl_be_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint64_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldq_be_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_lduw_le_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    return (int16_t)cpu_lduw_le_data_ra(env, ptr, retaddr);
}

uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldl_le_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint64_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldq_le_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

/* Guest memory store helpers; mirror images of the loads above. */
void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX);

    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
    stb_p(g2h(env_cpu(env), ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX);

    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
    stw_be_p(g2h(env_cpu(env), ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);

    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
    stl_be_p(g2h(env_cpu(env), ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
{
    MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX);

    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
    stq_be_p(g2h(env_cpu(env), ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX);

    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
    stw_le_p(g2h(env_cpu(env), ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);

    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
    stl_le_p(g2h(env_cpu(env), ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
{
    MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);

    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
    stq_le_p(g2h(env_cpu(env), ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
                     uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stb_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stw_be_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stl_be_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stq_be_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stw_le_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stl_le_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stq_le_data(env, ptr, val);
    clear_helper_retaddr();
}

/*
 * Code loads: helper_retaddr == 1 marks the access as a
 * translation-time fetch for handle_cpu_signal.
 */
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

/*
 * Do not allow unaligned operations to proceed. Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    /* Enforce qemu required alignment.
*/ 1235 if (unlikely(addr & (size - 1))) { 1236 cpu_loop_exit_atomic(env_cpu(env), retaddr); 1237 } 1238 void *ret = g2h(env_cpu(env), addr); 1239 set_helper_retaddr(retaddr); 1240 return ret; 1241 } 1242 1243 #include "atomic_common.c.inc" 1244 1245 /* 1246 * First set of functions passes in OI and RETADDR. 1247 * This makes them callable from other helpers. 1248 */ 1249 1250 #define ATOMIC_NAME(X) \ 1251 glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu) 1252 #define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0) 1253 #define ATOMIC_MMU_IDX MMU_USER_IDX 1254 1255 #define DATA_SIZE 1 1256 #include "atomic_template.h" 1257 1258 #define DATA_SIZE 2 1259 #include "atomic_template.h" 1260 1261 #define DATA_SIZE 4 1262 #include "atomic_template.h" 1263 1264 #ifdef CONFIG_ATOMIC64 1265 #define DATA_SIZE 8 1266 #include "atomic_template.h" 1267 #endif 1268 1269 #if HAVE_ATOMIC128 || HAVE_CMPXCHG128 1270 #define DATA_SIZE 16 1271 #include "atomic_template.h" 1272 #endif 1273