/*
 *  qemu user cpu loop
 *
 *  Copyright (c) 2003-2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu.h"
#include "user-internals.h"
#include "elf.h"
#include "cpu_loop-common.h"
#include "signal-common.h"
#include "semihosting/common-semi.h"
#include "target/arm/syndrome.h"

#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_code_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define put_user_data_u32(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap32(__x);                         \
        }                                               \
        put_user_u32(__x, (gaddr));                     \
    })

#define put_user_data_u16(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap16(__x);                         \
        }                                               \
        put_user_u16(__x, (gaddr));                     \
    })
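
/*
 * The "code" accessors above byte-swap according to the instruction
 * endianness (bswap_code()/SCTLR.B, i.e. legacy BE32, where instruction
 * words are stored big-endian), while the "data" accessors follow the
 * current data endianness (CPSR.E, as set up for BE8 binaries in
 * target_cpu_copy_regs() below).  The two can differ for BE8 images,
 * where data is big-endian but code remains little-endian, hence the
 * separate sets of accessors.
 */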

/*
 * Similar to code in accel/tcg/user-exec.c, but outside the execution loop.
 * Must be called with mmap_lock.
 * The PC we report is just the helper's entry address, which is as good as
 * anything; on a real kernel, what you get depends on which mode it uses.
 */
static void *atomic_mmu_lookup(CPUArchState *env, uint32_t addr, int size)
{
    int need_flags = PAGE_READ | PAGE_WRITE_ORG | PAGE_VALID;
    int page_flags;

    /* Enforce guest required alignment.  */
    if (unlikely(addr & (size - 1))) {
        force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
        return NULL;
    }

    page_flags = page_get_flags(addr);
    if (unlikely((page_flags & need_flags) != need_flags)) {
        force_sig_fault(TARGET_SIGSEGV,
                        page_flags & PAGE_VALID ?
                        TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR, addr);
        return NULL;
    }

    return g2h(env_cpu(env), addr);
}

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.rst
 * Input:
 * r0 = oldval
 * r1 = newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 */
static void arm_kernel_cmpxchg32_helper(CPUARMState *env)
{
    uint32_t oldval, newval, val, addr, cpsr, *host_addr;

    oldval = env->regs[0];
    newval = env->regs[1];
    addr = env->regs[2];

    mmap_lock();
    host_addr = atomic_mmu_lookup(env, addr, 4);
    if (!host_addr) {
        mmap_unlock();
        return;
    }

    val = qatomic_cmpxchg__nocheck(host_addr, oldval, newval);
    mmap_unlock();

    cpsr = (val == oldval) * CPSR_C;
    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    env->regs[0] = cpsr ? 0 : -1;
}

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.rst
 * Input:
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note that SEGVs in kernel helpers are a bit tricky: we can set the
 * data address sensibly, but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    uint32_t addr, cpsr;
    uint64_t *host_addr;

    addr = env->regs[0];
    if (get_user_u64(oldval, addr)) {
        goto segv;
    }

    addr = env->regs[1];
    if (get_user_u64(newval, addr)) {
        goto segv;
    }

    mmap_lock();
    addr = env->regs[2];
    host_addr = atomic_mmu_lookup(env, addr, 8);
    if (!host_addr) {
        mmap_unlock();
        return;
    }

#ifdef CONFIG_ATOMIC64
    val = qatomic_cmpxchg__nocheck(host_addr, oldval, newval);
    cpsr = (val == oldval) * CPSR_C;
#else
    /*
     * This only works between threads, not between processes, but since
     * the host has no 64-bit cmpxchg, it is the best that we can do.
     */
    start_exclusive();
    val = *host_addr;
    if (val == oldval) {
        *host_addr = newval;
        cpsr = CPSR_C;
    } else {
        cpsr = 0;
    }
    end_exclusive();
#endif
    mmap_unlock();

    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    env->regs[0] = cpsr ? 0 : -1;
    return;

 segv:
    force_sig_fault(TARGET_SIGSEGV,
                    page_get_flags(addr) & PAGE_VALID ?
                    TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR, addr);
}
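
/*
 * For reference, the kuser helpers handled below live at fixed addresses
 * near the top of the address space (the kernel's vector page at
 * 0xffff0000).  The kernel documentation cited above describes the
 * guest-side usage roughly as follows (an illustrative sketch only, not
 * code that QEMU itself uses):
 *
 *   typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *                                   volatile int *ptr);
 *   #define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 *   do {
 *       old = *ptr;
 *       new = old + val;
 *   } while (__kuser_cmpxchg(old, new, ptr));
 *
 * do_kernel_trap() below intercepts jumps to those entry addresses and
 * emulates each helper directly.
 */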

/* Handle a jump to the kernel code page.  */
static int
do_kernel_trap(CPUARMState *env)
{
    uint32_t addr;

    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        smp_mb();
        break;
    case 0xffff0fc0: /* __kernel_cmpxchg */
        arm_kernel_cmpxchg32_helper(env);
        break;
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = cpu_get_tls(env);
        break;
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);
        break;

    default:
        return 1;
    }
    /* Jump back to the caller.  */
    addr = env->regs[14];
    if (addr & 1) {
        env->thumb = 1;
        addr &= ~1;
    }
    env->regs[15] = addr;

    return 0;
}

static bool insn_is_linux_bkpt(uint32_t opcode, bool is_thumb)
{
    /*
     * Return true if this insn is one of the three magic UDF insns
     * which the kernel treats as breakpoint insns.
     */
    if (!is_thumb) {
        return (opcode & 0x0fffffff) == 0x07f001f0;
    } else {
        /*
         * Note that we get the two halves of the 32-bit T32 insn
         * in the opposite order to the value the kernel uses in
         * its undef_hook struct.
         */
        return ((opcode & 0xffff) == 0xde01) || (opcode == 0xa000f7f0);
    }
}
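
/*
 * FPA11 emulation uses the nwfpe code under linux-user/arm/nwfpe, derived
 * from the kernel's NetWinder floating point emulator.  As used below,
 * EmulateAll() returns 0 when it could not handle the instruction, a
 * positive value on success, and the negated softfloat exception flags
 * when an FP exception was raised.  The FPA FPSR keeps the cumulative
 * exception flags in its low bits and the matching trap-enable bits 16
 * bits higher, which is why the handler shifts the FPSR right by 16 to
 * line the two up.
 */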

static bool emulate_arm_fpa11(CPUARMState *env, uint32_t opcode)
{
    TaskState *ts = env_cpu(env)->opaque;
    int rc = EmulateAll(opcode, &ts->fpa, env);
    int raise, enabled;

    if (rc == 0) {
        /* Illegal instruction */
        return false;
    }
    if (rc > 0) {
        /* Everything ok. */
        env->regs[15] += 4;
        return true;
    }

    /* FP exception */
    rc = -rc;
    raise = 0;

    /* Translate softfloat flags to FPSR flags */
    if (rc & float_flag_invalid) {
        raise |= BIT_IOC;
    }
    if (rc & float_flag_divbyzero) {
        raise |= BIT_DZC;
    }
    if (rc & float_flag_overflow) {
        raise |= BIT_OFC;
    }
    if (rc & float_flag_underflow) {
        raise |= BIT_UFC;
    }
    if (rc & float_flag_inexact) {
        raise |= BIT_IXC;
    }

    /* Accumulate unenabled exceptions */
    enabled = ts->fpa.fpsr >> 16;
    ts->fpa.fpsr |= raise & ~enabled;

    if (raise & enabled) {
        /*
         * The kernel's nwfpe emulator does not pass a real si_code.
         * It merely uses send_sig(SIGFPE, current, 1), which results in
         * __send_signal() filling out SI_KERNEL with pid and uid 0 (under
         * the "SEND_SIG_PRIV" case).  That's what our force_sig() does.
         */
        force_sig(TARGET_SIGFPE);
    } else {
        env->regs[15] += 4;
    }
    return true;
}

void cpu_loop(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);
    int trapnr, si_signo, si_code;
    unsigned int n, insn;
    abi_ulong ret;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case EXCP_UDEF:
        case EXCP_NOCP:
        case EXCP_INVSTATE:
            {
                uint32_t opcode;

                /* We handle FPU emulation here, as the Linux kernel does. */
                /* Fetch the opcode. */
                /* FIXME - what to do if get_user() fails? */
                get_user_code_u32(opcode, env->regs[15], env);

                /*
                 * The Linux kernel treats some UDF patterns specially
                 * to use as breakpoints (instead of the architectural
                 * bkpt insn).  These should trigger a SIGTRAP rather
                 * than SIGILL.
                 */
                if (insn_is_linux_bkpt(opcode, env->thumb)) {
                    goto excp_debug;
                }

                if (!env->thumb && emulate_arm_fpa11(env, opcode)) {
                    break;
                }

                force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPN,
                                env->regs[15]);
            }
            break;
        case EXCP_SWI:
            {
                env->eabi = 1;
                /* system call */
                if (env->thumb) {
                    /* Thumb is always EABI style with syscall number in r7 */
                    n = env->regs[7];
                } else {
                    /*
                     * Equivalent of kernel CONFIG_OABI_COMPAT: read the
                     * Arm SVC insn to extract the immediate, which is the
                     * syscall number in OABI.
                     */
                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u32(insn, env->regs[15] - 4, env);
                    n = insn & 0xffffff;
                    if (n == 0) {
                        /* zero immediate: EABI, syscall number in r7 */
                        n = env->regs[7];
                    } else {
                        /*
                         * This XOR matches the kernel code: an immediate
                         * in the valid range (0x900000 .. 0x9fffff) is
                         * converted into the correct EABI-style syscall
                         * number (e.g. the OABI immediate 0x900004 becomes
                         * syscall 4); invalid immediates end up as values
                         * > 0xfffff and are handled below as out-of-range.
                         */
                        n ^= ARM_SYSCALL_BASE;
                        env->eabi = 0;
                    }
                }

                if (n > ARM_NR_BASE) {
                    switch (n) {
                    case ARM_NR_cacheflush:
                        /* nop */
                        break;
                    case ARM_NR_set_tls:
                        cpu_set_tls(env, env->regs[0]);
                        env->regs[0] = 0;
                        break;
                    case ARM_NR_breakpoint:
                        env->regs[15] -= env->thumb ? 2 : 4;
                        goto excp_debug;
                    case ARM_NR_get_tls:
                        env->regs[0] = cpu_get_tls(env);
                        break;
                    default:
                        if (n < 0xf0800) {
                            /*
                             * Syscalls 0xf0000..0xf07ff (or 0x9f0000..
                             * 0x9f07ff in OABI numbering) are defined
                             * to return -ENOSYS rather than raising
                             * SIGILL.  Note that we have already
                             * removed the 0x900000 prefix.
                             */
                            qemu_log_mask(LOG_UNIMP,
                                "qemu: Unsupported ARM syscall: 0x%x\n",
                                          n);
                            env->regs[0] = -TARGET_ENOSYS;
                        } else {
                            /*
                             * Otherwise SIGILL.  This includes any SWI with
                             * immediate not originally 0x9fxxxx, because
                             * of the earlier XOR.
                             * Like the real kernel, we report the addr of the
                             * SWI in the siginfo si_addr but leave the PC
                             * pointing at the insn after the SWI.
                             */
                            abi_ulong faultaddr = env->regs[15];
                            faultaddr -= env->thumb ? 2 : 4;
                            force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLTRP,
                                            faultaddr);
                        }
                        break;
                    }
                } else {
                    ret = do_syscall(env,
                                     n,
                                     env->regs[0],
                                     env->regs[1],
                                     env->regs[2],
                                     env->regs[3],
                                     env->regs[4],
                                     env->regs[5],
                                     0, 0);
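                    /*
                     * -QEMU_ERESTARTSYS means the syscall should be
                     * restarted: wind the PC back over the SWI/SVC insn
                     * (2 bytes in Thumb, 4 in Arm) so it is re-issued once
                     * any pending signal has been handled.
                     * -QEMU_ESIGRETURN is returned for sigreturn(), whose
                     * handling has already restored all guest registers,
                     * so r0 must not be overwritten with the return value.
                     */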
                    if (ret == -QEMU_ERESTARTSYS) {
                        env->regs[15] -= env->thumb ? 2 : 4;
                    } else if (ret != -QEMU_ESIGRETURN) {
                        env->regs[0] = ret;
                    }
                }
            }
            break;
        case EXCP_SEMIHOST:
            env->regs[0] = do_common_semihosting(cs);
            env->regs[15] += env->thumb ? 2 : 4;
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            /* For user-only we don't set TTBCR_EAE, so look at the FSR. */
            switch (env->exception.fsr & 0x1f) {
            case 0x1: /* Alignment */
                si_signo = TARGET_SIGBUS;
                si_code = TARGET_BUS_ADRALN;
                break;
            case 0x3: /* Access flag fault, level 1 */
            case 0x6: /* Access flag fault, level 2 */
            case 0x9: /* Domain fault, level 1 */
            case 0xb: /* Domain fault, level 2 */
            case 0xd: /* Permission fault, level 1 */
            case 0xf: /* Permission fault, level 2 */
                si_signo = TARGET_SIGSEGV;
                si_code = TARGET_SEGV_ACCERR;
                break;
            case 0x5: /* Translation fault, level 1 */
            case 0x7: /* Translation fault, level 2 */
                si_signo = TARGET_SIGSEGV;
                si_code = TARGET_SEGV_MAPERR;
                break;
            default:
                g_assert_not_reached();
            }
            force_sig_fault(si_signo, si_code, env->exception.vaddress);
            break;
        case EXCP_DEBUG:
        case EXCP_BKPT:
        excp_debug:
            force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->regs[15]);
            break;
        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env)) {
                goto error;
            }
            break;
        case EXCP_YIELD:
            /* nothing to do here for user-mode, just resume guest code */
            break;
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            break;
        default:
        error:
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n",
                      trapnr);
            abort();
        }
        process_pending_signals(env);
    }
}

void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    struct image_info *info = ts->info;
    int i;

    cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
               CPSRWriteByInstr);
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->uregs[i];
    }
#ifdef TARGET_WORDS_BIGENDIAN
    /* Enable BE8.  */
    if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
        && (info->elf_flags & EF_ARM_BE8)) {
        env->uncached_cpsr |= CPSR_E;
        env->cp15.sctlr_el[1] |= SCTLR_E0E;
    } else {
        env->cp15.sctlr_el[1] |= SCTLR_B;
    }
    arm_rebuild_hflags(env);
#endif

    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call.  */
    ts->heap_limit = 0;
}