/*
 * qemu user cpu loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu.h"
#include "elf.h"
#include "cpu_loop-common.h"

#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_code_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define put_user_data_u32(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap32(__x);                         \
        }                                               \
        put_user_u32(__x, (gaddr));                     \
    })

#define put_user_data_u16(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap16(__x);                         \
        }                                               \
        put_user_u16(__x, (gaddr));                     \
    })

/* Commpage handling -- there is no commpage for AArch64 */

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 * Input:
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    uint32_t addr, cpsr;
    target_siginfo_t info;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying. */
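    /*
     * start_exclusive() parks every other guest CPU (thread) until
     * end_exclusive() is called, so the load/compare/store sequence
     * below cannot be interleaved with another guest thread; this is
     * what gives the helper its atomicity in user-mode emulation.
     */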
    start_exclusive();
    cpsr = cpsr_read(env);
    addr = env->regs[2];

    if (get_user_u64(oldval, env->regs[0])) {
        env->exception.vaddress = env->regs[0];
        goto segv;
    }

    if (get_user_u64(newval, env->regs[1])) {
        env->exception.vaddress = env->regs[1];
        goto segv;
    }

    if (get_user_u64(val, addr)) {
        env->exception.vaddress = addr;
        goto segv;
    }

    if (val == oldval) {
        val = newval;

        if (put_user_u64(val, addr)) {
            env->exception.vaddress = addr;
            goto segv;
        }

        env->regs[0] = 0;
        cpsr |= CPSR_C;
    } else {
        env->regs[0] = -1;
        cpsr &= ~CPSR_C;
    }
    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    end_exclusive();
    return;

segv:
    end_exclusive();
    /* We get the PC of the entry address - which is as good as anything,
       on a real kernel what you get depends on which mode it uses. */
    info.si_signo = TARGET_SIGSEGV;
    info.si_errno = 0;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->exception.vaddress;
    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
}

/* Handle a jump to the kernel code page. */
static int
do_kernel_trap(CPUARMState *env)
{
    uint32_t addr;
    uint32_t cpsr;
    uint32_t val;

    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP. */
        break;
    case 0xffff0fc0: /* __kernel_cmpxchg */
        /* XXX: This only works between threads, not between processes.
           It's probably possible to implement this with native host
           operations. However things like ldrex/strex are much harder so
           there's not much point trying. */
        start_exclusive();
        cpsr = cpsr_read(env);
        addr = env->regs[2];
        /* FIXME: This should SEGV if the access fails. */
        if (get_user_u32(val, addr)) {
            val = ~env->regs[0];
        }
        if (val == env->regs[0]) {
            val = env->regs[1];
            /* FIXME: Check for segfaults. */
            put_user_u32(val, addr);
            env->regs[0] = 0;
            cpsr |= CPSR_C;
        } else {
            env->regs[0] = -1;
            cpsr &= ~CPSR_C;
        }
        cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
        end_exclusive();
        break;
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = cpu_get_tls(env);
        break;
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);
        break;

    default:
        return 1;
    }
    /* Jump back to the caller. */
    addr = env->regs[14];
    if (addr & 1) {
        env->thumb = 1;
        addr &= ~1;
    }
    env->regs[15] = addr;

    return 0;
}

void cpu_loop(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);
    int trapnr;
    unsigned int n, insn;
    target_siginfo_t info;
    uint32_t addr;
    abi_ulong ret;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case EXCP_UDEF:
        case EXCP_NOCP:
        case EXCP_INVSTATE:
            {
                TaskState *ts = cs->opaque;
                uint32_t opcode;
                int rc;
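
                /*
                 * EmulateAll() is the FPA11 floating-point emulator
                 * (derived from the kernel's nwfpe code).  The handling
                 * below treats a return of 0 as an unhandled (illegal)
                 * instruction, a negative value as the negated set of
                 * softfloat exception flags, and anything else as a
                 * successfully emulated instruction.
                 */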

                /* We handle the FPU emulation here, as the Linux kernel does. */
                /* Fetch the opcode of the trapping instruction. */
                /* FIXME - what to do if get_user() fails? */
                get_user_code_u32(opcode, env->regs[15], env);

                rc = EmulateAll(opcode, &ts->fpa, env);
                if (rc == 0) { /* illegal instruction */
                    info.si_signo = TARGET_SIGILL;
                    info.si_errno = 0;
                    info.si_code = TARGET_ILL_ILLOPN;
                    info._sifields._sigfault._addr = env->regs[15];
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                } else if (rc < 0) { /* FP exception */
                    int arm_fpe = 0;

                    /* Translate softfloat flags to FPSR flags. */
                    if (-rc & float_flag_invalid) {
                        arm_fpe |= BIT_IOC;
                    }
                    if (-rc & float_flag_divbyzero) {
                        arm_fpe |= BIT_DZC;
                    }
                    if (-rc & float_flag_overflow) {
                        arm_fpe |= BIT_OFC;
                    }
                    if (-rc & float_flag_underflow) {
                        arm_fpe |= BIT_UFC;
                    }
                    if (-rc & float_flag_inexact) {
                        arm_fpe |= BIT_IXC;
                    }

                    FPSR fpsr = ts->fpa.fpsr;

                    if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
                        info.si_signo = TARGET_SIGFPE;
                        info.si_errno = 0;

                        /* Ordered by priority, least first. */
                        if (arm_fpe & BIT_IXC) {
                            info.si_code = TARGET_FPE_FLTRES;
                        }
                        if (arm_fpe & BIT_UFC) {
                            info.si_code = TARGET_FPE_FLTUND;
                        }
                        if (arm_fpe & BIT_OFC) {
                            info.si_code = TARGET_FPE_FLTOVF;
                        }
                        if (arm_fpe & BIT_DZC) {
                            info.si_code = TARGET_FPE_FLTDIV;
                        }
                        if (arm_fpe & BIT_IOC) {
                            info.si_code = TARGET_FPE_FLTINV;
                        }

                        info._sifields._sigfault._addr = env->regs[15];
                        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                    } else {
                        env->regs[15] += 4;
                    }

                    /* Accumulate unenabled exceptions. */
                    if (!(fpsr & BIT_IXE) && (arm_fpe & BIT_IXC)) {
                        fpsr |= BIT_IXC;
                    }
                    if (!(fpsr & BIT_UFE) && (arm_fpe & BIT_UFC)) {
                        fpsr |= BIT_UFC;
                    }
                    if (!(fpsr & BIT_OFE) && (arm_fpe & BIT_OFC)) {
                        fpsr |= BIT_OFC;
                    }
                    if (!(fpsr & BIT_DZE) && (arm_fpe & BIT_DZC)) {
                        fpsr |= BIT_DZC;
                    }
                    if (!(fpsr & BIT_IOE) && (arm_fpe & BIT_IOC)) {
                        fpsr |= BIT_IOC;
                    }
                    ts->fpa.fpsr = fpsr;
                } else { /* everything OK */
                    /* increment PC */
                    env->regs[15] += 4;
                }
            }
            break;
        case EXCP_SWI:
        case EXCP_BKPT:
            {
                env->eabi = 1;
                /* system call */
                if (trapnr == EXCP_BKPT) {
                    if (env->thumb) {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u16(insn, env->regs[15], env);
                        n = insn & 0xff;
                        env->regs[15] += 2;
                    } else {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u32(insn, env->regs[15], env);
                        n = (insn & 0xf) | ((insn >> 4) & 0xff0);
                        env->regs[15] += 4;
                    }
                } else {
                    if (env->thumb) {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u16(insn, env->regs[15] - 2, env);
                        n = insn & 0xff;
                    } else {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u32(insn, env->regs[15] - 4, env);
                        n = insn & 0xffffff;
                    }
                }

                if (n == ARM_NR_cacheflush) {
                    /* nop */
                } else if (n == ARM_NR_semihosting
                           || n == ARM_NR_thumb_semihosting) {
                    env->regs[0] = do_arm_semihosting(env);
                } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) {
                    /* linux syscall */
                    if (env->thumb || n == 0) {
                        n = env->regs[7];
                    } else {
                        n -= ARM_SYSCALL_BASE;
                        env->eabi = 0;
                    }
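                    /*
                     * Numbers above ARM_NR_BASE are ARM-private syscalls
                     * (cacheflush, set_tls, breakpoint, get_tls) and are
                     * handled inline below; everything else goes to the
                     * generic do_syscall() dispatcher.  -TARGET_ERESTARTSYS
                     * rewinds the PC so the SWI instruction is re-executed
                     * once the pending signal has been dealt with.
                     */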
                    if (n > ARM_NR_BASE) {
                        switch (n) {
                        case ARM_NR_cacheflush:
                            /* nop */
                            break;
                        case ARM_NR_set_tls:
                            cpu_set_tls(env, env->regs[0]);
                            env->regs[0] = 0;
                            break;
                        case ARM_NR_breakpoint:
                            env->regs[15] -= env->thumb ? 2 : 4;
                            goto excp_debug;
                        case ARM_NR_get_tls:
                            env->regs[0] = cpu_get_tls(env);
                            break;
                        default:
                            gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
                                     n);
                            env->regs[0] = -TARGET_ENOSYS;
                            break;
                        }
                    } else {
                        ret = do_syscall(env,
                                         n,
                                         env->regs[0],
                                         env->regs[1],
                                         env->regs[2],
                                         env->regs[3],
                                         env->regs[4],
                                         env->regs[5],
                                         0, 0);
                        if (ret == -TARGET_ERESTARTSYS) {
                            env->regs[15] -= env->thumb ? 2 : 4;
                        } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                            env->regs[0] = ret;
                        }
                    }
                } else {
                    goto error;
                }
            }
            break;
        case EXCP_SEMIHOST:
            env->regs[0] = do_arm_semihosting(env);
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            addr = env->exception.vaddress;
            {
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                /* XXX: check env->error_code */
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = addr;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            }
            break;
        case EXCP_DEBUG:
        excp_debug:
            info.si_signo = TARGET_SIGTRAP;
            info.si_errno = 0;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env)) {
                goto error;
            }
            break;
        case EXCP_YIELD:
            /* nothing to do here for user-mode, just resume guest code */
            break;
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            break;
        default:
        error:
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n",
                      trapnr);
            abort();
        }
        process_pending_signals(env);
    }
}

void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    struct image_info *info = ts->info;
    int i;

    cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
               CPSRWriteByInstr);
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->uregs[i];
    }
#ifdef TARGET_WORDS_BIGENDIAN
    /* Enable BE8. */
    if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
        && (info->elf_flags & EF_ARM_BE8)) {
        env->uncached_cpsr |= CPSR_E;
        env->cp15.sctlr_el[1] |= SCTLR_E0E;
    } else {
        env->cp15.sctlr_el[1] |= SCTLR_B;
    }
#endif

    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call. */
    ts->heap_limit = 0;
}