/*
 *  qemu user main
 *
 *  Copyright (c) 2003-2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu-common.h"
#include "qemu/cache-utils.h"
#include "cpu.h"
#include "tcg.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
#include "elf.h"

char *exec_path;

int singlestep;
const char *filename;
const char *argv0;
int gdbstub_port;
envlist_t *envlist;
static const char *cpu_model;
unsigned long mmap_min_addr;
#if defined(CONFIG_USE_GUEST_BASE)
unsigned long guest_base;
int have_guest_base;
#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 */
# ifdef TARGET_MIPS
/* MIPS only supports 31 bits of virtual address space for user space */
unsigned long reserved_va = 0x77000000;
# else
unsigned long reserved_va = 0xf7000000;
# endif
#else
unsigned long reserved_va;
#endif
#endif

static void usage(void);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;

/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack. Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;

void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
{
    return -1;
}
#endif

/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work.  */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static int pending_cpus;
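/*
 * Illustrative usage of the primitives below (a sketch only; the helper
 * name is hypothetical and not used elsewhere in this file):
 *
 *     static void emulate_guest_atomic(CPUArchState *env)
 *     {
 *         start_exclusive();    // wait until no vCPU is running guest code
 *         ... read-modify-write guest memory on behalf of the guest ...
 *         end_exclusive();      // let the stopped vCPUs resume
 *     }
 *
 * do_kernel_trap() and the strex helpers further down follow this pattern.
 */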
/* Make sure everything is in a consistent state for calling fork().  */
void fork_start(void)
{
    pthread_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    pthread_mutex_lock(&exclusive_lock);
    mmap_fork_start();
}

void fork_end(int child)
{
    mmap_fork_end(child);
    if (child) {
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
            }
        }
        pending_cpus = 0;
        pthread_mutex_init(&exclusive_lock, NULL);
        pthread_mutex_init(&cpu_list_mutex, NULL);
        pthread_cond_init(&exclusive_cond, NULL);
        pthread_cond_init(&exclusive_resume, NULL);
        pthread_mutex_init(&tcg_ctx.tb_ctx.tb_lock, NULL);
        gdbserver_fork((CPUArchState *)thread_cpu->env_ptr);
    } else {
        pthread_mutex_unlock(&exclusive_lock);
        pthread_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
    }
}

/* Wait for pending exclusive operations to complete.  The exclusive lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_arm_exec.  */
static inline void start_exclusive(void)
{
    CPUState *other_cpu;

    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();

    pending_cpus = 1;
    /* Make all other cpus stop executing.  */
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {
            pending_cpus++;
            cpu_exit(other_cpu);
        }
    }
    if (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
    }
}

/* Finish an exclusive operation.  */
static inline void end_exclusive(void)
{
    pending_cpus = 0;
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&exclusive_lock);
}

/* Wait for exclusive ops to finish, and begin cpu execution.  */
static inline void cpu_exec_start(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();
    cpu->running = true;
    pthread_mutex_unlock(&exclusive_lock);
}
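/*
 * Illustrative pairing (a sketch mirroring the cpu_loop() functions below;
 * cpu_xxx_exec stands in for the per-target execution entry point):
 *
 *     cpu_exec_start(cs);
 *     trapnr = cpu_xxx_exec(env);
 *     cpu_exec_end(cs);
 *
 * Guest code only runs between these two calls, which is what allows
 * start_exclusive() to wait for every other vCPU to leave that window.
 */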
/* Mark cpu as not executing, and release pending exclusive ops.  */
static inline void cpu_exec_end(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
        pending_cpus--;
        if (pending_cpus == 1) {
            pthread_cond_signal(&exclusive_cond);
        }
    }
    exclusive_idle();
    pthread_mutex_unlock(&exclusive_lock);
}

void cpu_list_lock(void)
{
    pthread_mutex_lock(&cpu_list_mutex);
}

void cpu_list_unlock(void)
{
    pthread_mutex_unlock(&cpu_list_mutex);
}


#ifdef TARGET_I386
/***********************************************************/
/* CPUX86 core interface */

void cpu_smm_update(CPUX86State *env)
{
}

uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpu_get_real_ticks();
}

static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
                     int flags)
{
    unsigned int e1, e2;
    uint32_t *p;
    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
    e2 |= flags;
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}

static uint64_t *idt_table;
#ifdef TARGET_X86_64
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
    p[2] = tswap32(addr >> 32);
    p[3] = 0;
}
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
}
#else
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate(idt_table + n, 0, dpl, 0, 0);
}
#endif

void cpu_loop(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    int trapnr;
    abi_ulong pc;
    target_siginfo_t info;

    for(;;) {
        trapnr = cpu_x86_exec(env);
        switch(trapnr) {
        case 0x80:
            /* linux syscall from int $0x80 */
            env->regs[R_EAX] = do_syscall(env,
                                          env->regs[R_EAX],
                                          env->regs[R_EBX],
                                          env->regs[R_ECX],
                                          env->regs[R_EDX],
                                          env->regs[R_ESI],
                                          env->regs[R_EDI],
                                          env->regs[R_EBP],
                                          0, 0);
            break;
#ifndef TARGET_ABI32
        case EXCP_SYSCALL:
            /* linux syscall from syscall instruction */
            env->regs[R_EAX] = do_syscall(env,
                                          env->regs[R_EAX],
                                          env->regs[R_EDI],
                                          env->regs[R_ESI],
                                          env->regs[R_EDX],
                                          env->regs[10],
                                          env->regs[8],
                                          env->regs[9],
                                          0, 0);
            env->eip = env->exception_next_eip;
            break;
#endif
        case EXCP0B_NOSEG:
        case EXCP0C_STACK:
            info.si_signo = SIGBUS;
            info.si_errno = 0;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP0D_GPF:
            /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_fault(env);
            } else
#endif
            {
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SI_KERNEL;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP0E_PAGE:
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            if (!(env->error_code & 1))
                info.si_code = TARGET_SEGV_MAPERR;
            else
                info.si_code = TARGET_SEGV_ACCERR;
            info._sifields._sigfault._addr = env->cr[2];
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP00_DIVZ:
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            } else
#endif
            {
                /* division by zero */
                info.si_signo = SIGFPE;
                info.si_errno = 0;
                info.si_code = TARGET_FPE_INTDIV;
                info._sifields._sigfault._addr = env->eip;
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP01_DB:
        case EXCP03_INT3:
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            } else
#endif
            {
                info.si_signo = SIGTRAP;
                info.si_errno = 0;
                if (trapnr == EXCP01_DB) {
                    info.si_code = TARGET_TRAP_BRKPT;
                    info._sifields._sigfault._addr = env->eip;
                } else {
                    info.si_code = TARGET_SI_KERNEL;
                    info._sifields._sigfault._addr = 0;
                }
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP04_INTO:
        case EXCP05_BOUND:
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            } else
#endif
            {
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SI_KERNEL;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP06_ILLOP:
            info.si_signo = SIGILL;
            info.si_errno = 0;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_DEBUG:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig)
                  {
                    info.si_signo = sig;
                    info.si_errno = 0;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
                  }
            }
            break;
        default:
            pc = env->segs[R_CS].base + env->eip;
            fprintf(stderr, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
                    (long)pc, trapnr);
            abort();
        }
        process_pending_signals(env);
    }
}
#endif

#ifdef TARGET_ARM

#define get_user_code_u32(x, gaddr, doswap)             \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && (doswap)) {                         \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_code_u16(x, gaddr, doswap)             \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && (doswap)) {                         \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#ifdef TARGET_ABI32
/* Commpage handling -- there is no commpage for AArch64 */

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 * Input:
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    uint32_t addr, cpsr;
    target_siginfo_t info;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying.  */
    start_exclusive();
    cpsr = cpsr_read(env);
    addr = env->regs[2];

    if (get_user_u64(oldval, env->regs[0])) {
        env->exception.vaddress = env->regs[0];
        goto segv;
    };

    if (get_user_u64(newval, env->regs[1])) {
        env->exception.vaddress = env->regs[1];
        goto segv;
    };

    if (get_user_u64(val, addr)) {
        env->exception.vaddress = addr;
        goto segv;
    }

    if (val == oldval) {
        val = newval;

        if (put_user_u64(val, addr)) {
            env->exception.vaddress = addr;
            goto segv;
        };

        env->regs[0] = 0;
        cpsr |= CPSR_C;
    } else {
        env->regs[0] = -1;
        cpsr &= ~CPSR_C;
    }
    cpsr_write(env, cpsr, CPSR_C);
    end_exclusive();
    return;

segv:
    end_exclusive();
    /* We get the PC of the entry address - which is as good as anything,
       on a real kernel what you get depends on which mode it uses. */
    info.si_signo = SIGSEGV;
    info.si_errno = 0;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->exception.vaddress;
    queue_signal(env, info.si_signo, &info);

    end_exclusive();
}

/* Handle a jump to the kernel code page.  */
static int
do_kernel_trap(CPUARMState *env)
{
    uint32_t addr;
    uint32_t cpsr;
    uint32_t val;

    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP.  */
        break;
    case 0xffff0fc0: /* __kernel_cmpxchg */
         /* XXX: This only works between threads, not between processes.
            It's probably possible to implement this with native host
            operations. However things like ldrex/strex are much harder so
            there's not much point trying.  */
        start_exclusive();
        cpsr = cpsr_read(env);
        addr = env->regs[2];
        /* FIXME: This should SEGV if the access fails.  */
        if (get_user_u32(val, addr))
            val = ~env->regs[0];
        if (val == env->regs[0]) {
            val = env->regs[1];
            /* FIXME: Check for segfaults.  */
            put_user_u32(val, addr);
            env->regs[0] = 0;
            cpsr |= CPSR_C;
        } else {
            env->regs[0] = -1;
            cpsr &= ~CPSR_C;
        }
        cpsr_write(env, cpsr, CPSR_C);
        end_exclusive();
        break;
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = env->cp15.tpidrro_el0;
        break;
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);
        break;

    default:
        return 1;
    }
    /* Jump back to the caller.  */
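    /* Bit 0 of the return address in r14 selects Thumb state, as for a BX;
     * the check below implements exactly that.
     */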
    addr = env->regs[14];
    if (addr & 1) {
        env->thumb = 1;
        addr &= ~1;
    }
    env->regs[15] = addr;

    return 0;
}

/* Store exclusive handling for AArch32 */
static int do_strex(CPUARMState *env)
{
    uint64_t val;
    int size;
    int rc = 1;
    int segv = 0;
    uint32_t addr;
    start_exclusive();
    if (env->exclusive_addr != env->exclusive_test) {
        goto fail;
    }
    /* We know we're always AArch32 so the address is in uint32_t range
     * unless it was the -1 exclusive-monitor-lost value (which won't
     * match exclusive_test above).
     */
    assert(extract64(env->exclusive_addr, 32, 32) == 0);
    addr = env->exclusive_addr;
    size = env->exclusive_info & 0xf;
    switch (size) {
    case 0:
        segv = get_user_u8(val, addr);
        break;
    case 1:
        segv = get_user_u16(val, addr);
        break;
    case 2:
    case 3:
        segv = get_user_u32(val, addr);
        break;
    default:
        abort();
    }
    if (segv) {
        env->exception.vaddress = addr;
        goto done;
    }
    if (size == 3) {
        uint32_t valhi;
        segv = get_user_u32(valhi, addr + 4);
        if (segv) {
            env->exception.vaddress = addr + 4;
            goto done;
        }
        val = deposit64(val, 32, 32, valhi);
    }
    if (val != env->exclusive_val) {
        goto fail;
    }

    val = env->regs[(env->exclusive_info >> 8) & 0xf];
    switch (size) {
    case 0:
        segv = put_user_u8(val, addr);
        break;
    case 1:
        segv = put_user_u16(val, addr);
        break;
    case 2:
    case 3:
        segv = put_user_u32(val, addr);
        break;
    }
    if (segv) {
        env->exception.vaddress = addr;
        goto done;
    }
    if (size == 3) {
        val = env->regs[(env->exclusive_info >> 12) & 0xf];
        segv = put_user_u32(val, addr + 4);
        if (segv) {
            env->exception.vaddress = addr + 4;
            goto done;
        }
    }
    rc = 0;
fail:
    env->regs[15] += 4;
    env->regs[(env->exclusive_info >> 4) & 0xf] = rc;
done:
    end_exclusive();
    return segv;
}

void cpu_loop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int trapnr;
    unsigned int n, insn;
    target_siginfo_t info;
    uint32_t addr;

    for(;;) {
        cpu_exec_start(cs);
        trapnr = cpu_arm_exec(env);
        cpu_exec_end(cs);
        switch(trapnr) {
        case EXCP_UDEF:
            {
                TaskState *ts = cs->opaque;
                uint32_t opcode;
                int rc;

                /* we handle the FPU emulation here, as Linux */
                /* we get the opcode */
                /* FIXME - what to do if get_user() fails? */
                get_user_code_u32(opcode, env->regs[15], env->bswap_code);

                rc = EmulateAll(opcode, &ts->fpa, env);
                if (rc == 0) { /* illegal instruction */
                    info.si_signo = SIGILL;
                    info.si_errno = 0;
                    info.si_code = TARGET_ILL_ILLOPN;
                    info._sifields._sigfault._addr = env->regs[15];
                    queue_signal(env, info.si_signo, &info);
                } else if (rc < 0) { /* FP exception */
                    int arm_fpe = 0;

                    /* translate softfloat flags to FPSR flags */
                    if (-rc & float_flag_invalid)
                        arm_fpe |= BIT_IOC;
                    if (-rc & float_flag_divbyzero)
                        arm_fpe |= BIT_DZC;
                    if (-rc & float_flag_overflow)
                        arm_fpe |= BIT_OFC;
                    if (-rc & float_flag_underflow)
                        arm_fpe |= BIT_UFC;
                    if (-rc & float_flag_inexact)
                        arm_fpe |= BIT_IXC;

                    FPSR fpsr = ts->fpa.fpsr;
                    //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);

                    if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
                        info.si_signo = SIGFPE;
                        info.si_errno = 0;

                        /* ordered by priority, least first */
                        if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
                        if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
                        if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
                        if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
                        if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;

                        info._sifields._sigfault._addr = env->regs[15];
                        queue_signal(env, info.si_signo, &info);
                    } else {
                        env->regs[15] += 4;
                    }

                    /* accumulate unenabled exceptions */
                    if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
                        fpsr |= BIT_IXC;
                    if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
                        fpsr |= BIT_UFC;
                    if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
                        fpsr |= BIT_OFC;
                    if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
                        fpsr |= BIT_DZC;
                    if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))
                        fpsr |= BIT_IOC;
                    ts->fpa.fpsr = fpsr;
                } else { /* everything OK */
                    /* increment PC */
                    env->regs[15] += 4;
                }
            }
            break;
        case EXCP_SWI:
        case EXCP_BKPT:
            {
                env->eabi = 1;
                /* system call */
                if (trapnr == EXCP_BKPT) {
                    if (env->thumb) {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u16(insn, env->regs[15], env->bswap_code);
                        n = insn & 0xff;
                        env->regs[15] += 2;
                    } else {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u32(insn, env->regs[15], env->bswap_code);
                        n = (insn & 0xf) | ((insn >> 4) & 0xff0);
                        env->regs[15] += 4;
                    }
                } else {
                    if (env->thumb) {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u16(insn, env->regs[15] - 2,
                                          env->bswap_code);
                        n = insn & 0xff;
                    } else {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u32(insn, env->regs[15] - 4,
                                          env->bswap_code);
                        n = insn & 0xffffff;
                    }
                }

                if (n == ARM_NR_cacheflush) {
                    /* nop */
                } else if (n == ARM_NR_semihosting
                           || n == ARM_NR_thumb_semihosting) {
                    env->regs[0] = do_arm_semihosting (env);
                } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) {
                    /* linux syscall */
                    if (env->thumb || n == 0) {
                        n = env->regs[7];
                    } else {
                        n -= ARM_SYSCALL_BASE;
                        env->eabi = 0;
                    }
                    if ( n > ARM_NR_BASE) {
                        switch (n) {
                        case ARM_NR_cacheflush:
                            /* nop */
                            break;
                        case ARM_NR_set_tls:
                            cpu_set_tls(env, env->regs[0]);
                            env->regs[0] = 0;
                            break;
                        default:
                            gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
                                     n);
                            env->regs[0] = -TARGET_ENOSYS;
                            break;
                        }
                    } else {
                        env->regs[0] = do_syscall(env,
                                                  n,
                                                  env->regs[0],
                                                  env->regs[1],
                                                  env->regs[2],
                                                  env->regs[3],
                                                  env->regs[4],
                                                  env->regs[5],
                                                  0, 0);
                    }
                } else {
                    goto error;
                }
            }
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_STREX:
            if (!do_strex(env)) {
                break;
            }
            /* fall through for segv */
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            addr = env->exception.vaddress;
            {
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* XXX: check env->error_code */
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = addr;
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP_DEBUG:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig)
                  {
                    info.si_signo = sig;
                    info.si_errno = 0;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
                  }
            }
            break;
        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env))
                goto error;
            break;
        default:
        error:
            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
                    trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
            abort();
        }
        process_pending_signals(env);
    }
}

#else

/*
 * Handle AArch64 store-release exclusive
 *
 * rs = gets the status result of store exclusive
 * rt = is the register that is stored
 * rt2 = is the second register store (in STP)
 *
 */
static int do_strex_a64(CPUARMState *env)
{
    uint64_t val;
    int size;
    bool is_pair;
    int rc = 1;
    int segv = 0;
    uint64_t addr;
    int rs, rt, rt2;

    start_exclusive();
    /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
    size = extract32(env->exclusive_info, 0, 2);
    is_pair = extract32(env->exclusive_info, 2, 1);
    rs = extract32(env->exclusive_info, 4, 5);
    rt = extract32(env->exclusive_info, 9, 5);
    rt2 = extract32(env->exclusive_info, 14, 5);

    addr = env->exclusive_addr;

    if (addr != env->exclusive_test) {
        goto finish;
    }

    switch (size) {
    case 0:
        segv = get_user_u8(val, addr);
        break;
    case 1:
        segv = get_user_u16(val, addr);
        break;
    case 2:
        segv = get_user_u32(val, addr);
        break;
    case 3:
        segv = get_user_u64(val, addr);
        break;
    default:
        abort();
    }
    if (segv) {
        env->exception.vaddress = addr;
        goto error;
    }
    if (val != env->exclusive_val) {
        goto finish;
    }
    if (is_pair) {
        if (size == 2) {
            segv = get_user_u32(val, addr + 4);
        } else {
            segv = get_user_u64(val, addr + 8);
        }
        if (segv) {
            env->exception.vaddress = addr + (size == 2 ? 4 : 8);
            goto error;
        }
        if (val != env->exclusive_high) {
            goto finish;
        }
    }
    /* handle the zero register */
    val = rt == 31 ? 0 : env->xregs[rt];
    switch (size) {
    case 0:
        segv = put_user_u8(val, addr);
        break;
    case 1:
        segv = put_user_u16(val, addr);
        break;
    case 2:
        segv = put_user_u32(val, addr);
        break;
    case 3:
        segv = put_user_u64(val, addr);
        break;
    }
    if (segv) {
        goto error;
    }
    if (is_pair) {
        /* handle the zero register */
        val = rt2 == 31 ? 0 : env->xregs[rt2];
        if (size == 2) {
            segv = put_user_u32(val, addr + 4);
        } else {
            segv = put_user_u64(val, addr + 8);
        }
        if (segv) {
            env->exception.vaddress = addr + (size == 2 ? 4 : 8);
            goto error;
        }
    }
    rc = 0;
finish:
    env->pc += 4;
    /* rs == 31 encodes a write to the ZR, thus throwing away
     * the status return. This is rather silly but valid.
     */
    if (rs < 31) {
        env->xregs[rs] = rc;
    }
error:
    /* instruction faulted, PC does not advance */
    /* either way a strex releases any exclusive lock we have */
    env->exclusive_addr = -1;
    end_exclusive();
    return segv;
}

/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int trapnr, sig;
    target_siginfo_t info;
    uint32_t addr;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_arm_exec(env);
        cpu_exec_end(cs);

        switch (trapnr) {
        case EXCP_SWI:
            env->xregs[0] = do_syscall(env,
                                       env->xregs[8],
                                       env->xregs[0],
                                       env->xregs[1],
                                       env->xregs[2],
                                       env->xregs[3],
                                       env->xregs[4],
                                       env->xregs[5],
                                       0, 0);
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_UDEF:
            info.si_signo = SIGILL;
            info.si_errno = 0;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP_STREX:
            if (!do_strex_a64(env)) {
                break;
            }
            /* fall through for segv */
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            addr = env->exception.vaddress;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP_DEBUG:
        case EXCP_BKPT:
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            if (sig) {
                info.si_signo = sig;
                info.si_errno = 0;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            }
            break;
        default:
            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
                    trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
            abort();
        }
        process_pending_signals(env);
        /* Exception return on AArch64 always clears the exclusive monitor,
         * so any return to running guest code implies this.
         * A strex (successful or otherwise) also clears the monitor, so
         * we don't need to specialcase EXCP_STREX.
         */
        env->exclusive_addr = -1;
    }
}
#endif /* ndef TARGET_ABI32 */

#endif

#ifdef TARGET_UNICORE32

void cpu_loop(CPUUniCore32State *env)
{
    CPUState *cs = CPU(uc32_env_get_cpu(env));
    int trapnr;
    unsigned int n, insn;
    target_siginfo_t info;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = uc32_cpu_exec(env);
        cpu_exec_end(cs);
        switch (trapnr) {
        case UC32_EXCP_PRIV:
            {
                /* system call */
                get_user_u32(insn, env->regs[31] - 4);
                n = insn & 0xffffff;

                if (n >= UC32_SYSCALL_BASE) {
                    /* linux syscall */
                    n -= UC32_SYSCALL_BASE;
                    if (n == UC32_SYSCALL_NR_set_tls) {
                        cpu_set_tls(env, env->regs[0]);
                        env->regs[0] = 0;
                    } else {
                        env->regs[0] = do_syscall(env,
                                                  n,
                                                  env->regs[0],
                                                  env->regs[1],
                                                  env->regs[2],
                                                  env->regs[3],
                                                  env->regs[4],
                                                  env->regs[5],
                                                  0, 0);
                    }
                } else {
                    goto error;
                }
            }
            break;
        case UC32_EXCP_DTRAP:
        case UC32_EXCP_ITRAP:
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->cp0.c4_faultaddr;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_DEBUG:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig) {
                    info.si_signo = sig;
                    info.si_errno = 0;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
                }
            }
            break;
        default:
            goto error;
        }
        process_pending_signals(env);
    }

error:
    fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
    cpu_dump_state(cs, stderr, fprintf, 0);
    abort();
}
#endif

#ifdef TARGET_SPARC
#define SPARC64_STACK_BIAS 2047

//#define DEBUG_WIN

/* WARNING: dealing with register windows _is_ complicated. More info
   can be found at http://www.sics.se/~psm/sparcstack.html */
static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
{
    index = (index + cwp * 16) % (16 * env->nwindows);
    /* wrap handling : if cwp is on the last window, then we use the
       registers 'after' the end */
    if (index < 8 && env->cwp == env->nwindows - 1)
        index += 16 * env->nwindows;
    return index;
}

/* save the register window 'cwp1' */
static inline void save_window_offset(CPUSPARCState *env, int cwp1)
{
    unsigned int i;
    abi_ulong sp_ptr;

    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    if (sp_ptr & 3)
        sp_ptr += SPARC64_STACK_BIAS;
#endif
#if defined(DEBUG_WIN)
    printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
           sp_ptr, cwp1);
#endif
    for(i = 0; i < 16; i++) {
        /* FIXME - what to do if put_user() fails? */
        put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
    }
}

static void save_window(CPUSPARCState *env)
{
#ifndef TARGET_SPARC64
    unsigned int new_wim;
    new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
    env->wim = new_wim;
#else
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
    env->cansave++;
    env->canrestore--;
#endif
}

static void restore_window(CPUSPARCState *env)
{
#ifndef TARGET_SPARC64
    unsigned int new_wim;
#endif
    unsigned int i, cwp1;
    abi_ulong sp_ptr;

#ifndef TARGET_SPARC64
    new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
#endif

    /* restore the invalid window */
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    if (sp_ptr & 3)
        sp_ptr += SPARC64_STACK_BIAS;
#endif
#if defined(DEBUG_WIN)
    printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
           sp_ptr, cwp1);
#endif
    for(i = 0; i < 16; i++) {
        /* FIXME - what to do if get_user() fails? */
        get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
    }
#ifdef TARGET_SPARC64
    env->canrestore++;
    if (env->cleanwin < env->nwindows - 1)
        env->cleanwin++;
    env->cansave--;
#else
    env->wim = new_wim;
#endif
}

static void flush_windows(CPUSPARCState *env)
{
    int offset, cwp1;

    offset = 1;
    for(;;) {
        /* if restore would invoke restore_window(), then we can stop */
        cwp1 = cpu_cwp_inc(env, env->cwp + offset);
#ifndef TARGET_SPARC64
        if (env->wim & (1 << cwp1))
            break;
#else
        if (env->canrestore == 0)
            break;
        env->cansave++;
        env->canrestore--;
#endif
        save_window_offset(env, cwp1);
        offset++;
    }
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
#ifndef TARGET_SPARC64
    /* set wim so that restore will reload the registers */
    env->wim = 1 << cwp1;
#endif
#if defined(DEBUG_WIN)
    printf("flush_windows: nb=%d\n", offset - 1);
#endif
}

void cpu_loop (CPUSPARCState *env)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    int trapnr;
    abi_long ret;
    target_siginfo_t info;

    while (1) {
        trapnr = cpu_sparc_exec (env);

        /* Compute PSR before exposing state.  */
        if (env->cc_op != CC_OP_FLAGS) {
            cpu_get_psr(env);
        }

        switch (trapnr) {
#ifndef TARGET_SPARC64
        case 0x88:
        case 0x90:
#else
        case 0x110:
        case 0x16d:
#endif
            ret = do_syscall (env, env->gregs[1],
                              env->regwptr[0], env->regwptr[1],
                              env->regwptr[2], env->regwptr[3],
                              env->regwptr[4], env->regwptr[5],
                              0, 0);
            if ((abi_ulong)ret >= (abi_ulong)(-515)) {
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
                env->xcc |= PSR_CARRY;
#else
                env->psr |= PSR_CARRY;
#endif
                ret = -ret;
            } else {
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
                env->xcc &= ~PSR_CARRY;
#else
                env->psr &= ~PSR_CARRY;
#endif
            }
            env->regwptr[0] = ret;
            /* next instruction */
            env->pc = env->npc;
            env->npc = env->npc + 4;
            break;
        case 0x83: /* flush windows */
#ifdef TARGET_ABI32
        case 0x103:
#endif
            flush_windows(env);
            /* next instruction */
            env->pc = env->npc;
            env->npc = env->npc + 4;
            break;
#ifndef TARGET_SPARC64
        case TT_WIN_OVF: /* window overflow */
            save_window(env);
            break;
        case TT_WIN_UNF: /* window underflow */
            restore_window(env);
            break;
        case TT_TFAULT:
        case TT_DFAULT:
            {
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                /* XXX: check env->error_code */
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->mmuregs[4];
                queue_signal(env, info.si_signo, &info);
            }
            break;
#else
        case TT_SPILL: /* window overflow */
            save_window(env);
            break;
        case TT_FILL: /* window underflow */
            restore_window(env);
            break;
        case TT_TFAULT:
        case TT_DFAULT:
            {
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                /* XXX: check env->error_code */
                info.si_code = TARGET_SEGV_MAPERR;
                if (trapnr == TT_DFAULT)
                    info._sifields._sigfault._addr = env->dmmuregs[4];
                else
                    info._sifields._sigfault._addr = cpu_tsptr(env)->tpc;
                queue_signal(env, info.si_signo, &info);
            }
            break;
#ifndef TARGET_ABI32
        case 0x16e:
            flush_windows(env);
            sparc64_get_context(env);
            break;
        case 0x16f:
            flush_windows(env);
            sparc64_set_context(env);
            break;
#endif
#endif
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case TT_ILL_INSN:
            {
                info.si_signo = TARGET_SIGILL;
                info.si_errno = 0;
                info.si_code = TARGET_ILL_ILLOPC;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP_DEBUG:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig)
                  {
                    info.si_signo = sig;
                    info.si_errno = 0;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
                  }
            }
            break;
        default:
            printf ("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
            exit (1);
        }
        process_pending_signals (env);
    }
}

#endif

#ifdef TARGET_PPC
static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)
{
    /* TO FIX */
    return 0;
}

uint64_t cpu_ppc_load_tbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_tbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}
uint64_t cpu_ppc_load_atbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_atbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}

uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
__attribute__ (( alias ("cpu_ppc_load_tbu") ));

uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env)
{
    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
}

/* XXX: to be fixed */
int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
{
    return -1;
}

int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
{
    return -1;
}

#define EXCP_DUMP(env, fmt, ...)                                        \
do {                                                                    \
    CPUState *cs = ENV_GET_CPU(env);                                    \
    fprintf(stderr, fmt , ## __VA_ARGS__);                              \
    cpu_dump_state(cs, stderr, fprintf, 0);                             \
    qemu_log(fmt, ## __VA_ARGS__);                                      \
    if (qemu_log_enabled()) {                                           \
        log_cpu_state(cs, 0);                                           \
    }                                                                   \
} while (0)

static int do_store_exclusive(CPUPPCState *env)
{
    target_ulong addr;
    target_ulong page_addr;
    target_ulong val, val2 __attribute__((unused)) = 0;
    int flags;
    int segv = 0;

    addr = env->reserve_ea;
    page_addr = addr & TARGET_PAGE_MASK;
    start_exclusive();
    mmap_lock();
    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        segv = 1;
    } else {
        int reg = env->reserve_info & 0x1f;
        int size = env->reserve_info >> 5;
        int stored = 0;

        if (addr == env->reserve_addr) {
            switch (size) {
            case 1: segv = get_user_u8(val, addr); break;
            case 2: segv = get_user_u16(val, addr); break;
            case 4: segv = get_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
            case 8: segv = get_user_u64(val, addr); break;
            case 16: {
                segv = get_user_u64(val, addr);
                if (!segv) {
                    segv = get_user_u64(val2, addr + 8);
                }
                break;
            }
#endif
            default: abort();
            }
            if (!segv && val == env->reserve_val) {
                val = env->gpr[reg];
                switch (size) {
                case 1: segv = put_user_u8(val, addr); break;
                case 2: segv = put_user_u16(val, addr); break;
                case 4: segv = put_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
                case 8: segv = put_user_u64(val, addr); break;
                case 16: {
                    if (val2 == env->reserve_val2) {
                        if (msr_le) {
                            val2 = val;
                            val = env->gpr[reg+1];
                        } else {
                            val2 = env->gpr[reg+1];
                        }
                        segv = put_user_u64(val, addr);
                        if (!segv) {
                            segv = put_user_u64(val2, addr + 8);
                        }
                    }
                    break;
                }
#endif
                default: abort();
                }
                if (!segv) {
                    stored = 1;
                }
            }
        }
        env->crf[0] = (stored << 1) | xer_so;
        env->reserve_addr = (target_ulong)-1;
    }
    if (!segv) {
        env->nip += 4;
    }
    mmap_unlock();
    end_exclusive();
    return segv;
}

void cpu_loop(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_siginfo_t info;
    int trapnr;
    target_ulong ret;

    for(;;) {
        cpu_exec_start(cs);
        trapnr = cpu_ppc_exec(env);
        cpu_exec_end(cs);
        switch(trapnr) {
        case POWERPC_EXCP_NONE:
            /* Just go on */
            break;
        case POWERPC_EXCP_CRITICAL: /* Critical input                        */
            cpu_abort(cs, "Critical interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_MCHECK:   /* Machine check exception               */
            cpu_abort(cs, "Machine check exception while in user mode. "
" 1583 "Aborting\n"); 1584 break; 1585 case POWERPC_EXCP_DSI: /* Data storage exception */ 1586 EXCP_DUMP(env, "Invalid data memory access: 0x" TARGET_FMT_lx "\n", 1587 env->spr[SPR_DAR]); 1588 /* XXX: check this. Seems bugged */ 1589 switch (env->error_code & 0xFF000000) { 1590 case 0x40000000: 1591 info.si_signo = TARGET_SIGSEGV; 1592 info.si_errno = 0; 1593 info.si_code = TARGET_SEGV_MAPERR; 1594 break; 1595 case 0x04000000: 1596 info.si_signo = TARGET_SIGILL; 1597 info.si_errno = 0; 1598 info.si_code = TARGET_ILL_ILLADR; 1599 break; 1600 case 0x08000000: 1601 info.si_signo = TARGET_SIGSEGV; 1602 info.si_errno = 0; 1603 info.si_code = TARGET_SEGV_ACCERR; 1604 break; 1605 default: 1606 /* Let's send a regular segfault... */ 1607 EXCP_DUMP(env, "Invalid segfault errno (%02x)\n", 1608 env->error_code); 1609 info.si_signo = TARGET_SIGSEGV; 1610 info.si_errno = 0; 1611 info.si_code = TARGET_SEGV_MAPERR; 1612 break; 1613 } 1614 info._sifields._sigfault._addr = env->nip; 1615 queue_signal(env, info.si_signo, &info); 1616 break; 1617 case POWERPC_EXCP_ISI: /* Instruction storage exception */ 1618 EXCP_DUMP(env, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx 1619 "\n", env->spr[SPR_SRR0]); 1620 /* XXX: check this */ 1621 switch (env->error_code & 0xFF000000) { 1622 case 0x40000000: 1623 info.si_signo = TARGET_SIGSEGV; 1624 info.si_errno = 0; 1625 info.si_code = TARGET_SEGV_MAPERR; 1626 break; 1627 case 0x10000000: 1628 case 0x08000000: 1629 info.si_signo = TARGET_SIGSEGV; 1630 info.si_errno = 0; 1631 info.si_code = TARGET_SEGV_ACCERR; 1632 break; 1633 default: 1634 /* Let's send a regular segfault... */ 1635 EXCP_DUMP(env, "Invalid segfault errno (%02x)\n", 1636 env->error_code); 1637 info.si_signo = TARGET_SIGSEGV; 1638 info.si_errno = 0; 1639 info.si_code = TARGET_SEGV_MAPERR; 1640 break; 1641 } 1642 info._sifields._sigfault._addr = env->nip - 4; 1643 queue_signal(env, info.si_signo, &info); 1644 break; 1645 case POWERPC_EXCP_EXTERNAL: /* External input */ 1646 cpu_abort(cs, "External interrupt while in user mode. 
" 1647 "Aborting\n"); 1648 break; 1649 case POWERPC_EXCP_ALIGN: /* Alignment exception */ 1650 EXCP_DUMP(env, "Unaligned memory access\n"); 1651 /* XXX: check this */ 1652 info.si_signo = TARGET_SIGBUS; 1653 info.si_errno = 0; 1654 info.si_code = TARGET_BUS_ADRALN; 1655 info._sifields._sigfault._addr = env->nip - 4; 1656 queue_signal(env, info.si_signo, &info); 1657 break; 1658 case POWERPC_EXCP_PROGRAM: /* Program exception */ 1659 /* XXX: check this */ 1660 switch (env->error_code & ~0xF) { 1661 case POWERPC_EXCP_FP: 1662 EXCP_DUMP(env, "Floating point program exception\n"); 1663 info.si_signo = TARGET_SIGFPE; 1664 info.si_errno = 0; 1665 switch (env->error_code & 0xF) { 1666 case POWERPC_EXCP_FP_OX: 1667 info.si_code = TARGET_FPE_FLTOVF; 1668 break; 1669 case POWERPC_EXCP_FP_UX: 1670 info.si_code = TARGET_FPE_FLTUND; 1671 break; 1672 case POWERPC_EXCP_FP_ZX: 1673 case POWERPC_EXCP_FP_VXZDZ: 1674 info.si_code = TARGET_FPE_FLTDIV; 1675 break; 1676 case POWERPC_EXCP_FP_XX: 1677 info.si_code = TARGET_FPE_FLTRES; 1678 break; 1679 case POWERPC_EXCP_FP_VXSOFT: 1680 info.si_code = TARGET_FPE_FLTINV; 1681 break; 1682 case POWERPC_EXCP_FP_VXSNAN: 1683 case POWERPC_EXCP_FP_VXISI: 1684 case POWERPC_EXCP_FP_VXIDI: 1685 case POWERPC_EXCP_FP_VXIMZ: 1686 case POWERPC_EXCP_FP_VXVC: 1687 case POWERPC_EXCP_FP_VXSQRT: 1688 case POWERPC_EXCP_FP_VXCVI: 1689 info.si_code = TARGET_FPE_FLTSUB; 1690 break; 1691 default: 1692 EXCP_DUMP(env, "Unknown floating point exception (%02x)\n", 1693 env->error_code); 1694 break; 1695 } 1696 break; 1697 case POWERPC_EXCP_INVAL: 1698 EXCP_DUMP(env, "Invalid instruction\n"); 1699 info.si_signo = TARGET_SIGILL; 1700 info.si_errno = 0; 1701 switch (env->error_code & 0xF) { 1702 case POWERPC_EXCP_INVAL_INVAL: 1703 info.si_code = TARGET_ILL_ILLOPC; 1704 break; 1705 case POWERPC_EXCP_INVAL_LSWX: 1706 info.si_code = TARGET_ILL_ILLOPN; 1707 break; 1708 case POWERPC_EXCP_INVAL_SPR: 1709 info.si_code = TARGET_ILL_PRVREG; 1710 break; 1711 case POWERPC_EXCP_INVAL_FP: 1712 info.si_code = TARGET_ILL_COPROC; 1713 break; 1714 default: 1715 EXCP_DUMP(env, "Unknown invalid operation (%02x)\n", 1716 env->error_code & 0xF); 1717 info.si_code = TARGET_ILL_ILLADR; 1718 break; 1719 } 1720 break; 1721 case POWERPC_EXCP_PRIV: 1722 EXCP_DUMP(env, "Privilege violation\n"); 1723 info.si_signo = TARGET_SIGILL; 1724 info.si_errno = 0; 1725 switch (env->error_code & 0xF) { 1726 case POWERPC_EXCP_PRIV_OPC: 1727 info.si_code = TARGET_ILL_PRVOPC; 1728 break; 1729 case POWERPC_EXCP_PRIV_REG: 1730 info.si_code = TARGET_ILL_PRVREG; 1731 break; 1732 default: 1733 EXCP_DUMP(env, "Unknown privilege violation (%02x)\n", 1734 env->error_code & 0xF); 1735 info.si_code = TARGET_ILL_PRVOPC; 1736 break; 1737 } 1738 break; 1739 case POWERPC_EXCP_TRAP: 1740 cpu_abort(cs, "Tried to call a TRAP\n"); 1741 break; 1742 default: 1743 /* Should not happen ! */ 1744 cpu_abort(cs, "Unknown program exception (%02x)\n", 1745 env->error_code); 1746 break; 1747 } 1748 info._sifields._sigfault._addr = env->nip - 4; 1749 queue_signal(env, info.si_signo, &info); 1750 break; 1751 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ 1752 EXCP_DUMP(env, "No floating point allowed\n"); 1753 info.si_signo = TARGET_SIGILL; 1754 info.si_errno = 0; 1755 info.si_code = TARGET_ILL_COPROC; 1756 info._sifields._sigfault._addr = env->nip - 4; 1757 queue_signal(env, info.si_signo, &info); 1758 break; 1759 case POWERPC_EXCP_SYSCALL: /* System call exception */ 1760 cpu_abort(cs, "Syscall exception while in user mode. 
" 1761 "Aborting\n"); 1762 break; 1763 case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ 1764 EXCP_DUMP(env, "No APU instruction allowed\n"); 1765 info.si_signo = TARGET_SIGILL; 1766 info.si_errno = 0; 1767 info.si_code = TARGET_ILL_COPROC; 1768 info._sifields._sigfault._addr = env->nip - 4; 1769 queue_signal(env, info.si_signo, &info); 1770 break; 1771 case POWERPC_EXCP_DECR: /* Decrementer exception */ 1772 cpu_abort(cs, "Decrementer interrupt while in user mode. " 1773 "Aborting\n"); 1774 break; 1775 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ 1776 cpu_abort(cs, "Fix interval timer interrupt while in user mode. " 1777 "Aborting\n"); 1778 break; 1779 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ 1780 cpu_abort(cs, "Watchdog timer interrupt while in user mode. " 1781 "Aborting\n"); 1782 break; 1783 case POWERPC_EXCP_DTLB: /* Data TLB error */ 1784 cpu_abort(cs, "Data TLB exception while in user mode. " 1785 "Aborting\n"); 1786 break; 1787 case POWERPC_EXCP_ITLB: /* Instruction TLB error */ 1788 cpu_abort(cs, "Instruction TLB exception while in user mode. " 1789 "Aborting\n"); 1790 break; 1791 case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */ 1792 EXCP_DUMP(env, "No SPE/floating-point instruction allowed\n"); 1793 info.si_signo = TARGET_SIGILL; 1794 info.si_errno = 0; 1795 info.si_code = TARGET_ILL_COPROC; 1796 info._sifields._sigfault._addr = env->nip - 4; 1797 queue_signal(env, info.si_signo, &info); 1798 break; 1799 case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */ 1800 cpu_abort(cs, "Embedded floating-point data IRQ not handled\n"); 1801 break; 1802 case POWERPC_EXCP_EFPRI: /* Embedded floating-point round IRQ */ 1803 cpu_abort(cs, "Embedded floating-point round IRQ not handled\n"); 1804 break; 1805 case POWERPC_EXCP_EPERFM: /* Embedded performance monitor IRQ */ 1806 cpu_abort(cs, "Performance monitor exception not handled\n"); 1807 break; 1808 case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */ 1809 cpu_abort(cs, "Doorbell interrupt while in user mode. " 1810 "Aborting\n"); 1811 break; 1812 case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */ 1813 cpu_abort(cs, "Doorbell critical interrupt while in user mode. " 1814 "Aborting\n"); 1815 break; 1816 case POWERPC_EXCP_RESET: /* System reset exception */ 1817 cpu_abort(cs, "Reset interrupt while in user mode. " 1818 "Aborting\n"); 1819 break; 1820 case POWERPC_EXCP_DSEG: /* Data segment exception */ 1821 cpu_abort(cs, "Data segment exception while in user mode. " 1822 "Aborting\n"); 1823 break; 1824 case POWERPC_EXCP_ISEG: /* Instruction segment exception */ 1825 cpu_abort(cs, "Instruction segment exception " 1826 "while in user mode. Aborting\n"); 1827 break; 1828 /* PowerPC 64 with hypervisor mode support */ 1829 case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */ 1830 cpu_abort(cs, "Hypervisor decrementer interrupt " 1831 "while in user mode. Aborting\n"); 1832 break; 1833 case POWERPC_EXCP_TRACE: /* Trace exception */ 1834 /* Nothing to do: 1835 * we use this exception to emulate step-by-step execution mode. 1836 */ 1837 break; 1838 /* PowerPC 64 with hypervisor mode support */ 1839 case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */ 1840 cpu_abort(cs, "Hypervisor data storage exception " 1841 "while in user mode. Aborting\n"); 1842 break; 1843 case POWERPC_EXCP_HISI: /* Hypervisor instruction storage excp */ 1844 cpu_abort(cs, "Hypervisor instruction storage exception " 1845 "while in user mode. 
            break;
        case POWERPC_EXCP_HDSEG:    /* Hypervisor data segment exception     */
            cpu_abort(cs, "Hypervisor data segment exception "
                      "while in user mode. Aborting\n");
            break;
        case POWERPC_EXCP_HISEG:    /* Hypervisor instruction segment excp   */
            cpu_abort(cs, "Hypervisor instruction segment exception "
                      "while in user mode. Aborting\n");
            break;
        case POWERPC_EXCP_VPU:      /* Vector unavailable exception          */
            EXCP_DUMP(env, "No Altivec instructions allowed\n");
            info.si_signo = TARGET_SIGILL;
            info.si_errno = 0;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
            break;
        case POWERPC_EXCP_PIT:      /* Programmable interval timer IRQ       */
            cpu_abort(cs, "Programmable interval timer interrupt "
                      "while in user mode. Aborting\n");
            break;
        case POWERPC_EXCP_IO:       /* IO error exception                    */
            cpu_abort(cs, "IO error exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_RUNM:     /* Run mode exception                    */
            cpu_abort(cs, "Run mode exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_EMUL:     /* Emulation trap exception              */
            cpu_abort(cs, "Emulation trap exception not handled\n");
            break;
        case POWERPC_EXCP_IFTLB:    /* Instruction fetch TLB error           */
            cpu_abort(cs, "Instruction fetch TLB exception "
                      "while in user-mode. Aborting");
            break;
        case POWERPC_EXCP_DLTLB:    /* Data load TLB miss                    */
            cpu_abort(cs, "Data load TLB exception while in user-mode. "
                      "Aborting");
            break;
        case POWERPC_EXCP_DSTLB:    /* Data store TLB miss                   */
            cpu_abort(cs, "Data store TLB exception while in user-mode. "
                      "Aborting");
            break;
        case POWERPC_EXCP_FPA:      /* Floating-point assist exception       */
            cpu_abort(cs, "Floating-point assist exception not handled\n");
            break;
        case POWERPC_EXCP_IABR:     /* Instruction address breakpoint        */
            cpu_abort(cs, "Instruction address breakpoint exception "
                      "not handled\n");
            break;
        case POWERPC_EXCP_SMI:      /* System management interrupt           */
            cpu_abort(cs, "System management interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_THERM:    /* Thermal interrupt                     */
            cpu_abort(cs, "Thermal interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_PERFM:    /* Embedded performance monitor IRQ      */
            cpu_abort(cs, "Performance monitor exception not handled\n");
            break;
        case POWERPC_EXCP_VPUA:     /* Vector assist exception               */
            cpu_abort(cs, "Vector assist exception not handled\n");
            break;
        case POWERPC_EXCP_SOFTP:    /* Soft patch exception                  */
            cpu_abort(cs, "Soft patch exception not handled\n");
            break;
        case POWERPC_EXCP_MAINT:    /* Maintenance exception                 */
            cpu_abort(cs, "Maintenance exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_STOP:     /* stop translation                      */
            /* We did invalidate the instruction cache. Go on */
            break;
        case POWERPC_EXCP_BRANCH:   /* branch instruction:                   */
            /* We just stopped because of a branch. Go on */
            break;
        case POWERPC_EXCP_SYSCALL_USER:
            /* system call in user-mode emulation */
            /* WARNING:
             * PPC ABI uses overflow flag in cr0 to signal an error
             * in syscalls.
             */
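            /* Worked example (hypothetical values): a failing syscall
             * returns a small negative value such as -TARGET_ENOENT from
             * do_syscall(); the check below sees a value above -515, sets
             * the SO bit (bit 0 of crf[0]) and hands the guest the
             * positive errno in r3.
             */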
            env->crf[0] &= ~0x1;
            ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4],
                             env->gpr[5], env->gpr[6], env->gpr[7],
                             env->gpr[8], 0, 0);
            if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) {
                /* Returning from a successful sigreturn syscall.
                   Avoid corrupting register state.  */
                break;
            }
            if (ret > (target_ulong)(-515)) {
                env->crf[0] |= 0x1;
                ret = -ret;
            }
            env->gpr[3] = ret;
            break;
        case POWERPC_EXCP_STCX:
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->nip;
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP_DEBUG:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig) {
                    info.si_signo = sig;
                    info.si_errno = 0;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
                }
            }
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        default:
            cpu_abort(cs, "Unknown exception 0x%d. Aborting\n", trapnr);
            break;
        }
        process_pending_signals(env);
    }
}
#endif

#ifdef TARGET_MIPS

# ifdef TARGET_ABI_MIPSO32
#  define MIPS_SYS(name, args) args,
static const uint8_t mips_syscall_args[] = {
    MIPS_SYS(sys_syscall , 8)    /* 4000 */
    MIPS_SYS(sys_exit , 1)
    MIPS_SYS(sys_fork , 0)
    MIPS_SYS(sys_read , 3)
    MIPS_SYS(sys_write , 3)
    MIPS_SYS(sys_open , 3)    /* 4005 */
    MIPS_SYS(sys_close , 1)
    MIPS_SYS(sys_waitpid , 3)
    MIPS_SYS(sys_creat , 2)
    MIPS_SYS(sys_link , 2)
    MIPS_SYS(sys_unlink , 1)    /* 4010 */
    MIPS_SYS(sys_execve , 0)
    MIPS_SYS(sys_chdir , 1)
    MIPS_SYS(sys_time , 1)
    MIPS_SYS(sys_mknod , 3)
    MIPS_SYS(sys_chmod , 2)    /* 4015 */
    MIPS_SYS(sys_lchown , 3)
    MIPS_SYS(sys_ni_syscall , 0)
    MIPS_SYS(sys_ni_syscall , 0)    /* was sys_stat */
    MIPS_SYS(sys_lseek , 3)
    MIPS_SYS(sys_getpid , 0)    /* 4020 */
    MIPS_SYS(sys_mount , 5)
    MIPS_SYS(sys_umount , 1)
    MIPS_SYS(sys_setuid , 1)
    MIPS_SYS(sys_getuid , 0)
    MIPS_SYS(sys_stime , 1)    /* 4025 */
    MIPS_SYS(sys_ptrace , 4)
    MIPS_SYS(sys_alarm , 1)
    MIPS_SYS(sys_ni_syscall , 0)    /* was sys_fstat */
    MIPS_SYS(sys_pause , 0)
    MIPS_SYS(sys_utime , 2)    /* 4030 */
    MIPS_SYS(sys_ni_syscall , 0)
    MIPS_SYS(sys_ni_syscall , 0)
    MIPS_SYS(sys_access , 2)
    MIPS_SYS(sys_nice , 1)
    MIPS_SYS(sys_ni_syscall , 0)    /* 4035 */
    MIPS_SYS(sys_sync , 0)
    MIPS_SYS(sys_kill , 2)
    MIPS_SYS(sys_rename , 2)
    MIPS_SYS(sys_mkdir , 2)
    MIPS_SYS(sys_rmdir , 1)    /* 4040 */
    MIPS_SYS(sys_dup , 1)
    MIPS_SYS(sys_pipe , 0)
    MIPS_SYS(sys_times , 1)
    MIPS_SYS(sys_ni_syscall , 0)
    MIPS_SYS(sys_brk , 1)    /* 4045 */
    MIPS_SYS(sys_setgid , 1)
    MIPS_SYS(sys_getgid , 0)
    MIPS_SYS(sys_ni_syscall , 0)    /* was signal(2) */
    MIPS_SYS(sys_geteuid , 0)
    MIPS_SYS(sys_getegid , 0)    /* 4050 */
    MIPS_SYS(sys_acct , 0)
    MIPS_SYS(sys_umount2 , 2)
    MIPS_SYS(sys_ni_syscall , 0)
    MIPS_SYS(sys_ioctl , 3)
    MIPS_SYS(sys_fcntl , 3)    /* 4055 */
    MIPS_SYS(sys_ni_syscall , 2)
    MIPS_SYS(sys_setpgid , 2)
    MIPS_SYS(sys_ni_syscall , 0)
    MIPS_SYS(sys_olduname , 1)
    MIPS_SYS(sys_umask , 1)    /* 4060 */
    MIPS_SYS(sys_chroot , 1)
    MIPS_SYS(sys_ustat , 2)
    MIPS_SYS(sys_dup2 , 2)
    MIPS_SYS(sys_getppid , 0)
    MIPS_SYS(sys_getpgrp , 0)    /* 4065 */
    MIPS_SYS(sys_setsid , 0)
    MIPS_SYS(sys_sigaction , 3)
    MIPS_SYS(sys_sgetmask , 0)
    MIPS_SYS(sys_ssetmask , 1)
    MIPS_SYS(sys_setreuid , 2)    /* 4070 */
    MIPS_SYS(sys_setregid , 2)
    MIPS_SYS(sys_sigsuspend , 0)
    MIPS_SYS(sys_sigpending , 1)
    MIPS_SYS(sys_sethostname , 2)
    MIPS_SYS(sys_setrlimit , 2)    /* 4075 */
    MIPS_SYS(sys_getrlimit , 2)
    MIPS_SYS(sys_getrusage , 2)
    MIPS_SYS(sys_gettimeofday, 2)
    MIPS_SYS(sys_settimeofday, 2)
    MIPS_SYS(sys_getgroups , 2)    /* 4080 */
    MIPS_SYS(sys_setgroups , 2)
    MIPS_SYS(sys_ni_syscall , 0)    /* old_select */
    MIPS_SYS(sys_symlink , 2)
    MIPS_SYS(sys_ni_syscall , 0)    /* was sys_lstat */
    MIPS_SYS(sys_readlink , 3)    /* 4085 */
    MIPS_SYS(sys_uselib , 1)
    MIPS_SYS(sys_swapon , 2)
    MIPS_SYS(sys_reboot , 3)
    MIPS_SYS(old_readdir , 3)
    MIPS_SYS(old_mmap , 6)    /* 4090 */
    MIPS_SYS(sys_munmap , 2)
    MIPS_SYS(sys_truncate , 2)
    MIPS_SYS(sys_ftruncate , 2)
    MIPS_SYS(sys_fchmod , 2)
    MIPS_SYS(sys_fchown , 3)    /* 4095 */
    MIPS_SYS(sys_getpriority , 2)
    MIPS_SYS(sys_setpriority , 3)
    MIPS_SYS(sys_ni_syscall , 0)
    MIPS_SYS(sys_statfs , 2)
    MIPS_SYS(sys_fstatfs , 2)    /* 4100 */
    MIPS_SYS(sys_ni_syscall , 0)    /* was ioperm(2) */
    MIPS_SYS(sys_socketcall , 2)
    MIPS_SYS(sys_syslog , 3)
    MIPS_SYS(sys_setitimer , 3)
    MIPS_SYS(sys_getitimer , 2)    /* 4105 */
    MIPS_SYS(sys_newstat , 2)
    MIPS_SYS(sys_newlstat , 2)
    MIPS_SYS(sys_newfstat , 2)
    MIPS_SYS(sys_uname , 1)
    MIPS_SYS(sys_ni_syscall , 0)    /* 4110 was iopl(2) */
    MIPS_SYS(sys_vhangup , 0)
    MIPS_SYS(sys_ni_syscall , 0)    /* was sys_idle() */
    MIPS_SYS(sys_ni_syscall , 0)    /* was sys_vm86 */
    MIPS_SYS(sys_wait4 , 4)
    MIPS_SYS(sys_swapoff , 1)    /* 4115 */
    MIPS_SYS(sys_sysinfo , 1)
    MIPS_SYS(sys_ipc , 6)
    MIPS_SYS(sys_fsync , 1)
    MIPS_SYS(sys_sigreturn , 0)
    MIPS_SYS(sys_clone , 6)    /* 4120 */
    MIPS_SYS(sys_setdomainname, 2)
    MIPS_SYS(sys_newuname , 1)
    MIPS_SYS(sys_ni_syscall , 0)    /* sys_modify_ldt */
    MIPS_SYS(sys_adjtimex , 1)
    MIPS_SYS(sys_mprotect , 3)    /* 4125 */
    MIPS_SYS(sys_sigprocmask , 3)
    MIPS_SYS(sys_ni_syscall , 0)    /* was create_module */
    MIPS_SYS(sys_init_module , 5)
    MIPS_SYS(sys_delete_module, 1)
    MIPS_SYS(sys_ni_syscall , 0)    /* 4130 was get_kernel_syms */
    MIPS_SYS(sys_quotactl , 0)
    MIPS_SYS(sys_getpgid , 1)
    MIPS_SYS(sys_fchdir , 1)
    MIPS_SYS(sys_bdflush , 2)
    MIPS_SYS(sys_sysfs , 3)    /* 4135 */
    MIPS_SYS(sys_personality , 1)
    MIPS_SYS(sys_ni_syscall , 0)    /* for afs_syscall */
    MIPS_SYS(sys_setfsuid , 1)
    MIPS_SYS(sys_setfsgid , 1)
    MIPS_SYS(sys_llseek , 5)    /* 4140 */
    MIPS_SYS(sys_getdents , 3)
    MIPS_SYS(sys_select , 5)
    MIPS_SYS(sys_flock , 2)
    MIPS_SYS(sys_msync , 3)
    MIPS_SYS(sys_readv , 3)    /* 4145 */
    MIPS_SYS(sys_writev , 3)
    MIPS_SYS(sys_cacheflush , 3)
    MIPS_SYS(sys_cachectl , 3)
    MIPS_SYS(sys_sysmips , 4)
    MIPS_SYS(sys_ni_syscall , 0)    /* 4150 */
    MIPS_SYS(sys_getsid , 1)
    MIPS_SYS(sys_fdatasync , 0)
    MIPS_SYS(sys_sysctl , 1)
    MIPS_SYS(sys_mlock , 2)
    MIPS_SYS(sys_munlock , 2)    /* 4155 */
    MIPS_SYS(sys_mlockall , 1)
    MIPS_SYS(sys_munlockall , 0)
    MIPS_SYS(sys_sched_setparam, 2)
    MIPS_SYS(sys_sched_getparam, 2)
    MIPS_SYS(sys_sched_setscheduler, 3)    /* 4160 */
MIPS_SYS(sys_sched_getscheduler, 1) 2146 MIPS_SYS(sys_sched_yield , 0) 2147 MIPS_SYS(sys_sched_get_priority_max, 1) 2148 MIPS_SYS(sys_sched_get_priority_min, 1) 2149 MIPS_SYS(sys_sched_rr_get_interval, 2) /* 4165 */ 2150 MIPS_SYS(sys_nanosleep, 2) 2151 MIPS_SYS(sys_mremap , 5) 2152 MIPS_SYS(sys_accept , 3) 2153 MIPS_SYS(sys_bind , 3) 2154 MIPS_SYS(sys_connect , 3) /* 4170 */ 2155 MIPS_SYS(sys_getpeername , 3) 2156 MIPS_SYS(sys_getsockname , 3) 2157 MIPS_SYS(sys_getsockopt , 5) 2158 MIPS_SYS(sys_listen , 2) 2159 MIPS_SYS(sys_recv , 4) /* 4175 */ 2160 MIPS_SYS(sys_recvfrom , 6) 2161 MIPS_SYS(sys_recvmsg , 3) 2162 MIPS_SYS(sys_send , 4) 2163 MIPS_SYS(sys_sendmsg , 3) 2164 MIPS_SYS(sys_sendto , 6) /* 4180 */ 2165 MIPS_SYS(sys_setsockopt , 5) 2166 MIPS_SYS(sys_shutdown , 2) 2167 MIPS_SYS(sys_socket , 3) 2168 MIPS_SYS(sys_socketpair , 4) 2169 MIPS_SYS(sys_setresuid , 3) /* 4185 */ 2170 MIPS_SYS(sys_getresuid , 3) 2171 MIPS_SYS(sys_ni_syscall , 0) /* was sys_query_module */ 2172 MIPS_SYS(sys_poll , 3) 2173 MIPS_SYS(sys_nfsservctl , 3) 2174 MIPS_SYS(sys_setresgid , 3) /* 4190 */ 2175 MIPS_SYS(sys_getresgid , 3) 2176 MIPS_SYS(sys_prctl , 5) 2177 MIPS_SYS(sys_rt_sigreturn, 0) 2178 MIPS_SYS(sys_rt_sigaction, 4) 2179 MIPS_SYS(sys_rt_sigprocmask, 4) /* 4195 */ 2180 MIPS_SYS(sys_rt_sigpending, 2) 2181 MIPS_SYS(sys_rt_sigtimedwait, 4) 2182 MIPS_SYS(sys_rt_sigqueueinfo, 3) 2183 MIPS_SYS(sys_rt_sigsuspend, 0) 2184 MIPS_SYS(sys_pread64 , 6) /* 4200 */ 2185 MIPS_SYS(sys_pwrite64 , 6) 2186 MIPS_SYS(sys_chown , 3) 2187 MIPS_SYS(sys_getcwd , 2) 2188 MIPS_SYS(sys_capget , 2) 2189 MIPS_SYS(sys_capset , 2) /* 4205 */ 2190 MIPS_SYS(sys_sigaltstack , 2) 2191 MIPS_SYS(sys_sendfile , 4) 2192 MIPS_SYS(sys_ni_syscall , 0) 2193 MIPS_SYS(sys_ni_syscall , 0) 2194 MIPS_SYS(sys_mmap2 , 6) /* 4210 */ 2195 MIPS_SYS(sys_truncate64 , 4) 2196 MIPS_SYS(sys_ftruncate64 , 4) 2197 MIPS_SYS(sys_stat64 , 2) 2198 MIPS_SYS(sys_lstat64 , 2) 2199 MIPS_SYS(sys_fstat64 , 2) /* 4215 */ 2200 MIPS_SYS(sys_pivot_root , 2) 2201 MIPS_SYS(sys_mincore , 3) 2202 MIPS_SYS(sys_madvise , 3) 2203 MIPS_SYS(sys_getdents64 , 3) 2204 MIPS_SYS(sys_fcntl64 , 3) /* 4220 */ 2205 MIPS_SYS(sys_ni_syscall , 0) 2206 MIPS_SYS(sys_gettid , 0) 2207 MIPS_SYS(sys_readahead , 5) 2208 MIPS_SYS(sys_setxattr , 5) 2209 MIPS_SYS(sys_lsetxattr , 5) /* 4225 */ 2210 MIPS_SYS(sys_fsetxattr , 5) 2211 MIPS_SYS(sys_getxattr , 4) 2212 MIPS_SYS(sys_lgetxattr , 4) 2213 MIPS_SYS(sys_fgetxattr , 4) 2214 MIPS_SYS(sys_listxattr , 3) /* 4230 */ 2215 MIPS_SYS(sys_llistxattr , 3) 2216 MIPS_SYS(sys_flistxattr , 3) 2217 MIPS_SYS(sys_removexattr , 2) 2218 MIPS_SYS(sys_lremovexattr, 2) 2219 MIPS_SYS(sys_fremovexattr, 2) /* 4235 */ 2220 MIPS_SYS(sys_tkill , 2) 2221 MIPS_SYS(sys_sendfile64 , 5) 2222 MIPS_SYS(sys_futex , 6) 2223 MIPS_SYS(sys_sched_setaffinity, 3) 2224 MIPS_SYS(sys_sched_getaffinity, 3) /* 4240 */ 2225 MIPS_SYS(sys_io_setup , 2) 2226 MIPS_SYS(sys_io_destroy , 1) 2227 MIPS_SYS(sys_io_getevents, 5) 2228 MIPS_SYS(sys_io_submit , 3) 2229 MIPS_SYS(sys_io_cancel , 3) /* 4245 */ 2230 MIPS_SYS(sys_exit_group , 1) 2231 MIPS_SYS(sys_lookup_dcookie, 3) 2232 MIPS_SYS(sys_epoll_create, 1) 2233 MIPS_SYS(sys_epoll_ctl , 4) 2234 MIPS_SYS(sys_epoll_wait , 3) /* 4250 */ 2235 MIPS_SYS(sys_remap_file_pages, 5) 2236 MIPS_SYS(sys_set_tid_address, 1) 2237 MIPS_SYS(sys_restart_syscall, 0) 2238 MIPS_SYS(sys_fadvise64_64, 7) 2239 MIPS_SYS(sys_statfs64 , 3) /* 4255 */ 2240 MIPS_SYS(sys_fstatfs64 , 2) 2241 MIPS_SYS(sys_timer_create, 3) 2242 MIPS_SYS(sys_timer_settime, 4) 2243 MIPS_SYS(sys_timer_gettime, 2) 2244 
MIPS_SYS(sys_timer_getoverrun, 1) /* 4260 */ 2245 MIPS_SYS(sys_timer_delete, 1) 2246 MIPS_SYS(sys_clock_settime, 2) 2247 MIPS_SYS(sys_clock_gettime, 2) 2248 MIPS_SYS(sys_clock_getres, 2) 2249 MIPS_SYS(sys_clock_nanosleep, 4) /* 4265 */ 2250 MIPS_SYS(sys_tgkill , 3) 2251 MIPS_SYS(sys_utimes , 2) 2252 MIPS_SYS(sys_mbind , 4) 2253 MIPS_SYS(sys_ni_syscall , 0) /* sys_get_mempolicy */ 2254 MIPS_SYS(sys_ni_syscall , 0) /* 4270 sys_set_mempolicy */ 2255 MIPS_SYS(sys_mq_open , 4) 2256 MIPS_SYS(sys_mq_unlink , 1) 2257 MIPS_SYS(sys_mq_timedsend, 5) 2258 MIPS_SYS(sys_mq_timedreceive, 5) 2259 MIPS_SYS(sys_mq_notify , 2) /* 4275 */ 2260 MIPS_SYS(sys_mq_getsetattr, 3) 2261 MIPS_SYS(sys_ni_syscall , 0) /* sys_vserver */ 2262 MIPS_SYS(sys_waitid , 4) 2263 MIPS_SYS(sys_ni_syscall , 0) /* available, was setaltroot */ 2264 MIPS_SYS(sys_add_key , 5) 2265 MIPS_SYS(sys_request_key, 4) 2266 MIPS_SYS(sys_keyctl , 5) 2267 MIPS_SYS(sys_set_thread_area, 1) 2268 MIPS_SYS(sys_inotify_init, 0) 2269 MIPS_SYS(sys_inotify_add_watch, 3) /* 4285 */ 2270 MIPS_SYS(sys_inotify_rm_watch, 2) 2271 MIPS_SYS(sys_migrate_pages, 4) 2272 MIPS_SYS(sys_openat, 4) 2273 MIPS_SYS(sys_mkdirat, 3) 2274 MIPS_SYS(sys_mknodat, 4) /* 4290 */ 2275 MIPS_SYS(sys_fchownat, 5) 2276 MIPS_SYS(sys_futimesat, 3) 2277 MIPS_SYS(sys_fstatat64, 4) 2278 MIPS_SYS(sys_unlinkat, 3) 2279 MIPS_SYS(sys_renameat, 4) /* 4295 */ 2280 MIPS_SYS(sys_linkat, 5) 2281 MIPS_SYS(sys_symlinkat, 3) 2282 MIPS_SYS(sys_readlinkat, 4) 2283 MIPS_SYS(sys_fchmodat, 3) 2284 MIPS_SYS(sys_faccessat, 3) /* 4300 */ 2285 MIPS_SYS(sys_pselect6, 6) 2286 MIPS_SYS(sys_ppoll, 5) 2287 MIPS_SYS(sys_unshare, 1) 2288 MIPS_SYS(sys_splice, 6) 2289 MIPS_SYS(sys_sync_file_range, 7) /* 4305 */ 2290 MIPS_SYS(sys_tee, 4) 2291 MIPS_SYS(sys_vmsplice, 4) 2292 MIPS_SYS(sys_move_pages, 6) 2293 MIPS_SYS(sys_set_robust_list, 2) 2294 MIPS_SYS(sys_get_robust_list, 3) /* 4310 */ 2295 MIPS_SYS(sys_kexec_load, 4) 2296 MIPS_SYS(sys_getcpu, 3) 2297 MIPS_SYS(sys_epoll_pwait, 6) 2298 MIPS_SYS(sys_ioprio_set, 3) 2299 MIPS_SYS(sys_ioprio_get, 2) 2300 MIPS_SYS(sys_utimensat, 4) 2301 MIPS_SYS(sys_signalfd, 3) 2302 MIPS_SYS(sys_ni_syscall, 0) /* was timerfd */ 2303 MIPS_SYS(sys_eventfd, 1) 2304 MIPS_SYS(sys_fallocate, 6) /* 4320 */ 2305 MIPS_SYS(sys_timerfd_create, 2) 2306 MIPS_SYS(sys_timerfd_gettime, 2) 2307 MIPS_SYS(sys_timerfd_settime, 4) 2308 MIPS_SYS(sys_signalfd4, 4) 2309 MIPS_SYS(sys_eventfd2, 2) /* 4325 */ 2310 MIPS_SYS(sys_epoll_create1, 1) 2311 MIPS_SYS(sys_dup3, 3) 2312 MIPS_SYS(sys_pipe2, 2) 2313 MIPS_SYS(sys_inotify_init1, 1) 2314 MIPS_SYS(sys_preadv, 6) /* 4330 */ 2315 MIPS_SYS(sys_pwritev, 6) 2316 MIPS_SYS(sys_rt_tgsigqueueinfo, 4) 2317 MIPS_SYS(sys_perf_event_open, 5) 2318 MIPS_SYS(sys_accept4, 4) 2319 MIPS_SYS(sys_recvmmsg, 5) /* 4335 */ 2320 MIPS_SYS(sys_fanotify_init, 2) 2321 MIPS_SYS(sys_fanotify_mark, 6) 2322 MIPS_SYS(sys_prlimit64, 4) 2323 MIPS_SYS(sys_name_to_handle_at, 5) 2324 MIPS_SYS(sys_open_by_handle_at, 3) /* 4340 */ 2325 MIPS_SYS(sys_clock_adjtime, 2) 2326 MIPS_SYS(sys_syncfs, 1) 2327 }; 2328 # undef MIPS_SYS 2329 # endif /* O32 */ 2330 2331 static int do_store_exclusive(CPUMIPSState *env) 2332 { 2333 target_ulong addr; 2334 target_ulong page_addr; 2335 target_ulong val; 2336 int flags; 2337 int segv = 0; 2338 int reg; 2339 int d; 2340 2341 addr = env->lladdr; 2342 page_addr = addr & TARGET_PAGE_MASK; 2343 start_exclusive(); 2344 mmap_lock(); 2345 flags = page_get_flags(page_addr); 2346 if ((flags & PAGE_READ) == 0) { 2347 segv = 1; 2348 } else { 2349 reg = env->llreg & 0x1f; 2350 d = (env->llreg 
& 0x20) != 0; 2351 if (d) { 2352 segv = get_user_s64(val, addr); 2353 } else { 2354 segv = get_user_s32(val, addr); 2355 } 2356 if (!segv) { 2357 if (val != env->llval) { 2358 env->active_tc.gpr[reg] = 0; 2359 } else { 2360 if (d) { 2361 segv = put_user_u64(env->llnewval, addr); 2362 } else { 2363 segv = put_user_u32(env->llnewval, addr); 2364 } 2365 if (!segv) { 2366 env->active_tc.gpr[reg] = 1; 2367 } 2368 } 2369 } 2370 } 2371 env->lladdr = -1; 2372 if (!segv) { 2373 env->active_tc.PC += 4; 2374 } 2375 mmap_unlock(); 2376 end_exclusive(); 2377 return segv; 2378 } 2379 2380 /* Break codes */ 2381 enum { 2382 BRK_OVERFLOW = 6, 2383 BRK_DIVZERO = 7 2384 }; 2385 2386 static int do_break(CPUMIPSState *env, target_siginfo_t *info, 2387 unsigned int code) 2388 { 2389 int ret = -1; 2390 2391 switch (code) { 2392 case BRK_OVERFLOW: 2393 case BRK_DIVZERO: 2394 info->si_signo = TARGET_SIGFPE; 2395 info->si_errno = 0; 2396 info->si_code = (code == BRK_OVERFLOW) ? FPE_INTOVF : FPE_INTDIV; 2397 queue_signal(env, info->si_signo, &*info); 2398 ret = 0; 2399 break; 2400 default: 2401 info->si_signo = TARGET_SIGTRAP; 2402 info->si_errno = 0; 2403 queue_signal(env, info->si_signo, &*info); 2404 ret = 0; 2405 break; 2406 } 2407 2408 return ret; 2409 } 2410 2411 void cpu_loop(CPUMIPSState *env) 2412 { 2413 CPUState *cs = CPU(mips_env_get_cpu(env)); 2414 target_siginfo_t info; 2415 int trapnr; 2416 abi_long ret; 2417 # ifdef TARGET_ABI_MIPSO32 2418 unsigned int syscall_num; 2419 # endif 2420 2421 for(;;) { 2422 cpu_exec_start(cs); 2423 trapnr = cpu_mips_exec(env); 2424 cpu_exec_end(cs); 2425 switch(trapnr) { 2426 case EXCP_SYSCALL: 2427 env->active_tc.PC += 4; 2428 # ifdef TARGET_ABI_MIPSO32 2429 syscall_num = env->active_tc.gpr[2] - 4000; 2430 if (syscall_num >= sizeof(mips_syscall_args)) { 2431 ret = -TARGET_ENOSYS; 2432 } else { 2433 int nb_args; 2434 abi_ulong sp_reg; 2435 abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0; 2436 2437 nb_args = mips_syscall_args[syscall_num]; 2438 sp_reg = env->active_tc.gpr[29]; 2439 switch (nb_args) { 2440 /* these arguments are taken from the stack */ 2441 case 8: 2442 if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) { 2443 goto done_syscall; 2444 } 2445 case 7: 2446 if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) { 2447 goto done_syscall; 2448 } 2449 case 6: 2450 if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) { 2451 goto done_syscall; 2452 } 2453 case 5: 2454 if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) { 2455 goto done_syscall; 2456 } 2457 default: 2458 break; 2459 } 2460 ret = do_syscall(env, env->active_tc.gpr[2], 2461 env->active_tc.gpr[4], 2462 env->active_tc.gpr[5], 2463 env->active_tc.gpr[6], 2464 env->active_tc.gpr[7], 2465 arg5, arg6, arg7, arg8); 2466 } 2467 done_syscall: 2468 # else 2469 ret = do_syscall(env, env->active_tc.gpr[2], 2470 env->active_tc.gpr[4], env->active_tc.gpr[5], 2471 env->active_tc.gpr[6], env->active_tc.gpr[7], 2472 env->active_tc.gpr[8], env->active_tc.gpr[9], 2473 env->active_tc.gpr[10], env->active_tc.gpr[11]); 2474 # endif /* O32 */ 2475 if (ret == -TARGET_QEMU_ESIGRETURN) { 2476 /* Returning from a successful sigreturn syscall. 2477 Avoid clobbering register state. 
*/ 2478 break; 2479 } 2480 if ((abi_ulong)ret >= (abi_ulong)-1133) { 2481 env->active_tc.gpr[7] = 1; /* error flag */ 2482 ret = -ret; 2483 } else { 2484 env->active_tc.gpr[7] = 0; /* error flag */ 2485 } 2486 env->active_tc.gpr[2] = ret; 2487 break; 2488 case EXCP_TLBL: 2489 case EXCP_TLBS: 2490 case EXCP_AdEL: 2491 case EXCP_AdES: 2492 info.si_signo = TARGET_SIGSEGV; 2493 info.si_errno = 0; 2494 /* XXX: check env->error_code */ 2495 info.si_code = TARGET_SEGV_MAPERR; 2496 info._sifields._sigfault._addr = env->CP0_BadVAddr; 2497 queue_signal(env, info.si_signo, &info); 2498 break; 2499 case EXCP_CpU: 2500 case EXCP_RI: 2501 info.si_signo = TARGET_SIGILL; 2502 info.si_errno = 0; 2503 info.si_code = 0; 2504 queue_signal(env, info.si_signo, &info); 2505 break; 2506 case EXCP_INTERRUPT: 2507 /* just indicate that signals should be handled asap */ 2508 break; 2509 case EXCP_DEBUG: 2510 { 2511 int sig; 2512 2513 sig = gdb_handlesig(cs, TARGET_SIGTRAP); 2514 if (sig) 2515 { 2516 info.si_signo = sig; 2517 info.si_errno = 0; 2518 info.si_code = TARGET_TRAP_BRKPT; 2519 queue_signal(env, info.si_signo, &info); 2520 } 2521 } 2522 break; 2523 case EXCP_SC: 2524 if (do_store_exclusive(env)) { 2525 info.si_signo = TARGET_SIGSEGV; 2526 info.si_errno = 0; 2527 info.si_code = TARGET_SEGV_MAPERR; 2528 info._sifields._sigfault._addr = env->active_tc.PC; 2529 queue_signal(env, info.si_signo, &info); 2530 } 2531 break; 2532 case EXCP_DSPDIS: 2533 info.si_signo = TARGET_SIGILL; 2534 info.si_errno = 0; 2535 info.si_code = TARGET_ILL_ILLOPC; 2536 queue_signal(env, info.si_signo, &info); 2537 break; 2538 /* The code below was inspired by the MIPS Linux kernel trap 2539 * handling code in arch/mips/kernel/traps.c. 2540 */ 2541 case EXCP_BREAK: 2542 { 2543 abi_ulong trap_instr; 2544 unsigned int code; 2545 2546 if (env->hflags & MIPS_HFLAG_M16) { 2547 if (env->insn_flags & ASE_MICROMIPS) { 2548 /* microMIPS mode */ 2549 ret = get_user_u16(trap_instr, env->active_tc.PC); 2550 if (ret != 0) { 2551 goto error; 2552 } 2553 2554 if ((trap_instr >> 10) == 0x11) { 2555 /* 16-bit instruction */ 2556 code = trap_instr & 0xf; 2557 } else { 2558 /* 32-bit instruction */ 2559 abi_ulong instr_lo; 2560 2561 ret = get_user_u16(instr_lo, 2562 env->active_tc.PC + 2); 2563 if (ret != 0) { 2564 goto error; 2565 } 2566 trap_instr = (trap_instr << 16) | instr_lo; 2567 code = ((trap_instr >> 6) & ((1 << 20) - 1)); 2568 /* Unfortunately, microMIPS also suffers from 2569 the old assembler bug... */ 2570 if (code >= (1 << 10)) { 2571 code >>= 10; 2572 } 2573 } 2574 } else { 2575 /* MIPS16e mode */ 2576 ret = get_user_u16(trap_instr, env->active_tc.PC); 2577 if (ret != 0) { 2578 goto error; 2579 } 2580 code = (trap_instr >> 6) & 0x3f; 2581 } 2582 } else { 2583 ret = get_user_ual(trap_instr, env->active_tc.PC); 2584 if (ret != 0) { 2585 goto error; 2586 } 2587 2588 /* As described in the original Linux kernel code, the 2589 * below checks on 'code' are to work around an old 2590 * assembly bug. 
2591 */ 2592 code = ((trap_instr >> 6) & ((1 << 20) - 1)); 2593 if (code >= (1 << 10)) { 2594 code >>= 10; 2595 } 2596 } 2597 2598 if (do_break(env, &info, code) != 0) { 2599 goto error; 2600 } 2601 } 2602 break; 2603 case EXCP_TRAP: 2604 { 2605 abi_ulong trap_instr; 2606 unsigned int code = 0; 2607 2608 if (env->hflags & MIPS_HFLAG_M16) { 2609 /* microMIPS mode */ 2610 abi_ulong instr[2]; 2611 2612 ret = get_user_u16(instr[0], env->active_tc.PC) || 2613 get_user_u16(instr[1], env->active_tc.PC + 2); 2614 2615 trap_instr = (instr[0] << 16) | instr[1]; 2616 } else { 2617 ret = get_user_ual(trap_instr, env->active_tc.PC); 2618 } 2619 2620 if (ret != 0) { 2621 goto error; 2622 } 2623 2624 /* The immediate versions don't provide a code. */ 2625 if (!(trap_instr & 0xFC000000)) { 2626 if (env->hflags & MIPS_HFLAG_M16) { 2627 /* microMIPS mode */ 2628 code = ((trap_instr >> 12) & ((1 << 4) - 1)); 2629 } else { 2630 code = ((trap_instr >> 6) & ((1 << 10) - 1)); 2631 } 2632 } 2633 2634 if (do_break(env, &info, code) != 0) { 2635 goto error; 2636 } 2637 } 2638 break; 2639 default: 2640 error: 2641 fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", 2642 trapnr); 2643 cpu_dump_state(cs, stderr, fprintf, 0); 2644 abort(); 2645 } 2646 process_pending_signals(env); 2647 } 2648 } 2649 #endif 2650 2651 #ifdef TARGET_OPENRISC 2652 2653 void cpu_loop(CPUOpenRISCState *env) 2654 { 2655 CPUState *cs = CPU(openrisc_env_get_cpu(env)); 2656 int trapnr, gdbsig; 2657 2658 for (;;) { 2659 trapnr = cpu_exec(env); 2660 gdbsig = 0; 2661 2662 switch (trapnr) { 2663 case EXCP_RESET: 2664 qemu_log("\nReset request, exit, pc is %#x\n", env->pc); 2665 exit(1); 2666 break; 2667 case EXCP_BUSERR: 2668 qemu_log("\nBus error, exit, pc is %#x\n", env->pc); 2669 gdbsig = SIGBUS; 2670 break; 2671 case EXCP_DPF: 2672 case EXCP_IPF: 2673 cpu_dump_state(cs, stderr, fprintf, 0); 2674 gdbsig = TARGET_SIGSEGV; 2675 break; 2676 case EXCP_TICK: 2677 qemu_log("\nTick time interrupt pc is %#x\n", env->pc); 2678 break; 2679 case EXCP_ALIGN: 2680 qemu_log("\nAlignment pc is %#x\n", env->pc); 2681 gdbsig = SIGBUS; 2682 break; 2683 case EXCP_ILLEGAL: 2684 qemu_log("\nIllegal instruction, pc is %#x\n", env->pc); 2685 gdbsig = SIGILL; 2686 break; 2687 case EXCP_INT: 2688 qemu_log("\nExternal interrupt, pc is %#x\n", env->pc); 2689 break; 2690 case EXCP_DTLBMISS: 2691 case EXCP_ITLBMISS: 2692 qemu_log("\nTLB miss\n"); 2693 break; 2694 case EXCP_RANGE: 2695 qemu_log("\nRange\n"); 2696 gdbsig = SIGSEGV; 2697 break; 2698 case EXCP_SYSCALL: 2699 env->pc += 4; /* 0xc00; */ 2700 env->gpr[11] = do_syscall(env, 2701 env->gpr[11], /* return value */ 2702 env->gpr[3], /* r3 - r7 are params */ 2703 env->gpr[4], 2704 env->gpr[5], 2705 env->gpr[6], 2706 env->gpr[7], 2707 env->gpr[8], 0, 0); 2708 break; 2709 case EXCP_FPE: 2710 qemu_log("\nFloating point error\n"); 2711 break; 2712 case EXCP_TRAP: 2713 qemu_log("\nTrap\n"); 2714 gdbsig = SIGTRAP; 2715 break; 2716 case EXCP_NR: 2717 qemu_log("\nNR\n"); 2718 break; 2719 default: 2720 qemu_log("\nqemu: unhandled CPU exception %#x - aborting\n", 2721 trapnr); 2722 cpu_dump_state(cs, stderr, fprintf, 0); 2723 gdbsig = TARGET_SIGILL; 2724 break; 2725 } 2726 if (gdbsig) { 2727 gdb_handlesig(cs, gdbsig); 2728 if (gdbsig != TARGET_SIGTRAP) { 2729 exit(1); 2730 } 2731 } 2732 2733 process_pending_signals(env); 2734 } 2735 } 2736 2737 #endif /* TARGET_OPENRISC */ 2738 2739 #ifdef TARGET_SH4 2740 void cpu_loop(CPUSH4State *env) 2741 { 2742 CPUState *cs = CPU(sh_env_get_cpu(env)); 2743 int trapnr, ret; 2744
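/* Trap 0x160 is the Linux syscall trap on SH-4: the dispatch below takes
   the syscall number from r3, the arguments from r4-r7 plus r0-r1, and
   writes the result back to r0. */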
target_siginfo_t info; 2745 2746 while (1) { 2747 trapnr = cpu_sh4_exec (env); 2748 2749 switch (trapnr) { 2750 case 0x160: 2751 env->pc += 2; 2752 ret = do_syscall(env, 2753 env->gregs[3], 2754 env->gregs[4], 2755 env->gregs[5], 2756 env->gregs[6], 2757 env->gregs[7], 2758 env->gregs[0], 2759 env->gregs[1], 2760 0, 0); 2761 env->gregs[0] = ret; 2762 break; 2763 case EXCP_INTERRUPT: 2764 /* just indicate that signals should be handled asap */ 2765 break; 2766 case EXCP_DEBUG: 2767 { 2768 int sig; 2769 2770 sig = gdb_handlesig(cs, TARGET_SIGTRAP); 2771 if (sig) 2772 { 2773 info.si_signo = sig; 2774 info.si_errno = 0; 2775 info.si_code = TARGET_TRAP_BRKPT; 2776 queue_signal(env, info.si_signo, &info); 2777 } 2778 } 2779 break; 2780 case 0xa0: 2781 case 0xc0: 2782 info.si_signo = SIGSEGV; 2783 info.si_errno = 0; 2784 info.si_code = TARGET_SEGV_MAPERR; 2785 info._sifields._sigfault._addr = env->tea; 2786 queue_signal(env, info.si_signo, &info); 2787 break; 2788 2789 default: 2790 printf ("Unhandled trap: 0x%x\n", trapnr); 2791 cpu_dump_state(cs, stderr, fprintf, 0); 2792 exit (1); 2793 } 2794 process_pending_signals (env); 2795 } 2796 } 2797 #endif 2798 2799 #ifdef TARGET_CRIS 2800 void cpu_loop(CPUCRISState *env) 2801 { 2802 CPUState *cs = CPU(cris_env_get_cpu(env)); 2803 int trapnr, ret; 2804 target_siginfo_t info; 2805 2806 while (1) { 2807 trapnr = cpu_cris_exec (env); 2808 switch (trapnr) { 2809 case 0xaa: 2810 { 2811 info.si_signo = SIGSEGV; 2812 info.si_errno = 0; 2813 /* XXX: check env->error_code */ 2814 info.si_code = TARGET_SEGV_MAPERR; 2815 info._sifields._sigfault._addr = env->pregs[PR_EDA]; 2816 queue_signal(env, info.si_signo, &info); 2817 } 2818 break; 2819 case EXCP_INTERRUPT: 2820 /* just indicate that signals should be handled asap */ 2821 break; 2822 case EXCP_BREAK: 2823 ret = do_syscall(env, 2824 env->regs[9], 2825 env->regs[10], 2826 env->regs[11], 2827 env->regs[12], 2828 env->regs[13], 2829 env->pregs[7], 2830 env->pregs[11], 2831 0, 0); 2832 env->regs[10] = ret; 2833 break; 2834 case EXCP_DEBUG: 2835 { 2836 int sig; 2837 2838 sig = gdb_handlesig(cs, TARGET_SIGTRAP); 2839 if (sig) 2840 { 2841 info.si_signo = sig; 2842 info.si_errno = 0; 2843 info.si_code = TARGET_TRAP_BRKPT; 2844 queue_signal(env, info.si_signo, &info); 2845 } 2846 } 2847 break; 2848 default: 2849 printf ("Unhandled trap: 0x%x\n", trapnr); 2850 cpu_dump_state(cs, stderr, fprintf, 0); 2851 exit (1); 2852 } 2853 process_pending_signals (env); 2854 } 2855 } 2856 #endif 2857 2858 #ifdef TARGET_MICROBLAZE 2859 void cpu_loop(CPUMBState *env) 2860 { 2861 CPUState *cs = CPU(mb_env_get_cpu(env)); 2862 int trapnr, ret; 2863 target_siginfo_t info; 2864 2865 while (1) { 2866 trapnr = cpu_mb_exec (env); 2867 switch (trapnr) { 2868 case 0xaa: 2869 { 2870 info.si_signo = SIGSEGV; 2871 info.si_errno = 0; 2872 /* XXX: check env->error_code */ 2873 info.si_code = TARGET_SEGV_MAPERR; 2874 info._sifields._sigfault._addr = 0; 2875 queue_signal(env, info.si_signo, &info); 2876 } 2877 break; 2878 case EXCP_INTERRUPT: 2879 /* just indicate that signals should be handled asap */ 2880 break; 2881 case EXCP_BREAK: 2882 /* Return address is 4 bytes after the call. 
*/ 2883 env->regs[14] += 4; 2884 env->sregs[SR_PC] = env->regs[14]; 2885 ret = do_syscall(env, 2886 env->regs[12], 2887 env->regs[5], 2888 env->regs[6], 2889 env->regs[7], 2890 env->regs[8], 2891 env->regs[9], 2892 env->regs[10], 2893 0, 0); 2894 env->regs[3] = ret; 2895 break; 2896 case EXCP_HW_EXCP: 2897 env->regs[17] = env->sregs[SR_PC] + 4; 2898 if (env->iflags & D_FLAG) { 2899 env->sregs[SR_ESR] |= 1 << 12; 2900 env->sregs[SR_PC] -= 4; 2901 /* FIXME: if branch was immed, replay the imm as well. */ 2902 } 2903 2904 env->iflags &= ~(IMM_FLAG | D_FLAG); 2905 2906 switch (env->sregs[SR_ESR] & 31) { 2907 case ESR_EC_DIVZERO: 2908 info.si_signo = SIGFPE; 2909 info.si_errno = 0; 2910 info.si_code = TARGET_FPE_FLTDIV; 2911 info._sifields._sigfault._addr = 0; 2912 queue_signal(env, info.si_signo, &info); 2913 break; 2914 case ESR_EC_FPU: 2915 info.si_signo = SIGFPE; 2916 info.si_errno = 0; 2917 if (env->sregs[SR_FSR] & FSR_IO) { 2918 info.si_code = TARGET_FPE_FLTINV; 2919 } 2920 if (env->sregs[SR_FSR] & FSR_DZ) { 2921 info.si_code = TARGET_FPE_FLTDIV; 2922 } 2923 info._sifields._sigfault._addr = 0; 2924 queue_signal(env, info.si_signo, &info); 2925 break; 2926 default: 2927 printf ("Unhandled hw-exception: 0x%x\n", 2928 env->sregs[SR_ESR] & ESR_EC_MASK); 2929 cpu_dump_state(cs, stderr, fprintf, 0); 2930 exit (1); 2931 break; 2932 } 2933 break; 2934 case EXCP_DEBUG: 2935 { 2936 int sig; 2937 2938 sig = gdb_handlesig(cs, TARGET_SIGTRAP); 2939 if (sig) 2940 { 2941 info.si_signo = sig; 2942 info.si_errno = 0; 2943 info.si_code = TARGET_TRAP_BRKPT; 2944 queue_signal(env, info.si_signo, &info); 2945 } 2946 } 2947 break; 2948 default: 2949 printf ("Unhandled trap: 0x%x\n", trapnr); 2950 cpu_dump_state(cs, stderr, fprintf, 0); 2951 exit (1); 2952 } 2953 process_pending_signals (env); 2954 } 2955 } 2956 #endif 2957 2958 #ifdef TARGET_M68K 2959 2960 void cpu_loop(CPUM68KState *env) 2961 { 2962 CPUState *cs = CPU(m68k_env_get_cpu(env)); 2963 int trapnr; 2964 unsigned int n; 2965 target_siginfo_t info; 2966 TaskState *ts = cs->opaque; 2967 2968 for(;;) { 2969 trapnr = cpu_m68k_exec(env); 2970 switch(trapnr) { 2971 case EXCP_ILLEGAL: 2972 { 2973 if (ts->sim_syscalls) { 2974 uint16_t nr; 2975 nr = lduw(env->pc + 2); 2976 env->pc += 4; 2977 do_m68k_simcall(env, nr); 2978 } else { 2979 goto do_sigill; 2980 } 2981 } 2982 break; 2983 case EXCP_HALT_INSN: 2984 /* Semihosting syscall.
*/ 2985 env->pc += 4; 2986 do_m68k_semihosting(env, env->dregs[0]); 2987 break; 2988 case EXCP_LINEA: 2989 case EXCP_LINEF: 2990 case EXCP_UNSUPPORTED: 2991 do_sigill: 2992 info.si_signo = SIGILL; 2993 info.si_errno = 0; 2994 info.si_code = TARGET_ILL_ILLOPN; 2995 info._sifields._sigfault._addr = env->pc; 2996 queue_signal(env, info.si_signo, &info); 2997 break; 2998 case EXCP_TRAP0: 2999 { 3000 ts->sim_syscalls = 0; 3001 n = env->dregs[0]; 3002 env->pc += 2; 3003 env->dregs[0] = do_syscall(env, 3004 n, 3005 env->dregs[1], 3006 env->dregs[2], 3007 env->dregs[3], 3008 env->dregs[4], 3009 env->dregs[5], 3010 env->aregs[0], 3011 0, 0); 3012 } 3013 break; 3014 case EXCP_INTERRUPT: 3015 /* just indicate that signals should be handled asap */ 3016 break; 3017 case EXCP_ACCESS: 3018 { 3019 info.si_signo = SIGSEGV; 3020 info.si_errno = 0; 3021 /* XXX: check env->error_code */ 3022 info.si_code = TARGET_SEGV_MAPERR; 3023 info._sifields._sigfault._addr = env->mmu.ar; 3024 queue_signal(env, info.si_signo, &info); 3025 } 3026 break; 3027 case EXCP_DEBUG: 3028 { 3029 int sig; 3030 3031 sig = gdb_handlesig(cs, TARGET_SIGTRAP); 3032 if (sig) 3033 { 3034 info.si_signo = sig; 3035 info.si_errno = 0; 3036 info.si_code = TARGET_TRAP_BRKPT; 3037 queue_signal(env, info.si_signo, &info); 3038 } 3039 } 3040 break; 3041 default: 3042 fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", 3043 trapnr); 3044 cpu_dump_state(cs, stderr, fprintf, 0); 3045 abort(); 3046 } 3047 process_pending_signals(env); 3048 } 3049 } 3050 #endif /* TARGET_M68K */ 3051 3052 #ifdef TARGET_ALPHA 3053 static void do_store_exclusive(CPUAlphaState *env, int reg, int quad) 3054 { 3055 target_ulong addr, val, tmp; 3056 target_siginfo_t info; 3057 int ret = 0; 3058 3059 addr = env->lock_addr; 3060 tmp = env->lock_st_addr; 3061 env->lock_addr = -1; 3062 env->lock_st_addr = 0; 3063 3064 start_exclusive(); 3065 mmap_lock(); 3066 3067 if (addr == tmp) { 3068 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) { 3069 goto do_sigsegv; 3070 } 3071 3072 if (val == env->lock_value) { 3073 tmp = env->ir[reg]; 3074 if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) { 3075 goto do_sigsegv; 3076 } 3077 ret = 1; 3078 } 3079 } 3080 env->ir[reg] = ret; 3081 env->pc += 4; 3082 3083 mmap_unlock(); 3084 end_exclusive(); 3085 return; 3086 3087 do_sigsegv: 3088 mmap_unlock(); 3089 end_exclusive(); 3090 3091 info.si_signo = TARGET_SIGSEGV; 3092 info.si_errno = 0; 3093 info.si_code = TARGET_SEGV_MAPERR; 3094 info._sifields._sigfault._addr = addr; 3095 queue_signal(env, TARGET_SIGSEGV, &info); 3096 } 3097 3098 void cpu_loop(CPUAlphaState *env) 3099 { 3100 CPUState *cs = CPU(alpha_env_get_cpu(env)); 3101 int trapnr; 3102 target_siginfo_t info; 3103 abi_long sysret; 3104 3105 while (1) { 3106 trapnr = cpu_alpha_exec (env); 3107 3108 /* All of the traps imply a transition through PALcode, which 3109 implies an REI instruction has been executed. Which means 3110 that the intr_flag should be cleared. */ 3111 env->intr_flag = 0; 3112 3113 switch (trapnr) { 3114 case EXCP_RESET: 3115 fprintf(stderr, "Reset requested. Exit\n"); 3116 exit(1); 3117 break; 3118 case EXCP_MCHK: 3119 fprintf(stderr, "Machine check exception. Exit\n"); 3120 exit(1); 3121 break; 3122 case EXCP_SMP_INTERRUPT: 3123 case EXCP_CLK_INTERRUPT: 3124 case EXCP_DEV_INTERRUPT: 3125 fprintf(stderr, "External interrupt. 
Exit\n"); 3126 exit(1); 3127 break; 3128 case EXCP_MMFAULT: 3129 env->lock_addr = -1; 3130 info.si_signo = TARGET_SIGSEGV; 3131 info.si_errno = 0; 3132 info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID 3133 ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR); 3134 info._sifields._sigfault._addr = env->trap_arg0; 3135 queue_signal(env, info.si_signo, &info); 3136 break; 3137 case EXCP_UNALIGN: 3138 env->lock_addr = -1; 3139 info.si_signo = TARGET_SIGBUS; 3140 info.si_errno = 0; 3141 info.si_code = TARGET_BUS_ADRALN; 3142 info._sifields._sigfault._addr = env->trap_arg0; 3143 queue_signal(env, info.si_signo, &info); 3144 break; 3145 case EXCP_OPCDEC: 3146 do_sigill: 3147 env->lock_addr = -1; 3148 info.si_signo = TARGET_SIGILL; 3149 info.si_errno = 0; 3150 info.si_code = TARGET_ILL_ILLOPC; 3151 info._sifields._sigfault._addr = env->pc; 3152 queue_signal(env, info.si_signo, &info); 3153 break; 3154 case EXCP_ARITH: 3155 env->lock_addr = -1; 3156 info.si_signo = TARGET_SIGFPE; 3157 info.si_errno = 0; 3158 info.si_code = TARGET_FPE_FLTINV; 3159 info._sifields._sigfault._addr = env->pc; 3160 queue_signal(env, info.si_signo, &info); 3161 break; 3162 case EXCP_FEN: 3163 /* No-op. Linux simply re-enables the FPU. */ 3164 break; 3165 case EXCP_CALL_PAL: 3166 env->lock_addr = -1; 3167 switch (env->error_code) { 3168 case 0x80: 3169 /* BPT */ 3170 info.si_signo = TARGET_SIGTRAP; 3171 info.si_errno = 0; 3172 info.si_code = TARGET_TRAP_BRKPT; 3173 info._sifields._sigfault._addr = env->pc; 3174 queue_signal(env, info.si_signo, &info); 3175 break; 3176 case 0x81: 3177 /* BUGCHK */ 3178 info.si_signo = TARGET_SIGTRAP; 3179 info.si_errno = 0; 3180 info.si_code = 0; 3181 info._sifields._sigfault._addr = env->pc; 3182 queue_signal(env, info.si_signo, &info); 3183 break; 3184 case 0x83: 3185 /* CALLSYS */ 3186 trapnr = env->ir[IR_V0]; 3187 sysret = do_syscall(env, trapnr, 3188 env->ir[IR_A0], env->ir[IR_A1], 3189 env->ir[IR_A2], env->ir[IR_A3], 3190 env->ir[IR_A4], env->ir[IR_A5], 3191 0, 0); 3192 if (trapnr == TARGET_NR_sigreturn 3193 || trapnr == TARGET_NR_rt_sigreturn) { 3194 break; 3195 } 3196 /* Syscall writes 0 to V0 to bypass error check, similar 3197 to how this is handled internal to Linux kernel. 3198 (Ab)use trapnr temporarily as boolean indicating error. */ 3199 trapnr = (env->ir[IR_V0] != 0 && sysret < 0); 3200 env->ir[IR_V0] = (trapnr ? -sysret : sysret); 3201 env->ir[IR_A3] = trapnr; 3202 break; 3203 case 0x86: 3204 /* IMB */ 3205 /* ??? We can probably elide the code using page_unprotect 3206 that is checking for self-modifying code. Instead we 3207 could simply call tb_flush here. Until we work out the 3208 changes required to turn off the extra write protection, 3209 this can be a no-op. */ 3210 break; 3211 case 0x9E: 3212 /* RDUNIQUE */ 3213 /* Handled in the translator for usermode. */ 3214 abort(); 3215 case 0x9F: 3216 /* WRUNIQUE */ 3217 /* Handled in the translator for usermode. 
*/ 3218 abort(); 3219 case 0xAA: 3220 /* GENTRAP */ 3221 info.si_signo = TARGET_SIGFPE; 3222 switch (env->ir[IR_A0]) { 3223 case TARGET_GEN_INTOVF: 3224 info.si_code = TARGET_FPE_INTOVF; 3225 break; 3226 case TARGET_GEN_INTDIV: 3227 info.si_code = TARGET_FPE_INTDIV; 3228 break; 3229 case TARGET_GEN_FLTOVF: 3230 info.si_code = TARGET_FPE_FLTOVF; 3231 break; 3232 case TARGET_GEN_FLTUND: 3233 info.si_code = TARGET_FPE_FLTUND; 3234 break; 3235 case TARGET_GEN_FLTINV: 3236 info.si_code = TARGET_FPE_FLTINV; 3237 break; 3238 case TARGET_GEN_FLTINE: 3239 info.si_code = TARGET_FPE_FLTRES; 3240 break; 3241 case TARGET_GEN_ROPRAND: 3242 info.si_code = 0; 3243 break; 3244 default: 3245 info.si_signo = TARGET_SIGTRAP; 3246 info.si_code = 0; 3247 break; 3248 } 3249 info.si_errno = 0; 3250 info._sifields._sigfault._addr = env->pc; 3251 queue_signal(env, info.si_signo, &info); 3252 break; 3253 default: 3254 goto do_sigill; 3255 } 3256 break; 3257 case EXCP_DEBUG: 3258 info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP); 3259 if (info.si_signo) { 3260 env->lock_addr = -1; 3261 info.si_errno = 0; 3262 info.si_code = TARGET_TRAP_BRKPT; 3263 queue_signal(env, info.si_signo, &info); 3264 } 3265 break; 3266 case EXCP_STL_C: 3267 case EXCP_STQ_C: 3268 do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C); 3269 break; 3270 case EXCP_INTERRUPT: 3271 /* Just indicate that signals should be handled asap. */ 3272 break; 3273 default: 3274 printf ("Unhandled trap: 0x%x\n", trapnr); 3275 cpu_dump_state(cs, stderr, fprintf, 0); 3276 exit (1); 3277 } 3278 process_pending_signals (env); 3279 } 3280 } 3281 #endif /* TARGET_ALPHA */ 3282 3283 #ifdef TARGET_S390X 3284 void cpu_loop(CPUS390XState *env) 3285 { 3286 CPUState *cs = CPU(s390_env_get_cpu(env)); 3287 int trapnr, n, sig; 3288 target_siginfo_t info; 3289 target_ulong addr; 3290 3291 while (1) { 3292 trapnr = cpu_s390x_exec(env); 3293 switch (trapnr) { 3294 case EXCP_INTERRUPT: 3295 /* Just indicate that signals should be handled asap. */ 3296 break; 3297 3298 case EXCP_SVC: 3299 n = env->int_svc_code; 3300 if (!n) { 3301 /* syscalls > 255 */ 3302 n = env->regs[1]; 3303 } 3304 env->psw.addr += env->int_svc_ilen; 3305 env->regs[2] = do_syscall(env, n, env->regs[2], env->regs[3], 3306 env->regs[4], env->regs[5], 3307 env->regs[6], env->regs[7], 0, 0); 3308 break; 3309 3310 case EXCP_DEBUG: 3311 sig = gdb_handlesig(cs, TARGET_SIGTRAP); 3312 if (sig) { 3313 n = TARGET_TRAP_BRKPT; 3314 goto do_signal_pc; 3315 } 3316 break; 3317 case EXCP_PGM: 3318 n = env->int_pgm_code; 3319 switch (n) { 3320 case PGM_OPERATION: 3321 case PGM_PRIVILEGED: 3322 sig = SIGILL; 3323 n = TARGET_ILL_ILLOPC; 3324 goto do_signal_pc; 3325 case PGM_PROTECTION: 3326 case PGM_ADDRESSING: 3327 sig = SIGSEGV; 3328 /* XXX: check env->error_code */ 3329 n = TARGET_SEGV_MAPERR; 3330 addr = env->__excp_addr; 3331 goto do_signal; 3332 case PGM_EXECUTE: 3333 case PGM_SPECIFICATION: 3334 case PGM_SPECIAL_OP: 3335 case PGM_OPERAND: 3336 do_sigill_opn: 3337 sig = SIGILL; 3338 n = TARGET_ILL_ILLOPN; 3339 goto do_signal_pc; 3340 3341 case PGM_FIXPT_OVERFLOW: 3342 sig = SIGFPE; 3343 n = TARGET_FPE_INTOVF; 3344 goto do_signal_pc; 3345 case PGM_FIXPT_DIVIDE: 3346 sig = SIGFPE; 3347 n = TARGET_FPE_INTDIV; 3348 goto do_signal_pc; 3349 3350 case PGM_DATA: 3351 n = (env->fpc >> 8) & 0xff; 3352 if (n == 0xff) { 3353 /* compare-and-trap */ 3354 goto do_sigill_opn; 3355 } else { 3356 /* An IEEE exception, simulated or otherwise. 
*/ 3357 if (n & 0x80) { 3358 n = TARGET_FPE_FLTINV; 3359 } else if (n & 0x40) { 3360 n = TARGET_FPE_FLTDIV; 3361 } else if (n & 0x20) { 3362 n = TARGET_FPE_FLTOVF; 3363 } else if (n & 0x10) { 3364 n = TARGET_FPE_FLTUND; 3365 } else if (n & 0x08) { 3366 n = TARGET_FPE_FLTRES; 3367 } else { 3368 /* ??? Quantum exception; BFP, DFP error. */ 3369 goto do_sigill_opn; 3370 } 3371 sig = SIGFPE; 3372 goto do_signal_pc; 3373 } 3374 3375 default: 3376 fprintf(stderr, "Unhandled program exception: %#x\n", n); 3377 cpu_dump_state(cs, stderr, fprintf, 0); 3378 exit(1); 3379 } 3380 break; 3381 3382 do_signal_pc: 3383 addr = env->psw.addr; 3384 do_signal: 3385 info.si_signo = sig; 3386 info.si_errno = 0; 3387 info.si_code = n; 3388 info._sifields._sigfault._addr = addr; 3389 queue_signal(env, info.si_signo, &info); 3390 break; 3391 3392 default: 3393 fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr); 3394 cpu_dump_state(cs, stderr, fprintf, 0); 3395 exit(1); 3396 } 3397 process_pending_signals (env); 3398 } 3399 } 3400 3401 #endif /* TARGET_S390X */ 3402 3403 THREAD CPUState *thread_cpu; 3404 3405 void task_settid(TaskState *ts) 3406 { 3407 if (ts->ts_tid == 0) { 3408 ts->ts_tid = (pid_t)syscall(SYS_gettid); 3409 } 3410 } 3411 3412 void stop_all_tasks(void) 3413 { 3414 /* 3415 * We trust that when using NPTL, start_exclusive() 3416 * handles thread stopping correctly. 3417 */ 3418 start_exclusive(); 3419 } 3420 3421 /* Assumes contents are already zeroed. */ 3422 void init_task_state(TaskState *ts) 3423 { 3424 int i; 3425 3426 ts->used = 1; 3427 ts->first_free = ts->sigqueue_table; 3428 for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) { 3429 ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1]; 3430 } 3431 ts->sigqueue_table[i].next = NULL; 3432 } 3433 3434 CPUArchState *cpu_copy(CPUArchState *env) 3435 { 3436 CPUState *cpu = ENV_GET_CPU(env); 3437 CPUArchState *new_env = cpu_init(cpu_model); 3438 CPUState *new_cpu = ENV_GET_CPU(new_env); 3439 #if defined(TARGET_HAS_ICE) 3440 CPUBreakpoint *bp; 3441 CPUWatchpoint *wp; 3442 #endif 3443 3444 /* Reset non arch specific state */ 3445 cpu_reset(new_cpu); 3446 3447 memcpy(new_env, env, sizeof(CPUArchState)); 3448 3449 /* Clone all break/watchpoints. 3450 Note: Once we support ptrace with hw-debug register access, make sure 3451 BP_CPU break/watchpoints are handled correctly on clone. 
*/ 3452 QTAILQ_INIT(&cpu->breakpoints); 3453 QTAILQ_INIT(&cpu->watchpoints); 3454 #if defined(TARGET_HAS_ICE) 3455 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { 3456 cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL); 3457 } 3458 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { 3459 cpu_watchpoint_insert(new_cpu, wp->vaddr, (~wp->len_mask) + 1, 3460 wp->flags, NULL); 3461 } 3462 #endif 3463 3464 return new_env; 3465 } 3466 3467 static void handle_arg_help(const char *arg) 3468 { 3469 usage(); 3470 } 3471 3472 static void handle_arg_log(const char *arg) 3473 { 3474 int mask; 3475 3476 mask = qemu_str_to_log_mask(arg); 3477 if (!mask) { 3478 qemu_print_log_usage(stdout); 3479 exit(1); 3480 } 3481 qemu_set_log(mask); 3482 } 3483 3484 static void handle_arg_log_filename(const char *arg) 3485 { 3486 qemu_set_log_filename(arg); 3487 } 3488 3489 static void handle_arg_set_env(const char *arg) 3490 { 3491 char *r, *p, *token; 3492 r = p = strdup(arg); 3493 while ((token = strsep(&p, ",")) != NULL) { 3494 if (envlist_setenv(envlist, token) != 0) { 3495 usage(); 3496 } 3497 } 3498 free(r); 3499 } 3500 3501 static void handle_arg_unset_env(const char *arg) 3502 { 3503 char *r, *p, *token; 3504 r = p = strdup(arg); 3505 while ((token = strsep(&p, ",")) != NULL) { 3506 if (envlist_unsetenv(envlist, token) != 0) { 3507 usage(); 3508 } 3509 } 3510 free(r); 3511 } 3512 3513 static void handle_arg_argv0(const char *arg) 3514 { 3515 argv0 = strdup(arg); 3516 } 3517 3518 static void handle_arg_stack_size(const char *arg) 3519 { 3520 char *p; 3521 guest_stack_size = strtoul(arg, &p, 0); 3522 if (guest_stack_size == 0) { 3523 usage(); 3524 } 3525 3526 if (*p == 'M') { 3527 guest_stack_size *= 1024 * 1024; 3528 } else if (*p == 'k' || *p == 'K') { 3529 guest_stack_size *= 1024; 3530 } 3531 } 3532 3533 static void handle_arg_ld_prefix(const char *arg) 3534 { 3535 interp_prefix = strdup(arg); 3536 } 3537 3538 static void handle_arg_pagesize(const char *arg) 3539 { 3540 qemu_host_page_size = atoi(arg); 3541 if (qemu_host_page_size == 0 || 3542 (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) { 3543 fprintf(stderr, "page size must be a power of two\n"); 3544 exit(1); 3545 } 3546 } 3547 3548 static void handle_arg_gdb(const char *arg) 3549 { 3550 gdbstub_port = atoi(arg); 3551 } 3552 3553 static void handle_arg_uname(const char *arg) 3554 { 3555 qemu_uname_release = strdup(arg); 3556 } 3557 3558 static void handle_arg_cpu(const char *arg) 3559 { 3560 cpu_model = strdup(arg); 3561 if (cpu_model == NULL || is_help_option(cpu_model)) { 3562 /* XXX: implement xxx_cpu_list for targets that still miss it */ 3563 #if defined(cpu_list) 3564 cpu_list(stdout, &fprintf); 3565 #endif 3566 exit(1); 3567 } 3568 } 3569 3570 #if defined(CONFIG_USE_GUEST_BASE) 3571 static void handle_arg_guest_base(const char *arg) 3572 { 3573 guest_base = strtol(arg, NULL, 0); 3574 have_guest_base = 1; 3575 } 3576 3577 static void handle_arg_reserved_va(const char *arg) 3578 { 3579 char *p; 3580 int shift = 0; 3581 reserved_va = strtoul(arg, &p, 0); 3582 switch (*p) { 3583 case 'k': 3584 case 'K': 3585 shift = 10; 3586 break; 3587 case 'M': 3588 shift = 20; 3589 break; 3590 case 'G': 3591 shift = 30; 3592 break; 3593 } 3594 if (shift) { 3595 unsigned long unshifted = reserved_va; 3596 p++; 3597 reserved_va <<= shift; 3598 if (((reserved_va >> shift) != unshifted) 3599 #if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS 3600 || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) 3601 #endif 3602 ) { 3603 fprintf(stderr, "Reserved virtual 
address too big\n"); 3604 exit(1); 3605 } 3606 } 3607 if (*p) { 3608 fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p); 3609 exit(1); 3610 } 3611 } 3612 #endif 3613 3614 static void handle_arg_singlestep(const char *arg) 3615 { 3616 singlestep = 1; 3617 } 3618 3619 static void handle_arg_strace(const char *arg) 3620 { 3621 do_strace = 1; 3622 } 3623 3624 static void handle_arg_version(const char *arg) 3625 { 3626 printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION 3627 ", Copyright (c) 2003-2008 Fabrice Bellard\n"); 3628 exit(0); 3629 } 3630 3631 struct qemu_argument { 3632 const char *argv; 3633 const char *env; 3634 bool has_arg; 3635 void (*handle_opt)(const char *arg); 3636 const char *example; 3637 const char *help; 3638 }; 3639 3640 static const struct qemu_argument arg_table[] = { 3641 {"h", "", false, handle_arg_help, 3642 "", "print this help"}, 3643 {"g", "QEMU_GDB", true, handle_arg_gdb, 3644 "port", "wait gdb connection to 'port'"}, 3645 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix, 3646 "path", "set the elf interpreter prefix to 'path'"}, 3647 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size, 3648 "size", "set the stack size to 'size' bytes"}, 3649 {"cpu", "QEMU_CPU", true, handle_arg_cpu, 3650 "model", "select CPU (-cpu help for list)"}, 3651 {"E", "QEMU_SET_ENV", true, handle_arg_set_env, 3652 "var=value", "sets targets environment variable (see below)"}, 3653 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env, 3654 "var", "unsets targets environment variable (see below)"}, 3655 {"0", "QEMU_ARGV0", true, handle_arg_argv0, 3656 "argv0", "forces target process argv[0] to be 'argv0'"}, 3657 {"r", "QEMU_UNAME", true, handle_arg_uname, 3658 "uname", "set qemu uname release string to 'uname'"}, 3659 #if defined(CONFIG_USE_GUEST_BASE) 3660 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base, 3661 "address", "set guest_base address to 'address'"}, 3662 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va, 3663 "size", "reserve 'size' bytes for guest virtual address space"}, 3664 #endif 3665 {"d", "QEMU_LOG", true, handle_arg_log, 3666 "item[,...]", "enable logging of specified items " 3667 "(use '-d help' for a list of items)"}, 3668 {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename, 3669 "logfile", "write logs to 'logfile' (default stderr)"}, 3670 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize, 3671 "pagesize", "set the host page size to 'pagesize'"}, 3672 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep, 3673 "", "run in singlestep mode"}, 3674 {"strace", "QEMU_STRACE", false, handle_arg_strace, 3675 "", "log system calls"}, 3676 {"version", "QEMU_VERSION", false, handle_arg_version, 3677 "", "display version information and exit"}, 3678 {NULL, NULL, false, NULL, NULL, NULL} 3679 }; 3680 3681 static void usage(void) 3682 { 3683 const struct qemu_argument *arginfo; 3684 int maxarglen; 3685 int maxenvlen; 3686 3687 printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n" 3688 "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n" 3689 "\n" 3690 "Options and associated environment variables:\n" 3691 "\n"); 3692 3693 /* Calculate column widths. We must always have at least enough space 3694 * for the column header. 
3695 */ 3696 maxarglen = strlen("Argument"); 3697 maxenvlen = strlen("Env-variable"); 3698 3699 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) { 3700 int arglen = strlen(arginfo->argv); 3701 if (arginfo->has_arg) { 3702 arglen += strlen(arginfo->example) + 1; 3703 } 3704 if (strlen(arginfo->env) > maxenvlen) { 3705 maxenvlen = strlen(arginfo->env); 3706 } 3707 if (arglen > maxarglen) { 3708 maxarglen = arglen; 3709 } 3710 } 3711 3712 printf("%-*s %-*s Description\n", maxarglen+1, "Argument", 3713 maxenvlen, "Env-variable"); 3714 3715 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) { 3716 if (arginfo->has_arg) { 3717 printf("-%s %-*s %-*s %s\n", arginfo->argv, 3718 (int)(maxarglen - strlen(arginfo->argv) - 1), 3719 arginfo->example, maxenvlen, arginfo->env, arginfo->help); 3720 } else { 3721 printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv, 3722 maxenvlen, arginfo->env, 3723 arginfo->help); 3724 } 3725 } 3726 3727 printf("\n" 3728 "Defaults:\n" 3729 "QEMU_LD_PREFIX = %s\n" 3730 "QEMU_STACK_SIZE = %ld byte\n", 3731 interp_prefix, 3732 guest_stack_size); 3733 3734 printf("\n" 3735 "You can use -E and -U options or the QEMU_SET_ENV and\n" 3736 "QEMU_UNSET_ENV environment variables to set and unset\n" 3737 "environment variables for the target process.\n" 3738 "It is possible to provide several variables by separating them\n" 3739 "by commas in getsubopt(3) style. Additionally it is possible to\n" 3740 "provide the -E and -U options multiple times.\n" 3741 "The following lines are equivalent:\n" 3742 " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n" 3743 " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n" 3744 " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n" 3745 "Note that if you provide several changes to a single variable\n" 3746 "the last change will stay in effect.\n"); 3747 3748 exit(1); 3749 } 3750 3751 static int parse_args(int argc, char **argv) 3752 { 3753 const char *r; 3754 int optind; 3755 const struct qemu_argument *arginfo; 3756 3757 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) { 3758 if (arginfo->env == NULL) { 3759 continue; 3760 } 3761 3762 r = getenv(arginfo->env); 3763 if (r != NULL) { 3764 arginfo->handle_opt(r); 3765 } 3766 } 3767 3768 optind = 1; 3769 for (;;) { 3770 if (optind >= argc) { 3771 break; 3772 } 3773 r = argv[optind]; 3774 if (r[0] != '-') { 3775 break; 3776 } 3777 optind++; 3778 r++; 3779 if (!strcmp(r, "-")) { 3780 break; 3781 } 3782 3783 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) { 3784 if (!strcmp(r, arginfo->argv)) { 3785 if (arginfo->has_arg) { 3786 if (optind >= argc) { 3787 usage(); 3788 } 3789 arginfo->handle_opt(argv[optind]); 3790 optind++; 3791 } else { 3792 arginfo->handle_opt(NULL); 3793 } 3794 break; 3795 } 3796 } 3797 3798 /* no option matched the current argv */ 3799 if (arginfo->handle_opt == NULL) { 3800 usage(); 3801 } 3802 } 3803 3804 if (optind >= argc) { 3805 usage(); 3806 } 3807 3808 filename = argv[optind]; 3809 exec_path = argv[optind]; 3810 3811 return optind; 3812 } 3813 3814 int main(int argc, char **argv, char **envp) 3815 { 3816 struct target_pt_regs regs1, *regs = ®s1; 3817 struct image_info info1, *info = &info1; 3818 struct linux_binprm bprm; 3819 TaskState *ts; 3820 CPUArchState *env; 3821 CPUState *cpu; 3822 int optind; 3823 char **target_environ, **wrk; 3824 char **target_argv; 3825 int target_argc; 3826 int i; 3827 int ret; 3828 int execfd; 3829 3830 module_call_init(MODULE_INIT_QOM); 3831 3832 
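/* The start-up sequence below proceeds roughly as follows: initialise the
   auxiliary vector and cache helpers, seed the guest environment list from
   the host environ, size the guest stack from RLIMIT_STACK, parse the
   command line, bring up TCG and the selected CPU model, reserve the guest
   address space, load the binary with loader_exec(), copy the initial
   register state into the CPU, and finally enter cpu_loop(). */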
qemu_init_auxval(envp); 3833 qemu_cache_utils_init(); 3834 3835 if ((envlist = envlist_create()) == NULL) { 3836 (void) fprintf(stderr, "Unable to allocate envlist\n"); 3837 exit(1); 3838 } 3839 3840 /* add current environment into the list */ 3841 for (wrk = environ; *wrk != NULL; wrk++) { 3842 (void) envlist_setenv(envlist, *wrk); 3843 } 3844 3845 /* Read the stack limit from the kernel. If it's "unlimited", 3846 then we can do little else besides use the default. */ 3847 { 3848 struct rlimit lim; 3849 if (getrlimit(RLIMIT_STACK, &lim) == 0 3850 && lim.rlim_cur != RLIM_INFINITY 3851 && lim.rlim_cur == (target_long)lim.rlim_cur) { 3852 guest_stack_size = lim.rlim_cur; 3853 } 3854 } 3855 3856 cpu_model = NULL; 3857 #if defined(cpudef_setup) 3858 cpudef_setup(); /* parse cpu definitions in target config file (TBD) */ 3859 #endif 3860 3861 optind = parse_args(argc, argv); 3862 3863 /* Zero out regs */ 3864 memset(regs, 0, sizeof(struct target_pt_regs)); 3865 3866 /* Zero out image_info */ 3867 memset(info, 0, sizeof(struct image_info)); 3868 3869 memset(&bprm, 0, sizeof (bprm)); 3870 3871 /* Scan interp_prefix dir for replacement files. */ 3872 init_paths(interp_prefix); 3873 3874 init_qemu_uname_release(); 3875 3876 if (cpu_model == NULL) { 3877 #if defined(TARGET_I386) 3878 #ifdef TARGET_X86_64 3879 cpu_model = "qemu64"; 3880 #else 3881 cpu_model = "qemu32"; 3882 #endif 3883 #elif defined(TARGET_ARM) 3884 cpu_model = "any"; 3885 #elif defined(TARGET_UNICORE32) 3886 cpu_model = "any"; 3887 #elif defined(TARGET_M68K) 3888 cpu_model = "any"; 3889 #elif defined(TARGET_SPARC) 3890 #ifdef TARGET_SPARC64 3891 cpu_model = "TI UltraSparc II"; 3892 #else 3893 cpu_model = "Fujitsu MB86904"; 3894 #endif 3895 #elif defined(TARGET_MIPS) 3896 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) 3897 cpu_model = "20Kc"; 3898 #else 3899 cpu_model = "24Kf"; 3900 #endif 3901 #elif defined TARGET_OPENRISC 3902 cpu_model = "or1200"; 3903 #elif defined(TARGET_PPC) 3904 #ifdef TARGET_PPC64 3905 cpu_model = "970fx"; 3906 #else 3907 cpu_model = "750"; 3908 #endif 3909 #else 3910 cpu_model = "any"; 3911 #endif 3912 } 3913 tcg_exec_init(0); 3914 cpu_exec_init_all(); 3915 /* NOTE: we need to init the CPU at this stage to get 3916 qemu_host_page_size */ 3917 env = cpu_init(cpu_model); 3918 if (!env) { 3919 fprintf(stderr, "Unable to find CPU definition\n"); 3920 exit(1); 3921 } 3922 cpu = ENV_GET_CPU(env); 3923 cpu_reset(cpu); 3924 3925 thread_cpu = cpu; 3926 3927 if (getenv("QEMU_STRACE")) { 3928 do_strace = 1; 3929 } 3930 3931 target_environ = envlist_to_environ(envlist, NULL); 3932 envlist_free(envlist); 3933 3934 #if defined(CONFIG_USE_GUEST_BASE) 3935 /* 3936 * Now that page sizes are configured in cpu_init() we can do 3937 * proper page alignment for guest_base. 3938 */ 3939 guest_base = HOST_PAGE_ALIGN(guest_base); 3940 3941 if (reserved_va || have_guest_base) { 3942 guest_base = init_guest_space(guest_base, reserved_va, 0, 3943 have_guest_base); 3944 if (guest_base == (unsigned long)-1) { 3945 fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address " 3946 "space for use as guest address space (check your virtual " 3947 "memory ulimit setting or reserve less using -R option)\n", 3948 reserved_va); 3949 exit(1); 3950 } 3951 3952 if (reserved_va) { 3953 mmap_next_start = reserved_va; 3954 } 3955 } 3956 #endif /* CONFIG_USE_GUEST_BASE */ 3957 3958 /* 3959 * Read in mmap_min_addr kernel parameter. 
This value is used 3960 * When loading the ELF image to determine whether guest_base 3961 * is needed. It is also used in mmap_find_vma. 3962 */ 3963 { 3964 FILE *fp; 3965 3966 if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) { 3967 unsigned long tmp; 3968 if (fscanf(fp, "%lu", &tmp) == 1) { 3969 mmap_min_addr = tmp; 3970 qemu_log("host mmap_min_addr=0x%lx\n", mmap_min_addr); 3971 } 3972 fclose(fp); 3973 } 3974 } 3975 3976 /* 3977 * Prepare copy of argv vector for target. 3978 */ 3979 target_argc = argc - optind; 3980 target_argv = calloc(target_argc + 1, sizeof (char *)); 3981 if (target_argv == NULL) { 3982 (void) fprintf(stderr, "Unable to allocate memory for target_argv\n"); 3983 exit(1); 3984 } 3985 3986 /* 3987 * If argv0 is specified (using '-0' switch) we replace 3988 * argv[0] pointer with the given one. 3989 */ 3990 i = 0; 3991 if (argv0 != NULL) { 3992 target_argv[i++] = strdup(argv0); 3993 } 3994 for (; i < target_argc; i++) { 3995 target_argv[i] = strdup(argv[optind + i]); 3996 } 3997 target_argv[target_argc] = NULL; 3998 3999 ts = g_malloc0 (sizeof(TaskState)); 4000 init_task_state(ts); 4001 /* build Task State */ 4002 ts->info = info; 4003 ts->bprm = &bprm; 4004 cpu->opaque = ts; 4005 task_settid(ts); 4006 4007 execfd = qemu_getauxval(AT_EXECFD); 4008 if (execfd == 0) { 4009 execfd = open(filename, O_RDONLY); 4010 if (execfd < 0) { 4011 printf("Error while loading %s: %s\n", filename, strerror(errno)); 4012 _exit(1); 4013 } 4014 } 4015 4016 ret = loader_exec(execfd, filename, target_argv, target_environ, regs, 4017 info, &bprm); 4018 if (ret != 0) { 4019 printf("Error while loading %s: %s\n", filename, strerror(-ret)); 4020 _exit(1); 4021 } 4022 4023 for (wrk = target_environ; *wrk; wrk++) { 4024 free(*wrk); 4025 } 4026 4027 free(target_environ); 4028 4029 if (qemu_log_enabled()) { 4030 #if defined(CONFIG_USE_GUEST_BASE) 4031 qemu_log("guest_base 0x%lx\n", guest_base); 4032 #endif 4033 log_page_dump(); 4034 4035 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk); 4036 qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code); 4037 qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n", 4038 info->start_code); 4039 qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n", 4040 info->start_data); 4041 qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data); 4042 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n", 4043 info->start_stack); 4044 qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk); 4045 qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry); 4046 } 4047 4048 target_set_brk(info->brk); 4049 syscall_init(); 4050 signal_init(); 4051 4052 #if defined(CONFIG_USE_GUEST_BASE) 4053 /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay 4054 generating the prologue until now so that the prologue can take 4055 the real value of GUEST_BASE into account. 
*/ 4056 tcg_prologue_init(&tcg_ctx); 4057 #endif 4058 4059 #if defined(TARGET_I386) 4060 env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK; 4061 env->hflags |= HF_PE_MASK | HF_CPL_MASK; 4062 if (env->features[FEAT_1_EDX] & CPUID_SSE) { 4063 env->cr[4] |= CR4_OSFXSR_MASK; 4064 env->hflags |= HF_OSFXSR_MASK; 4065 } 4066 #ifndef TARGET_ABI32 4067 /* enable 64 bit mode if possible */ 4068 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) { 4069 fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n"); 4070 exit(1); 4071 } 4072 env->cr[4] |= CR4_PAE_MASK; 4073 env->efer |= MSR_EFER_LMA | MSR_EFER_LME; 4074 env->hflags |= HF_LMA_MASK; 4075 #endif 4076 4077 /* flags setup : we activate the IRQs by default as in user mode */ 4078 env->eflags |= IF_MASK; 4079 4080 /* linux register setup */ 4081 #ifndef TARGET_ABI32 4082 env->regs[R_EAX] = regs->rax; 4083 env->regs[R_EBX] = regs->rbx; 4084 env->regs[R_ECX] = regs->rcx; 4085 env->regs[R_EDX] = regs->rdx; 4086 env->regs[R_ESI] = regs->rsi; 4087 env->regs[R_EDI] = regs->rdi; 4088 env->regs[R_EBP] = regs->rbp; 4089 env->regs[R_ESP] = regs->rsp; 4090 env->eip = regs->rip; 4091 #else 4092 env->regs[R_EAX] = regs->eax; 4093 env->regs[R_EBX] = regs->ebx; 4094 env->regs[R_ECX] = regs->ecx; 4095 env->regs[R_EDX] = regs->edx; 4096 env->regs[R_ESI] = regs->esi; 4097 env->regs[R_EDI] = regs->edi; 4098 env->regs[R_EBP] = regs->ebp; 4099 env->regs[R_ESP] = regs->esp; 4100 env->eip = regs->eip; 4101 #endif 4102 4103 /* linux interrupt setup */ 4104 #ifndef TARGET_ABI32 4105 env->idt.limit = 511; 4106 #else 4107 env->idt.limit = 255; 4108 #endif 4109 env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1), 4110 PROT_READ|PROT_WRITE, 4111 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 4112 idt_table = g2h(env->idt.base); 4113 set_idt(0, 0); 4114 set_idt(1, 0); 4115 set_idt(2, 0); 4116 set_idt(3, 3); 4117 set_idt(4, 3); 4118 set_idt(5, 0); 4119 set_idt(6, 0); 4120 set_idt(7, 0); 4121 set_idt(8, 0); 4122 set_idt(9, 0); 4123 set_idt(10, 0); 4124 set_idt(11, 0); 4125 set_idt(12, 0); 4126 set_idt(13, 0); 4127 set_idt(14, 0); 4128 set_idt(15, 0); 4129 set_idt(16, 0); 4130 set_idt(17, 0); 4131 set_idt(18, 0); 4132 set_idt(19, 0); 4133 set_idt(0x80, 3); 4134 4135 /* linux segment setup */ 4136 { 4137 uint64_t *gdt_table; 4138 env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES, 4139 PROT_READ|PROT_WRITE, 4140 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 4141 env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1; 4142 gdt_table = g2h(env->gdt.base); 4143 #ifdef TARGET_ABI32 4144 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff, 4145 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | 4146 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT)); 4147 #else 4148 /* 64 bit code segment */ 4149 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff, 4150 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | 4151 DESC_L_MASK | 4152 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT)); 4153 #endif 4154 write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff, 4155 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | 4156 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT)); 4157 } 4158 cpu_x86_load_seg(env, R_CS, __USER_CS); 4159 cpu_x86_load_seg(env, R_SS, __USER_DS); 4160 #ifdef TARGET_ABI32 4161 cpu_x86_load_seg(env, R_DS, __USER_DS); 4162 cpu_x86_load_seg(env, R_ES, __USER_DS); 4163 cpu_x86_load_seg(env, R_FS, __USER_DS); 4164 cpu_x86_load_seg(env, R_GS, __USER_DS); 4165 /* This hack makes Wine work... 
*/ 4166 env->segs[R_FS].selector = 0; 4167 #else 4168 cpu_x86_load_seg(env, R_DS, 0); 4169 cpu_x86_load_seg(env, R_ES, 0); 4170 cpu_x86_load_seg(env, R_FS, 0); 4171 cpu_x86_load_seg(env, R_GS, 0); 4172 #endif 4173 #elif defined(TARGET_AARCH64) 4174 { 4175 int i; 4176 4177 if (!(arm_feature(env, ARM_FEATURE_AARCH64))) { 4178 fprintf(stderr, 4179 "The selected ARM CPU does not support 64 bit mode\n"); 4180 exit(1); 4181 } 4182 4183 for (i = 0; i < 31; i++) { 4184 env->xregs[i] = regs->regs[i]; 4185 } 4186 env->pc = regs->pc; 4187 env->xregs[31] = regs->sp; 4188 } 4189 #elif defined(TARGET_ARM) 4190 { 4191 int i; 4192 cpsr_write(env, regs->uregs[16], 0xffffffff); 4193 for(i = 0; i < 16; i++) { 4194 env->regs[i] = regs->uregs[i]; 4195 } 4196 /* Enable BE8. */ 4197 if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4 4198 && (info->elf_flags & EF_ARM_BE8)) { 4199 env->bswap_code = 1; 4200 } 4201 } 4202 #elif defined(TARGET_UNICORE32) 4203 { 4204 int i; 4205 cpu_asr_write(env, regs->uregs[32], 0xffffffff); 4206 for (i = 0; i < 32; i++) { 4207 env->regs[i] = regs->uregs[i]; 4208 } 4209 } 4210 #elif defined(TARGET_SPARC) 4211 { 4212 int i; 4213 env->pc = regs->pc; 4214 env->npc = regs->npc; 4215 env->y = regs->y; 4216 for(i = 0; i < 8; i++) 4217 env->gregs[i] = regs->u_regs[i]; 4218 for(i = 0; i < 8; i++) 4219 env->regwptr[i] = regs->u_regs[i + 8]; 4220 } 4221 #elif defined(TARGET_PPC) 4222 { 4223 int i; 4224 4225 #if defined(TARGET_PPC64) 4226 #if defined(TARGET_ABI32) 4227 env->msr &= ~((target_ulong)1 << MSR_SF); 4228 #else 4229 env->msr |= (target_ulong)1 << MSR_SF; 4230 #endif 4231 #endif 4232 env->nip = regs->nip; 4233 for(i = 0; i < 32; i++) { 4234 env->gpr[i] = regs->gpr[i]; 4235 } 4236 } 4237 #elif defined(TARGET_M68K) 4238 { 4239 env->pc = regs->pc; 4240 env->dregs[0] = regs->d0; 4241 env->dregs[1] = regs->d1; 4242 env->dregs[2] = regs->d2; 4243 env->dregs[3] = regs->d3; 4244 env->dregs[4] = regs->d4; 4245 env->dregs[5] = regs->d5; 4246 env->dregs[6] = regs->d6; 4247 env->dregs[7] = regs->d7; 4248 env->aregs[0] = regs->a0; 4249 env->aregs[1] = regs->a1; 4250 env->aregs[2] = regs->a2; 4251 env->aregs[3] = regs->a3; 4252 env->aregs[4] = regs->a4; 4253 env->aregs[5] = regs->a5; 4254 env->aregs[6] = regs->a6; 4255 env->aregs[7] = regs->usp; 4256 env->sr = regs->sr; 4257 ts->sim_syscalls = 1; 4258 } 4259 #elif defined(TARGET_MICROBLAZE) 4260 { 4261 env->regs[0] = regs->r0; 4262 env->regs[1] = regs->r1; 4263 env->regs[2] = regs->r2; 4264 env->regs[3] = regs->r3; 4265 env->regs[4] = regs->r4; 4266 env->regs[5] = regs->r5; 4267 env->regs[6] = regs->r6; 4268 env->regs[7] = regs->r7; 4269 env->regs[8] = regs->r8; 4270 env->regs[9] = regs->r9; 4271 env->regs[10] = regs->r10; 4272 env->regs[11] = regs->r11; 4273 env->regs[12] = regs->r12; 4274 env->regs[13] = regs->r13; 4275 env->regs[14] = regs->r14; 4276 env->regs[15] = regs->r15; 4277 env->regs[16] = regs->r16; 4278 env->regs[17] = regs->r17; 4279 env->regs[18] = regs->r18; 4280 env->regs[19] = regs->r19; 4281 env->regs[20] = regs->r20; 4282 env->regs[21] = regs->r21; 4283 env->regs[22] = regs->r22; 4284 env->regs[23] = regs->r23; 4285 env->regs[24] = regs->r24; 4286 env->regs[25] = regs->r25; 4287 env->regs[26] = regs->r26; 4288 env->regs[27] = regs->r27; 4289 env->regs[28] = regs->r28; 4290 env->regs[29] = regs->r29; 4291 env->regs[30] = regs->r30; 4292 env->regs[31] = regs->r31; 4293 env->sregs[SR_PC] = regs->pc; 4294 } 4295 #elif defined(TARGET_MIPS) 4296 { 4297 int i; 4298 4299 for(i = 0; i < 32; i++) { 4300 env->active_tc.gpr[i] = 
regs->regs[i]; 4301 } 4302 env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1; 4303 if (regs->cp0_epc & 1) { 4304 env->hflags |= MIPS_HFLAG_M16; 4305 } 4306 } 4307 #elif defined(TARGET_OPENRISC) 4308 { 4309 int i; 4310 4311 for (i = 0; i < 32; i++) { 4312 env->gpr[i] = regs->gpr[i]; 4313 } 4314 4315 env->sr = regs->sr; 4316 env->pc = regs->pc; 4317 } 4318 #elif defined(TARGET_SH4) 4319 { 4320 int i; 4321 4322 for(i = 0; i < 16; i++) { 4323 env->gregs[i] = regs->regs[i]; 4324 } 4325 env->pc = regs->pc; 4326 } 4327 #elif defined(TARGET_ALPHA) 4328 { 4329 int i; 4330 4331 for(i = 0; i < 28; i++) { 4332 env->ir[i] = ((abi_ulong *)regs)[i]; 4333 } 4334 env->ir[IR_SP] = regs->usp; 4335 env->pc = regs->pc; 4336 } 4337 #elif defined(TARGET_CRIS) 4338 { 4339 env->regs[0] = regs->r0; 4340 env->regs[1] = regs->r1; 4341 env->regs[2] = regs->r2; 4342 env->regs[3] = regs->r3; 4343 env->regs[4] = regs->r4; 4344 env->regs[5] = regs->r5; 4345 env->regs[6] = regs->r6; 4346 env->regs[7] = regs->r7; 4347 env->regs[8] = regs->r8; 4348 env->regs[9] = regs->r9; 4349 env->regs[10] = regs->r10; 4350 env->regs[11] = regs->r11; 4351 env->regs[12] = regs->r12; 4352 env->regs[13] = regs->r13; 4353 env->regs[14] = info->start_stack; 4354 env->regs[15] = regs->acr; 4355 env->pc = regs->erp; 4356 } 4357 #elif defined(TARGET_S390X) 4358 { 4359 int i; 4360 for (i = 0; i < 16; i++) { 4361 env->regs[i] = regs->gprs[i]; 4362 } 4363 env->psw.mask = regs->psw.mask; 4364 env->psw.addr = regs->psw.addr; 4365 } 4366 #else 4367 #error unsupported target CPU 4368 #endif 4369 4370 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 4371 ts->stack_base = info->start_stack; 4372 ts->heap_base = info->brk; 4373 /* This will be filled in on the first SYS_HEAPINFO call. */ 4374 ts->heap_limit = 0; 4375 #endif 4376 4377 if (gdbstub_port) { 4378 if (gdbserver_start(gdbstub_port) < 0) { 4379 fprintf(stderr, "qemu: could not open gdbserver on port %d\n", 4380 gdbstub_port); 4381 exit(1); 4382 } 4383 gdb_handlesig(cpu, 0); 4384 } 4385 cpu_loop(env); 4386 /* never exits */ 4387 return 0; 4388 } 4389
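/*
 * Illustrative invocation of the options handled by parse_args() above.
 * This is only a sketch: the "qemu-arm" binary name, the sysroot path and
 * the preloaded library are hypothetical examples, not values required by
 * this file.
 *
 *   qemu-arm -L /usr/arm-linux-gnueabi -s 16M -g 1234 \
 *            -E LD_PRELOAD=/opt/lib/hook.so -U LD_DEBUG ./a.out arg1 arg2
 *
 * As usage() documents, the same effect can be had through the corresponding
 * environment variables:
 *
 *   QEMU_LD_PREFIX=/usr/arm-linux-gnueabi QEMU_STACK_SIZE=16M QEMU_GDB=1234 \
 *   QEMU_SET_ENV=LD_PRELOAD=/opt/lib/hook.so QEMU_UNSET_ENV=LD_DEBUG \
 *   qemu-arm ./a.out arg1 arg2
 */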