/*
 *  qemu user main
 *
 *  Copyright (c) 2003-2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
#include "elf.h"

char *exec_path;

int singlestep;
const char *filename;
const char *argv0;
int gdbstub_port;
envlist_t *envlist;
static const char *cpu_model;
unsigned long mmap_min_addr;
#if defined(CONFIG_USE_GUEST_BASE)
unsigned long guest_base;
int have_guest_base;
#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 */
# ifdef TARGET_MIPS
/* MIPS only supports 31 bits of virtual address space for user space */
unsigned long reserved_va = 0x77000000;
# else
unsigned long reserved_va = 0xf7000000;
# endif
#else
unsigned long reserved_va;
#endif
#endif

static void usage(void);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;

/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack.  Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;

void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
{
    return -1;
}
#endif

/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work.  */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static int pending_cpus;

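/*
 * Usage sketch (informative comment only, not part of the original code):
 * the fork path in the syscall emulation is expected to bracket the host
 * fork() with the two helpers below, roughly:
 *
 *     fork_start();              // take tb_lock and exclusive_lock
 *     pid = fork();
 *     fork_end(pid == 0);        // child passes 1, parent passes 0
 *
 * Holding both locks across fork() ensures the child never inherits them
 * in a locked state owned by a parent thread that does not exist in the
 * child.
 */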
/* Make sure everything is in a consistent state for calling fork().  */
void fork_start(void)
{
    pthread_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    pthread_mutex_lock(&exclusive_lock);
    mmap_fork_start();
}

void fork_end(int child)
{
    mmap_fork_end(child);
    if (child) {
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
            }
        }
        pending_cpus = 0;
        pthread_mutex_init(&exclusive_lock, NULL);
        pthread_mutex_init(&cpu_list_mutex, NULL);
        pthread_cond_init(&exclusive_cond, NULL);
        pthread_cond_init(&exclusive_resume, NULL);
        pthread_mutex_init(&tcg_ctx.tb_ctx.tb_lock, NULL);
        gdbserver_fork((CPUArchState *)thread_cpu->env_ptr);
    } else {
        pthread_mutex_unlock(&exclusive_lock);
        pthread_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
    }
}

/* Wait for pending exclusive operations to complete.  The exclusive lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_arm_exec.  */
static inline void start_exclusive(void)
{
    CPUState *other_cpu;

    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();

    pending_cpus = 1;
    /* Make all other cpus stop executing.  */
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {
            pending_cpus++;
            cpu_exit(other_cpu);
        }
    }
    if (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
    }
}

/* Finish an exclusive operation.  */
static inline void end_exclusive(void)
{
    pending_cpus = 0;
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&exclusive_lock);
}

/* Wait for exclusive ops to finish, and begin cpu execution.  */
static inline void cpu_exec_start(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();
    cpu->running = true;
    pthread_mutex_unlock(&exclusive_lock);
}

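/*
 * Illustrative sketch (informative comment only): how these primitives are
 * meant to nest.  A thread running guest code brackets each call into the
 * per-target executor (cpu_xxx_exec stands for e.g. cpu_arm_exec) with
 * cpu_exec_start()/cpu_exec_end(), while an emulated atomic operation
 * brackets its read-modify-write with start_exclusive()/end_exclusive(),
 * which waits until no cpu is executing guest code:
 *
 *     cpu_exec_start(cs);
 *     trapnr = cpu_xxx_exec(env);      // guest code runs here
 *     cpu_exec_end(cs);
 *     ...
 *     start_exclusive();
 *     // no thread is running guest code here, so the emulated
 *     // load/compare/store cannot race against guest stores
 *     end_exclusive();
 */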
/* Mark cpu as not executing, and release pending exclusive ops.  */
static inline void cpu_exec_end(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
        pending_cpus--;
        if (pending_cpus == 1) {
            pthread_cond_signal(&exclusive_cond);
        }
    }
    exclusive_idle();
    pthread_mutex_unlock(&exclusive_lock);
}

void cpu_list_lock(void)
{
    pthread_mutex_lock(&cpu_list_mutex);
}

void cpu_list_unlock(void)
{
    pthread_mutex_unlock(&cpu_list_mutex);
}


#ifdef TARGET_I386
/***********************************************************/
/* CPUX86 core interface */

void cpu_smm_update(CPUX86State *env)
{
}

uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpu_get_real_ticks();
}

static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
                     int flags)
{
    unsigned int e1, e2;
    uint32_t *p;
    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
    e2 |= flags;
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}

static uint64_t *idt_table;
#ifdef TARGET_X86_64
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
    p[2] = tswap32(addr >> 32);
    p[3] = 0;
}
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
}
#else
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate(idt_table + n, 0, dpl, 0, 0);
}
#endif

void cpu_loop(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    int trapnr;
    abi_ulong pc;
    target_siginfo_t info;

    for(;;) {
        trapnr = cpu_x86_exec(env);
        switch(trapnr) {
        case 0x80:
            /* linux syscall from int $0x80 */
            env->regs[R_EAX] = do_syscall(env,
                                          env->regs[R_EAX],
                                          env->regs[R_EBX],
                                          env->regs[R_ECX],
                                          env->regs[R_EDX],
                                          env->regs[R_ESI],
                                          env->regs[R_EDI],
                                          env->regs[R_EBP],
                                          0, 0);
            break;
#ifndef TARGET_ABI32
        case EXCP_SYSCALL:
            /* linux syscall from syscall instruction */
            env->regs[R_EAX] = do_syscall(env,
                                          env->regs[R_EAX],
                                          env->regs[R_EDI],
                                          env->regs[R_ESI],
                                          env->regs[R_EDX],
                                          env->regs[10],
                                          env->regs[8],
                                          env->regs[9],
                                          0, 0);
            break;
#endif
        case EXCP0B_NOSEG:
        case EXCP0C_STACK:
            info.si_signo = SIGBUS;
            info.si_errno = 0;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP0D_GPF:
            /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_fault(env);
            } else
#endif
            {
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SI_KERNEL;
info._sifields._sigfault._addr = 0; 334 queue_signal(env, info.si_signo, &info); 335 } 336 break; 337 case EXCP0E_PAGE: 338 info.si_signo = SIGSEGV; 339 info.si_errno = 0; 340 if (!(env->error_code & 1)) 341 info.si_code = TARGET_SEGV_MAPERR; 342 else 343 info.si_code = TARGET_SEGV_ACCERR; 344 info._sifields._sigfault._addr = env->cr[2]; 345 queue_signal(env, info.si_signo, &info); 346 break; 347 case EXCP00_DIVZ: 348 #ifndef TARGET_X86_64 349 if (env->eflags & VM_MASK) { 350 handle_vm86_trap(env, trapnr); 351 } else 352 #endif 353 { 354 /* division by zero */ 355 info.si_signo = SIGFPE; 356 info.si_errno = 0; 357 info.si_code = TARGET_FPE_INTDIV; 358 info._sifields._sigfault._addr = env->eip; 359 queue_signal(env, info.si_signo, &info); 360 } 361 break; 362 case EXCP01_DB: 363 case EXCP03_INT3: 364 #ifndef TARGET_X86_64 365 if (env->eflags & VM_MASK) { 366 handle_vm86_trap(env, trapnr); 367 } else 368 #endif 369 { 370 info.si_signo = SIGTRAP; 371 info.si_errno = 0; 372 if (trapnr == EXCP01_DB) { 373 info.si_code = TARGET_TRAP_BRKPT; 374 info._sifields._sigfault._addr = env->eip; 375 } else { 376 info.si_code = TARGET_SI_KERNEL; 377 info._sifields._sigfault._addr = 0; 378 } 379 queue_signal(env, info.si_signo, &info); 380 } 381 break; 382 case EXCP04_INTO: 383 case EXCP05_BOUND: 384 #ifndef TARGET_X86_64 385 if (env->eflags & VM_MASK) { 386 handle_vm86_trap(env, trapnr); 387 } else 388 #endif 389 { 390 info.si_signo = SIGSEGV; 391 info.si_errno = 0; 392 info.si_code = TARGET_SI_KERNEL; 393 info._sifields._sigfault._addr = 0; 394 queue_signal(env, info.si_signo, &info); 395 } 396 break; 397 case EXCP06_ILLOP: 398 info.si_signo = SIGILL; 399 info.si_errno = 0; 400 info.si_code = TARGET_ILL_ILLOPN; 401 info._sifields._sigfault._addr = env->eip; 402 queue_signal(env, info.si_signo, &info); 403 break; 404 case EXCP_INTERRUPT: 405 /* just indicate that signals should be handled asap */ 406 break; 407 case EXCP_DEBUG: 408 { 409 int sig; 410 411 sig = gdb_handlesig(cs, TARGET_SIGTRAP); 412 if (sig) 413 { 414 info.si_signo = sig; 415 info.si_errno = 0; 416 info.si_code = TARGET_TRAP_BRKPT; 417 queue_signal(env, info.si_signo, &info); 418 } 419 } 420 break; 421 default: 422 pc = env->segs[R_CS].base + env->eip; 423 fprintf(stderr, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n", 424 (long)pc, trapnr); 425 abort(); 426 } 427 process_pending_signals(env); 428 } 429 } 430 #endif 431 432 #ifdef TARGET_ARM 433 434 #define get_user_code_u32(x, gaddr, doswap) \ 435 ({ abi_long __r = get_user_u32((x), (gaddr)); \ 436 if (!__r && (doswap)) { \ 437 (x) = bswap32(x); \ 438 } \ 439 __r; \ 440 }) 441 442 #define get_user_code_u16(x, gaddr, doswap) \ 443 ({ abi_long __r = get_user_u16((x), (gaddr)); \ 444 if (!__r && (doswap)) { \ 445 (x) = bswap16(x); \ 446 } \ 447 __r; \ 448 }) 449 450 #ifdef TARGET_ABI32 451 /* Commpage handling -- there is no commpage for AArch64 */ 452 453 /* 454 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt 455 * Input: 456 * r0 = pointer to oldval 457 * r1 = pointer to newval 458 * r2 = pointer to target value 459 * 460 * Output: 461 * r0 = 0 if *ptr was changed, non-0 if no exchange happened 462 * C set if *ptr was changed, clear if no exchange happened 463 * 464 * Note segv's in kernel helpers are a bit tricky, we can set the 465 * data address sensibly but the PC address is just the entry point. 
466 */ 467 static void arm_kernel_cmpxchg64_helper(CPUARMState *env) 468 { 469 uint64_t oldval, newval, val; 470 uint32_t addr, cpsr; 471 target_siginfo_t info; 472 473 /* Based on the 32 bit code in do_kernel_trap */ 474 475 /* XXX: This only works between threads, not between processes. 476 It's probably possible to implement this with native host 477 operations. However things like ldrex/strex are much harder so 478 there's not much point trying. */ 479 start_exclusive(); 480 cpsr = cpsr_read(env); 481 addr = env->regs[2]; 482 483 if (get_user_u64(oldval, env->regs[0])) { 484 env->exception.vaddress = env->regs[0]; 485 goto segv; 486 }; 487 488 if (get_user_u64(newval, env->regs[1])) { 489 env->exception.vaddress = env->regs[1]; 490 goto segv; 491 }; 492 493 if (get_user_u64(val, addr)) { 494 env->exception.vaddress = addr; 495 goto segv; 496 } 497 498 if (val == oldval) { 499 val = newval; 500 501 if (put_user_u64(val, addr)) { 502 env->exception.vaddress = addr; 503 goto segv; 504 }; 505 506 env->regs[0] = 0; 507 cpsr |= CPSR_C; 508 } else { 509 env->regs[0] = -1; 510 cpsr &= ~CPSR_C; 511 } 512 cpsr_write(env, cpsr, CPSR_C); 513 end_exclusive(); 514 return; 515 516 segv: 517 end_exclusive(); 518 /* We get the PC of the entry address - which is as good as anything, 519 on a real kernel what you get depends on which mode it uses. */ 520 info.si_signo = SIGSEGV; 521 info.si_errno = 0; 522 /* XXX: check env->error_code */ 523 info.si_code = TARGET_SEGV_MAPERR; 524 info._sifields._sigfault._addr = env->exception.vaddress; 525 queue_signal(env, info.si_signo, &info); 526 527 end_exclusive(); 528 } 529 530 /* Handle a jump to the kernel code page. */ 531 static int 532 do_kernel_trap(CPUARMState *env) 533 { 534 uint32_t addr; 535 uint32_t cpsr; 536 uint32_t val; 537 538 switch (env->regs[15]) { 539 case 0xffff0fa0: /* __kernel_memory_barrier */ 540 /* ??? No-op. Will need to do better for SMP. */ 541 break; 542 case 0xffff0fc0: /* __kernel_cmpxchg */ 543 /* XXX: This only works between threads, not between processes. 544 It's probably possible to implement this with native host 545 operations. However things like ldrex/strex are much harder so 546 there's not much point trying. */ 547 start_exclusive(); 548 cpsr = cpsr_read(env); 549 addr = env->regs[2]; 550 /* FIXME: This should SEGV if the access fails. */ 551 if (get_user_u32(val, addr)) 552 val = ~env->regs[0]; 553 if (val == env->regs[0]) { 554 val = env->regs[1]; 555 /* FIXME: Check for segfaults. */ 556 put_user_u32(val, addr); 557 env->regs[0] = 0; 558 cpsr |= CPSR_C; 559 } else { 560 env->regs[0] = -1; 561 cpsr &= ~CPSR_C; 562 } 563 cpsr_write(env, cpsr, CPSR_C); 564 end_exclusive(); 565 break; 566 case 0xffff0fe0: /* __kernel_get_tls */ 567 env->regs[0] = env->cp15.tpidrro_el0; 568 break; 569 case 0xffff0f60: /* __kernel_cmpxchg64 */ 570 arm_kernel_cmpxchg64_helper(env); 571 break; 572 573 default: 574 return 1; 575 } 576 /* Jump back to the caller. 
*/ 577 addr = env->regs[14]; 578 if (addr & 1) { 579 env->thumb = 1; 580 addr &= ~1; 581 } 582 env->regs[15] = addr; 583 584 return 0; 585 } 586 587 /* Store exclusive handling for AArch32 */ 588 static int do_strex(CPUARMState *env) 589 { 590 uint64_t val; 591 int size; 592 int rc = 1; 593 int segv = 0; 594 uint32_t addr; 595 start_exclusive(); 596 if (env->exclusive_addr != env->exclusive_test) { 597 goto fail; 598 } 599 /* We know we're always AArch32 so the address is in uint32_t range 600 * unless it was the -1 exclusive-monitor-lost value (which won't 601 * match exclusive_test above). 602 */ 603 assert(extract64(env->exclusive_addr, 32, 32) == 0); 604 addr = env->exclusive_addr; 605 size = env->exclusive_info & 0xf; 606 switch (size) { 607 case 0: 608 segv = get_user_u8(val, addr); 609 break; 610 case 1: 611 segv = get_user_u16(val, addr); 612 break; 613 case 2: 614 case 3: 615 segv = get_user_u32(val, addr); 616 break; 617 default: 618 abort(); 619 } 620 if (segv) { 621 env->exception.vaddress = addr; 622 goto done; 623 } 624 if (size == 3) { 625 uint32_t valhi; 626 segv = get_user_u32(valhi, addr + 4); 627 if (segv) { 628 env->exception.vaddress = addr + 4; 629 goto done; 630 } 631 val = deposit64(val, 32, 32, valhi); 632 } 633 if (val != env->exclusive_val) { 634 goto fail; 635 } 636 637 val = env->regs[(env->exclusive_info >> 8) & 0xf]; 638 switch (size) { 639 case 0: 640 segv = put_user_u8(val, addr); 641 break; 642 case 1: 643 segv = put_user_u16(val, addr); 644 break; 645 case 2: 646 case 3: 647 segv = put_user_u32(val, addr); 648 break; 649 } 650 if (segv) { 651 env->exception.vaddress = addr; 652 goto done; 653 } 654 if (size == 3) { 655 val = env->regs[(env->exclusive_info >> 12) & 0xf]; 656 segv = put_user_u32(val, addr + 4); 657 if (segv) { 658 env->exception.vaddress = addr + 4; 659 goto done; 660 } 661 } 662 rc = 0; 663 fail: 664 env->regs[15] += 4; 665 env->regs[(env->exclusive_info >> 4) & 0xf] = rc; 666 done: 667 end_exclusive(); 668 return segv; 669 } 670 671 void cpu_loop(CPUARMState *env) 672 { 673 CPUState *cs = CPU(arm_env_get_cpu(env)); 674 int trapnr; 675 unsigned int n, insn; 676 target_siginfo_t info; 677 uint32_t addr; 678 679 for(;;) { 680 cpu_exec_start(cs); 681 trapnr = cpu_arm_exec(env); 682 cpu_exec_end(cs); 683 switch(trapnr) { 684 case EXCP_UDEF: 685 { 686 TaskState *ts = cs->opaque; 687 uint32_t opcode; 688 int rc; 689 690 /* we handle the FPU emulation here, as Linux */ 691 /* we get the opcode */ 692 /* FIXME - what to do if get_user() fails? */ 693 get_user_code_u32(opcode, env->regs[15], env->bswap_code); 694 695 rc = EmulateAll(opcode, &ts->fpa, env); 696 if (rc == 0) { /* illegal instruction */ 697 info.si_signo = SIGILL; 698 info.si_errno = 0; 699 info.si_code = TARGET_ILL_ILLOPN; 700 info._sifields._sigfault._addr = env->regs[15]; 701 queue_signal(env, info.si_signo, &info); 702 } else if (rc < 0) { /* FP exception */ 703 int arm_fpe=0; 704 705 /* translate softfloat flags to FPSR flags */ 706 if (-rc & float_flag_invalid) 707 arm_fpe |= BIT_IOC; 708 if (-rc & float_flag_divbyzero) 709 arm_fpe |= BIT_DZC; 710 if (-rc & float_flag_overflow) 711 arm_fpe |= BIT_OFC; 712 if (-rc & float_flag_underflow) 713 arm_fpe |= BIT_UFC; 714 if (-rc & float_flag_inexact) 715 arm_fpe |= BIT_IXC; 716 717 FPSR fpsr = ts->fpa.fpsr; 718 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe); 719 720 if (fpsr & (arm_fpe << 16)) { /* exception enabled? 
*/ 721 info.si_signo = SIGFPE; 722 info.si_errno = 0; 723 724 /* ordered by priority, least first */ 725 if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES; 726 if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND; 727 if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF; 728 if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV; 729 if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV; 730 731 info._sifields._sigfault._addr = env->regs[15]; 732 queue_signal(env, info.si_signo, &info); 733 } else { 734 env->regs[15] += 4; 735 } 736 737 /* accumulate unenabled exceptions */ 738 if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC)) 739 fpsr |= BIT_IXC; 740 if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC)) 741 fpsr |= BIT_UFC; 742 if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC)) 743 fpsr |= BIT_OFC; 744 if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC)) 745 fpsr |= BIT_DZC; 746 if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC)) 747 fpsr |= BIT_IOC; 748 ts->fpa.fpsr=fpsr; 749 } else { /* everything OK */ 750 /* increment PC */ 751 env->regs[15] += 4; 752 } 753 } 754 break; 755 case EXCP_SWI: 756 case EXCP_BKPT: 757 { 758 env->eabi = 1; 759 /* system call */ 760 if (trapnr == EXCP_BKPT) { 761 if (env->thumb) { 762 /* FIXME - what to do if get_user() fails? */ 763 get_user_code_u16(insn, env->regs[15], env->bswap_code); 764 n = insn & 0xff; 765 env->regs[15] += 2; 766 } else { 767 /* FIXME - what to do if get_user() fails? */ 768 get_user_code_u32(insn, env->regs[15], env->bswap_code); 769 n = (insn & 0xf) | ((insn >> 4) & 0xff0); 770 env->regs[15] += 4; 771 } 772 } else { 773 if (env->thumb) { 774 /* FIXME - what to do if get_user() fails? */ 775 get_user_code_u16(insn, env->regs[15] - 2, 776 env->bswap_code); 777 n = insn & 0xff; 778 } else { 779 /* FIXME - what to do if get_user() fails? */ 780 get_user_code_u32(insn, env->regs[15] - 4, 781 env->bswap_code); 782 n = insn & 0xffffff; 783 } 784 } 785 786 if (n == ARM_NR_cacheflush) { 787 /* nop */ 788 } else if (n == ARM_NR_semihosting 789 || n == ARM_NR_thumb_semihosting) { 790 env->regs[0] = do_arm_semihosting (env); 791 } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) { 792 /* linux syscall */ 793 if (env->thumb || n == 0) { 794 n = env->regs[7]; 795 } else { 796 n -= ARM_SYSCALL_BASE; 797 env->eabi = 0; 798 } 799 if ( n > ARM_NR_BASE) { 800 switch (n) { 801 case ARM_NR_cacheflush: 802 /* nop */ 803 break; 804 case ARM_NR_set_tls: 805 cpu_set_tls(env, env->regs[0]); 806 env->regs[0] = 0; 807 break; 808 case ARM_NR_breakpoint: 809 env->regs[15] -= env->thumb ? 
2 : 4; 810 goto excp_debug; 811 default: 812 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n", 813 n); 814 env->regs[0] = -TARGET_ENOSYS; 815 break; 816 } 817 } else { 818 env->regs[0] = do_syscall(env, 819 n, 820 env->regs[0], 821 env->regs[1], 822 env->regs[2], 823 env->regs[3], 824 env->regs[4], 825 env->regs[5], 826 0, 0); 827 } 828 } else { 829 goto error; 830 } 831 } 832 break; 833 case EXCP_INTERRUPT: 834 /* just indicate that signals should be handled asap */ 835 break; 836 case EXCP_STREX: 837 if (!do_strex(env)) { 838 break; 839 } 840 /* fall through for segv */ 841 case EXCP_PREFETCH_ABORT: 842 case EXCP_DATA_ABORT: 843 addr = env->exception.vaddress; 844 { 845 info.si_signo = SIGSEGV; 846 info.si_errno = 0; 847 /* XXX: check env->error_code */ 848 info.si_code = TARGET_SEGV_MAPERR; 849 info._sifields._sigfault._addr = addr; 850 queue_signal(env, info.si_signo, &info); 851 } 852 break; 853 case EXCP_DEBUG: 854 excp_debug: 855 { 856 int sig; 857 858 sig = gdb_handlesig(cs, TARGET_SIGTRAP); 859 if (sig) 860 { 861 info.si_signo = sig; 862 info.si_errno = 0; 863 info.si_code = TARGET_TRAP_BRKPT; 864 queue_signal(env, info.si_signo, &info); 865 } 866 } 867 break; 868 case EXCP_KERNEL_TRAP: 869 if (do_kernel_trap(env)) 870 goto error; 871 break; 872 default: 873 error: 874 fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", 875 trapnr); 876 cpu_dump_state(cs, stderr, fprintf, 0); 877 abort(); 878 } 879 process_pending_signals(env); 880 } 881 } 882 883 #else 884 885 /* 886 * Handle AArch64 store-release exclusive 887 * 888 * rs = gets the status result of store exclusive 889 * rt = is the register that is stored 890 * rt2 = is the second register store (in STP) 891 * 892 */ 893 static int do_strex_a64(CPUARMState *env) 894 { 895 uint64_t val; 896 int size; 897 bool is_pair; 898 int rc = 1; 899 int segv = 0; 900 uint64_t addr; 901 int rs, rt, rt2; 902 903 start_exclusive(); 904 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */ 905 size = extract32(env->exclusive_info, 0, 2); 906 is_pair = extract32(env->exclusive_info, 2, 1); 907 rs = extract32(env->exclusive_info, 4, 5); 908 rt = extract32(env->exclusive_info, 9, 5); 909 rt2 = extract32(env->exclusive_info, 14, 5); 910 911 addr = env->exclusive_addr; 912 913 if (addr != env->exclusive_test) { 914 goto finish; 915 } 916 917 switch (size) { 918 case 0: 919 segv = get_user_u8(val, addr); 920 break; 921 case 1: 922 segv = get_user_u16(val, addr); 923 break; 924 case 2: 925 segv = get_user_u32(val, addr); 926 break; 927 case 3: 928 segv = get_user_u64(val, addr); 929 break; 930 default: 931 abort(); 932 } 933 if (segv) { 934 env->exception.vaddress = addr; 935 goto error; 936 } 937 if (val != env->exclusive_val) { 938 goto finish; 939 } 940 if (is_pair) { 941 if (size == 2) { 942 segv = get_user_u32(val, addr + 4); 943 } else { 944 segv = get_user_u64(val, addr + 8); 945 } 946 if (segv) { 947 env->exception.vaddress = addr + (size == 2 ? 4 : 8); 948 goto error; 949 } 950 if (val != env->exclusive_high) { 951 goto finish; 952 } 953 } 954 /* handle the zero register */ 955 val = rt == 31 ? 0 : env->xregs[rt]; 956 switch (size) { 957 case 0: 958 segv = put_user_u8(val, addr); 959 break; 960 case 1: 961 segv = put_user_u16(val, addr); 962 break; 963 case 2: 964 segv = put_user_u32(val, addr); 965 break; 966 case 3: 967 segv = put_user_u64(val, addr); 968 break; 969 } 970 if (segv) { 971 goto error; 972 } 973 if (is_pair) { 974 /* handle the zero register */ 975 val = rt2 == 31 ? 
0 : env->xregs[rt2]; 976 if (size == 2) { 977 segv = put_user_u32(val, addr + 4); 978 } else { 979 segv = put_user_u64(val, addr + 8); 980 } 981 if (segv) { 982 env->exception.vaddress = addr + (size == 2 ? 4 : 8); 983 goto error; 984 } 985 } 986 rc = 0; 987 finish: 988 env->pc += 4; 989 /* rs == 31 encodes a write to the ZR, thus throwing away 990 * the status return. This is rather silly but valid. 991 */ 992 if (rs < 31) { 993 env->xregs[rs] = rc; 994 } 995 error: 996 /* instruction faulted, PC does not advance */ 997 /* either way a strex releases any exclusive lock we have */ 998 env->exclusive_addr = -1; 999 end_exclusive(); 1000 return segv; 1001 } 1002 1003 /* AArch64 main loop */ 1004 void cpu_loop(CPUARMState *env) 1005 { 1006 CPUState *cs = CPU(arm_env_get_cpu(env)); 1007 int trapnr, sig; 1008 target_siginfo_t info; 1009 uint32_t addr; 1010 1011 for (;;) { 1012 cpu_exec_start(cs); 1013 trapnr = cpu_arm_exec(env); 1014 cpu_exec_end(cs); 1015 1016 switch (trapnr) { 1017 case EXCP_SWI: 1018 env->xregs[0] = do_syscall(env, 1019 env->xregs[8], 1020 env->xregs[0], 1021 env->xregs[1], 1022 env->xregs[2], 1023 env->xregs[3], 1024 env->xregs[4], 1025 env->xregs[5], 1026 0, 0); 1027 break; 1028 case EXCP_INTERRUPT: 1029 /* just indicate that signals should be handled asap */ 1030 break; 1031 case EXCP_UDEF: 1032 info.si_signo = SIGILL; 1033 info.si_errno = 0; 1034 info.si_code = TARGET_ILL_ILLOPN; 1035 info._sifields._sigfault._addr = env->pc; 1036 queue_signal(env, info.si_signo, &info); 1037 break; 1038 case EXCP_STREX: 1039 if (!do_strex_a64(env)) { 1040 break; 1041 } 1042 /* fall through for segv */ 1043 case EXCP_PREFETCH_ABORT: 1044 case EXCP_DATA_ABORT: 1045 addr = env->exception.vaddress; 1046 info.si_signo = SIGSEGV; 1047 info.si_errno = 0; 1048 /* XXX: check env->error_code */ 1049 info.si_code = TARGET_SEGV_MAPERR; 1050 info._sifields._sigfault._addr = addr; 1051 queue_signal(env, info.si_signo, &info); 1052 break; 1053 case EXCP_DEBUG: 1054 case EXCP_BKPT: 1055 sig = gdb_handlesig(cs, TARGET_SIGTRAP); 1056 if (sig) { 1057 info.si_signo = sig; 1058 info.si_errno = 0; 1059 info.si_code = TARGET_TRAP_BRKPT; 1060 queue_signal(env, info.si_signo, &info); 1061 } 1062 break; 1063 default: 1064 fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", 1065 trapnr); 1066 cpu_dump_state(cs, stderr, fprintf, 0); 1067 abort(); 1068 } 1069 process_pending_signals(env); 1070 /* Exception return on AArch64 always clears the exclusive monitor, 1071 * so any return to running guest code implies this. 1072 * A strex (successful or otherwise) also clears the monitor, so 1073 * we don't need to specialcase EXCP_STREX. 
1074 */ 1075 env->exclusive_addr = -1; 1076 } 1077 } 1078 #endif /* ndef TARGET_ABI32 */ 1079 1080 #endif 1081 1082 #ifdef TARGET_UNICORE32 1083 1084 void cpu_loop(CPUUniCore32State *env) 1085 { 1086 CPUState *cs = CPU(uc32_env_get_cpu(env)); 1087 int trapnr; 1088 unsigned int n, insn; 1089 target_siginfo_t info; 1090 1091 for (;;) { 1092 cpu_exec_start(cs); 1093 trapnr = uc32_cpu_exec(env); 1094 cpu_exec_end(cs); 1095 switch (trapnr) { 1096 case UC32_EXCP_PRIV: 1097 { 1098 /* system call */ 1099 get_user_u32(insn, env->regs[31] - 4); 1100 n = insn & 0xffffff; 1101 1102 if (n >= UC32_SYSCALL_BASE) { 1103 /* linux syscall */ 1104 n -= UC32_SYSCALL_BASE; 1105 if (n == UC32_SYSCALL_NR_set_tls) { 1106 cpu_set_tls(env, env->regs[0]); 1107 env->regs[0] = 0; 1108 } else { 1109 env->regs[0] = do_syscall(env, 1110 n, 1111 env->regs[0], 1112 env->regs[1], 1113 env->regs[2], 1114 env->regs[3], 1115 env->regs[4], 1116 env->regs[5], 1117 0, 0); 1118 } 1119 } else { 1120 goto error; 1121 } 1122 } 1123 break; 1124 case UC32_EXCP_DTRAP: 1125 case UC32_EXCP_ITRAP: 1126 info.si_signo = SIGSEGV; 1127 info.si_errno = 0; 1128 /* XXX: check env->error_code */ 1129 info.si_code = TARGET_SEGV_MAPERR; 1130 info._sifields._sigfault._addr = env->cp0.c4_faultaddr; 1131 queue_signal(env, info.si_signo, &info); 1132 break; 1133 case EXCP_INTERRUPT: 1134 /* just indicate that signals should be handled asap */ 1135 break; 1136 case EXCP_DEBUG: 1137 { 1138 int sig; 1139 1140 sig = gdb_handlesig(cs, TARGET_SIGTRAP); 1141 if (sig) { 1142 info.si_signo = sig; 1143 info.si_errno = 0; 1144 info.si_code = TARGET_TRAP_BRKPT; 1145 queue_signal(env, info.si_signo, &info); 1146 } 1147 } 1148 break; 1149 default: 1150 goto error; 1151 } 1152 process_pending_signals(env); 1153 } 1154 1155 error: 1156 fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr); 1157 cpu_dump_state(cs, stderr, fprintf, 0); 1158 abort(); 1159 } 1160 #endif 1161 1162 #ifdef TARGET_SPARC 1163 #define SPARC64_STACK_BIAS 2047 1164 1165 //#define DEBUG_WIN 1166 1167 /* WARNING: dealing with register windows _is_ complicated. More info 1168 can be found at http://www.sics.se/~psm/sparcstack.html */ 1169 static inline int get_reg_index(CPUSPARCState *env, int cwp, int index) 1170 { 1171 index = (index + cwp * 16) % (16 * env->nwindows); 1172 /* wrap handling : if cwp is on the last window, then we use the 1173 registers 'after' the end */ 1174 if (index < 8 && env->cwp == env->nwindows - 1) 1175 index += 16 * env->nwindows; 1176 return index; 1177 } 1178 1179 /* save the register window 'cwp1' */ 1180 static inline void save_window_offset(CPUSPARCState *env, int cwp1) 1181 { 1182 unsigned int i; 1183 abi_ulong sp_ptr; 1184 1185 sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)]; 1186 #ifdef TARGET_SPARC64 1187 if (sp_ptr & 3) 1188 sp_ptr += SPARC64_STACK_BIAS; 1189 #endif 1190 #if defined(DEBUG_WIN) 1191 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n", 1192 sp_ptr, cwp1); 1193 #endif 1194 for(i = 0; i < 16; i++) { 1195 /* FIXME - what to do if put_user() fails? 
*/ 1196 put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr); 1197 sp_ptr += sizeof(abi_ulong); 1198 } 1199 } 1200 1201 static void save_window(CPUSPARCState *env) 1202 { 1203 #ifndef TARGET_SPARC64 1204 unsigned int new_wim; 1205 new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) & 1206 ((1LL << env->nwindows) - 1); 1207 save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2)); 1208 env->wim = new_wim; 1209 #else 1210 save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2)); 1211 env->cansave++; 1212 env->canrestore--; 1213 #endif 1214 } 1215 1216 static void restore_window(CPUSPARCState *env) 1217 { 1218 #ifndef TARGET_SPARC64 1219 unsigned int new_wim; 1220 #endif 1221 unsigned int i, cwp1; 1222 abi_ulong sp_ptr; 1223 1224 #ifndef TARGET_SPARC64 1225 new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) & 1226 ((1LL << env->nwindows) - 1); 1227 #endif 1228 1229 /* restore the invalid window */ 1230 cwp1 = cpu_cwp_inc(env, env->cwp + 1); 1231 sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)]; 1232 #ifdef TARGET_SPARC64 1233 if (sp_ptr & 3) 1234 sp_ptr += SPARC64_STACK_BIAS; 1235 #endif 1236 #if defined(DEBUG_WIN) 1237 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n", 1238 sp_ptr, cwp1); 1239 #endif 1240 for(i = 0; i < 16; i++) { 1241 /* FIXME - what to do if get_user() fails? */ 1242 get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr); 1243 sp_ptr += sizeof(abi_ulong); 1244 } 1245 #ifdef TARGET_SPARC64 1246 env->canrestore++; 1247 if (env->cleanwin < env->nwindows - 1) 1248 env->cleanwin++; 1249 env->cansave--; 1250 #else 1251 env->wim = new_wim; 1252 #endif 1253 } 1254 1255 static void flush_windows(CPUSPARCState *env) 1256 { 1257 int offset, cwp1; 1258 1259 offset = 1; 1260 for(;;) { 1261 /* if restore would invoke restore_window(), then we can stop */ 1262 cwp1 = cpu_cwp_inc(env, env->cwp + offset); 1263 #ifndef TARGET_SPARC64 1264 if (env->wim & (1 << cwp1)) 1265 break; 1266 #else 1267 if (env->canrestore == 0) 1268 break; 1269 env->cansave++; 1270 env->canrestore--; 1271 #endif 1272 save_window_offset(env, cwp1); 1273 offset++; 1274 } 1275 cwp1 = cpu_cwp_inc(env, env->cwp + 1); 1276 #ifndef TARGET_SPARC64 1277 /* set wim so that restore will reload the registers */ 1278 env->wim = 1 << cwp1; 1279 #endif 1280 #if defined(DEBUG_WIN) 1281 printf("flush_windows: nb=%d\n", offset - 1); 1282 #endif 1283 } 1284 1285 void cpu_loop (CPUSPARCState *env) 1286 { 1287 CPUState *cs = CPU(sparc_env_get_cpu(env)); 1288 int trapnr; 1289 abi_long ret; 1290 target_siginfo_t info; 1291 1292 while (1) { 1293 trapnr = cpu_sparc_exec (env); 1294 1295 /* Compute PSR before exposing state. 
*/ 1296 if (env->cc_op != CC_OP_FLAGS) { 1297 cpu_get_psr(env); 1298 } 1299 1300 switch (trapnr) { 1301 #ifndef TARGET_SPARC64 1302 case 0x88: 1303 case 0x90: 1304 #else 1305 case 0x110: 1306 case 0x16d: 1307 #endif 1308 ret = do_syscall (env, env->gregs[1], 1309 env->regwptr[0], env->regwptr[1], 1310 env->regwptr[2], env->regwptr[3], 1311 env->regwptr[4], env->regwptr[5], 1312 0, 0); 1313 if ((abi_ulong)ret >= (abi_ulong)(-515)) { 1314 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) 1315 env->xcc |= PSR_CARRY; 1316 #else 1317 env->psr |= PSR_CARRY; 1318 #endif 1319 ret = -ret; 1320 } else { 1321 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) 1322 env->xcc &= ~PSR_CARRY; 1323 #else 1324 env->psr &= ~PSR_CARRY; 1325 #endif 1326 } 1327 env->regwptr[0] = ret; 1328 /* next instruction */ 1329 env->pc = env->npc; 1330 env->npc = env->npc + 4; 1331 break; 1332 case 0x83: /* flush windows */ 1333 #ifdef TARGET_ABI32 1334 case 0x103: 1335 #endif 1336 flush_windows(env); 1337 /* next instruction */ 1338 env->pc = env->npc; 1339 env->npc = env->npc + 4; 1340 break; 1341 #ifndef TARGET_SPARC64 1342 case TT_WIN_OVF: /* window overflow */ 1343 save_window(env); 1344 break; 1345 case TT_WIN_UNF: /* window underflow */ 1346 restore_window(env); 1347 break; 1348 case TT_TFAULT: 1349 case TT_DFAULT: 1350 { 1351 info.si_signo = TARGET_SIGSEGV; 1352 info.si_errno = 0; 1353 /* XXX: check env->error_code */ 1354 info.si_code = TARGET_SEGV_MAPERR; 1355 info._sifields._sigfault._addr = env->mmuregs[4]; 1356 queue_signal(env, info.si_signo, &info); 1357 } 1358 break; 1359 #else 1360 case TT_SPILL: /* window overflow */ 1361 save_window(env); 1362 break; 1363 case TT_FILL: /* window underflow */ 1364 restore_window(env); 1365 break; 1366 case TT_TFAULT: 1367 case TT_DFAULT: 1368 { 1369 info.si_signo = TARGET_SIGSEGV; 1370 info.si_errno = 0; 1371 /* XXX: check env->error_code */ 1372 info.si_code = TARGET_SEGV_MAPERR; 1373 if (trapnr == TT_DFAULT) 1374 info._sifields._sigfault._addr = env->dmmuregs[4]; 1375 else 1376 info._sifields._sigfault._addr = cpu_tsptr(env)->tpc; 1377 queue_signal(env, info.si_signo, &info); 1378 } 1379 break; 1380 #ifndef TARGET_ABI32 1381 case 0x16e: 1382 flush_windows(env); 1383 sparc64_get_context(env); 1384 break; 1385 case 0x16f: 1386 flush_windows(env); 1387 sparc64_set_context(env); 1388 break; 1389 #endif 1390 #endif 1391 case EXCP_INTERRUPT: 1392 /* just indicate that signals should be handled asap */ 1393 break; 1394 case TT_ILL_INSN: 1395 { 1396 info.si_signo = TARGET_SIGILL; 1397 info.si_errno = 0; 1398 info.si_code = TARGET_ILL_ILLOPC; 1399 info._sifields._sigfault._addr = env->pc; 1400 queue_signal(env, info.si_signo, &info); 1401 } 1402 break; 1403 case EXCP_DEBUG: 1404 { 1405 int sig; 1406 1407 sig = gdb_handlesig(cs, TARGET_SIGTRAP); 1408 if (sig) 1409 { 1410 info.si_signo = sig; 1411 info.si_errno = 0; 1412 info.si_code = TARGET_TRAP_BRKPT; 1413 queue_signal(env, info.si_signo, &info); 1414 } 1415 } 1416 break; 1417 default: 1418 printf ("Unhandled trap: 0x%x\n", trapnr); 1419 cpu_dump_state(cs, stderr, fprintf, 0); 1420 exit (1); 1421 } 1422 process_pending_signals (env); 1423 } 1424 } 1425 1426 #endif 1427 1428 #ifdef TARGET_PPC 1429 static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env) 1430 { 1431 /* TO FIX */ 1432 return 0; 1433 } 1434 1435 uint64_t cpu_ppc_load_tbl(CPUPPCState *env) 1436 { 1437 return cpu_ppc_get_tb(env); 1438 } 1439 1440 uint32_t cpu_ppc_load_tbu(CPUPPCState *env) 1441 { 1442 return cpu_ppc_get_tb(env) >> 32; 1443 } 1444 1445 uint64_t 
cpu_ppc_load_atbl(CPUPPCState *env) 1446 { 1447 return cpu_ppc_get_tb(env); 1448 } 1449 1450 uint32_t cpu_ppc_load_atbu(CPUPPCState *env) 1451 { 1452 return cpu_ppc_get_tb(env) >> 32; 1453 } 1454 1455 uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env) 1456 __attribute__ (( alias ("cpu_ppc_load_tbu") )); 1457 1458 uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env) 1459 { 1460 return cpu_ppc_load_tbl(env) & 0x3FFFFF80; 1461 } 1462 1463 /* XXX: to be fixed */ 1464 int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp) 1465 { 1466 return -1; 1467 } 1468 1469 int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val) 1470 { 1471 return -1; 1472 } 1473 1474 #define EXCP_DUMP(env, fmt, ...) \ 1475 do { \ 1476 CPUState *cs = ENV_GET_CPU(env); \ 1477 fprintf(stderr, fmt , ## __VA_ARGS__); \ 1478 cpu_dump_state(cs, stderr, fprintf, 0); \ 1479 qemu_log(fmt, ## __VA_ARGS__); \ 1480 if (qemu_log_enabled()) { \ 1481 log_cpu_state(cs, 0); \ 1482 } \ 1483 } while (0) 1484 1485 static int do_store_exclusive(CPUPPCState *env) 1486 { 1487 target_ulong addr; 1488 target_ulong page_addr; 1489 target_ulong val, val2 __attribute__((unused)) = 0; 1490 int flags; 1491 int segv = 0; 1492 1493 addr = env->reserve_ea; 1494 page_addr = addr & TARGET_PAGE_MASK; 1495 start_exclusive(); 1496 mmap_lock(); 1497 flags = page_get_flags(page_addr); 1498 if ((flags & PAGE_READ) == 0) { 1499 segv = 1; 1500 } else { 1501 int reg = env->reserve_info & 0x1f; 1502 int size = env->reserve_info >> 5; 1503 int stored = 0; 1504 1505 if (addr == env->reserve_addr) { 1506 switch (size) { 1507 case 1: segv = get_user_u8(val, addr); break; 1508 case 2: segv = get_user_u16(val, addr); break; 1509 case 4: segv = get_user_u32(val, addr); break; 1510 #if defined(TARGET_PPC64) 1511 case 8: segv = get_user_u64(val, addr); break; 1512 case 16: { 1513 segv = get_user_u64(val, addr); 1514 if (!segv) { 1515 segv = get_user_u64(val2, addr + 8); 1516 } 1517 break; 1518 } 1519 #endif 1520 default: abort(); 1521 } 1522 if (!segv && val == env->reserve_val) { 1523 val = env->gpr[reg]; 1524 switch (size) { 1525 case 1: segv = put_user_u8(val, addr); break; 1526 case 2: segv = put_user_u16(val, addr); break; 1527 case 4: segv = put_user_u32(val, addr); break; 1528 #if defined(TARGET_PPC64) 1529 case 8: segv = put_user_u64(val, addr); break; 1530 case 16: { 1531 if (val2 == env->reserve_val2) { 1532 if (msr_le) { 1533 val2 = val; 1534 val = env->gpr[reg+1]; 1535 } else { 1536 val2 = env->gpr[reg+1]; 1537 } 1538 segv = put_user_u64(val, addr); 1539 if (!segv) { 1540 segv = put_user_u64(val2, addr + 8); 1541 } 1542 } 1543 break; 1544 } 1545 #endif 1546 default: abort(); 1547 } 1548 if (!segv) { 1549 stored = 1; 1550 } 1551 } 1552 } 1553 env->crf[0] = (stored << 1) | xer_so; 1554 env->reserve_addr = (target_ulong)-1; 1555 } 1556 if (!segv) { 1557 env->nip += 4; 1558 } 1559 mmap_unlock(); 1560 end_exclusive(); 1561 return segv; 1562 } 1563 1564 void cpu_loop(CPUPPCState *env) 1565 { 1566 CPUState *cs = CPU(ppc_env_get_cpu(env)); 1567 target_siginfo_t info; 1568 int trapnr; 1569 target_ulong ret; 1570 1571 for(;;) { 1572 cpu_exec_start(cs); 1573 trapnr = cpu_ppc_exec(env); 1574 cpu_exec_end(cs); 1575 switch(trapnr) { 1576 case POWERPC_EXCP_NONE: 1577 /* Just go on */ 1578 break; 1579 case POWERPC_EXCP_CRITICAL: /* Critical input */ 1580 cpu_abort(cs, "Critical interrupt while in user mode. " 1581 "Aborting\n"); 1582 break; 1583 case POWERPC_EXCP_MCHECK: /* Machine check exception */ 1584 cpu_abort(cs, "Machine check exception while in user mode. 
" 1585 "Aborting\n"); 1586 break; 1587 case POWERPC_EXCP_DSI: /* Data storage exception */ 1588 EXCP_DUMP(env, "Invalid data memory access: 0x" TARGET_FMT_lx "\n", 1589 env->spr[SPR_DAR]); 1590 /* XXX: check this. Seems bugged */ 1591 switch (env->error_code & 0xFF000000) { 1592 case 0x40000000: 1593 info.si_signo = TARGET_SIGSEGV; 1594 info.si_errno = 0; 1595 info.si_code = TARGET_SEGV_MAPERR; 1596 break; 1597 case 0x04000000: 1598 info.si_signo = TARGET_SIGILL; 1599 info.si_errno = 0; 1600 info.si_code = TARGET_ILL_ILLADR; 1601 break; 1602 case 0x08000000: 1603 info.si_signo = TARGET_SIGSEGV; 1604 info.si_errno = 0; 1605 info.si_code = TARGET_SEGV_ACCERR; 1606 break; 1607 default: 1608 /* Let's send a regular segfault... */ 1609 EXCP_DUMP(env, "Invalid segfault errno (%02x)\n", 1610 env->error_code); 1611 info.si_signo = TARGET_SIGSEGV; 1612 info.si_errno = 0; 1613 info.si_code = TARGET_SEGV_MAPERR; 1614 break; 1615 } 1616 info._sifields._sigfault._addr = env->nip; 1617 queue_signal(env, info.si_signo, &info); 1618 break; 1619 case POWERPC_EXCP_ISI: /* Instruction storage exception */ 1620 EXCP_DUMP(env, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx 1621 "\n", env->spr[SPR_SRR0]); 1622 /* XXX: check this */ 1623 switch (env->error_code & 0xFF000000) { 1624 case 0x40000000: 1625 info.si_signo = TARGET_SIGSEGV; 1626 info.si_errno = 0; 1627 info.si_code = TARGET_SEGV_MAPERR; 1628 break; 1629 case 0x10000000: 1630 case 0x08000000: 1631 info.si_signo = TARGET_SIGSEGV; 1632 info.si_errno = 0; 1633 info.si_code = TARGET_SEGV_ACCERR; 1634 break; 1635 default: 1636 /* Let's send a regular segfault... */ 1637 EXCP_DUMP(env, "Invalid segfault errno (%02x)\n", 1638 env->error_code); 1639 info.si_signo = TARGET_SIGSEGV; 1640 info.si_errno = 0; 1641 info.si_code = TARGET_SEGV_MAPERR; 1642 break; 1643 } 1644 info._sifields._sigfault._addr = env->nip - 4; 1645 queue_signal(env, info.si_signo, &info); 1646 break; 1647 case POWERPC_EXCP_EXTERNAL: /* External input */ 1648 cpu_abort(cs, "External interrupt while in user mode. 
" 1649 "Aborting\n"); 1650 break; 1651 case POWERPC_EXCP_ALIGN: /* Alignment exception */ 1652 EXCP_DUMP(env, "Unaligned memory access\n"); 1653 /* XXX: check this */ 1654 info.si_signo = TARGET_SIGBUS; 1655 info.si_errno = 0; 1656 info.si_code = TARGET_BUS_ADRALN; 1657 info._sifields._sigfault._addr = env->nip - 4; 1658 queue_signal(env, info.si_signo, &info); 1659 break; 1660 case POWERPC_EXCP_PROGRAM: /* Program exception */ 1661 /* XXX: check this */ 1662 switch (env->error_code & ~0xF) { 1663 case POWERPC_EXCP_FP: 1664 EXCP_DUMP(env, "Floating point program exception\n"); 1665 info.si_signo = TARGET_SIGFPE; 1666 info.si_errno = 0; 1667 switch (env->error_code & 0xF) { 1668 case POWERPC_EXCP_FP_OX: 1669 info.si_code = TARGET_FPE_FLTOVF; 1670 break; 1671 case POWERPC_EXCP_FP_UX: 1672 info.si_code = TARGET_FPE_FLTUND; 1673 break; 1674 case POWERPC_EXCP_FP_ZX: 1675 case POWERPC_EXCP_FP_VXZDZ: 1676 info.si_code = TARGET_FPE_FLTDIV; 1677 break; 1678 case POWERPC_EXCP_FP_XX: 1679 info.si_code = TARGET_FPE_FLTRES; 1680 break; 1681 case POWERPC_EXCP_FP_VXSOFT: 1682 info.si_code = TARGET_FPE_FLTINV; 1683 break; 1684 case POWERPC_EXCP_FP_VXSNAN: 1685 case POWERPC_EXCP_FP_VXISI: 1686 case POWERPC_EXCP_FP_VXIDI: 1687 case POWERPC_EXCP_FP_VXIMZ: 1688 case POWERPC_EXCP_FP_VXVC: 1689 case POWERPC_EXCP_FP_VXSQRT: 1690 case POWERPC_EXCP_FP_VXCVI: 1691 info.si_code = TARGET_FPE_FLTSUB; 1692 break; 1693 default: 1694 EXCP_DUMP(env, "Unknown floating point exception (%02x)\n", 1695 env->error_code); 1696 break; 1697 } 1698 break; 1699 case POWERPC_EXCP_INVAL: 1700 EXCP_DUMP(env, "Invalid instruction\n"); 1701 info.si_signo = TARGET_SIGILL; 1702 info.si_errno = 0; 1703 switch (env->error_code & 0xF) { 1704 case POWERPC_EXCP_INVAL_INVAL: 1705 info.si_code = TARGET_ILL_ILLOPC; 1706 break; 1707 case POWERPC_EXCP_INVAL_LSWX: 1708 info.si_code = TARGET_ILL_ILLOPN; 1709 break; 1710 case POWERPC_EXCP_INVAL_SPR: 1711 info.si_code = TARGET_ILL_PRVREG; 1712 break; 1713 case POWERPC_EXCP_INVAL_FP: 1714 info.si_code = TARGET_ILL_COPROC; 1715 break; 1716 default: 1717 EXCP_DUMP(env, "Unknown invalid operation (%02x)\n", 1718 env->error_code & 0xF); 1719 info.si_code = TARGET_ILL_ILLADR; 1720 break; 1721 } 1722 break; 1723 case POWERPC_EXCP_PRIV: 1724 EXCP_DUMP(env, "Privilege violation\n"); 1725 info.si_signo = TARGET_SIGILL; 1726 info.si_errno = 0; 1727 switch (env->error_code & 0xF) { 1728 case POWERPC_EXCP_PRIV_OPC: 1729 info.si_code = TARGET_ILL_PRVOPC; 1730 break; 1731 case POWERPC_EXCP_PRIV_REG: 1732 info.si_code = TARGET_ILL_PRVREG; 1733 break; 1734 default: 1735 EXCP_DUMP(env, "Unknown privilege violation (%02x)\n", 1736 env->error_code & 0xF); 1737 info.si_code = TARGET_ILL_PRVOPC; 1738 break; 1739 } 1740 break; 1741 case POWERPC_EXCP_TRAP: 1742 cpu_abort(cs, "Tried to call a TRAP\n"); 1743 break; 1744 default: 1745 /* Should not happen ! */ 1746 cpu_abort(cs, "Unknown program exception (%02x)\n", 1747 env->error_code); 1748 break; 1749 } 1750 info._sifields._sigfault._addr = env->nip - 4; 1751 queue_signal(env, info.si_signo, &info); 1752 break; 1753 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ 1754 EXCP_DUMP(env, "No floating point allowed\n"); 1755 info.si_signo = TARGET_SIGILL; 1756 info.si_errno = 0; 1757 info.si_code = TARGET_ILL_COPROC; 1758 info._sifields._sigfault._addr = env->nip - 4; 1759 queue_signal(env, info.si_signo, &info); 1760 break; 1761 case POWERPC_EXCP_SYSCALL: /* System call exception */ 1762 cpu_abort(cs, "Syscall exception while in user mode. 
" 1763 "Aborting\n"); 1764 break; 1765 case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ 1766 EXCP_DUMP(env, "No APU instruction allowed\n"); 1767 info.si_signo = TARGET_SIGILL; 1768 info.si_errno = 0; 1769 info.si_code = TARGET_ILL_COPROC; 1770 info._sifields._sigfault._addr = env->nip - 4; 1771 queue_signal(env, info.si_signo, &info); 1772 break; 1773 case POWERPC_EXCP_DECR: /* Decrementer exception */ 1774 cpu_abort(cs, "Decrementer interrupt while in user mode. " 1775 "Aborting\n"); 1776 break; 1777 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ 1778 cpu_abort(cs, "Fix interval timer interrupt while in user mode. " 1779 "Aborting\n"); 1780 break; 1781 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ 1782 cpu_abort(cs, "Watchdog timer interrupt while in user mode. " 1783 "Aborting\n"); 1784 break; 1785 case POWERPC_EXCP_DTLB: /* Data TLB error */ 1786 cpu_abort(cs, "Data TLB exception while in user mode. " 1787 "Aborting\n"); 1788 break; 1789 case POWERPC_EXCP_ITLB: /* Instruction TLB error */ 1790 cpu_abort(cs, "Instruction TLB exception while in user mode. " 1791 "Aborting\n"); 1792 break; 1793 case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */ 1794 EXCP_DUMP(env, "No SPE/floating-point instruction allowed\n"); 1795 info.si_signo = TARGET_SIGILL; 1796 info.si_errno = 0; 1797 info.si_code = TARGET_ILL_COPROC; 1798 info._sifields._sigfault._addr = env->nip - 4; 1799 queue_signal(env, info.si_signo, &info); 1800 break; 1801 case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */ 1802 cpu_abort(cs, "Embedded floating-point data IRQ not handled\n"); 1803 break; 1804 case POWERPC_EXCP_EFPRI: /* Embedded floating-point round IRQ */ 1805 cpu_abort(cs, "Embedded floating-point round IRQ not handled\n"); 1806 break; 1807 case POWERPC_EXCP_EPERFM: /* Embedded performance monitor IRQ */ 1808 cpu_abort(cs, "Performance monitor exception not handled\n"); 1809 break; 1810 case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */ 1811 cpu_abort(cs, "Doorbell interrupt while in user mode. " 1812 "Aborting\n"); 1813 break; 1814 case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */ 1815 cpu_abort(cs, "Doorbell critical interrupt while in user mode. " 1816 "Aborting\n"); 1817 break; 1818 case POWERPC_EXCP_RESET: /* System reset exception */ 1819 cpu_abort(cs, "Reset interrupt while in user mode. " 1820 "Aborting\n"); 1821 break; 1822 case POWERPC_EXCP_DSEG: /* Data segment exception */ 1823 cpu_abort(cs, "Data segment exception while in user mode. " 1824 "Aborting\n"); 1825 break; 1826 case POWERPC_EXCP_ISEG: /* Instruction segment exception */ 1827 cpu_abort(cs, "Instruction segment exception " 1828 "while in user mode. Aborting\n"); 1829 break; 1830 /* PowerPC 64 with hypervisor mode support */ 1831 case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */ 1832 cpu_abort(cs, "Hypervisor decrementer interrupt " 1833 "while in user mode. Aborting\n"); 1834 break; 1835 case POWERPC_EXCP_TRACE: /* Trace exception */ 1836 /* Nothing to do: 1837 * we use this exception to emulate step-by-step execution mode. 1838 */ 1839 break; 1840 /* PowerPC 64 with hypervisor mode support */ 1841 case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */ 1842 cpu_abort(cs, "Hypervisor data storage exception " 1843 "while in user mode. Aborting\n"); 1844 break; 1845 case POWERPC_EXCP_HISI: /* Hypervisor instruction storage excp */ 1846 cpu_abort(cs, "Hypervisor instruction storage exception " 1847 "while in user mode. 
Aborting\n"); 1848 break; 1849 case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */ 1850 cpu_abort(cs, "Hypervisor data segment exception " 1851 "while in user mode. Aborting\n"); 1852 break; 1853 case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment excp */ 1854 cpu_abort(cs, "Hypervisor instruction segment exception " 1855 "while in user mode. Aborting\n"); 1856 break; 1857 case POWERPC_EXCP_VPU: /* Vector unavailable exception */ 1858 EXCP_DUMP(env, "No Altivec instructions allowed\n"); 1859 info.si_signo = TARGET_SIGILL; 1860 info.si_errno = 0; 1861 info.si_code = TARGET_ILL_COPROC; 1862 info._sifields._sigfault._addr = env->nip - 4; 1863 queue_signal(env, info.si_signo, &info); 1864 break; 1865 case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */ 1866 cpu_abort(cs, "Programmable interval timer interrupt " 1867 "while in user mode. Aborting\n"); 1868 break; 1869 case POWERPC_EXCP_IO: /* IO error exception */ 1870 cpu_abort(cs, "IO error exception while in user mode. " 1871 "Aborting\n"); 1872 break; 1873 case POWERPC_EXCP_RUNM: /* Run mode exception */ 1874 cpu_abort(cs, "Run mode exception while in user mode. " 1875 "Aborting\n"); 1876 break; 1877 case POWERPC_EXCP_EMUL: /* Emulation trap exception */ 1878 cpu_abort(cs, "Emulation trap exception not handled\n"); 1879 break; 1880 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ 1881 cpu_abort(cs, "Instruction fetch TLB exception " 1882 "while in user-mode. Aborting"); 1883 break; 1884 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ 1885 cpu_abort(cs, "Data load TLB exception while in user-mode. " 1886 "Aborting"); 1887 break; 1888 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ 1889 cpu_abort(cs, "Data store TLB exception while in user-mode. " 1890 "Aborting"); 1891 break; 1892 case POWERPC_EXCP_FPA: /* Floating-point assist exception */ 1893 cpu_abort(cs, "Floating-point assist exception not handled\n"); 1894 break; 1895 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ 1896 cpu_abort(cs, "Instruction address breakpoint exception " 1897 "not handled\n"); 1898 break; 1899 case POWERPC_EXCP_SMI: /* System management interrupt */ 1900 cpu_abort(cs, "System management interrupt while in user mode. " 1901 "Aborting\n"); 1902 break; 1903 case POWERPC_EXCP_THERM: /* Thermal interrupt */ 1904 cpu_abort(cs, "Thermal interrupt interrupt while in user mode. " 1905 "Aborting\n"); 1906 break; 1907 case POWERPC_EXCP_PERFM: /* Embedded performance monitor IRQ */ 1908 cpu_abort(cs, "Performance monitor exception not handled\n"); 1909 break; 1910 case POWERPC_EXCP_VPUA: /* Vector assist exception */ 1911 cpu_abort(cs, "Vector assist exception not handled\n"); 1912 break; 1913 case POWERPC_EXCP_SOFTP: /* Soft patch exception */ 1914 cpu_abort(cs, "Soft patch exception not handled\n"); 1915 break; 1916 case POWERPC_EXCP_MAINT: /* Maintenance exception */ 1917 cpu_abort(cs, "Maintenance exception while in user mode. " 1918 "Aborting\n"); 1919 break; 1920 case POWERPC_EXCP_STOP: /* stop translation */ 1921 /* We did invalidate the instruction cache. Go on */ 1922 break; 1923 case POWERPC_EXCP_BRANCH: /* branch instruction: */ 1924 /* We just stopped because of a branch. Go on */ 1925 break; 1926 case POWERPC_EXCP_SYSCALL_USER: 1927 /* system call in user-mode emulation */ 1928 /* WARNING: 1929 * PPC ABI uses overflow flag in cr0 to signal an error 1930 * in syscalls. 
1931 */ 1932 env->crf[0] &= ~0x1; 1933 ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4], 1934 env->gpr[5], env->gpr[6], env->gpr[7], 1935 env->gpr[8], 0, 0); 1936 if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) { 1937 /* Returning from a successful sigreturn syscall. 1938 Avoid corrupting register state. */ 1939 break; 1940 } 1941 if (ret > (target_ulong)(-515)) { 1942 env->crf[0] |= 0x1; 1943 ret = -ret; 1944 } 1945 env->gpr[3] = ret; 1946 break; 1947 case POWERPC_EXCP_STCX: 1948 if (do_store_exclusive(env)) { 1949 info.si_signo = TARGET_SIGSEGV; 1950 info.si_errno = 0; 1951 info.si_code = TARGET_SEGV_MAPERR; 1952 info._sifields._sigfault._addr = env->nip; 1953 queue_signal(env, info.si_signo, &info); 1954 } 1955 break; 1956 case EXCP_DEBUG: 1957 { 1958 int sig; 1959 1960 sig = gdb_handlesig(cs, TARGET_SIGTRAP); 1961 if (sig) { 1962 info.si_signo = sig; 1963 info.si_errno = 0; 1964 info.si_code = TARGET_TRAP_BRKPT; 1965 queue_signal(env, info.si_signo, &info); 1966 } 1967 } 1968 break; 1969 case EXCP_INTERRUPT: 1970 /* just indicate that signals should be handled asap */ 1971 break; 1972 default: 1973 cpu_abort(cs, "Unknown exception 0x%d. Aborting\n", trapnr); 1974 break; 1975 } 1976 process_pending_signals(env); 1977 } 1978 } 1979 #endif 1980 1981 #ifdef TARGET_MIPS 1982 1983 # ifdef TARGET_ABI_MIPSO32 1984 # define MIPS_SYS(name, args) args, 1985 static const uint8_t mips_syscall_args[] = { 1986 MIPS_SYS(sys_syscall , 8) /* 4000 */ 1987 MIPS_SYS(sys_exit , 1) 1988 MIPS_SYS(sys_fork , 0) 1989 MIPS_SYS(sys_read , 3) 1990 MIPS_SYS(sys_write , 3) 1991 MIPS_SYS(sys_open , 3) /* 4005 */ 1992 MIPS_SYS(sys_close , 1) 1993 MIPS_SYS(sys_waitpid , 3) 1994 MIPS_SYS(sys_creat , 2) 1995 MIPS_SYS(sys_link , 2) 1996 MIPS_SYS(sys_unlink , 1) /* 4010 */ 1997 MIPS_SYS(sys_execve , 0) 1998 MIPS_SYS(sys_chdir , 1) 1999 MIPS_SYS(sys_time , 1) 2000 MIPS_SYS(sys_mknod , 3) 2001 MIPS_SYS(sys_chmod , 2) /* 4015 */ 2002 MIPS_SYS(sys_lchown , 3) 2003 MIPS_SYS(sys_ni_syscall , 0) 2004 MIPS_SYS(sys_ni_syscall , 0) /* was sys_stat */ 2005 MIPS_SYS(sys_lseek , 3) 2006 MIPS_SYS(sys_getpid , 0) /* 4020 */ 2007 MIPS_SYS(sys_mount , 5) 2008 MIPS_SYS(sys_umount , 1) 2009 MIPS_SYS(sys_setuid , 1) 2010 MIPS_SYS(sys_getuid , 0) 2011 MIPS_SYS(sys_stime , 1) /* 4025 */ 2012 MIPS_SYS(sys_ptrace , 4) 2013 MIPS_SYS(sys_alarm , 1) 2014 MIPS_SYS(sys_ni_syscall , 0) /* was sys_fstat */ 2015 MIPS_SYS(sys_pause , 0) 2016 MIPS_SYS(sys_utime , 2) /* 4030 */ 2017 MIPS_SYS(sys_ni_syscall , 0) 2018 MIPS_SYS(sys_ni_syscall , 0) 2019 MIPS_SYS(sys_access , 2) 2020 MIPS_SYS(sys_nice , 1) 2021 MIPS_SYS(sys_ni_syscall , 0) /* 4035 */ 2022 MIPS_SYS(sys_sync , 0) 2023 MIPS_SYS(sys_kill , 2) 2024 MIPS_SYS(sys_rename , 2) 2025 MIPS_SYS(sys_mkdir , 2) 2026 MIPS_SYS(sys_rmdir , 1) /* 4040 */ 2027 MIPS_SYS(sys_dup , 1) 2028 MIPS_SYS(sys_pipe , 0) 2029 MIPS_SYS(sys_times , 1) 2030 MIPS_SYS(sys_ni_syscall , 0) 2031 MIPS_SYS(sys_brk , 1) /* 4045 */ 2032 MIPS_SYS(sys_setgid , 1) 2033 MIPS_SYS(sys_getgid , 0) 2034 MIPS_SYS(sys_ni_syscall , 0) /* was signal(2) */ 2035 MIPS_SYS(sys_geteuid , 0) 2036 MIPS_SYS(sys_getegid , 0) /* 4050 */ 2037 MIPS_SYS(sys_acct , 0) 2038 MIPS_SYS(sys_umount2 , 2) 2039 MIPS_SYS(sys_ni_syscall , 0) 2040 MIPS_SYS(sys_ioctl , 3) 2041 MIPS_SYS(sys_fcntl , 3) /* 4055 */ 2042 MIPS_SYS(sys_ni_syscall , 2) 2043 MIPS_SYS(sys_setpgid , 2) 2044 MIPS_SYS(sys_ni_syscall , 0) 2045 MIPS_SYS(sys_olduname , 1) 2046 MIPS_SYS(sys_umask , 1) /* 4060 */ 2047 MIPS_SYS(sys_chroot , 1) 2048 MIPS_SYS(sys_ustat , 2) 2049 MIPS_SYS(sys_dup2 , 
2) 2050 MIPS_SYS(sys_getppid , 0) 2051 MIPS_SYS(sys_getpgrp , 0) /* 4065 */ 2052 MIPS_SYS(sys_setsid , 0) 2053 MIPS_SYS(sys_sigaction , 3) 2054 MIPS_SYS(sys_sgetmask , 0) 2055 MIPS_SYS(sys_ssetmask , 1) 2056 MIPS_SYS(sys_setreuid , 2) /* 4070 */ 2057 MIPS_SYS(sys_setregid , 2) 2058 MIPS_SYS(sys_sigsuspend , 0) 2059 MIPS_SYS(sys_sigpending , 1) 2060 MIPS_SYS(sys_sethostname , 2) 2061 MIPS_SYS(sys_setrlimit , 2) /* 4075 */ 2062 MIPS_SYS(sys_getrlimit , 2) 2063 MIPS_SYS(sys_getrusage , 2) 2064 MIPS_SYS(sys_gettimeofday, 2) 2065 MIPS_SYS(sys_settimeofday, 2) 2066 MIPS_SYS(sys_getgroups , 2) /* 4080 */ 2067 MIPS_SYS(sys_setgroups , 2) 2068 MIPS_SYS(sys_ni_syscall , 0) /* old_select */ 2069 MIPS_SYS(sys_symlink , 2) 2070 MIPS_SYS(sys_ni_syscall , 0) /* was sys_lstat */ 2071 MIPS_SYS(sys_readlink , 3) /* 4085 */ 2072 MIPS_SYS(sys_uselib , 1) 2073 MIPS_SYS(sys_swapon , 2) 2074 MIPS_SYS(sys_reboot , 3) 2075 MIPS_SYS(old_readdir , 3) 2076 MIPS_SYS(old_mmap , 6) /* 4090 */ 2077 MIPS_SYS(sys_munmap , 2) 2078 MIPS_SYS(sys_truncate , 2) 2079 MIPS_SYS(sys_ftruncate , 2) 2080 MIPS_SYS(sys_fchmod , 2) 2081 MIPS_SYS(sys_fchown , 3) /* 4095 */ 2082 MIPS_SYS(sys_getpriority , 2) 2083 MIPS_SYS(sys_setpriority , 3) 2084 MIPS_SYS(sys_ni_syscall , 0) 2085 MIPS_SYS(sys_statfs , 2) 2086 MIPS_SYS(sys_fstatfs , 2) /* 4100 */ 2087 MIPS_SYS(sys_ni_syscall , 0) /* was ioperm(2) */ 2088 MIPS_SYS(sys_socketcall , 2) 2089 MIPS_SYS(sys_syslog , 3) 2090 MIPS_SYS(sys_setitimer , 3) 2091 MIPS_SYS(sys_getitimer , 2) /* 4105 */ 2092 MIPS_SYS(sys_newstat , 2) 2093 MIPS_SYS(sys_newlstat , 2) 2094 MIPS_SYS(sys_newfstat , 2) 2095 MIPS_SYS(sys_uname , 1) 2096 MIPS_SYS(sys_ni_syscall , 0) /* 4110 was iopl(2) */ 2097 MIPS_SYS(sys_vhangup , 0) 2098 MIPS_SYS(sys_ni_syscall , 0) /* was sys_idle() */ 2099 MIPS_SYS(sys_ni_syscall , 0) /* was sys_vm86 */ 2100 MIPS_SYS(sys_wait4 , 4) 2101 MIPS_SYS(sys_swapoff , 1) /* 4115 */ 2102 MIPS_SYS(sys_sysinfo , 1) 2103 MIPS_SYS(sys_ipc , 6) 2104 MIPS_SYS(sys_fsync , 1) 2105 MIPS_SYS(sys_sigreturn , 0) 2106 MIPS_SYS(sys_clone , 6) /* 4120 */ 2107 MIPS_SYS(sys_setdomainname, 2) 2108 MIPS_SYS(sys_newuname , 1) 2109 MIPS_SYS(sys_ni_syscall , 0) /* sys_modify_ldt */ 2110 MIPS_SYS(sys_adjtimex , 1) 2111 MIPS_SYS(sys_mprotect , 3) /* 4125 */ 2112 MIPS_SYS(sys_sigprocmask , 3) 2113 MIPS_SYS(sys_ni_syscall , 0) /* was create_module */ 2114 MIPS_SYS(sys_init_module , 5) 2115 MIPS_SYS(sys_delete_module, 1) 2116 MIPS_SYS(sys_ni_syscall , 0) /* 4130 was get_kernel_syms */ 2117 MIPS_SYS(sys_quotactl , 0) 2118 MIPS_SYS(sys_getpgid , 1) 2119 MIPS_SYS(sys_fchdir , 1) 2120 MIPS_SYS(sys_bdflush , 2) 2121 MIPS_SYS(sys_sysfs , 3) /* 4135 */ 2122 MIPS_SYS(sys_personality , 1) 2123 MIPS_SYS(sys_ni_syscall , 0) /* for afs_syscall */ 2124 MIPS_SYS(sys_setfsuid , 1) 2125 MIPS_SYS(sys_setfsgid , 1) 2126 MIPS_SYS(sys_llseek , 5) /* 4140 */ 2127 MIPS_SYS(sys_getdents , 3) 2128 MIPS_SYS(sys_select , 5) 2129 MIPS_SYS(sys_flock , 2) 2130 MIPS_SYS(sys_msync , 3) 2131 MIPS_SYS(sys_readv , 3) /* 4145 */ 2132 MIPS_SYS(sys_writev , 3) 2133 MIPS_SYS(sys_cacheflush , 3) 2134 MIPS_SYS(sys_cachectl , 3) 2135 MIPS_SYS(sys_sysmips , 4) 2136 MIPS_SYS(sys_ni_syscall , 0) /* 4150 */ 2137 MIPS_SYS(sys_getsid , 1) 2138 MIPS_SYS(sys_fdatasync , 0) 2139 MIPS_SYS(sys_sysctl , 1) 2140 MIPS_SYS(sys_mlock , 2) 2141 MIPS_SYS(sys_munlock , 2) /* 4155 */ 2142 MIPS_SYS(sys_mlockall , 1) 2143 MIPS_SYS(sys_munlockall , 0) 2144 MIPS_SYS(sys_sched_setparam, 2) 2145 MIPS_SYS(sys_sched_getparam, 2) 2146 MIPS_SYS(sys_sched_setscheduler, 3) /* 4160 */ 2147 
MIPS_SYS(sys_sched_getscheduler, 1) 2148 MIPS_SYS(sys_sched_yield , 0) 2149 MIPS_SYS(sys_sched_get_priority_max, 1) 2150 MIPS_SYS(sys_sched_get_priority_min, 1) 2151 MIPS_SYS(sys_sched_rr_get_interval, 2) /* 4165 */ 2152 MIPS_SYS(sys_nanosleep, 2) 2153 MIPS_SYS(sys_mremap , 5) 2154 MIPS_SYS(sys_accept , 3) 2155 MIPS_SYS(sys_bind , 3) 2156 MIPS_SYS(sys_connect , 3) /* 4170 */ 2157 MIPS_SYS(sys_getpeername , 3) 2158 MIPS_SYS(sys_getsockname , 3) 2159 MIPS_SYS(sys_getsockopt , 5) 2160 MIPS_SYS(sys_listen , 2) 2161 MIPS_SYS(sys_recv , 4) /* 4175 */ 2162 MIPS_SYS(sys_recvfrom , 6) 2163 MIPS_SYS(sys_recvmsg , 3) 2164 MIPS_SYS(sys_send , 4) 2165 MIPS_SYS(sys_sendmsg , 3) 2166 MIPS_SYS(sys_sendto , 6) /* 4180 */ 2167 MIPS_SYS(sys_setsockopt , 5) 2168 MIPS_SYS(sys_shutdown , 2) 2169 MIPS_SYS(sys_socket , 3) 2170 MIPS_SYS(sys_socketpair , 4) 2171 MIPS_SYS(sys_setresuid , 3) /* 4185 */ 2172 MIPS_SYS(sys_getresuid , 3) 2173 MIPS_SYS(sys_ni_syscall , 0) /* was sys_query_module */ 2174 MIPS_SYS(sys_poll , 3) 2175 MIPS_SYS(sys_nfsservctl , 3) 2176 MIPS_SYS(sys_setresgid , 3) /* 4190 */ 2177 MIPS_SYS(sys_getresgid , 3) 2178 MIPS_SYS(sys_prctl , 5) 2179 MIPS_SYS(sys_rt_sigreturn, 0) 2180 MIPS_SYS(sys_rt_sigaction, 4) 2181 MIPS_SYS(sys_rt_sigprocmask, 4) /* 4195 */ 2182 MIPS_SYS(sys_rt_sigpending, 2) 2183 MIPS_SYS(sys_rt_sigtimedwait, 4) 2184 MIPS_SYS(sys_rt_sigqueueinfo, 3) 2185 MIPS_SYS(sys_rt_sigsuspend, 0) 2186 MIPS_SYS(sys_pread64 , 6) /* 4200 */ 2187 MIPS_SYS(sys_pwrite64 , 6) 2188 MIPS_SYS(sys_chown , 3) 2189 MIPS_SYS(sys_getcwd , 2) 2190 MIPS_SYS(sys_capget , 2) 2191 MIPS_SYS(sys_capset , 2) /* 4205 */ 2192 MIPS_SYS(sys_sigaltstack , 2) 2193 MIPS_SYS(sys_sendfile , 4) 2194 MIPS_SYS(sys_ni_syscall , 0) 2195 MIPS_SYS(sys_ni_syscall , 0) 2196 MIPS_SYS(sys_mmap2 , 6) /* 4210 */ 2197 MIPS_SYS(sys_truncate64 , 4) 2198 MIPS_SYS(sys_ftruncate64 , 4) 2199 MIPS_SYS(sys_stat64 , 2) 2200 MIPS_SYS(sys_lstat64 , 2) 2201 MIPS_SYS(sys_fstat64 , 2) /* 4215 */ 2202 MIPS_SYS(sys_pivot_root , 2) 2203 MIPS_SYS(sys_mincore , 3) 2204 MIPS_SYS(sys_madvise , 3) 2205 MIPS_SYS(sys_getdents64 , 3) 2206 MIPS_SYS(sys_fcntl64 , 3) /* 4220 */ 2207 MIPS_SYS(sys_ni_syscall , 0) 2208 MIPS_SYS(sys_gettid , 0) 2209 MIPS_SYS(sys_readahead , 5) 2210 MIPS_SYS(sys_setxattr , 5) 2211 MIPS_SYS(sys_lsetxattr , 5) /* 4225 */ 2212 MIPS_SYS(sys_fsetxattr , 5) 2213 MIPS_SYS(sys_getxattr , 4) 2214 MIPS_SYS(sys_lgetxattr , 4) 2215 MIPS_SYS(sys_fgetxattr , 4) 2216 MIPS_SYS(sys_listxattr , 3) /* 4230 */ 2217 MIPS_SYS(sys_llistxattr , 3) 2218 MIPS_SYS(sys_flistxattr , 3) 2219 MIPS_SYS(sys_removexattr , 2) 2220 MIPS_SYS(sys_lremovexattr, 2) 2221 MIPS_SYS(sys_fremovexattr, 2) /* 4235 */ 2222 MIPS_SYS(sys_tkill , 2) 2223 MIPS_SYS(sys_sendfile64 , 5) 2224 MIPS_SYS(sys_futex , 6) 2225 MIPS_SYS(sys_sched_setaffinity, 3) 2226 MIPS_SYS(sys_sched_getaffinity, 3) /* 4240 */ 2227 MIPS_SYS(sys_io_setup , 2) 2228 MIPS_SYS(sys_io_destroy , 1) 2229 MIPS_SYS(sys_io_getevents, 5) 2230 MIPS_SYS(sys_io_submit , 3) 2231 MIPS_SYS(sys_io_cancel , 3) /* 4245 */ 2232 MIPS_SYS(sys_exit_group , 1) 2233 MIPS_SYS(sys_lookup_dcookie, 3) 2234 MIPS_SYS(sys_epoll_create, 1) 2235 MIPS_SYS(sys_epoll_ctl , 4) 2236 MIPS_SYS(sys_epoll_wait , 3) /* 4250 */ 2237 MIPS_SYS(sys_remap_file_pages, 5) 2238 MIPS_SYS(sys_set_tid_address, 1) 2239 MIPS_SYS(sys_restart_syscall, 0) 2240 MIPS_SYS(sys_fadvise64_64, 7) 2241 MIPS_SYS(sys_statfs64 , 3) /* 4255 */ 2242 MIPS_SYS(sys_fstatfs64 , 2) 2243 MIPS_SYS(sys_timer_create, 3) 2244 MIPS_SYS(sys_timer_settime, 4) 2245 MIPS_SYS(sys_timer_gettime, 2) 2246 
MIPS_SYS(sys_timer_getoverrun, 1) /* 4260 */ 2247 MIPS_SYS(sys_timer_delete, 1) 2248 MIPS_SYS(sys_clock_settime, 2) 2249 MIPS_SYS(sys_clock_gettime, 2) 2250 MIPS_SYS(sys_clock_getres, 2) 2251 MIPS_SYS(sys_clock_nanosleep, 4) /* 4265 */ 2252 MIPS_SYS(sys_tgkill , 3) 2253 MIPS_SYS(sys_utimes , 2) 2254 MIPS_SYS(sys_mbind , 4) 2255 MIPS_SYS(sys_ni_syscall , 0) /* sys_get_mempolicy */ 2256 MIPS_SYS(sys_ni_syscall , 0) /* 4270 sys_set_mempolicy */ 2257 MIPS_SYS(sys_mq_open , 4) 2258 MIPS_SYS(sys_mq_unlink , 1) 2259 MIPS_SYS(sys_mq_timedsend, 5) 2260 MIPS_SYS(sys_mq_timedreceive, 5) 2261 MIPS_SYS(sys_mq_notify , 2) /* 4275 */ 2262 MIPS_SYS(sys_mq_getsetattr, 3) 2263 MIPS_SYS(sys_ni_syscall , 0) /* sys_vserver */ 2264 MIPS_SYS(sys_waitid , 4) 2265 MIPS_SYS(sys_ni_syscall , 0) /* available, was setaltroot */ 2266 MIPS_SYS(sys_add_key , 5) 2267 MIPS_SYS(sys_request_key, 4) 2268 MIPS_SYS(sys_keyctl , 5) 2269 MIPS_SYS(sys_set_thread_area, 1) 2270 MIPS_SYS(sys_inotify_init, 0) 2271 MIPS_SYS(sys_inotify_add_watch, 3) /* 4285 */ 2272 MIPS_SYS(sys_inotify_rm_watch, 2) 2273 MIPS_SYS(sys_migrate_pages, 4) 2274 MIPS_SYS(sys_openat, 4) 2275 MIPS_SYS(sys_mkdirat, 3) 2276 MIPS_SYS(sys_mknodat, 4) /* 4290 */ 2277 MIPS_SYS(sys_fchownat, 5) 2278 MIPS_SYS(sys_futimesat, 3) 2279 MIPS_SYS(sys_fstatat64, 4) 2280 MIPS_SYS(sys_unlinkat, 3) 2281 MIPS_SYS(sys_renameat, 4) /* 4295 */ 2282 MIPS_SYS(sys_linkat, 5) 2283 MIPS_SYS(sys_symlinkat, 3) 2284 MIPS_SYS(sys_readlinkat, 4) 2285 MIPS_SYS(sys_fchmodat, 3) 2286 MIPS_SYS(sys_faccessat, 3) /* 4300 */ 2287 MIPS_SYS(sys_pselect6, 6) 2288 MIPS_SYS(sys_ppoll, 5) 2289 MIPS_SYS(sys_unshare, 1) 2290 MIPS_SYS(sys_splice, 6) 2291 MIPS_SYS(sys_sync_file_range, 7) /* 4305 */ 2292 MIPS_SYS(sys_tee, 4) 2293 MIPS_SYS(sys_vmsplice, 4) 2294 MIPS_SYS(sys_move_pages, 6) 2295 MIPS_SYS(sys_set_robust_list, 2) 2296 MIPS_SYS(sys_get_robust_list, 3) /* 4310 */ 2297 MIPS_SYS(sys_kexec_load, 4) 2298 MIPS_SYS(sys_getcpu, 3) 2299 MIPS_SYS(sys_epoll_pwait, 6) 2300 MIPS_SYS(sys_ioprio_set, 3) 2301 MIPS_SYS(sys_ioprio_get, 2) 2302 MIPS_SYS(sys_utimensat, 4) 2303 MIPS_SYS(sys_signalfd, 3) 2304 MIPS_SYS(sys_ni_syscall, 0) /* was timerfd */ 2305 MIPS_SYS(sys_eventfd, 1) 2306 MIPS_SYS(sys_fallocate, 6) /* 4320 */ 2307 MIPS_SYS(sys_timerfd_create, 2) 2308 MIPS_SYS(sys_timerfd_gettime, 2) 2309 MIPS_SYS(sys_timerfd_settime, 4) 2310 MIPS_SYS(sys_signalfd4, 4) 2311 MIPS_SYS(sys_eventfd2, 2) /* 4325 */ 2312 MIPS_SYS(sys_epoll_create1, 1) 2313 MIPS_SYS(sys_dup3, 3) 2314 MIPS_SYS(sys_pipe2, 2) 2315 MIPS_SYS(sys_inotify_init1, 1) 2316 MIPS_SYS(sys_preadv, 6) /* 4330 */ 2317 MIPS_SYS(sys_pwritev, 6) 2318 MIPS_SYS(sys_rt_tgsigqueueinfo, 4) 2319 MIPS_SYS(sys_perf_event_open, 5) 2320 MIPS_SYS(sys_accept4, 4) 2321 MIPS_SYS(sys_recvmmsg, 5) /* 4335 */ 2322 MIPS_SYS(sys_fanotify_init, 2) 2323 MIPS_SYS(sys_fanotify_mark, 6) 2324 MIPS_SYS(sys_prlimit64, 4) 2325 MIPS_SYS(sys_name_to_handle_at, 5) 2326 MIPS_SYS(sys_open_by_handle_at, 3) /* 4340 */ 2327 MIPS_SYS(sys_clock_adjtime, 2) 2328 MIPS_SYS(sys_syncfs, 1) 2329 }; 2330 # undef MIPS_SYS 2331 # endif /* O32 */ 2332 2333 static int do_store_exclusive(CPUMIPSState *env) 2334 { 2335 target_ulong addr; 2336 target_ulong page_addr; 2337 target_ulong val; 2338 int flags; 2339 int segv = 0; 2340 int reg; 2341 int d; 2342 2343 addr = env->lladdr; 2344 page_addr = addr & TARGET_PAGE_MASK; 2345 start_exclusive(); 2346 mmap_lock(); 2347 flags = page_get_flags(page_addr); 2348 if ((flags & PAGE_READ) == 0) { 2349 segv = 1; 2350 } else { 2351 reg = env->llreg & 0x1f; 2352 d = (env->llreg 
& 0x20) != 0; 2353 if (d) { 2354 segv = get_user_s64(val, addr); 2355 } else { 2356 segv = get_user_s32(val, addr); 2357 } 2358 if (!segv) { 2359 if (val != env->llval) { 2360 env->active_tc.gpr[reg] = 0; 2361 } else { 2362 if (d) { 2363 segv = put_user_u64(env->llnewval, addr); 2364 } else { 2365 segv = put_user_u32(env->llnewval, addr); 2366 } 2367 if (!segv) { 2368 env->active_tc.gpr[reg] = 1; 2369 } 2370 } 2371 } 2372 } 2373 env->lladdr = -1; 2374 if (!segv) { 2375 env->active_tc.PC += 4; 2376 } 2377 mmap_unlock(); 2378 end_exclusive(); 2379 return segv; 2380 } 2381 2382 /* Break codes */ 2383 enum { 2384 BRK_OVERFLOW = 6, 2385 BRK_DIVZERO = 7 2386 }; 2387 2388 static int do_break(CPUMIPSState *env, target_siginfo_t *info, 2389 unsigned int code) 2390 { 2391 int ret = -1; 2392 2393 switch (code) { 2394 case BRK_OVERFLOW: 2395 case BRK_DIVZERO: 2396 info->si_signo = TARGET_SIGFPE; 2397 info->si_errno = 0; 2398 info->si_code = (code == BRK_OVERFLOW) ? FPE_INTOVF : FPE_INTDIV; 2399 queue_signal(env, info->si_signo, &*info); 2400 ret = 0; 2401 break; 2402 default: 2403 info->si_signo = TARGET_SIGTRAP; 2404 info->si_errno = 0; 2405 queue_signal(env, info->si_signo, &*info); 2406 ret = 0; 2407 break; 2408 } 2409 2410 return ret; 2411 } 2412 2413 void cpu_loop(CPUMIPSState *env) 2414 { 2415 CPUState *cs = CPU(mips_env_get_cpu(env)); 2416 target_siginfo_t info; 2417 int trapnr; 2418 abi_long ret; 2419 # ifdef TARGET_ABI_MIPSO32 2420 unsigned int syscall_num; 2421 # endif 2422 2423 for(;;) { 2424 cpu_exec_start(cs); 2425 trapnr = cpu_mips_exec(env); 2426 cpu_exec_end(cs); 2427 switch(trapnr) { 2428 case EXCP_SYSCALL: 2429 env->active_tc.PC += 4; 2430 # ifdef TARGET_ABI_MIPSO32 2431 syscall_num = env->active_tc.gpr[2] - 4000; 2432 if (syscall_num >= sizeof(mips_syscall_args)) { 2433 ret = -TARGET_ENOSYS; 2434 } else { 2435 int nb_args; 2436 abi_ulong sp_reg; 2437 abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0; 2438 2439 nb_args = mips_syscall_args[syscall_num]; 2440 sp_reg = env->active_tc.gpr[29]; 2441 switch (nb_args) { 2442 /* these arguments are taken from the stack */ 2443 case 8: 2444 if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) { 2445 goto done_syscall; 2446 } 2447 case 7: 2448 if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) { 2449 goto done_syscall; 2450 } 2451 case 6: 2452 if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) { 2453 goto done_syscall; 2454 } 2455 case 5: 2456 if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) { 2457 goto done_syscall; 2458 } 2459 default: 2460 break; 2461 } 2462 ret = do_syscall(env, env->active_tc.gpr[2], 2463 env->active_tc.gpr[4], 2464 env->active_tc.gpr[5], 2465 env->active_tc.gpr[6], 2466 env->active_tc.gpr[7], 2467 arg5, arg6, arg7, arg8); 2468 } 2469 done_syscall: 2470 # else 2471 ret = do_syscall(env, env->active_tc.gpr[2], 2472 env->active_tc.gpr[4], env->active_tc.gpr[5], 2473 env->active_tc.gpr[6], env->active_tc.gpr[7], 2474 env->active_tc.gpr[8], env->active_tc.gpr[9], 2475 env->active_tc.gpr[10], env->active_tc.gpr[11]); 2476 # endif /* O32 */ 2477 if (ret == -TARGET_QEMU_ESIGRETURN) { 2478 /* Returning from a successful sigreturn syscall. 2479 Avoid clobbering register state. 
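               sigreturn has already reloaded every guest register from the
               signal frame, so writing the usual result into v0/a3 here
               would undo that restoration.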
*/ 2480 break; 2481 } 2482 if ((abi_ulong)ret >= (abi_ulong)-1133) { 2483 env->active_tc.gpr[7] = 1; /* error flag */ 2484 ret = -ret; 2485 } else { 2486 env->active_tc.gpr[7] = 0; /* error flag */ 2487 } 2488 env->active_tc.gpr[2] = ret; 2489 break; 2490 case EXCP_TLBL: 2491 case EXCP_TLBS: 2492 case EXCP_AdEL: 2493 case EXCP_AdES: 2494 info.si_signo = TARGET_SIGSEGV; 2495 info.si_errno = 0; 2496 /* XXX: check env->error_code */ 2497 info.si_code = TARGET_SEGV_MAPERR; 2498 info._sifields._sigfault._addr = env->CP0_BadVAddr; 2499 queue_signal(env, info.si_signo, &info); 2500 break; 2501 case EXCP_CpU: 2502 case EXCP_RI: 2503 info.si_signo = TARGET_SIGILL; 2504 info.si_errno = 0; 2505 info.si_code = 0; 2506 queue_signal(env, info.si_signo, &info); 2507 break; 2508 case EXCP_INTERRUPT: 2509 /* just indicate that signals should be handled asap */ 2510 break; 2511 case EXCP_DEBUG: 2512 { 2513 int sig; 2514 2515 sig = gdb_handlesig(cs, TARGET_SIGTRAP); 2516 if (sig) 2517 { 2518 info.si_signo = sig; 2519 info.si_errno = 0; 2520 info.si_code = TARGET_TRAP_BRKPT; 2521 queue_signal(env, info.si_signo, &info); 2522 } 2523 } 2524 break; 2525 case EXCP_SC: 2526 if (do_store_exclusive(env)) { 2527 info.si_signo = TARGET_SIGSEGV; 2528 info.si_errno = 0; 2529 info.si_code = TARGET_SEGV_MAPERR; 2530 info._sifields._sigfault._addr = env->active_tc.PC; 2531 queue_signal(env, info.si_signo, &info); 2532 } 2533 break; 2534 case EXCP_DSPDIS: 2535 info.si_signo = TARGET_SIGILL; 2536 info.si_errno = 0; 2537 info.si_code = TARGET_ILL_ILLOPC; 2538 queue_signal(env, info.si_signo, &info); 2539 break; 2540 /* The code below was inspired by the MIPS Linux kernel trap 2541 * handling code in arch/mips/kernel/traps.c. 2542 */ 2543 case EXCP_BREAK: 2544 { 2545 abi_ulong trap_instr; 2546 unsigned int code; 2547 2548 if (env->hflags & MIPS_HFLAG_M16) { 2549 if (env->insn_flags & ASE_MICROMIPS) { 2550 /* microMIPS mode */ 2551 ret = get_user_u16(trap_instr, env->active_tc.PC); 2552 if (ret != 0) { 2553 goto error; 2554 } 2555 2556 if ((trap_instr >> 10) == 0x11) { 2557 /* 16-bit instruction */ 2558 code = trap_instr & 0xf; 2559 } else { 2560 /* 32-bit instruction */ 2561 abi_ulong instr_lo; 2562 2563 ret = get_user_u16(instr_lo, 2564 env->active_tc.PC + 2); 2565 if (ret != 0) { 2566 goto error; 2567 } 2568 trap_instr = (trap_instr << 16) | instr_lo; 2569 code = ((trap_instr >> 6) & ((1 << 20) - 1)); 2570 /* Unfortunately, microMIPS also suffers from 2571 the old assembler bug... */ 2572 if (code >= (1 << 10)) { 2573 code >>= 10; 2574 } 2575 } 2576 } else { 2577 /* MIPS16e mode */ 2578 ret = get_user_u16(trap_instr, env->active_tc.PC); 2579 if (ret != 0) { 2580 goto error; 2581 } 2582 code = (trap_instr >> 6) & 0x3f; 2583 } 2584 } else { 2585 ret = get_user_ual(trap_instr, env->active_tc.PC); 2586 if (ret != 0) { 2587 goto error; 2588 } 2589 2590 /* As described in the original Linux kernel code, the 2591 * below checks on 'code' are to work around an old 2592 * assembly bug. 
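                     * (Old assemblers placed the break code in the upper
                     * half of the 20-bit field, so a value of 1 << 10 or
                     * more is taken to be such an encoding and shifted back
                     * down; e.g. a code of 7 emitted the old way reads as
                     * 7 << 10 until the shift below recovers it.)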
                     */
                    code = ((trap_instr >> 6) & ((1 << 20) - 1));
                    if (code >= (1 << 10)) {
                        code >>= 10;
                    }
                }

                if (do_break(env, &info, code) != 0) {
                    goto error;
                }
            }
            break;
        case EXCP_TRAP:
            {
                abi_ulong trap_instr;
                unsigned int code = 0;

                if (env->hflags & MIPS_HFLAG_M16) {
                    /* microMIPS mode */
                    abi_ulong instr[2];

                    ret = get_user_u16(instr[0], env->active_tc.PC) ||
                          get_user_u16(instr[1], env->active_tc.PC + 2);

                    trap_instr = (instr[0] << 16) | instr[1];
                } else {
                    ret = get_user_ual(trap_instr, env->active_tc.PC);
                }

                if (ret != 0) {
                    goto error;
                }

                /* The immediate versions don't provide a code. */
                if (!(trap_instr & 0xFC000000)) {
                    if (env->hflags & MIPS_HFLAG_M16) {
                        /* microMIPS mode */
                        code = ((trap_instr >> 12) & ((1 << 4) - 1));
                    } else {
                        code = ((trap_instr >> 6) & ((1 << 10) - 1));
                    }
                }

                if (do_break(env, &info, code) != 0) {
                    goto error;
                }
            }
            break;
        default:
        error:
            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
                    trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
            abort();
        }
        process_pending_signals(env);
    }
}
#endif

#ifdef TARGET_OPENRISC

void cpu_loop(CPUOpenRISCState *env)
{
    CPUState *cs = CPU(openrisc_env_get_cpu(env));
    int trapnr, gdbsig;

    for (;;) {
        trapnr = cpu_exec(env);
        gdbsig = 0;

        switch (trapnr) {
        case EXCP_RESET:
            qemu_log("\nReset request, exit, pc is %#x\n", env->pc);
            exit(1);
            break;
        case EXCP_BUSERR:
            qemu_log("\nBus error, exit, pc is %#x\n", env->pc);
            gdbsig = SIGBUS;
            break;
        case EXCP_DPF:
        case EXCP_IPF:
            cpu_dump_state(cs, stderr, fprintf, 0);
            gdbsig = TARGET_SIGSEGV;
            break;
        case EXCP_TICK:
            qemu_log("\nTick time interrupt pc is %#x\n", env->pc);
            break;
        case EXCP_ALIGN:
            qemu_log("\nAlignment pc is %#x\n", env->pc);
            gdbsig = SIGBUS;
            break;
        case EXCP_ILLEGAL:
            qemu_log("\nIllegal instruction pc is %#x\n", env->pc);
            gdbsig = SIGILL;
            break;
        case EXCP_INT:
            qemu_log("\nExternal interrupt pc is %#x\n", env->pc);
            break;
        case EXCP_DTLBMISS:
        case EXCP_ITLBMISS:
            qemu_log("\nTLB miss\n");
            break;
        case EXCP_RANGE:
            qemu_log("\nRange\n");
            gdbsig = SIGSEGV;
            break;
        case EXCP_SYSCALL:
            env->pc += 4;   /* 0xc00; */
            env->gpr[11] = do_syscall(env,
                                      env->gpr[11], /* syscall number (r11 also receives the result) */
                                      env->gpr[3],  /* r3 - r7 are params */
                                      env->gpr[4],
                                      env->gpr[5],
                                      env->gpr[6],
                                      env->gpr[7],
                                      env->gpr[8], 0, 0);
            break;
        case EXCP_FPE:
            qemu_log("\nFloating point error\n");
            break;
        case EXCP_TRAP:
            qemu_log("\nTrap\n");
            gdbsig = SIGTRAP;
            break;
        case EXCP_NR:
            qemu_log("\nNR\n");
            break;
        default:
            qemu_log("\nqemu: unhandled CPU exception %#x - aborting\n",
                     trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
            gdbsig = TARGET_SIGILL;
            break;
        }
        if (gdbsig) {
            gdb_handlesig(cs, gdbsig);
            if (gdbsig != TARGET_SIGTRAP) {
                exit(1);
            }
        }

        process_pending_signals(env);
    }
}

#endif /* TARGET_OPENRISC */

#ifdef TARGET_SH4
void cpu_loop(CPUSH4State *env)
{
    CPUState *cs = CPU(sh_env_get_cpu(env));
    int trapnr, ret;
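    /* The trap 0x160 case below follows the Linux SH-4 syscall convention
       visible in the do_syscall() call: syscall number in r3, arguments in
       r4-r7 and then r0-r1, with the result written back to r0. */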
target_siginfo_t info; 2747 2748 while (1) { 2749 trapnr = cpu_sh4_exec (env); 2750 2751 switch (trapnr) { 2752 case 0x160: 2753 env->pc += 2; 2754 ret = do_syscall(env, 2755 env->gregs[3], 2756 env->gregs[4], 2757 env->gregs[5], 2758 env->gregs[6], 2759 env->gregs[7], 2760 env->gregs[0], 2761 env->gregs[1], 2762 0, 0); 2763 env->gregs[0] = ret; 2764 break; 2765 case EXCP_INTERRUPT: 2766 /* just indicate that signals should be handled asap */ 2767 break; 2768 case EXCP_DEBUG: 2769 { 2770 int sig; 2771 2772 sig = gdb_handlesig(cs, TARGET_SIGTRAP); 2773 if (sig) 2774 { 2775 info.si_signo = sig; 2776 info.si_errno = 0; 2777 info.si_code = TARGET_TRAP_BRKPT; 2778 queue_signal(env, info.si_signo, &info); 2779 } 2780 } 2781 break; 2782 case 0xa0: 2783 case 0xc0: 2784 info.si_signo = SIGSEGV; 2785 info.si_errno = 0; 2786 info.si_code = TARGET_SEGV_MAPERR; 2787 info._sifields._sigfault._addr = env->tea; 2788 queue_signal(env, info.si_signo, &info); 2789 break; 2790 2791 default: 2792 printf ("Unhandled trap: 0x%x\n", trapnr); 2793 cpu_dump_state(cs, stderr, fprintf, 0); 2794 exit (1); 2795 } 2796 process_pending_signals (env); 2797 } 2798 } 2799 #endif 2800 2801 #ifdef TARGET_CRIS 2802 void cpu_loop(CPUCRISState *env) 2803 { 2804 CPUState *cs = CPU(cris_env_get_cpu(env)); 2805 int trapnr, ret; 2806 target_siginfo_t info; 2807 2808 while (1) { 2809 trapnr = cpu_cris_exec (env); 2810 switch (trapnr) { 2811 case 0xaa: 2812 { 2813 info.si_signo = SIGSEGV; 2814 info.si_errno = 0; 2815 /* XXX: check env->error_code */ 2816 info.si_code = TARGET_SEGV_MAPERR; 2817 info._sifields._sigfault._addr = env->pregs[PR_EDA]; 2818 queue_signal(env, info.si_signo, &info); 2819 } 2820 break; 2821 case EXCP_INTERRUPT: 2822 /* just indicate that signals should be handled asap */ 2823 break; 2824 case EXCP_BREAK: 2825 ret = do_syscall(env, 2826 env->regs[9], 2827 env->regs[10], 2828 env->regs[11], 2829 env->regs[12], 2830 env->regs[13], 2831 env->pregs[7], 2832 env->pregs[11], 2833 0, 0); 2834 env->regs[10] = ret; 2835 break; 2836 case EXCP_DEBUG: 2837 { 2838 int sig; 2839 2840 sig = gdb_handlesig(cs, TARGET_SIGTRAP); 2841 if (sig) 2842 { 2843 info.si_signo = sig; 2844 info.si_errno = 0; 2845 info.si_code = TARGET_TRAP_BRKPT; 2846 queue_signal(env, info.si_signo, &info); 2847 } 2848 } 2849 break; 2850 default: 2851 printf ("Unhandled trap: 0x%x\n", trapnr); 2852 cpu_dump_state(cs, stderr, fprintf, 0); 2853 exit (1); 2854 } 2855 process_pending_signals (env); 2856 } 2857 } 2858 #endif 2859 2860 #ifdef TARGET_MICROBLAZE 2861 void cpu_loop(CPUMBState *env) 2862 { 2863 CPUState *cs = CPU(mb_env_get_cpu(env)); 2864 int trapnr, ret; 2865 target_siginfo_t info; 2866 2867 while (1) { 2868 trapnr = cpu_mb_exec (env); 2869 switch (trapnr) { 2870 case 0xaa: 2871 { 2872 info.si_signo = SIGSEGV; 2873 info.si_errno = 0; 2874 /* XXX: check env->error_code */ 2875 info.si_code = TARGET_SEGV_MAPERR; 2876 info._sifields._sigfault._addr = 0; 2877 queue_signal(env, info.si_signo, &info); 2878 } 2879 break; 2880 case EXCP_INTERRUPT: 2881 /* just indicate that signals should be handled asap */ 2882 break; 2883 case EXCP_BREAK: 2884 /* Return address is 4 bytes after the call. 
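               r14 holds the address of the instruction that raised the trap,
               so stepping it forward by one instruction yields both the
               return address and the PC to resume at.  The syscall itself
               follows the kernel ABI mirrored below: number in r12,
               arguments in r5-r10, result returned in r3.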
             */
            env->regs[14] += 4;
            env->sregs[SR_PC] = env->regs[14];
            ret = do_syscall(env,
                             env->regs[12],
                             env->regs[5],
                             env->regs[6],
                             env->regs[7],
                             env->regs[8],
                             env->regs[9],
                             env->regs[10],
                             0, 0);
            env->regs[3] = ret;
            break;
        case EXCP_HW_EXCP:
            env->regs[17] = env->sregs[SR_PC] + 4;
            if (env->iflags & D_FLAG) {
                env->sregs[SR_ESR] |= 1 << 12;
                env->sregs[SR_PC] -= 4;
                /* FIXME: if branch was immed, replay the imm as well. */
            }

            env->iflags &= ~(IMM_FLAG | D_FLAG);

            switch (env->sregs[SR_ESR] & 31) {
            case ESR_EC_DIVZERO:
                info.si_signo = SIGFPE;
                info.si_errno = 0;
                info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
                break;
            case ESR_EC_FPU:
                info.si_signo = SIGFPE;
                info.si_errno = 0;
                if (env->sregs[SR_FSR] & FSR_IO) {
                    info.si_code = TARGET_FPE_FLTINV;
                }
                if (env->sregs[SR_FSR] & FSR_DZ) {
                    info.si_code = TARGET_FPE_FLTDIV;
                }
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
                break;
            default:
                printf("Unhandled hw-exception: 0x%x\n",
                       env->sregs[SR_ESR] & ESR_EC_MASK);
                cpu_dump_state(cs, stderr, fprintf, 0);
                exit(1);
                break;
            }
            break;
        case EXCP_DEBUG:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig) {
                    info.si_signo = sig;
                    info.si_errno = 0;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
                }
            }
            break;
        default:
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
            exit(1);
        }
        process_pending_signals(env);
    }
}
#endif

#ifdef TARGET_M68K

void cpu_loop(CPUM68KState *env)
{
    CPUState *cs = CPU(m68k_env_get_cpu(env));
    int trapnr;
    unsigned int n;
    target_siginfo_t info;
    TaskState *ts = cs->opaque;

    for (;;) {
        trapnr = cpu_m68k_exec(env);
        switch (trapnr) {
        case EXCP_ILLEGAL:
            {
                if (ts->sim_syscalls) {
                    uint16_t nr;
                    nr = lduw(env->pc + 2);
                    env->pc += 4;
                    do_m68k_simcall(env, nr);
                } else {
                    goto do_sigill;
                }
            }
            break;
        case EXCP_HALT_INSN:
            /* Semihosting syscall.
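               The operation number is taken from d0 and dispatched by
               do_m68k_semihosting() below, after stepping the pc over the
               halt instruction.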
*/ 2987 env->pc += 4; 2988 do_m68k_semihosting(env, env->dregs[0]); 2989 break; 2990 case EXCP_LINEA: 2991 case EXCP_LINEF: 2992 case EXCP_UNSUPPORTED: 2993 do_sigill: 2994 info.si_signo = SIGILL; 2995 info.si_errno = 0; 2996 info.si_code = TARGET_ILL_ILLOPN; 2997 info._sifields._sigfault._addr = env->pc; 2998 queue_signal(env, info.si_signo, &info); 2999 break; 3000 case EXCP_TRAP0: 3001 { 3002 ts->sim_syscalls = 0; 3003 n = env->dregs[0]; 3004 env->pc += 2; 3005 env->dregs[0] = do_syscall(env, 3006 n, 3007 env->dregs[1], 3008 env->dregs[2], 3009 env->dregs[3], 3010 env->dregs[4], 3011 env->dregs[5], 3012 env->aregs[0], 3013 0, 0); 3014 } 3015 break; 3016 case EXCP_INTERRUPT: 3017 /* just indicate that signals should be handled asap */ 3018 break; 3019 case EXCP_ACCESS: 3020 { 3021 info.si_signo = SIGSEGV; 3022 info.si_errno = 0; 3023 /* XXX: check env->error_code */ 3024 info.si_code = TARGET_SEGV_MAPERR; 3025 info._sifields._sigfault._addr = env->mmu.ar; 3026 queue_signal(env, info.si_signo, &info); 3027 } 3028 break; 3029 case EXCP_DEBUG: 3030 { 3031 int sig; 3032 3033 sig = gdb_handlesig(cs, TARGET_SIGTRAP); 3034 if (sig) 3035 { 3036 info.si_signo = sig; 3037 info.si_errno = 0; 3038 info.si_code = TARGET_TRAP_BRKPT; 3039 queue_signal(env, info.si_signo, &info); 3040 } 3041 } 3042 break; 3043 default: 3044 fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", 3045 trapnr); 3046 cpu_dump_state(cs, stderr, fprintf, 0); 3047 abort(); 3048 } 3049 process_pending_signals(env); 3050 } 3051 } 3052 #endif /* TARGET_M68K */ 3053 3054 #ifdef TARGET_ALPHA 3055 static void do_store_exclusive(CPUAlphaState *env, int reg, int quad) 3056 { 3057 target_ulong addr, val, tmp; 3058 target_siginfo_t info; 3059 int ret = 0; 3060 3061 addr = env->lock_addr; 3062 tmp = env->lock_st_addr; 3063 env->lock_addr = -1; 3064 env->lock_st_addr = 0; 3065 3066 start_exclusive(); 3067 mmap_lock(); 3068 3069 if (addr == tmp) { 3070 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) { 3071 goto do_sigsegv; 3072 } 3073 3074 if (val == env->lock_value) { 3075 tmp = env->ir[reg]; 3076 if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) { 3077 goto do_sigsegv; 3078 } 3079 ret = 1; 3080 } 3081 } 3082 env->ir[reg] = ret; 3083 env->pc += 4; 3084 3085 mmap_unlock(); 3086 end_exclusive(); 3087 return; 3088 3089 do_sigsegv: 3090 mmap_unlock(); 3091 end_exclusive(); 3092 3093 info.si_signo = TARGET_SIGSEGV; 3094 info.si_errno = 0; 3095 info.si_code = TARGET_SEGV_MAPERR; 3096 info._sifields._sigfault._addr = addr; 3097 queue_signal(env, TARGET_SIGSEGV, &info); 3098 } 3099 3100 void cpu_loop(CPUAlphaState *env) 3101 { 3102 CPUState *cs = CPU(alpha_env_get_cpu(env)); 3103 int trapnr; 3104 target_siginfo_t info; 3105 abi_long sysret; 3106 3107 while (1) { 3108 trapnr = cpu_alpha_exec (env); 3109 3110 /* All of the traps imply a transition through PALcode, which 3111 implies an REI instruction has been executed. Which means 3112 that the intr_flag should be cleared. */ 3113 env->intr_flag = 0; 3114 3115 switch (trapnr) { 3116 case EXCP_RESET: 3117 fprintf(stderr, "Reset requested. Exit\n"); 3118 exit(1); 3119 break; 3120 case EXCP_MCHK: 3121 fprintf(stderr, "Machine check exception. Exit\n"); 3122 exit(1); 3123 break; 3124 case EXCP_SMP_INTERRUPT: 3125 case EXCP_CLK_INTERRUPT: 3126 case EXCP_DEV_INTERRUPT: 3127 fprintf(stderr, "External interrupt. 
Exit\n"); 3128 exit(1); 3129 break; 3130 case EXCP_MMFAULT: 3131 env->lock_addr = -1; 3132 info.si_signo = TARGET_SIGSEGV; 3133 info.si_errno = 0; 3134 info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID 3135 ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR); 3136 info._sifields._sigfault._addr = env->trap_arg0; 3137 queue_signal(env, info.si_signo, &info); 3138 break; 3139 case EXCP_UNALIGN: 3140 env->lock_addr = -1; 3141 info.si_signo = TARGET_SIGBUS; 3142 info.si_errno = 0; 3143 info.si_code = TARGET_BUS_ADRALN; 3144 info._sifields._sigfault._addr = env->trap_arg0; 3145 queue_signal(env, info.si_signo, &info); 3146 break; 3147 case EXCP_OPCDEC: 3148 do_sigill: 3149 env->lock_addr = -1; 3150 info.si_signo = TARGET_SIGILL; 3151 info.si_errno = 0; 3152 info.si_code = TARGET_ILL_ILLOPC; 3153 info._sifields._sigfault._addr = env->pc; 3154 queue_signal(env, info.si_signo, &info); 3155 break; 3156 case EXCP_ARITH: 3157 env->lock_addr = -1; 3158 info.si_signo = TARGET_SIGFPE; 3159 info.si_errno = 0; 3160 info.si_code = TARGET_FPE_FLTINV; 3161 info._sifields._sigfault._addr = env->pc; 3162 queue_signal(env, info.si_signo, &info); 3163 break; 3164 case EXCP_FEN: 3165 /* No-op. Linux simply re-enables the FPU. */ 3166 break; 3167 case EXCP_CALL_PAL: 3168 env->lock_addr = -1; 3169 switch (env->error_code) { 3170 case 0x80: 3171 /* BPT */ 3172 info.si_signo = TARGET_SIGTRAP; 3173 info.si_errno = 0; 3174 info.si_code = TARGET_TRAP_BRKPT; 3175 info._sifields._sigfault._addr = env->pc; 3176 queue_signal(env, info.si_signo, &info); 3177 break; 3178 case 0x81: 3179 /* BUGCHK */ 3180 info.si_signo = TARGET_SIGTRAP; 3181 info.si_errno = 0; 3182 info.si_code = 0; 3183 info._sifields._sigfault._addr = env->pc; 3184 queue_signal(env, info.si_signo, &info); 3185 break; 3186 case 0x83: 3187 /* CALLSYS */ 3188 trapnr = env->ir[IR_V0]; 3189 sysret = do_syscall(env, trapnr, 3190 env->ir[IR_A0], env->ir[IR_A1], 3191 env->ir[IR_A2], env->ir[IR_A3], 3192 env->ir[IR_A4], env->ir[IR_A5], 3193 0, 0); 3194 if (trapnr == TARGET_NR_sigreturn 3195 || trapnr == TARGET_NR_rt_sigreturn) { 3196 break; 3197 } 3198 /* Syscall writes 0 to V0 to bypass error check, similar 3199 to how this is handled internal to Linux kernel. 3200 (Ab)use trapnr temporarily as boolean indicating error. */ 3201 trapnr = (env->ir[IR_V0] != 0 && sysret < 0); 3202 env->ir[IR_V0] = (trapnr ? -sysret : sysret); 3203 env->ir[IR_A3] = trapnr; 3204 break; 3205 case 0x86: 3206 /* IMB */ 3207 /* ??? We can probably elide the code using page_unprotect 3208 that is checking for self-modifying code. Instead we 3209 could simply call tb_flush here. Until we work out the 3210 changes required to turn off the extra write protection, 3211 this can be a no-op. */ 3212 break; 3213 case 0x9E: 3214 /* RDUNIQUE */ 3215 /* Handled in the translator for usermode. */ 3216 abort(); 3217 case 0x9F: 3218 /* WRUNIQUE */ 3219 /* Handled in the translator for usermode. 
*/ 3220 abort(); 3221 case 0xAA: 3222 /* GENTRAP */ 3223 info.si_signo = TARGET_SIGFPE; 3224 switch (env->ir[IR_A0]) { 3225 case TARGET_GEN_INTOVF: 3226 info.si_code = TARGET_FPE_INTOVF; 3227 break; 3228 case TARGET_GEN_INTDIV: 3229 info.si_code = TARGET_FPE_INTDIV; 3230 break; 3231 case TARGET_GEN_FLTOVF: 3232 info.si_code = TARGET_FPE_FLTOVF; 3233 break; 3234 case TARGET_GEN_FLTUND: 3235 info.si_code = TARGET_FPE_FLTUND; 3236 break; 3237 case TARGET_GEN_FLTINV: 3238 info.si_code = TARGET_FPE_FLTINV; 3239 break; 3240 case TARGET_GEN_FLTINE: 3241 info.si_code = TARGET_FPE_FLTRES; 3242 break; 3243 case TARGET_GEN_ROPRAND: 3244 info.si_code = 0; 3245 break; 3246 default: 3247 info.si_signo = TARGET_SIGTRAP; 3248 info.si_code = 0; 3249 break; 3250 } 3251 info.si_errno = 0; 3252 info._sifields._sigfault._addr = env->pc; 3253 queue_signal(env, info.si_signo, &info); 3254 break; 3255 default: 3256 goto do_sigill; 3257 } 3258 break; 3259 case EXCP_DEBUG: 3260 info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP); 3261 if (info.si_signo) { 3262 env->lock_addr = -1; 3263 info.si_errno = 0; 3264 info.si_code = TARGET_TRAP_BRKPT; 3265 queue_signal(env, info.si_signo, &info); 3266 } 3267 break; 3268 case EXCP_STL_C: 3269 case EXCP_STQ_C: 3270 do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C); 3271 break; 3272 case EXCP_INTERRUPT: 3273 /* Just indicate that signals should be handled asap. */ 3274 break; 3275 default: 3276 printf ("Unhandled trap: 0x%x\n", trapnr); 3277 cpu_dump_state(cs, stderr, fprintf, 0); 3278 exit (1); 3279 } 3280 process_pending_signals (env); 3281 } 3282 } 3283 #endif /* TARGET_ALPHA */ 3284 3285 #ifdef TARGET_S390X 3286 void cpu_loop(CPUS390XState *env) 3287 { 3288 CPUState *cs = CPU(s390_env_get_cpu(env)); 3289 int trapnr, n, sig; 3290 target_siginfo_t info; 3291 target_ulong addr; 3292 3293 while (1) { 3294 trapnr = cpu_s390x_exec(env); 3295 switch (trapnr) { 3296 case EXCP_INTERRUPT: 3297 /* Just indicate that signals should be handled asap. */ 3298 break; 3299 3300 case EXCP_SVC: 3301 n = env->int_svc_code; 3302 if (!n) { 3303 /* syscalls > 255 */ 3304 n = env->regs[1]; 3305 } 3306 env->psw.addr += env->int_svc_ilen; 3307 env->regs[2] = do_syscall(env, n, env->regs[2], env->regs[3], 3308 env->regs[4], env->regs[5], 3309 env->regs[6], env->regs[7], 0, 0); 3310 break; 3311 3312 case EXCP_DEBUG: 3313 sig = gdb_handlesig(cs, TARGET_SIGTRAP); 3314 if (sig) { 3315 n = TARGET_TRAP_BRKPT; 3316 goto do_signal_pc; 3317 } 3318 break; 3319 case EXCP_PGM: 3320 n = env->int_pgm_code; 3321 switch (n) { 3322 case PGM_OPERATION: 3323 case PGM_PRIVILEGED: 3324 sig = SIGILL; 3325 n = TARGET_ILL_ILLOPC; 3326 goto do_signal_pc; 3327 case PGM_PROTECTION: 3328 case PGM_ADDRESSING: 3329 sig = SIGSEGV; 3330 /* XXX: check env->error_code */ 3331 n = TARGET_SEGV_MAPERR; 3332 addr = env->__excp_addr; 3333 goto do_signal; 3334 case PGM_EXECUTE: 3335 case PGM_SPECIFICATION: 3336 case PGM_SPECIAL_OP: 3337 case PGM_OPERAND: 3338 do_sigill_opn: 3339 sig = SIGILL; 3340 n = TARGET_ILL_ILLOPN; 3341 goto do_signal_pc; 3342 3343 case PGM_FIXPT_OVERFLOW: 3344 sig = SIGFPE; 3345 n = TARGET_FPE_INTOVF; 3346 goto do_signal_pc; 3347 case PGM_FIXPT_DIVIDE: 3348 sig = SIGFPE; 3349 n = TARGET_FPE_INTDIV; 3350 goto do_signal_pc; 3351 3352 case PGM_DATA: 3353 n = (env->fpc >> 8) & 0xff; 3354 if (n == 0xff) { 3355 /* compare-and-trap */ 3356 goto do_sigill_opn; 3357 } else { 3358 /* An IEEE exception, simulated or otherwise. 
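               n at this point is the data-exception code pulled from the
               FPC above; the bits tested below follow its usual layout:
               0x80 invalid operation, 0x40 divide by zero, 0x20 overflow,
               0x10 underflow, 0x08 inexact.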
*/ 3359 if (n & 0x80) { 3360 n = TARGET_FPE_FLTINV; 3361 } else if (n & 0x40) { 3362 n = TARGET_FPE_FLTDIV; 3363 } else if (n & 0x20) { 3364 n = TARGET_FPE_FLTOVF; 3365 } else if (n & 0x10) { 3366 n = TARGET_FPE_FLTUND; 3367 } else if (n & 0x08) { 3368 n = TARGET_FPE_FLTRES; 3369 } else { 3370 /* ??? Quantum exception; BFP, DFP error. */ 3371 goto do_sigill_opn; 3372 } 3373 sig = SIGFPE; 3374 goto do_signal_pc; 3375 } 3376 3377 default: 3378 fprintf(stderr, "Unhandled program exception: %#x\n", n); 3379 cpu_dump_state(cs, stderr, fprintf, 0); 3380 exit(1); 3381 } 3382 break; 3383 3384 do_signal_pc: 3385 addr = env->psw.addr; 3386 do_signal: 3387 info.si_signo = sig; 3388 info.si_errno = 0; 3389 info.si_code = n; 3390 info._sifields._sigfault._addr = addr; 3391 queue_signal(env, info.si_signo, &info); 3392 break; 3393 3394 default: 3395 fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr); 3396 cpu_dump_state(cs, stderr, fprintf, 0); 3397 exit(1); 3398 } 3399 process_pending_signals (env); 3400 } 3401 } 3402 3403 #endif /* TARGET_S390X */ 3404 3405 THREAD CPUState *thread_cpu; 3406 3407 void task_settid(TaskState *ts) 3408 { 3409 if (ts->ts_tid == 0) { 3410 ts->ts_tid = (pid_t)syscall(SYS_gettid); 3411 } 3412 } 3413 3414 void stop_all_tasks(void) 3415 { 3416 /* 3417 * We trust that when using NPTL, start_exclusive() 3418 * handles thread stopping correctly. 3419 */ 3420 start_exclusive(); 3421 } 3422 3423 /* Assumes contents are already zeroed. */ 3424 void init_task_state(TaskState *ts) 3425 { 3426 int i; 3427 3428 ts->used = 1; 3429 ts->first_free = ts->sigqueue_table; 3430 for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) { 3431 ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1]; 3432 } 3433 ts->sigqueue_table[i].next = NULL; 3434 } 3435 3436 CPUArchState *cpu_copy(CPUArchState *env) 3437 { 3438 CPUState *cpu = ENV_GET_CPU(env); 3439 CPUArchState *new_env = cpu_init(cpu_model); 3440 CPUState *new_cpu = ENV_GET_CPU(new_env); 3441 #if defined(TARGET_HAS_ICE) 3442 CPUBreakpoint *bp; 3443 CPUWatchpoint *wp; 3444 #endif 3445 3446 /* Reset non arch specific state */ 3447 cpu_reset(new_cpu); 3448 3449 memcpy(new_env, env, sizeof(CPUArchState)); 3450 3451 /* Clone all break/watchpoints. 3452 Note: Once we support ptrace with hw-debug register access, make sure 3453 BP_CPU break/watchpoints are handled correctly on clone. 
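       (The loops below simply re-register every breakpoint and watchpoint
       from the parent CPU on the newly created one.)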
*/ 3454 QTAILQ_INIT(&cpu->breakpoints); 3455 QTAILQ_INIT(&cpu->watchpoints); 3456 #if defined(TARGET_HAS_ICE) 3457 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { 3458 cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL); 3459 } 3460 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { 3461 cpu_watchpoint_insert(new_cpu, wp->vaddr, (~wp->len_mask) + 1, 3462 wp->flags, NULL); 3463 } 3464 #endif 3465 3466 return new_env; 3467 } 3468 3469 static void handle_arg_help(const char *arg) 3470 { 3471 usage(); 3472 } 3473 3474 static void handle_arg_log(const char *arg) 3475 { 3476 int mask; 3477 3478 mask = qemu_str_to_log_mask(arg); 3479 if (!mask) { 3480 qemu_print_log_usage(stdout); 3481 exit(1); 3482 } 3483 qemu_set_log(mask); 3484 } 3485 3486 static void handle_arg_log_filename(const char *arg) 3487 { 3488 qemu_set_log_filename(arg); 3489 } 3490 3491 static void handle_arg_set_env(const char *arg) 3492 { 3493 char *r, *p, *token; 3494 r = p = strdup(arg); 3495 while ((token = strsep(&p, ",")) != NULL) { 3496 if (envlist_setenv(envlist, token) != 0) { 3497 usage(); 3498 } 3499 } 3500 free(r); 3501 } 3502 3503 static void handle_arg_unset_env(const char *arg) 3504 { 3505 char *r, *p, *token; 3506 r = p = strdup(arg); 3507 while ((token = strsep(&p, ",")) != NULL) { 3508 if (envlist_unsetenv(envlist, token) != 0) { 3509 usage(); 3510 } 3511 } 3512 free(r); 3513 } 3514 3515 static void handle_arg_argv0(const char *arg) 3516 { 3517 argv0 = strdup(arg); 3518 } 3519 3520 static void handle_arg_stack_size(const char *arg) 3521 { 3522 char *p; 3523 guest_stack_size = strtoul(arg, &p, 0); 3524 if (guest_stack_size == 0) { 3525 usage(); 3526 } 3527 3528 if (*p == 'M') { 3529 guest_stack_size *= 1024 * 1024; 3530 } else if (*p == 'k' || *p == 'K') { 3531 guest_stack_size *= 1024; 3532 } 3533 } 3534 3535 static void handle_arg_ld_prefix(const char *arg) 3536 { 3537 interp_prefix = strdup(arg); 3538 } 3539 3540 static void handle_arg_pagesize(const char *arg) 3541 { 3542 qemu_host_page_size = atoi(arg); 3543 if (qemu_host_page_size == 0 || 3544 (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) { 3545 fprintf(stderr, "page size must be a power of two\n"); 3546 exit(1); 3547 } 3548 } 3549 3550 static void handle_arg_gdb(const char *arg) 3551 { 3552 gdbstub_port = atoi(arg); 3553 } 3554 3555 static void handle_arg_uname(const char *arg) 3556 { 3557 qemu_uname_release = strdup(arg); 3558 } 3559 3560 static void handle_arg_cpu(const char *arg) 3561 { 3562 cpu_model = strdup(arg); 3563 if (cpu_model == NULL || is_help_option(cpu_model)) { 3564 /* XXX: implement xxx_cpu_list for targets that still miss it */ 3565 #if defined(cpu_list) 3566 cpu_list(stdout, &fprintf); 3567 #endif 3568 exit(1); 3569 } 3570 } 3571 3572 #if defined(CONFIG_USE_GUEST_BASE) 3573 static void handle_arg_guest_base(const char *arg) 3574 { 3575 guest_base = strtol(arg, NULL, 0); 3576 have_guest_base = 1; 3577 } 3578 3579 static void handle_arg_reserved_va(const char *arg) 3580 { 3581 char *p; 3582 int shift = 0; 3583 reserved_va = strtoul(arg, &p, 0); 3584 switch (*p) { 3585 case 'k': 3586 case 'K': 3587 shift = 10; 3588 break; 3589 case 'M': 3590 shift = 20; 3591 break; 3592 case 'G': 3593 shift = 30; 3594 break; 3595 } 3596 if (shift) { 3597 unsigned long unshifted = reserved_va; 3598 p++; 3599 reserved_va <<= shift; 3600 if (((reserved_va >> shift) != unshifted) 3601 #if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS 3602 || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) 3603 #endif 3604 ) { 3605 fprintf(stderr, "Reserved virtual 
address too big\n"); 3606 exit(1); 3607 } 3608 } 3609 if (*p) { 3610 fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p); 3611 exit(1); 3612 } 3613 } 3614 #endif 3615 3616 static void handle_arg_singlestep(const char *arg) 3617 { 3618 singlestep = 1; 3619 } 3620 3621 static void handle_arg_strace(const char *arg) 3622 { 3623 do_strace = 1; 3624 } 3625 3626 static void handle_arg_version(const char *arg) 3627 { 3628 printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION 3629 ", Copyright (c) 2003-2008 Fabrice Bellard\n"); 3630 exit(0); 3631 } 3632 3633 struct qemu_argument { 3634 const char *argv; 3635 const char *env; 3636 bool has_arg; 3637 void (*handle_opt)(const char *arg); 3638 const char *example; 3639 const char *help; 3640 }; 3641 3642 static const struct qemu_argument arg_table[] = { 3643 {"h", "", false, handle_arg_help, 3644 "", "print this help"}, 3645 {"g", "QEMU_GDB", true, handle_arg_gdb, 3646 "port", "wait gdb connection to 'port'"}, 3647 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix, 3648 "path", "set the elf interpreter prefix to 'path'"}, 3649 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size, 3650 "size", "set the stack size to 'size' bytes"}, 3651 {"cpu", "QEMU_CPU", true, handle_arg_cpu, 3652 "model", "select CPU (-cpu help for list)"}, 3653 {"E", "QEMU_SET_ENV", true, handle_arg_set_env, 3654 "var=value", "sets targets environment variable (see below)"}, 3655 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env, 3656 "var", "unsets targets environment variable (see below)"}, 3657 {"0", "QEMU_ARGV0", true, handle_arg_argv0, 3658 "argv0", "forces target process argv[0] to be 'argv0'"}, 3659 {"r", "QEMU_UNAME", true, handle_arg_uname, 3660 "uname", "set qemu uname release string to 'uname'"}, 3661 #if defined(CONFIG_USE_GUEST_BASE) 3662 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base, 3663 "address", "set guest_base address to 'address'"}, 3664 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va, 3665 "size", "reserve 'size' bytes for guest virtual address space"}, 3666 #endif 3667 {"d", "QEMU_LOG", true, handle_arg_log, 3668 "item[,...]", "enable logging of specified items " 3669 "(use '-d help' for a list of items)"}, 3670 {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename, 3671 "logfile", "write logs to 'logfile' (default stderr)"}, 3672 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize, 3673 "pagesize", "set the host page size to 'pagesize'"}, 3674 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep, 3675 "", "run in singlestep mode"}, 3676 {"strace", "QEMU_STRACE", false, handle_arg_strace, 3677 "", "log system calls"}, 3678 {"version", "QEMU_VERSION", false, handle_arg_version, 3679 "", "display version information and exit"}, 3680 {NULL, NULL, false, NULL, NULL, NULL} 3681 }; 3682 3683 static void usage(void) 3684 { 3685 const struct qemu_argument *arginfo; 3686 int maxarglen; 3687 int maxenvlen; 3688 3689 printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n" 3690 "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n" 3691 "\n" 3692 "Options and associated environment variables:\n" 3693 "\n"); 3694 3695 /* Calculate column widths. We must always have at least enough space 3696 * for the column header. 
3697 */ 3698 maxarglen = strlen("Argument"); 3699 maxenvlen = strlen("Env-variable"); 3700 3701 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) { 3702 int arglen = strlen(arginfo->argv); 3703 if (arginfo->has_arg) { 3704 arglen += strlen(arginfo->example) + 1; 3705 } 3706 if (strlen(arginfo->env) > maxenvlen) { 3707 maxenvlen = strlen(arginfo->env); 3708 } 3709 if (arglen > maxarglen) { 3710 maxarglen = arglen; 3711 } 3712 } 3713 3714 printf("%-*s %-*s Description\n", maxarglen+1, "Argument", 3715 maxenvlen, "Env-variable"); 3716 3717 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) { 3718 if (arginfo->has_arg) { 3719 printf("-%s %-*s %-*s %s\n", arginfo->argv, 3720 (int)(maxarglen - strlen(arginfo->argv) - 1), 3721 arginfo->example, maxenvlen, arginfo->env, arginfo->help); 3722 } else { 3723 printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv, 3724 maxenvlen, arginfo->env, 3725 arginfo->help); 3726 } 3727 } 3728 3729 printf("\n" 3730 "Defaults:\n" 3731 "QEMU_LD_PREFIX = %s\n" 3732 "QEMU_STACK_SIZE = %ld byte\n", 3733 interp_prefix, 3734 guest_stack_size); 3735 3736 printf("\n" 3737 "You can use -E and -U options or the QEMU_SET_ENV and\n" 3738 "QEMU_UNSET_ENV environment variables to set and unset\n" 3739 "environment variables for the target process.\n" 3740 "It is possible to provide several variables by separating them\n" 3741 "by commas in getsubopt(3) style. Additionally it is possible to\n" 3742 "provide the -E and -U options multiple times.\n" 3743 "The following lines are equivalent:\n" 3744 " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n" 3745 " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n" 3746 " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n" 3747 "Note that if you provide several changes to a single variable\n" 3748 "the last change will stay in effect.\n"); 3749 3750 exit(1); 3751 } 3752 3753 static int parse_args(int argc, char **argv) 3754 { 3755 const char *r; 3756 int optind; 3757 const struct qemu_argument *arginfo; 3758 3759 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) { 3760 if (arginfo->env == NULL) { 3761 continue; 3762 } 3763 3764 r = getenv(arginfo->env); 3765 if (r != NULL) { 3766 arginfo->handle_opt(r); 3767 } 3768 } 3769 3770 optind = 1; 3771 for (;;) { 3772 if (optind >= argc) { 3773 break; 3774 } 3775 r = argv[optind]; 3776 if (r[0] != '-') { 3777 break; 3778 } 3779 optind++; 3780 r++; 3781 if (!strcmp(r, "-")) { 3782 break; 3783 } 3784 3785 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) { 3786 if (!strcmp(r, arginfo->argv)) { 3787 if (arginfo->has_arg) { 3788 if (optind >= argc) { 3789 usage(); 3790 } 3791 arginfo->handle_opt(argv[optind]); 3792 optind++; 3793 } else { 3794 arginfo->handle_opt(NULL); 3795 } 3796 break; 3797 } 3798 } 3799 3800 /* no option matched the current argv */ 3801 if (arginfo->handle_opt == NULL) { 3802 usage(); 3803 } 3804 } 3805 3806 if (optind >= argc) { 3807 usage(); 3808 } 3809 3810 filename = argv[optind]; 3811 exec_path = argv[optind]; 3812 3813 return optind; 3814 } 3815 3816 int main(int argc, char **argv, char **envp) 3817 { 3818 struct target_pt_regs regs1, *regs = ®s1; 3819 struct image_info info1, *info = &info1; 3820 struct linux_binprm bprm; 3821 TaskState *ts; 3822 CPUArchState *env; 3823 CPUState *cpu; 3824 int optind; 3825 char **target_environ, **wrk; 3826 char **target_argv; 3827 int target_argc; 3828 int i; 3829 int ret; 3830 int execfd; 3831 3832 module_call_init(MODULE_INIT_QOM); 3833 3834 if ((envlist = 
envlist_create()) == NULL) { 3835 (void) fprintf(stderr, "Unable to allocate envlist\n"); 3836 exit(1); 3837 } 3838 3839 /* add current environment into the list */ 3840 for (wrk = environ; *wrk != NULL; wrk++) { 3841 (void) envlist_setenv(envlist, *wrk); 3842 } 3843 3844 /* Read the stack limit from the kernel. If it's "unlimited", 3845 then we can do little else besides use the default. */ 3846 { 3847 struct rlimit lim; 3848 if (getrlimit(RLIMIT_STACK, &lim) == 0 3849 && lim.rlim_cur != RLIM_INFINITY 3850 && lim.rlim_cur == (target_long)lim.rlim_cur) { 3851 guest_stack_size = lim.rlim_cur; 3852 } 3853 } 3854 3855 cpu_model = NULL; 3856 #if defined(cpudef_setup) 3857 cpudef_setup(); /* parse cpu definitions in target config file (TBD) */ 3858 #endif 3859 3860 optind = parse_args(argc, argv); 3861 3862 /* Zero out regs */ 3863 memset(regs, 0, sizeof(struct target_pt_regs)); 3864 3865 /* Zero out image_info */ 3866 memset(info, 0, sizeof(struct image_info)); 3867 3868 memset(&bprm, 0, sizeof (bprm)); 3869 3870 /* Scan interp_prefix dir for replacement files. */ 3871 init_paths(interp_prefix); 3872 3873 init_qemu_uname_release(); 3874 3875 if (cpu_model == NULL) { 3876 #if defined(TARGET_I386) 3877 #ifdef TARGET_X86_64 3878 cpu_model = "qemu64"; 3879 #else 3880 cpu_model = "qemu32"; 3881 #endif 3882 #elif defined(TARGET_ARM) 3883 cpu_model = "any"; 3884 #elif defined(TARGET_UNICORE32) 3885 cpu_model = "any"; 3886 #elif defined(TARGET_M68K) 3887 cpu_model = "any"; 3888 #elif defined(TARGET_SPARC) 3889 #ifdef TARGET_SPARC64 3890 cpu_model = "TI UltraSparc II"; 3891 #else 3892 cpu_model = "Fujitsu MB86904"; 3893 #endif 3894 #elif defined(TARGET_MIPS) 3895 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) 3896 cpu_model = "20Kc"; 3897 #else 3898 cpu_model = "24Kf"; 3899 #endif 3900 #elif defined TARGET_OPENRISC 3901 cpu_model = "or1200"; 3902 #elif defined(TARGET_PPC) 3903 # ifdef TARGET_PPC64 3904 cpu_model = "POWER7"; 3905 # else 3906 cpu_model = "750"; 3907 # endif 3908 #else 3909 cpu_model = "any"; 3910 #endif 3911 } 3912 tcg_exec_init(0); 3913 cpu_exec_init_all(); 3914 /* NOTE: we need to init the CPU at this stage to get 3915 qemu_host_page_size */ 3916 env = cpu_init(cpu_model); 3917 if (!env) { 3918 fprintf(stderr, "Unable to find CPU definition\n"); 3919 exit(1); 3920 } 3921 cpu = ENV_GET_CPU(env); 3922 cpu_reset(cpu); 3923 3924 thread_cpu = cpu; 3925 3926 if (getenv("QEMU_STRACE")) { 3927 do_strace = 1; 3928 } 3929 3930 target_environ = envlist_to_environ(envlist, NULL); 3931 envlist_free(envlist); 3932 3933 #if defined(CONFIG_USE_GUEST_BASE) 3934 /* 3935 * Now that page sizes are configured in cpu_init() we can do 3936 * proper page alignment for guest_base. 3937 */ 3938 guest_base = HOST_PAGE_ALIGN(guest_base); 3939 3940 if (reserved_va || have_guest_base) { 3941 guest_base = init_guest_space(guest_base, reserved_va, 0, 3942 have_guest_base); 3943 if (guest_base == (unsigned long)-1) { 3944 fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address " 3945 "space for use as guest address space (check your virtual " 3946 "memory ulimit setting or reserve less using -R option)\n", 3947 reserved_va); 3948 exit(1); 3949 } 3950 3951 if (reserved_va) { 3952 mmap_next_start = reserved_va; 3953 } 3954 } 3955 #endif /* CONFIG_USE_GUEST_BASE */ 3956 3957 /* 3958 * Read in mmap_min_addr kernel parameter. This value is used 3959 * When loading the ELF image to determine whether guest_base 3960 * is needed. It is also used in mmap_find_vma. 
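 * (The kernel rejects user mappings below vm.mmap_min_addr, so QEMU must
 *  avoid choosing guest addresses that would land under that limit.)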
3961 */ 3962 { 3963 FILE *fp; 3964 3965 if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) { 3966 unsigned long tmp; 3967 if (fscanf(fp, "%lu", &tmp) == 1) { 3968 mmap_min_addr = tmp; 3969 qemu_log("host mmap_min_addr=0x%lx\n", mmap_min_addr); 3970 } 3971 fclose(fp); 3972 } 3973 } 3974 3975 /* 3976 * Prepare copy of argv vector for target. 3977 */ 3978 target_argc = argc - optind; 3979 target_argv = calloc(target_argc + 1, sizeof (char *)); 3980 if (target_argv == NULL) { 3981 (void) fprintf(stderr, "Unable to allocate memory for target_argv\n"); 3982 exit(1); 3983 } 3984 3985 /* 3986 * If argv0 is specified (using '-0' switch) we replace 3987 * argv[0] pointer with the given one. 3988 */ 3989 i = 0; 3990 if (argv0 != NULL) { 3991 target_argv[i++] = strdup(argv0); 3992 } 3993 for (; i < target_argc; i++) { 3994 target_argv[i] = strdup(argv[optind + i]); 3995 } 3996 target_argv[target_argc] = NULL; 3997 3998 ts = g_malloc0 (sizeof(TaskState)); 3999 init_task_state(ts); 4000 /* build Task State */ 4001 ts->info = info; 4002 ts->bprm = &bprm; 4003 cpu->opaque = ts; 4004 task_settid(ts); 4005 4006 execfd = qemu_getauxval(AT_EXECFD); 4007 if (execfd == 0) { 4008 execfd = open(filename, O_RDONLY); 4009 if (execfd < 0) { 4010 printf("Error while loading %s: %s\n", filename, strerror(errno)); 4011 _exit(1); 4012 } 4013 } 4014 4015 ret = loader_exec(execfd, filename, target_argv, target_environ, regs, 4016 info, &bprm); 4017 if (ret != 0) { 4018 printf("Error while loading %s: %s\n", filename, strerror(-ret)); 4019 _exit(1); 4020 } 4021 4022 for (wrk = target_environ; *wrk; wrk++) { 4023 free(*wrk); 4024 } 4025 4026 free(target_environ); 4027 4028 if (qemu_log_enabled()) { 4029 #if defined(CONFIG_USE_GUEST_BASE) 4030 qemu_log("guest_base 0x%lx\n", guest_base); 4031 #endif 4032 log_page_dump(); 4033 4034 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk); 4035 qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code); 4036 qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n", 4037 info->start_code); 4038 qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n", 4039 info->start_data); 4040 qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data); 4041 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n", 4042 info->start_stack); 4043 qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk); 4044 qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry); 4045 } 4046 4047 target_set_brk(info->brk); 4048 syscall_init(); 4049 signal_init(); 4050 4051 #if defined(CONFIG_USE_GUEST_BASE) 4052 /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay 4053 generating the prologue until now so that the prologue can take 4054 the real value of GUEST_BASE into account. 
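   (On hosts that keep GUEST_BASE in a reserved register, it is this
   prologue that loads the register, so the value must not change
   afterwards.)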
*/ 4055 tcg_prologue_init(&tcg_ctx); 4056 #endif 4057 4058 #if defined(TARGET_I386) 4059 env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK; 4060 env->hflags |= HF_PE_MASK | HF_CPL_MASK; 4061 if (env->features[FEAT_1_EDX] & CPUID_SSE) { 4062 env->cr[4] |= CR4_OSFXSR_MASK; 4063 env->hflags |= HF_OSFXSR_MASK; 4064 } 4065 #ifndef TARGET_ABI32 4066 /* enable 64 bit mode if possible */ 4067 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) { 4068 fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n"); 4069 exit(1); 4070 } 4071 env->cr[4] |= CR4_PAE_MASK; 4072 env->efer |= MSR_EFER_LMA | MSR_EFER_LME; 4073 env->hflags |= HF_LMA_MASK; 4074 #endif 4075 4076 /* flags setup : we activate the IRQs by default as in user mode */ 4077 env->eflags |= IF_MASK; 4078 4079 /* linux register setup */ 4080 #ifndef TARGET_ABI32 4081 env->regs[R_EAX] = regs->rax; 4082 env->regs[R_EBX] = regs->rbx; 4083 env->regs[R_ECX] = regs->rcx; 4084 env->regs[R_EDX] = regs->rdx; 4085 env->regs[R_ESI] = regs->rsi; 4086 env->regs[R_EDI] = regs->rdi; 4087 env->regs[R_EBP] = regs->rbp; 4088 env->regs[R_ESP] = regs->rsp; 4089 env->eip = regs->rip; 4090 #else 4091 env->regs[R_EAX] = regs->eax; 4092 env->regs[R_EBX] = regs->ebx; 4093 env->regs[R_ECX] = regs->ecx; 4094 env->regs[R_EDX] = regs->edx; 4095 env->regs[R_ESI] = regs->esi; 4096 env->regs[R_EDI] = regs->edi; 4097 env->regs[R_EBP] = regs->ebp; 4098 env->regs[R_ESP] = regs->esp; 4099 env->eip = regs->eip; 4100 #endif 4101 4102 /* linux interrupt setup */ 4103 #ifndef TARGET_ABI32 4104 env->idt.limit = 511; 4105 #else 4106 env->idt.limit = 255; 4107 #endif 4108 env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1), 4109 PROT_READ|PROT_WRITE, 4110 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 4111 idt_table = g2h(env->idt.base); 4112 set_idt(0, 0); 4113 set_idt(1, 0); 4114 set_idt(2, 0); 4115 set_idt(3, 3); 4116 set_idt(4, 3); 4117 set_idt(5, 0); 4118 set_idt(6, 0); 4119 set_idt(7, 0); 4120 set_idt(8, 0); 4121 set_idt(9, 0); 4122 set_idt(10, 0); 4123 set_idt(11, 0); 4124 set_idt(12, 0); 4125 set_idt(13, 0); 4126 set_idt(14, 0); 4127 set_idt(15, 0); 4128 set_idt(16, 0); 4129 set_idt(17, 0); 4130 set_idt(18, 0); 4131 set_idt(19, 0); 4132 set_idt(0x80, 3); 4133 4134 /* linux segment setup */ 4135 { 4136 uint64_t *gdt_table; 4137 env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES, 4138 PROT_READ|PROT_WRITE, 4139 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 4140 env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1; 4141 gdt_table = g2h(env->gdt.base); 4142 #ifdef TARGET_ABI32 4143 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff, 4144 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | 4145 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT)); 4146 #else 4147 /* 64 bit code segment */ 4148 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff, 4149 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | 4150 DESC_L_MASK | 4151 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT)); 4152 #endif 4153 write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff, 4154 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | 4155 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT)); 4156 } 4157 cpu_x86_load_seg(env, R_CS, __USER_CS); 4158 cpu_x86_load_seg(env, R_SS, __USER_DS); 4159 #ifdef TARGET_ABI32 4160 cpu_x86_load_seg(env, R_DS, __USER_DS); 4161 cpu_x86_load_seg(env, R_ES, __USER_DS); 4162 cpu_x86_load_seg(env, R_FS, __USER_DS); 4163 cpu_x86_load_seg(env, R_GS, __USER_DS); 4164 /* This hack makes Wine work... 
*/ 4165 env->segs[R_FS].selector = 0; 4166 #else 4167 cpu_x86_load_seg(env, R_DS, 0); 4168 cpu_x86_load_seg(env, R_ES, 0); 4169 cpu_x86_load_seg(env, R_FS, 0); 4170 cpu_x86_load_seg(env, R_GS, 0); 4171 #endif 4172 #elif defined(TARGET_AARCH64) 4173 { 4174 int i; 4175 4176 if (!(arm_feature(env, ARM_FEATURE_AARCH64))) { 4177 fprintf(stderr, 4178 "The selected ARM CPU does not support 64 bit mode\n"); 4179 exit(1); 4180 } 4181 4182 for (i = 0; i < 31; i++) { 4183 env->xregs[i] = regs->regs[i]; 4184 } 4185 env->pc = regs->pc; 4186 env->xregs[31] = regs->sp; 4187 } 4188 #elif defined(TARGET_ARM) 4189 { 4190 int i; 4191 cpsr_write(env, regs->uregs[16], 0xffffffff); 4192 for(i = 0; i < 16; i++) { 4193 env->regs[i] = regs->uregs[i]; 4194 } 4195 /* Enable BE8. */ 4196 if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4 4197 && (info->elf_flags & EF_ARM_BE8)) { 4198 env->bswap_code = 1; 4199 } 4200 } 4201 #elif defined(TARGET_UNICORE32) 4202 { 4203 int i; 4204 cpu_asr_write(env, regs->uregs[32], 0xffffffff); 4205 for (i = 0; i < 32; i++) { 4206 env->regs[i] = regs->uregs[i]; 4207 } 4208 } 4209 #elif defined(TARGET_SPARC) 4210 { 4211 int i; 4212 env->pc = regs->pc; 4213 env->npc = regs->npc; 4214 env->y = regs->y; 4215 for(i = 0; i < 8; i++) 4216 env->gregs[i] = regs->u_regs[i]; 4217 for(i = 0; i < 8; i++) 4218 env->regwptr[i] = regs->u_regs[i + 8]; 4219 } 4220 #elif defined(TARGET_PPC) 4221 { 4222 int i; 4223 4224 #if defined(TARGET_PPC64) 4225 #if defined(TARGET_ABI32) 4226 env->msr &= ~((target_ulong)1 << MSR_SF); 4227 #else 4228 env->msr |= (target_ulong)1 << MSR_SF; 4229 #endif 4230 #endif 4231 env->nip = regs->nip; 4232 for(i = 0; i < 32; i++) { 4233 env->gpr[i] = regs->gpr[i]; 4234 } 4235 } 4236 #elif defined(TARGET_M68K) 4237 { 4238 env->pc = regs->pc; 4239 env->dregs[0] = regs->d0; 4240 env->dregs[1] = regs->d1; 4241 env->dregs[2] = regs->d2; 4242 env->dregs[3] = regs->d3; 4243 env->dregs[4] = regs->d4; 4244 env->dregs[5] = regs->d5; 4245 env->dregs[6] = regs->d6; 4246 env->dregs[7] = regs->d7; 4247 env->aregs[0] = regs->a0; 4248 env->aregs[1] = regs->a1; 4249 env->aregs[2] = regs->a2; 4250 env->aregs[3] = regs->a3; 4251 env->aregs[4] = regs->a4; 4252 env->aregs[5] = regs->a5; 4253 env->aregs[6] = regs->a6; 4254 env->aregs[7] = regs->usp; 4255 env->sr = regs->sr; 4256 ts->sim_syscalls = 1; 4257 } 4258 #elif defined(TARGET_MICROBLAZE) 4259 { 4260 env->regs[0] = regs->r0; 4261 env->regs[1] = regs->r1; 4262 env->regs[2] = regs->r2; 4263 env->regs[3] = regs->r3; 4264 env->regs[4] = regs->r4; 4265 env->regs[5] = regs->r5; 4266 env->regs[6] = regs->r6; 4267 env->regs[7] = regs->r7; 4268 env->regs[8] = regs->r8; 4269 env->regs[9] = regs->r9; 4270 env->regs[10] = regs->r10; 4271 env->regs[11] = regs->r11; 4272 env->regs[12] = regs->r12; 4273 env->regs[13] = regs->r13; 4274 env->regs[14] = regs->r14; 4275 env->regs[15] = regs->r15; 4276 env->regs[16] = regs->r16; 4277 env->regs[17] = regs->r17; 4278 env->regs[18] = regs->r18; 4279 env->regs[19] = regs->r19; 4280 env->regs[20] = regs->r20; 4281 env->regs[21] = regs->r21; 4282 env->regs[22] = regs->r22; 4283 env->regs[23] = regs->r23; 4284 env->regs[24] = regs->r24; 4285 env->regs[25] = regs->r25; 4286 env->regs[26] = regs->r26; 4287 env->regs[27] = regs->r27; 4288 env->regs[28] = regs->r28; 4289 env->regs[29] = regs->r29; 4290 env->regs[30] = regs->r30; 4291 env->regs[31] = regs->r31; 4292 env->sregs[SR_PC] = regs->pc; 4293 } 4294 #elif defined(TARGET_MIPS) 4295 { 4296 int i; 4297 4298 for(i = 0; i < 32; i++) { 4299 env->active_tc.gpr[i] = 
regs->regs[i]; 4300 } 4301 env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1; 4302 if (regs->cp0_epc & 1) { 4303 env->hflags |= MIPS_HFLAG_M16; 4304 } 4305 } 4306 #elif defined(TARGET_OPENRISC) 4307 { 4308 int i; 4309 4310 for (i = 0; i < 32; i++) { 4311 env->gpr[i] = regs->gpr[i]; 4312 } 4313 4314 env->sr = regs->sr; 4315 env->pc = regs->pc; 4316 } 4317 #elif defined(TARGET_SH4) 4318 { 4319 int i; 4320 4321 for(i = 0; i < 16; i++) { 4322 env->gregs[i] = regs->regs[i]; 4323 } 4324 env->pc = regs->pc; 4325 } 4326 #elif defined(TARGET_ALPHA) 4327 { 4328 int i; 4329 4330 for(i = 0; i < 28; i++) { 4331 env->ir[i] = ((abi_ulong *)regs)[i]; 4332 } 4333 env->ir[IR_SP] = regs->usp; 4334 env->pc = regs->pc; 4335 } 4336 #elif defined(TARGET_CRIS) 4337 { 4338 env->regs[0] = regs->r0; 4339 env->regs[1] = regs->r1; 4340 env->regs[2] = regs->r2; 4341 env->regs[3] = regs->r3; 4342 env->regs[4] = regs->r4; 4343 env->regs[5] = regs->r5; 4344 env->regs[6] = regs->r6; 4345 env->regs[7] = regs->r7; 4346 env->regs[8] = regs->r8; 4347 env->regs[9] = regs->r9; 4348 env->regs[10] = regs->r10; 4349 env->regs[11] = regs->r11; 4350 env->regs[12] = regs->r12; 4351 env->regs[13] = regs->r13; 4352 env->regs[14] = info->start_stack; 4353 env->regs[15] = regs->acr; 4354 env->pc = regs->erp; 4355 } 4356 #elif defined(TARGET_S390X) 4357 { 4358 int i; 4359 for (i = 0; i < 16; i++) { 4360 env->regs[i] = regs->gprs[i]; 4361 } 4362 env->psw.mask = regs->psw.mask; 4363 env->psw.addr = regs->psw.addr; 4364 } 4365 #else 4366 #error unsupported target CPU 4367 #endif 4368 4369 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 4370 ts->stack_base = info->start_stack; 4371 ts->heap_base = info->brk; 4372 /* This will be filled in on the first SYS_HEAPINFO call. */ 4373 ts->heap_limit = 0; 4374 #endif 4375 4376 if (gdbstub_port) { 4377 if (gdbserver_start(gdbstub_port) < 0) { 4378 fprintf(stderr, "qemu: could not open gdbserver on port %d\n", 4379 gdbstub_port); 4380 exit(1); 4381 } 4382 gdb_handlesig(cpu, 0); 4383 } 4384 cpu_loop(env); 4385 /* never exits */ 4386 return 0; 4387 } 4388
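/*
 * A minimal standalone sketch of the table-driven option handling that
 * arg_table and parse_args() above implement: each entry pairs a command
 * line switch with an optional environment variable and a handler; the
 * environment is applied first, then argv is scanned.  Everything named
 * demo_* below is hypothetical and exists only for illustration, and it
 * is kept inside #if 0 so it is never built.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_argument {
    const char *argv;                      /* switch name, without the '-' */
    const char *env;                       /* associated environment var   */
    bool has_arg;                          /* does the switch take a value */
    void (*handle_opt)(const char *arg);   /* handler, as in qemu_argument */
};

static void demo_handle_verbose(const char *arg)
{
    (void)arg;
    puts("verbose enabled");
}

static void demo_handle_output(const char *arg)
{
    printf("output file: %s\n", arg);
}

static const struct demo_argument demo_table[] = {
    {"v", "DEMO_VERBOSE", false, demo_handle_verbose},
    {"o", "DEMO_OUTPUT",  true,  demo_handle_output},
    {NULL, NULL, false, NULL}
};

/* Same shape as parse_args(): environment first, then the argv scan. */
static int demo_parse_args(int argc, char **argv)
{
    const struct demo_argument *a;
    int optind = 1;

    for (a = demo_table; a->handle_opt != NULL; a++) {
        const char *v = a->env ? getenv(a->env) : NULL;
        if (v != NULL) {
            a->handle_opt(v);
        }
    }

    while (optind < argc && argv[optind][0] == '-') {
        const char *r = argv[optind++] + 1;

        for (a = demo_table; a->handle_opt != NULL; a++) {
            if (strcmp(r, a->argv) == 0) {
                if (a->has_arg) {
                    if (optind >= argc) {
                        fprintf(stderr, "option -%s needs a value\n", a->argv);
                        exit(1);
                    }
                    a->handle_opt(argv[optind++]);
                } else {
                    a->handle_opt(NULL);
                }
                break;
            }
        }
        if (a->handle_opt == NULL) {
            fprintf(stderr, "unknown option -%s\n", r);
            exit(1);
        }
    }
    return optind;    /* index of the first non-option argument */
}
#endif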