/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <stdlib.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <asm/unistd.h>
#include "as-layout.h"
#include "chan_user.h"
#include "kern_constants.h"
#include "mem.h"
#include "os.h"
#include "process.h"
#include "proc_mm.h"
#include "ptrace_user.h"
#include "registers.h"
#include "skas.h"
#include "skas_ptrace.h"
#include "user.h"
#include "sysdep/stub.h"

int is_skas_winch(int pid, int fd, void *data)
{
	if (pid != getpgrp())
		return 0;

	register_winch_irq(-1, fd, -1, data, 0);
	return 1;
}

static int ptrace_dump_regs(int pid)
{
	unsigned long regs[MAX_REG_NR];
	int i;

	if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
		return -errno;

	printk(UM_KERN_ERR "Stub registers -\n");
	for (i = 0; i < ARRAY_SIZE(regs); i++)
		printk(UM_KERN_ERR "\t%d - %lx\n", i, regs[i]);

	return 0;
}

/*
 * Signals that are OK to receive in the stub - we'll just continue it.
 * SIGWINCH will happen when UML is inside a detached screen.
 */
#define STUB_SIG_MASK ((1 << SIGVTALRM) | (1 << SIGWINCH))

/* Signals that the stub will finish with - anything else is an error */
#define STUB_DONE_MASK ((1 << SIGUSR1) | (1 << SIGTRAP))

void wait_stub_done(int pid)
{
	int n, status, err;

	while (1) {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
		if ((n < 0) || !WIFSTOPPED(status))
			goto bad_wait;

		if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
			break;

		err = ptrace(PTRACE_CONT, pid, 0, 0);
		if (err)
			panic("wait_stub_done : continue failed, errno = %d\n",
			      errno);
	}

	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
		return;

bad_wait:
	err = ptrace_dump_regs(pid);
	if (err)
		printk(UM_KERN_ERR "Failed to get registers from stub, "
		       "errno = %d\n", -err);
	panic("wait_stub_done : failed to wait for SIGUSR1/SIGTRAP, pid = %d, "
	      "n = %d, errno = %d, status = 0x%x\n", pid, n, errno, status);
}

extern unsigned long current_stub_stack(void);

void get_skas_faultinfo(int pid, struct faultinfo *fi)
{
	int err;

	if (ptrace_faultinfo) {
		err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
		if (err)
			panic("get_skas_faultinfo - PTRACE_FAULTINFO failed, "
			      "errno = %d\n", errno);

		/* Special handling for i386, which has different structs */
		if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
			memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
			       sizeof(struct faultinfo) -
			       sizeof(struct ptrace_faultinfo));
	}
	else {
		err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
		if (err)
			panic("Failed to continue stub, pid = %d, errno = %d\n",
			      pid, errno);
		wait_stub_done(pid);

		/*
		 * faultinfo is prepared by the stub-segv-handler at start of
		 * the stub stack page. We just have to copy it.
		 */
		memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
	}
}

static void handle_segv(int pid, struct uml_pt_regs *regs)
{
	get_skas_faultinfo(pid, &regs->faultinfo);
	segv(regs->faultinfo, 0, 1, NULL);
}

/*
 * To use the same value of using_sysemu as the caller, ask it for that
 * value (in local_using_sysemu).
 */
static void handle_trap(int pid, struct uml_pt_regs *regs,
			int local_using_sysemu)
{
	int err, status;

	/* Mark this as a syscall */
	UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->gp);

	if (!local_using_sysemu)
	{
		err = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET,
			     __NR_getpid);
		if (err < 0)
			panic("handle_trap - nullifying syscall failed, "
			      "errno = %d\n", errno);

		err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
		if (err < 0)
			panic("handle_trap - continuing to end of syscall "
			      "failed, errno = %d\n", errno);

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED));
		if ((err < 0) || !WIFSTOPPED(status) ||
		    (WSTOPSIG(status) != SIGTRAP + 0x80)) {
			err = ptrace_dump_regs(pid);
			if (err)
				printk(UM_KERN_ERR "Failed to get registers "
				       "from process, errno = %d\n", -err);
			panic("handle_trap - failed to wait at end of syscall, "
			      "errno = %d, status = %d\n", errno, status);
		}
	}

	handle_syscall(regs);
}

extern int __syscall_stub_start;

static int userspace_tramp(void *stack)
{
	void *addr;
	int err;

	ptrace(PTRACE_TRACEME, 0, 0, 0);

	signal(SIGTERM, SIG_DFL);
	err = set_interval();
	if (err)
		panic("userspace_tramp - setting timer failed, errno = %d\n",
		      err);

	if (!proc_mm) {
		/*
		 * This has a pte, but it can't be mapped in with the usual
		 * tlb_flush mechanism because this is part of that mechanism
		 */
		int fd;
		unsigned long long offset;
		fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
		addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
		if (addr == MAP_FAILED) {
			printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, "
			       "errno = %d\n", STUB_CODE, errno);
			exit(1);
		}

		if (stack != NULL) {
			fd = phys_mapping(to_phys(stack), &offset);
			addr = mmap((void *) STUB_DATA,
				    UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
				    MAP_FIXED | MAP_SHARED, fd, offset);
			if (addr == MAP_FAILED) {
				printk(UM_KERN_ERR "mapping segfault stack "
				       "at 0x%lx failed, errno = %d\n",
				       STUB_DATA, errno);
				exit(1);
			}
		}
	}
	if (!ptrace_faultinfo && (stack != NULL)) {
		struct sigaction sa;

		unsigned long v = STUB_CODE +
				  (unsigned long) stub_segv_handler -
				  (unsigned long) &__syscall_stub_start;

		set_sigstack((void *) STUB_DATA, UM_KERN_PAGE_SIZE);
		sigemptyset(&sa.sa_mask);
		sigaddset(&sa.sa_mask, SIGIO);
		sigaddset(&sa.sa_mask, SIGWINCH);
		sigaddset(&sa.sa_mask, SIGVTALRM);
		sigaddset(&sa.sa_mask, SIGUSR1);
		sa.sa_flags = SA_ONSTACK;
		sa.sa_handler = (void *) v;
		sa.sa_restorer = NULL;
		if (sigaction(SIGSEGV, &sa, NULL) < 0)
			panic("userspace_tramp - setting SIGSEGV handler "
			      "failed - errno = %d\n", errno);
	}

	kill(os_getpid(), SIGSTOP);
	return 0;
}

/* Each element set once, and only accessed by a single processor anyway */
#undef NR_CPUS
#define NR_CPUS 1
int userspace_pid[NR_CPUS];

int start_userspace(unsigned long stub_stack)
{
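	/*
	 * Create the traced child which will run this context's userspace
	 * code: clone userspace_tramp() on a temporary stack, wait for it to
	 * stop with SIGSTOP (ignoring stray SIGVTALRM stops), then enable
	 * PTRACE_O_TRACESYSGOOD so syscall stops arrive as SIGTRAP | 0x80.
	 */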
	void *stack;
	unsigned long sp;
	int pid, status, n, flags;

	stack = mmap(NULL, UM_KERN_PAGE_SIZE,
		     PROT_READ | PROT_WRITE | PROT_EXEC,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stack == MAP_FAILED)
		panic("start_userspace : mmap failed, errno = %d", errno);
	sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);

	flags = CLONE_FILES | SIGCHLD;
	if (proc_mm)
		flags |= CLONE_VM;

	pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
	if (pid < 0)
		panic("start_userspace : clone failed, errno = %d", errno);

	do {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
		if (n < 0)
			panic("start_userspace : wait failed, errno = %d",
			      errno);
	} while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));

	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP))
		panic("start_userspace : expected SIGSTOP, got status = %d",
		      status);

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *) PTRACE_O_TRACESYSGOOD) < 0)
		panic("start_userspace : PTRACE_OLDSETOPTIONS failed, "
		      "errno = %d\n", errno);

	if (munmap(stack, UM_KERN_PAGE_SIZE) < 0)
		panic("start_userspace : munmap failed, errno = %d\n", errno);

	return pid;
}

void userspace(struct uml_pt_regs *regs)
{
	struct itimerval timer;
	unsigned long long nsecs, now;
	int err, status, op, pid = userspace_pid[0];
	/* To prevent races if using_sysemu changes under us. */
	int local_using_sysemu;

	if (getitimer(ITIMER_VIRTUAL, &timer))
		printk("Failed to get itimer, errno = %d\n", errno);
	nsecs = timer.it_value.tv_sec * UM_NSEC_PER_SEC +
		timer.it_value.tv_usec * UM_NSEC_PER_USEC;
	nsecs += os_nsecs();

	while (1) {
		restore_registers(pid, regs);

		/* Now we set local_using_sysemu to be used for one loop */
		local_using_sysemu = get_using_sysemu();

		op = SELECT_PTRACE_OPERATION(local_using_sysemu,
					     singlestepping(NULL));

		err = ptrace(op, pid, 0, 0);
		if (err)
			panic("userspace - could not resume userspace process, "
			      "pid=%d, ptrace operation = %d, errno = %d\n",
			      pid, op, errno);

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED));
		if (err < 0)
			panic("userspace - waitpid failed, errno = %d\n",
			      errno);

		regs->is_user = 1;
		save_registers(pid, regs);
		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

		if (WIFSTOPPED(status)) {
			int sig = WSTOPSIG(status);
			switch (sig) {
			case SIGSEGV:
				if (PTRACE_FULL_FAULTINFO ||
				    !ptrace_faultinfo) {
					get_skas_faultinfo(pid,
							   &regs->faultinfo);
					(*sig_info[SIGSEGV])(SIGSEGV, regs);
				}
				else handle_segv(pid, regs);
				break;
			case SIGTRAP + 0x80:
				handle_trap(pid, regs, local_using_sysemu);
				break;
			case SIGTRAP:
				relay_signal(SIGTRAP, regs);
				break;
			case SIGVTALRM:
				now = os_nsecs();
				if (now < nsecs)
					break;
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				nsecs = timer.it_value.tv_sec *
					UM_NSEC_PER_SEC +
					timer.it_value.tv_usec *
					UM_NSEC_PER_USEC;
				nsecs += os_nsecs();
				break;
			case SIGIO:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				break;
			default:
				printk(UM_KERN_ERR "userspace - child stopped "
				       "with signal %d\n", sig);
			}
			pid = userspace_pid[0];
			interrupt_end();

			/* Avoid -ERESTARTSYS handling in host */
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs->gp) = -1;
		}
	}
}

static unsigned long thread_regs[MAX_REG_NR];

static int __init init_thread_regs(void)
{
	get_safe_registers(thread_regs);
	/* Set parent's instruction pointer to start of clone-stub */
	thread_regs[REGS_IP_INDEX] = STUB_CODE +
				(unsigned long) stub_clone_handler -
				(unsigned long) &__syscall_stub_start;
	thread_regs[REGS_SP_INDEX] = STUB_DATA + UM_KERN_PAGE_SIZE -
		sizeof(void *);
#ifdef __SIGNAL_FRAMESIZE
	thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
#endif
	return 0;
}

__initcall(init_thread_regs);

int copy_context_skas0(unsigned long new_stack, int pid)
{
	struct timeval tv = { .tv_sec = 0, .tv_usec = UM_USEC_PER_SEC / UM_HZ };
	int err;
	unsigned long current_stack = current_stub_stack();
	struct stub_data *data = (struct stub_data *) current_stack;
	struct stub_data *child_data = (struct stub_data *) new_stack;
	unsigned long long new_offset;
	int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);

	/*
	 * prepare offset and fd of child's stack as argument for parent's
	 * and child's mmap2 calls
	 */
	*data = ((struct stub_data) { .offset = MMAP_OFFSET(new_offset),
				      .fd = new_fd,
				      .timer = ((struct itimerval)
						{ .it_value = tv,
						  .it_interval = tv }) });

	err = ptrace_setregs(pid, thread_regs);
	if (err < 0)
		panic("copy_context_skas0 : PTRACE_SETREGS failed, "
		      "pid = %d, errno = %d\n", pid, -err);

	/* set a well known return code for detection of child write failure */
	child_data->err = 12345678;

	/*
	 * Wait until the parent has finished its work: read the child's pid
	 * from the parent's stack and check for an error result.
	 */
	err = ptrace(PTRACE_CONT, pid, 0, 0);
	if (err)
		panic("Failed to continue new process, pid = %d, "
		      "errno = %d\n", pid, errno);
	wait_stub_done(pid);

	pid = data->err;
	if (pid < 0)
		panic("copy_context_skas0 - stub-parent reports error %d\n",
		      -pid);

	/*
	 * Wait until the child has finished too: read the child's result
	 * from its stack and check it.
	 */
	wait_stub_done(pid);
	if (child_data->err != STUB_DATA)
		panic("copy_context_skas0 - stub-child reports error %ld\n",
		      child_data->err);

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *)PTRACE_O_TRACESYSGOOD) < 0)
		panic("copy_context_skas0 : PTRACE_OLDSETOPTIONS failed, "
		      "errno = %d\n", errno);

	return pid;
}

/*
 * This is used only if stub pages are needed while proc_mm is
 * available. Opening /proc/mm creates a new mm_context, which lacks
 * the stub pages. Thus, we map them here, using the /proc/mm fd.
 */
void map_stub_pages(int fd, unsigned long code,
		    unsigned long data, unsigned long stack)
{
	struct proc_mm_op mmop;
	int n;
	unsigned long long code_offset;
	int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start),
				   &code_offset);

	mmop = ((struct proc_mm_op) { .op = MM_MMAP,
				      .u =
				      { .mmap =
					{ .addr = code,
					  .len = UM_KERN_PAGE_SIZE,
					  .prot = PROT_EXEC,
					  .flags = MAP_FIXED | MAP_PRIVATE,
					  .fd = code_fd,
					  .offset = code_offset
					} } });
	CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
	if (n != sizeof(mmop)) {
		n = errno;
		printk(UM_KERN_ERR "mmap args - addr = 0x%lx, fd = %d, "
		       "offset = %llx\n", code, code_fd,
		       (unsigned long long) code_offset);
		panic("map_stub_pages : /proc/mm map for code failed, "
		      "err = %d\n", n);
	}

	if (stack) {
		unsigned long long map_offset;
		int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);
		mmop = ((struct proc_mm_op)
				{ .op = MM_MMAP,
				  .u =
				  { .mmap =
				    { .addr = data,
				      .len = UM_KERN_PAGE_SIZE,
				      .prot = PROT_READ | PROT_WRITE,
				      .flags = MAP_FIXED | MAP_SHARED,
				      .fd = map_fd,
				      .offset = map_offset
				    } } });
		CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
		if (n != sizeof(mmop))
			panic("map_stub_pages : /proc/mm map for data failed, "
			      "err = %d\n", errno);
	}
}

void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
{
	(*buf)[0].JB_IP = (unsigned long) handler;
	(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
		sizeof(void *);
}

#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_CALLBACK 1
#define INIT_JMP_HALT 2
#define INIT_JMP_REBOOT 3

void switch_threads(jmp_buf *me, jmp_buf *you)
{
	if (UML_SETJMP(me) == 0)
		UML_LONGJMP(you, 1);
}

static jmp_buf initial_jmpbuf;

/* XXX Make these percpu */
static void (*cb_proc)(void *arg);
static void *cb_arg;
static jmp_buf *cb_back;

int start_idle_thread(void *stack, jmp_buf *switch_buf)
{
	int n;

	set_handler(SIGWINCH, (__sighandler_t) sig_handler,
		    SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGVTALRM, -1);

	/*
	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
	 * and restore signals, with the possible side-effect of
	 * trying to handle any signals which came when they were
	 * blocked, which can't be done on this stack.
	 * Signals must be blocked when jumping back here and restored
	 * after returning to the jumper.
	 */
	n = setjmp(initial_jmpbuf);
	switch (n) {
	case INIT_JMP_NEW_THREAD:
		(*switch_buf)[0].JB_IP = (unsigned long) new_thread_handler;
		(*switch_buf)[0].JB_SP = (unsigned long) stack +
			UM_THREAD_SIZE - sizeof(void *);
		break;
	case INIT_JMP_CALLBACK:
		(*cb_proc)(cb_arg);
		longjmp(*cb_back, 1);
		break;
	case INIT_JMP_HALT:
		kmalloc_ok = 0;
		return 0;
	case INIT_JMP_REBOOT:
		kmalloc_ok = 0;
		return 1;
	default:
		panic("Bad setjmp return in start_idle_thread - %d\n", n);
	}
	longjmp(*switch_buf, 1);
}

void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
	jmp_buf here;

	cb_proc = proc;
	cb_arg = arg;
	cb_back = &here;

	block_signals();
	if (UML_SETJMP(&here) == 0)
		UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
	unblock_signals();

	cb_proc = NULL;
	cb_arg = NULL;
	cb_back = NULL;
}

void halt_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}

void reboot_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
}

void __switch_mm(struct mm_id *mm_idp)
{
	int err;

	/* FIXME: need cpu pid in __switch_mm */
	if (proc_mm) {
		err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
			     mm_idp->u.mm_fd);
		if (err)
			panic("__switch_mm - PTRACE_SWITCH_MM failed, "
			      "errno = %d\n", errno);
	}
	else userspace_pid[0] = mm_idp->u.pid;
}