/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <stdlib.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <asm/unistd.h>
#include <as-layout.h>
#include <init.h>
#include <kern_util.h>
#include <mem.h>
#include <os.h>
#include <proc_mm.h>
#include <ptrace_user.h>
#include <registers.h>
#include <skas.h>
#include <skas_ptrace.h>
#include <sysdep/stub.h>

int is_skas_winch(int pid, int fd, void *data)
{
	return pid == getpgrp();
}

static int ptrace_dump_regs(int pid)
{
	unsigned long regs[MAX_REG_NR];
	int i;

	if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
		return -errno;

	printk(UM_KERN_ERR "Stub registers -\n");
	for (i = 0; i < ARRAY_SIZE(regs); i++)
		printk(UM_KERN_ERR "\t%d - %lx\n", i, regs[i]);

	return 0;
}

/*
 * Signals that are OK to receive in the stub - we'll just continue it.
 * SIGWINCH will happen when UML is inside a detached screen.
 */
#define STUB_SIG_MASK ((1 << SIGVTALRM) | (1 << SIGWINCH))

/* Signals that the stub will finish with - anything else is an error */
#define STUB_DONE_MASK (1 << SIGTRAP)

void wait_stub_done(int pid)
{
	int n, status, err;

	while (1) {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((n < 0) || !WIFSTOPPED(status))
			goto bad_wait;

		if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
			break;

		err = ptrace(PTRACE_CONT, pid, 0, 0);
		if (err) {
			printk(UM_KERN_ERR "wait_stub_done : continue failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}
	}

	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
		return;

bad_wait:
	err = ptrace_dump_regs(pid);
	if (err)
		printk(UM_KERN_ERR "Failed to get registers from stub, "
		       "errno = %d\n", -err);
	printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
	       "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
	       status);
	fatal_sigsegv();
}

extern unsigned long current_stub_stack(void);
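
/*
 * Fetch the faultinfo describing the stub process's last SIGSEGV.  With
 * PTRACE_FAULTINFO the host fills it in directly; otherwise the stub's
 * SEGV handler is run (by continuing the process with SIGSEGV) and the
 * result is copied from the start of the stub stack page.  The stub's FP
 * state is saved and restored around that run so it isn't clobbered.
 */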
static void get_skas_faultinfo(int pid, struct faultinfo *fi)
{
	int err;

	if (ptrace_faultinfo) {
		err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
		if (err) {
			printk(UM_KERN_ERR "get_skas_faultinfo - "
			       "PTRACE_FAULTINFO failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		/* Special handling for i386, which has different structs */
		if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
			memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
			       sizeof(struct faultinfo) -
			       sizeof(struct ptrace_faultinfo));
	}
	else {
		unsigned long fpregs[FP_SIZE];

		err = get_fp_registers(pid, fpregs);
		if (err < 0) {
			printk(UM_KERN_ERR "get_fp_registers returned %d\n",
			       err);
			fatal_sigsegv();
		}
		err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
		if (err) {
			printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
			       "errno = %d\n", pid, errno);
			fatal_sigsegv();
		}
		wait_stub_done(pid);

		/*
		 * faultinfo is prepared by stub_segv_handler at the start of
		 * the stub stack page.  We just have to copy it.
		 */
		memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));

		err = put_fp_registers(pid, fpregs);
		if (err < 0) {
			printk(UM_KERN_ERR "put_fp_registers returned %d\n",
			       err);
			fatal_sigsegv();
		}
	}
}

static void handle_segv(int pid, struct uml_pt_regs *regs)
{
	get_skas_faultinfo(pid, &regs->faultinfo);
	segv(regs->faultinfo, 0, 1, NULL);
}

/*
 * To use the same value of using_sysemu as the caller, ask it for that value
 * (in local_using_sysemu).
 */
static void handle_trap(int pid, struct uml_pt_regs *regs,
			int local_using_sysemu)
{
	int err, status;

	if ((UPT_IP(regs) >= STUB_START) && (UPT_IP(regs) < STUB_END))
		fatal_sigsegv();

	/* Mark this as a syscall */
	UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->gp);

	if (!local_using_sysemu)
	{
		err = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_NR_OFFSET,
			     __NR_getpid);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - nullifying syscall "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - continuing to end of "
			       "syscall failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((err < 0) || !WIFSTOPPED(status) ||
		    (WSTOPSIG(status) != SIGTRAP + 0x80)) {
			err = ptrace_dump_regs(pid);
			if (err)
				printk(UM_KERN_ERR "Failed to get registers "
				       "from process, errno = %d\n", -err);
			printk(UM_KERN_ERR "handle_trap - failed to wait at "
			       "end of syscall, errno = %d, status = %d\n",
			       errno, status);
			fatal_sigsegv();
		}
	}

	handle_syscall(regs);
}

extern int __syscall_stub_start;
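
/*
 * Trampoline run in the child created by start_userspace().  It requests
 * tracing, installs default signal dispositions and the host interval
 * timer, and, when /proc/mm isn't in use, maps the syscall stub code and
 * stub data pages at their fixed addresses.  Without PTRACE_FAULTINFO it
 * also points SIGSEGV at the stub's SEGV handler, running on the stub
 * data page as an alternate signal stack.  Finally it stops itself with
 * SIGSTOP so the tracer can set ptrace options before letting it run.
 */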
static int userspace_tramp(void *stack)
{
	void *addr;
	int err;

	ptrace(PTRACE_TRACEME, 0, 0, 0);

	signal(SIGTERM, SIG_DFL);
	signal(SIGWINCH, SIG_IGN);
	err = set_interval();
	if (err) {
		printk(UM_KERN_ERR "userspace_tramp - setting timer failed, "
		       "errno = %d\n", err);
		exit(1);
	}

	if (!proc_mm) {
		/*
		 * This has a pte, but it can't be mapped in with the usual
		 * tlb_flush mechanism because this is part of that mechanism
		 */
		int fd;
		unsigned long long offset;
		fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
		addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
		if (addr == MAP_FAILED) {
			printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, "
			       "errno = %d\n", STUB_CODE, errno);
			exit(1);
		}

		if (stack != NULL) {
			fd = phys_mapping(to_phys(stack), &offset);
			addr = mmap((void *) STUB_DATA,
				    UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
				    MAP_FIXED | MAP_SHARED, fd, offset);
			if (addr == MAP_FAILED) {
				printk(UM_KERN_ERR "mapping segfault stack "
				       "at 0x%lx failed, errno = %d\n",
				       STUB_DATA, errno);
				exit(1);
			}
		}
	}
	if (!ptrace_faultinfo && (stack != NULL)) {
		struct sigaction sa;

		unsigned long v = STUB_CODE +
				  (unsigned long) stub_segv_handler -
				  (unsigned long) &__syscall_stub_start;

		set_sigstack((void *) STUB_DATA, UM_KERN_PAGE_SIZE);
		sigemptyset(&sa.sa_mask);
		sa.sa_flags = SA_ONSTACK | SA_NODEFER | SA_SIGINFO;
		sa.sa_sigaction = (void *) v;
		sa.sa_restorer = NULL;
		if (sigaction(SIGSEGV, &sa, NULL) < 0) {
			printk(UM_KERN_ERR "userspace_tramp - setting SIGSEGV "
			       "handler failed - errno = %d\n", errno);
			exit(1);
		}
	}

	kill(os_getpid(), SIGSTOP);
	return 0;
}

/* Each element set once, and only accessed by a single processor anyway */
#undef NR_CPUS
#define NR_CPUS 1
int userspace_pid[NR_CPUS];

int start_userspace(unsigned long stub_stack)
{
	void *stack;
	unsigned long sp;
	int pid, status, n, flags, err;

	stack = mmap(NULL, UM_KERN_PAGE_SIZE,
		     PROT_READ | PROT_WRITE | PROT_EXEC,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stack == MAP_FAILED) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : mmap failed, "
		       "errno = %d\n", errno);
		return err;
	}

	sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);

	flags = CLONE_FILES;
	if (proc_mm)
		flags |= CLONE_VM;
	else
		flags |= SIGCHLD;

	pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
	if (pid < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : clone failed, "
		       "errno = %d\n", errno);
		return err;
	}

	do {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if (n < 0) {
			err = -errno;
			printk(UM_KERN_ERR "start_userspace : wait failed, "
			       "errno = %d\n", errno);
			goto out_kill;
		}
	} while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));

	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) {
		err = -EINVAL;
		printk(UM_KERN_ERR "start_userspace : expected SIGSTOP, got "
		       "status = %d\n", status);
		goto out_kill;
	}

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *) PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : munmap failed, "
		       "errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}
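
/*
 * The main tracing loop.  It loads the process's registers and FP state
 * into the stub process, continues it with the ptrace operation selected
 * for the current sysemu setting, and waits for it to stop.  The stop
 * signal is then dispatched: SIGSEGV becomes a page fault, SIGTRAP + 0x80
 * a system call, SIGVTALRM a timer tick, and the remaining expected
 * signals are relayed to their handlers; anything else is fatal.
 */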
void userspace(struct uml_pt_regs *regs)
{
	struct itimerval timer;
	unsigned long long nsecs, now;
	int err, status, op, pid = userspace_pid[0];
	/* To prevent races if using_sysemu changes under us.*/
	int local_using_sysemu;
	siginfo_t si;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	if (getitimer(ITIMER_VIRTUAL, &timer))
		printk(UM_KERN_ERR "Failed to get itimer, errno = %d\n", errno);
	nsecs = timer.it_value.tv_sec * UM_NSEC_PER_SEC +
		timer.it_value.tv_usec * UM_NSEC_PER_USEC;
	nsecs += os_nsecs();

	while (1) {
		/*
		 * This can legitimately fail if the process loads a
		 * bogus value into a segment register.  It will
		 * segfault and PTRACE_GETREGS will read that value
		 * out of the process.  However, PTRACE_SETREGS will
		 * fail.  In this case, there is nothing to do but
		 * just kill the process.
		 */
		if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp))
			fatal_sigsegv();

		if (put_fp_registers(pid, regs->fp))
			fatal_sigsegv();

		/* Now we set local_using_sysemu to be used for one loop */
		local_using_sysemu = get_using_sysemu();

		op = SELECT_PTRACE_OPERATION(local_using_sysemu,
					     singlestepping(NULL));

		if (ptrace(op, pid, 0, 0)) {
			printk(UM_KERN_ERR "userspace - ptrace continue "
			       "failed, op = %d, errno = %d\n", op, errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if (err < 0) {
			printk(UM_KERN_ERR "userspace - wait failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		regs->is_user = 1;
		if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) {
			printk(UM_KERN_ERR "userspace - PTRACE_GETREGS failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		if (get_fp_registers(pid, regs->fp)) {
			printk(UM_KERN_ERR "userspace - get_fp_registers failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

		if (WIFSTOPPED(status)) {
			int sig = WSTOPSIG(status);

			ptrace(PTRACE_GETSIGINFO, pid, 0, (struct siginfo *)&si);

			switch (sig) {
			case SIGSEGV:
				if (PTRACE_FULL_FAULTINFO ||
				    !ptrace_faultinfo) {
					get_skas_faultinfo(pid,
							   &regs->faultinfo);
					(*sig_info[SIGSEGV])(SIGSEGV,
							     (struct siginfo *)&si,
							     regs);
				}
				else handle_segv(pid, regs);
				break;
			case SIGTRAP + 0x80:
				handle_trap(pid, regs, local_using_sysemu);
				break;
			case SIGTRAP:
				relay_signal(SIGTRAP, (struct siginfo *)&si, regs);
				break;
			case SIGVTALRM:
				now = os_nsecs();
				if (now < nsecs)
					break;
				block_signals();
				(*sig_info[sig])(sig, (struct siginfo *)&si, regs);
				unblock_signals();
				nsecs = timer.it_value.tv_sec *
					UM_NSEC_PER_SEC +
					timer.it_value.tv_usec *
					UM_NSEC_PER_USEC;
				nsecs += os_nsecs();
				break;
			case SIGIO:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				block_signals();
				(*sig_info[sig])(sig, (struct siginfo *)&si, regs);
				unblock_signals();
				break;
			default:
				printk(UM_KERN_ERR "userspace - child stopped "
				       "with signal %d\n", sig);
				fatal_sigsegv();
			}
			pid = userspace_pid[0];
			interrupt_end();

			/* Avoid -ERESTARTSYS handling in host */
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs->gp) = -1;
		}
	}
}

static unsigned long thread_regs[MAX_REG_NR];
static unsigned long thread_fp_regs[FP_SIZE];

static int __init init_thread_regs(void)
{
	get_safe_registers(thread_regs, thread_fp_regs);
	/* Set parent's instruction pointer to start of clone-stub */
	thread_regs[REGS_IP_INDEX] = STUB_CODE +
				(unsigned long) stub_clone_handler -
				(unsigned long) &__syscall_stub_start;
	thread_regs[REGS_SP_INDEX] = STUB_DATA + UM_KERN_PAGE_SIZE -
		sizeof(void *);
#ifdef __SIGNAL_FRAMESIZE
	thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
#endif
	return 0;
}

__initcall(init_thread_regs);
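
/*
 * Create a new address space by having the current stub process clone
 * itself.  The parent stub's data page is seeded with the fd/offset of the
 * child's stub stack and the timer setup, the stub is pointed at
 * stub_clone_handler(), and then continued.  The parent reports the new
 * pid through its data page, and the child reports STUB_DATA in its own
 * data page once it has mapped it; any other value is treated as an error.
 */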
int copy_context_skas0(unsigned long new_stack, int pid)
{
	struct timeval tv = { .tv_sec = 0, .tv_usec = UM_USEC_PER_SEC / UM_HZ };
	int err;
	unsigned long current_stack = current_stub_stack();
	struct stub_data *data = (struct stub_data *) current_stack;
	struct stub_data *child_data = (struct stub_data *) new_stack;
	unsigned long long new_offset;
	int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);

	/*
	 * prepare offset and fd of child's stack as argument for parent's
	 * and child's mmap2 calls
	 */
	*data = ((struct stub_data) { .offset = MMAP_OFFSET(new_offset),
				      .fd = new_fd,
				      .timer = ((struct itimerval)
						{ .it_value = tv,
						  .it_interval = tv }) });

	err = ptrace_setregs(pid, thread_regs);
	if (err < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_SETREGS "
		       "failed, pid = %d, errno = %d\n", pid, -err);
		return err;
	}

	err = put_fp_registers(pid, thread_fp_regs);
	if (err < 0) {
		printk(UM_KERN_ERR "copy_context_skas0 : put_fp_registers "
		       "failed, pid = %d, err = %d\n", pid, err);
		return err;
	}

	/* set a well known return code for detection of child write failure */
	child_data->err = 12345678;

	/*
	 * Wait until the parent has finished its work: read the child's pid
	 * from the parent's stack, and check for a bad result.
	 */
	err = ptrace(PTRACE_CONT, pid, 0, 0);
	if (err) {
		err = -errno;
		printk(UM_KERN_ERR "Failed to continue new process, pid = %d, "
		       "errno = %d\n", pid, errno);
		return err;
	}

	wait_stub_done(pid);

	pid = data->err;
	if (pid < 0) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-parent reports "
		       "error %d\n", -pid);
		return pid;
	}

	/*
	 * Wait until the child has finished too: read the child's result
	 * from the child's stack and check it.
	 */
	wait_stub_done(pid);
	if (child_data->err != STUB_DATA) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-child reports "
		       "error %ld\n", child_data->err);
		err = child_data->err;
		goto out_kill;
	}

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *)PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}

/*
 * This is used only if stub pages are needed while proc_mm is available.
 * Opening /proc/mm creates a new mm_context, which lacks the stub pages.
 * Thus, we map them using the /proc/mm fd.
 */
int map_stub_pages(int fd, unsigned long code, unsigned long data,
		   unsigned long stack)
{
	struct proc_mm_op mmop;
	int n;
	unsigned long long code_offset;
	int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start),
				   &code_offset);

	mmop = ((struct proc_mm_op) { .op = MM_MMAP,
				      .u =
				      { .mmap =
					{ .addr = code,
					  .len = UM_KERN_PAGE_SIZE,
					  .prot = PROT_EXEC,
					  .flags = MAP_FIXED | MAP_PRIVATE,
					  .fd = code_fd,
					  .offset = code_offset
					} } });
	CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
	if (n != sizeof(mmop)) {
		n = errno;
		printk(UM_KERN_ERR "mmap args - addr = 0x%lx, fd = %d, "
		       "offset = %llx\n", code, code_fd,
		       (unsigned long long) code_offset);
		printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for code "
		       "failed, err = %d\n", n);
		return -n;
	}

	if (stack) {
		unsigned long long map_offset;
		int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);
		mmop = ((struct proc_mm_op)
				{ .op = MM_MMAP,
				  .u =
				  { .mmap =
				    { .addr = data,
				      .len = UM_KERN_PAGE_SIZE,
				      .prot = PROT_READ | PROT_WRITE,
				      .flags = MAP_FIXED | MAP_SHARED,
				      .fd = map_fd,
				      .offset = map_offset
				    } } });
		CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
		if (n != sizeof(mmop)) {
			n = errno;
			printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for "
			       "data failed, err = %d\n", n);
			return -n;
		}
	}

	return 0;
}
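
/*
 * Kernel-side threads are switched with setjmp/longjmp.  new_thread() just
 * seeds a jmp_buf with the handler as the instruction pointer and the top
 * of the stack as the stack pointer, so the first switch_threads() to that
 * buffer starts running the handler on that stack.
 */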
void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
{
	(*buf)[0].JB_IP = (unsigned long) handler;
	(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
		sizeof(void *);
}

#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_CALLBACK 1
#define INIT_JMP_HALT 2
#define INIT_JMP_REBOOT 3

void switch_threads(jmp_buf *me, jmp_buf *you)
{
	if (UML_SETJMP(me) == 0)
		UML_LONGJMP(you, 1);
}

static jmp_buf initial_jmpbuf;

/* XXX Make these percpu */
static void (*cb_proc)(void *arg);
static void *cb_arg;
static jmp_buf *cb_back;
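
/*
 * Set up the idle thread's jmp_buf and longjmp to it.  Later longjmps back
 * to initial_jmpbuf select an action via the INIT_JMP_* code: starting a
 * new thread, running a queued callback (see initial_thread_cb_skas), or
 * returning 0/1 to the caller for halt and reboot.
 */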
int start_idle_thread(void *stack, jmp_buf *switch_buf)
{
	int n;

	set_handler(SIGWINCH);

	/*
	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
	 * and restore signals, with the possible side-effect of
	 * trying to handle any signals which came when they were
	 * blocked, which can't be done on this stack.
	 * Signals must be blocked when jumping back here and restored
	 * after returning to the jumper.
	 */
	n = setjmp(initial_jmpbuf);
	switch (n) {
	case INIT_JMP_NEW_THREAD:
		(*switch_buf)[0].JB_IP = (unsigned long) new_thread_handler;
		(*switch_buf)[0].JB_SP = (unsigned long) stack +
			UM_THREAD_SIZE - sizeof(void *);
		break;
	case INIT_JMP_CALLBACK:
		(*cb_proc)(cb_arg);
		longjmp(*cb_back, 1);
		break;
	case INIT_JMP_HALT:
		kmalloc_ok = 0;
		return 0;
	case INIT_JMP_REBOOT:
		kmalloc_ok = 0;
		return 1;
	default:
		printk(UM_KERN_ERR "Bad sigsetjmp return in "
		       "start_idle_thread - %d\n", n);
		fatal_sigsegv();
	}
	longjmp(*switch_buf, 1);
}

void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
	jmp_buf here;

	cb_proc = proc;
	cb_arg = arg;
	cb_back = &here;

	block_signals();
	if (UML_SETJMP(&here) == 0)
		UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
	unblock_signals();

	cb_proc = NULL;
	cb_arg = NULL;
	cb_back = NULL;
}

void halt_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}

void reboot_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
}

void __switch_mm(struct mm_id *mm_idp)
{
	int err;

	/* FIXME: need cpu pid in __switch_mm */
	if (proc_mm) {
		err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
			     mm_idp->u.mm_fd);
		if (err) {
			printk(UM_KERN_ERR "__switch_mm - PTRACE_SWITCH_MM "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}
	}
	else userspace_pid[0] = mm_idp->u.pid;
}