/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>


/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}
/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	spin_lock_irq(&task->sighand->siglock);
	if (__fatal_signal_pending(task))
		wake_up_state(task, __TASK_TRACED);
	else
		task->state = TASK_TRACED;
	spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if @child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}

static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
	else
		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}
/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	int dumpable = 0;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid)  &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	rcu_read_lock();
	if (dumpable != SUID_DUMP_USER &&
	    !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}
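/*
 * Illustrative sketch (not part of this file): a hypothetical in-kernel
 * caller that exposes another task's state through a procfs-style read
 * would gate the access roughly as below, using the FSCREDS variant
 * because the request arrives via a filesystem syscall.  The helper name
 * show_some_task_state() is made up for the example.
 *
 *	static int show_some_task_state(struct seq_file *m,
 *					struct task_struct *task)
 *	{
 *		if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 *			return -EACCES;
 *		seq_printf(m, "state: %ld\n", (long)task->state);
 *		return 0;
 *	}
 *
 * A debugger-style caller that is about to attach would use
 * PTRACE_MODE_ATTACH_REALCREDS instead, as ptrace_attach() below does.
 */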
static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (seize)
		flags |= PT_SEIZED;
	rcu_read_lock();
	if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
		flags |= PT_PTRACE_CAP;
	rcu_read_unlock();
	task->ptrace = flags;

	__ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		/*
		 * We do not bother to change retval or clear JOBCTL_TRAPPING
		 * if wait_on_bit() was interrupted by SIGKILL.  The tracer will
		 * not return to user-mode, it will exit and clear this bit in
		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
		 * and until then nobody can ptrace this task.
		 */
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}
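/*
 * Illustrative userspace sketch (not part of this file): the two attach
 * flavours handled above differ in what happens right after the call.
 * PTRACE_ATTACH forces a SIGSTOP and the tracer should wait for the
 * resulting stop; PTRACE_SEIZE leaves the tracee running and takes the
 * option mask in the fourth argument.  Error handling is trimmed.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	void attach_classic(pid_t pid)
 *	{
 *		ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *		waitpid(pid, NULL, 0);		// tracee is now stopped
 *	}
 *
 *	void attach_seize(pid_t pid)
 *	{
 *		// tracee keeps running; stop it later with PTRACE_INTERRUPT
 *		ptrace(PTRACE_SEIZE, pid, NULL,
 *		       (void *)(long)PTRACE_O_TRACEEXIT);
 *	}
 */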
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace().  Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(). It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}
static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!config_enabled(CONFIG_CHECKPOINT_RESTORE) ||
		    !config_enabled(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}
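/*
 * Illustrative userspace sketch (not part of this file): the option bits
 * are shifted into task->ptrace above, but from userspace they are simply
 * OR'ed together and passed as the data argument of PTRACE_SETOPTIONS
 * while the tracee is stopped.  Error handling is trimmed.
 *
 *	#include <sys/ptrace.h>
 *
 *	void set_common_options(pid_t pid)
 *	{
 *		long opts = PTRACE_O_TRACESYSGOOD |	// flag syscall stops
 *			    PTRACE_O_TRACEEXEC    |	// stop on execve()
 *			    PTRACE_O_EXITKILL;		// kill tracee if we die
 *
 *		ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)opts);
 *	}
 */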
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		siginfo_t info;
		s32 off = arg.off + i;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (off >= 0) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}
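/*
 * Illustrative userspace sketch (not part of this file): PTRACE_PEEKSIGINFO
 * takes a struct ptrace_peeksiginfo_args (the UAPI struct used above) in
 * addr and fills an array of siginfo_t at data, returning how many entries
 * were copied, mirroring the loop above.  Which header provides the struct
 * varies between libc and kernel UAPI; error handling is trimmed.
 *
 *	#include <sys/ptrace.h>
 *	#include <linux/ptrace.h>
 *	#include <signal.h>
 *
 *	long dump_pending(pid_t pid)
 *	{
 *		struct ptrace_peeksiginfo_args args = {
 *			.off	= 0,		// start of the queue
 *			.flags	= 0,		// per-thread queue
 *			.nr	= 16,		// at most 16 entries
 *		};
 *		siginfo_t info[16];
 *
 *		return ptrace(PTRACE_PEEKSIGINFO, pid, &args, info);
 *	}
 */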
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}
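/*
 * Illustrative userspace sketch (not part of this file): the data argument
 * of the resuming requests handled above is the signal number to deliver on
 * resume, or 0 for none.  A typical syscall-tracing loop therefore forwards
 * ordinary signal stops back to the tracee.  Error handling is trimmed.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *
 *	void trace_syscalls(pid_t pid)
 *	{
 *		int status;
 *
 *		while (waitpid(pid, &status, 0) == pid && WIFSTOPPED(status)) {
 *			int sig = WSTOPSIG(status);
 *
 *			// forward genuine signals, swallow SIGTRAP stops
 *			ptrace(PTRACE_SYSCALL, pid, NULL,
 *			       (void *)(long)(sig == SIGTRAP ? 0 : sig));
 *		}
 *	}
 */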
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code.  We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
#endif
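/*
 * Illustrative userspace sketch (not part of this file): the regset type
 * matched against core_note_type above is an ELF note number, so from
 * userspace PTRACE_GETREGSET is addressed with e.g. NT_PRSTATUS and an
 * iovec describing the buffer; the kernel writes the used length back into
 * iov_len.  Error handling is trimmed.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <sys/user.h>
 *	#include <elf.h>
 *
 *	long get_gp_regs(pid_t pid, struct user_regs_struct *regs)
 *	{
 *		struct iovec iov = {
 *			.iov_base = regs,
 *			.iov_len  = sizeof(*regs),
 *		};
 *
 *		return ptrace(PTRACE_GETREGSET, pid,
 *			      (void *)(long)NT_PRSTATUS, &iov);
 *	}
 */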
int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK:
		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per-se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	default:
		break;
	}

	return ret;
}
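/*
 * Illustrative userspace sketch (not part of this file): PTRACE_INTERRUPT
 * and PTRACE_LISTEN only apply to tracees attached with PTRACE_SEIZE, as
 * the seized checks above enforce.  A monitor that does not want to keep a
 * group-stopped tracee frozen in its trap can LISTEN instead; a later
 * PTRACE_INTERRUPT re-traps it.  Error handling is trimmed.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	void observe_group_stop(pid_t pid)
 *	{
 *		int status;
 *
 *		ptrace(PTRACE_SEIZE, pid, NULL, NULL);
 *
 *		// ... later, the tracee reports a group stop (e.g. SIGSTOP) ...
 *		waitpid(pid, &status, 0);
 *
 *		// don't hold it in the trap; keep getting job control
 *		// notifications instead
 *		ptrace(PTRACE_LISTEN, pid, NULL, NULL);
 *	}
 */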
static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
	return (copied == sizeof(data)) ? 0 : -EIO;
}
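/*
 * Illustrative userspace sketch (not part of this file): through the glibc
 * wrapper, PTRACE_PEEKDATA returns the peeked word itself rather than
 * storing it through data, so -1 is ambiguous and errno must be checked
 * explicitly.  Error handling beyond that is trimmed.
 *
 *	#include <sys/ptrace.h>
 *	#include <errno.h>
 *
 *	int peek_word(pid_t pid, void *addr, long *word)
 *	{
 *		errno = 0;
 *		*word = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
 *		if (*word == -1 && errno != 0)
 *			return -1;
 *		return 0;
 *	}
 *
 *	int poke_word(pid_t pid, void *addr, long word)
 *	{
 *		return ptrace(PTRACE_POKEDATA, pid, addr, (void *)word);
 *	}
 */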
#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */