/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>

/*
 * Access another process' address space via ptrace.
 * Source/target buffer must be in kernel space;
 * do not walk the page table directly, use get_user_pages().
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	if (!tsk->ptrace ||
	    (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return 0;
	}

	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
	mmput(mm);

	return ret;
}
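
/*
 * Note: all of the PEEK/POKE helpers further down in this file
 * (generic_ptrace_peekdata(), generic_ptrace_pokedata(),
 * ptrace_readdata() and ptrace_writedata()) funnel through
 * ptrace_access_vm(), so every ptrace read or write of tracee memory
 * goes through the get_user_pages() based __access_remote_vm() path
 * and through the dumpability/ptracer_capable() check above.
 */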

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
	rcu_read_lock();
	child->ptracer_cred = get_cred(__task_cred(new_parent));
	rcu_read_unlock();
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	const struct cred *old_cred;
	BUG_ON(!child->ptrace);

	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
	old_cred = child->ptracer_cred;
	child->ptracer_cred = NULL;
	put_cred(old_cred);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	spin_lock_irq(&task->sighand->siglock);
	if (__fatal_signal_pending(task))
		wake_up_state(task, __TASK_TRACED);
	else
		task->state = TASK_TRACED;
	spin_unlock_irq(&task->sighand->siglock);
}
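
/*
 * ptrace_freeze_traced() and ptrace_unfreeze_traced() always come in
 * pairs around a ptrace operation: ptrace_check_attach() below freezes
 * the tracee into __TASK_TRACED before arch_ptrace()/ptrace_request()
 * touch it, and the ptrace(2) syscall entry points unfreeze it again
 * once the request has been handled (unless the request was a
 * successful PTRACE_DETACH, in which case the tracee has already been
 * unlinked and resumed).
 */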

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}

static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
	else
		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid) &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(mm->user_ns, mode)))
		return -EPERM;

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}
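
/*
 * Illustrative examples of the two credential modes (a sketch, not an
 * exhaustive list): ptrace_attach() below passes
 * PTRACE_MODE_ATTACH_REALCREDS because the caller issued an explicit
 * ptrace(2) syscall against another task, while procfs-style readers
 * are expected to pass a PTRACE_MODE_*_FSCREDS mode so that access is
 * judged by the opener's filesystem credentials.
 */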

static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (seize)
		flags |= PT_SEIZED;
	task->ptrace = flags;

	__ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		/*
		 * We do not bother to change retval or clear JOBCTL_TRAPPING
		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
		 * not return to user-mode, it will exit and clear this bit in
		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
		 * and until then nobody can ptrace this task.
		 */
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}
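
/*
 * Usage sketch (userspace, for orientation only): a classic debugger
 * attach is
 *
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);	-- wait for the initial SIGSTOP trap
 *
 * whereas PTRACE_SEIZE passes its PTRACE_O_* options in @data, requires
 * @addr == 0 (see the checks above), and does not stop the tracee; a
 * tracer that wants a stop typically issues PTRACE_INTERRUPT afterwards.
 */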

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace().  Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(). It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}
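
/*
 * Note that ptrace_detach() validates @data with valid_signal() and
 * stores it in ->exit_code before unlinking, matching the documented
 * ptrace(2) behaviour where PTRACE_DETACH may inject a signal into the
 * tracee as it is released.
 */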

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = ptrace_access_vm(tsk, dst, buf, this_len,
				FOLL_FORCE | FOLL_WRITE);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}
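
/*
 * Both helpers above bounce the transfer through a 128-byte on-stack
 * buffer, looping until @len is exhausted.  They return the number of
 * bytes actually copied; a fault on the very first chunk is reported as
 * -EIO (or -EFAULT for a bad user buffer), while a fault after partial
 * progress just ends the loop with a short count.
 */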

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		siginfo_t info;
		s32 off = arg.off + i;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (off >= 0) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}
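
/*
 * Usage sketch (userspace, illustrative only): a tracer peeks at the
 * tracee's pending signal queue roughly like
 *
 *	struct ptrace_peeksiginfo_args args = {
 *		.off = 0, .flags = 0, .nr = 16,
 *	};
 *	siginfo_t infos[16];
 *	long n = ptrace(PTRACE_PEEKSIGINFO, pid, &args, infos);
 *
 * where a positive return value is the number of siginfo_t records
 * written, and PTRACE_PEEKSIGINFO_SHARED in .flags selects the
 * thread-group-wide queue instead of the per-thread one.
 */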

#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)	((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)	0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code.  We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
#endif
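
/*
 * Usage sketch for the regset interface (illustrative only): a tracer
 * reads the general purpose registers with something like
 *
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *
 * The note type passed in @addr selects the regset, and on return
 * iov_len is trimmed to the amount of data the kernel actually provided.
 */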

int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK:
		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per-se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	default:
		break;
	}

	return ret;
}

static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
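
/*
 * Note the second argument to ptrace_check_attach() above: PTRACE_KILL
 * and PTRACE_INTERRUPT are the only requests that are allowed to
 * proceed without the tracee sitting frozen in TASK_TRACED, so for
 * them the freeze-and-wait step that every other request relies on is
 * skipped.
 */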

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
			FOLL_FORCE | FOLL_WRITE);
	return (copied == sizeof(data)) ? 0 : -EIO;
}

#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
				FOLL_FORCE);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE | FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;
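
	/*
	 * Note that in this compat path PEEK/POKE transfer a
	 * compat_ulong_t sized word (32 bits on a 64-bit kernel), whereas
	 * the native generic_ptrace_peekdata()/pokedata() above move a
	 * full unsigned long.
	 */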

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */