/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>


/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	/* A task may be on at most one ptracer's list at a time. */
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	/* Re-parent back to the real parent and drop off the ptraced list. */
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		/* __TASK_TRACED (without TASK_WAKEKILL) cannot be woken by signals. */
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

/*
 * Undo ptrace_freeze_traced(): restore the killable TASK_TRACED state,
 * or wake the tracee up if a fatal signal arrived while it was frozen.
 */
static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	spin_lock_irq(&task->sighand->siglock);
	if (__fatal_signal_pending(task))
		wake_up_state(task, __TASK_TRACED);
	else
		task->state = TASK_TRACED;
	spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}

/*
 * Check CAP_SYS_PTRACE in @ns, suppressing the audit record when the
 * caller asked for a quiet check (PTRACE_MODE_NOAUDIT).
 */
static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
	else
		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	int dumpable = 0;
	kuid_t caller_uid;
	kgid_t caller_gid;

	/* Exactly one of FSCREDS/REALCREDS must be specified by the caller. */
	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid) &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	rcu_read_lock();
	/* Non-dumpable (e.g. setuid) targets need CAP_SYS_PTRACE in their ns. */
	if (dumpable != SUID_DUMP_USER &&
	    !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	/* Finally give LSMs (SELinux, Yama, ...) a chance to veto. */
	return security_ptrace_access_check(task, mode);
}

/* Boolean wrapper around __ptrace_may_access(), taking task_lock(). */
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}

/*
 * Attach to @task for PTRACE_ATTACH or PTRACE_SEIZE.
 * @addr/@flags are only meaningful for SEIZE (addr must be 0, flags are
 * PTRACE_O_* options to set atomically with the attach).
 */
static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	/* Kernel threads and our own thread group can never be traced. */
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (seize)
		flags |= PT_SEIZED;
	rcu_read_lock();
	if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
		flags |= PT_PTRACE_CAP;
	rcu_read_unlock();
	task->ptrace = flags;

	__ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		/*
		 * We do not bother to change retval or clear JOBCTL_TRAPPING
		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
		 * not return to user-mode, it will exit and clear this bit in
		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
		 * and until then nobody can ptrace this task.
		 */
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}

/**
 * ptrace_traceme -- helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
/* Make current's real parent its tracer; see kerneldoc above. */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}

/*
 * Called with irqs disabled, returns true if childs should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	/* Non-leader threads always self-reap. */
	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

/*
 * PTRACE_DETACH: stop tracing @child, optionally delivering signal @data
 * to it on resume.
 */
static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(). It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		/* Tasks that need release_task() are collected on @dead. */
		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}

/*
 * Copy @len bytes from @tsk's address space at @src into the caller's
 * buffer @dst, in 128-byte chunks.  Returns bytes copied, or -EIO/-EFAULT
 * if nothing could be copied.
 */
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, FOLL_FORCE);
		if (!retval) {
			/* Partial copies succeed with a short count. */
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

/*
 * Mirror of ptrace_readdata(): copy @len bytes from the caller's buffer
 * @src into @tsk's address space at @dst.  Returns bytes copied, or
 * -EIO/-EFAULT if nothing could be copied.
 */
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len,
				FOLL_FORCE | FOLL_WRITE);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

/* PTRACE_SETOPTIONS: replace @child's PTRACE_O_* option bits with @data. */
static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		/* The tracer itself must not be confined by seccomp. */
		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}

/* Copy out the siginfo of the signal @child is currently stopped on. */
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

/* Overwrite the siginfo of the signal @child is currently stopped on. */
static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

/*
 * PTRACE_PEEKSIGINFO: copy up to arg.nr queued siginfos, starting at
 * arg.off, from @child's (per-thread or shared) pending queue to the
 * user buffer at @data.  Returns the number copied, or a -errno if none
 * could be copied.
 */
static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
			     sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		siginfo_t info;
		s32 off = arg.off + i;

		/* Walk the queue under siglock to snapshot entry @off. */
		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (off >= 0) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	/* Report partial progress in preference to an error. */
	if (i > 0)
		return i;

	return ret;
}

#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif
Kim static int ptrace_resume(struct task_struct *child, long request, 7404abf9869SNamhyung Kim unsigned long data) 74136df29d7SRoland McGrath { 742b72c1869SOleg Nesterov bool need_siglock; 743b72c1869SOleg Nesterov 74436df29d7SRoland McGrath if (!valid_signal(data)) 74536df29d7SRoland McGrath return -EIO; 74636df29d7SRoland McGrath 74736df29d7SRoland McGrath if (request == PTRACE_SYSCALL) 74836df29d7SRoland McGrath set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 74936df29d7SRoland McGrath else 75036df29d7SRoland McGrath clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 75136df29d7SRoland McGrath 75236df29d7SRoland McGrath #ifdef TIF_SYSCALL_EMU 75336df29d7SRoland McGrath if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP) 75436df29d7SRoland McGrath set_tsk_thread_flag(child, TIF_SYSCALL_EMU); 75536df29d7SRoland McGrath else 75636df29d7SRoland McGrath clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); 75736df29d7SRoland McGrath #endif 75836df29d7SRoland McGrath 7595b88abbfSRoland McGrath if (is_singleblock(request)) { 7605b88abbfSRoland McGrath if (unlikely(!arch_has_block_step())) 7615b88abbfSRoland McGrath return -EIO; 7625b88abbfSRoland McGrath user_enable_block_step(child); 7635b88abbfSRoland McGrath } else if (is_singlestep(request) || is_sysemu_singlestep(request)) { 76436df29d7SRoland McGrath if (unlikely(!arch_has_single_step())) 76536df29d7SRoland McGrath return -EIO; 76636df29d7SRoland McGrath user_enable_single_step(child); 7673a709703SRoland McGrath } else { 76836df29d7SRoland McGrath user_disable_single_step(child); 7693a709703SRoland McGrath } 77036df29d7SRoland McGrath 771b72c1869SOleg Nesterov /* 772b72c1869SOleg Nesterov * Change ->exit_code and ->state under siglock to avoid the race 773b72c1869SOleg Nesterov * with wait_task_stopped() in between; a non-zero ->exit_code will 774b72c1869SOleg Nesterov * wrongly look like another report from tracee. 
775b72c1869SOleg Nesterov * 776b72c1869SOleg Nesterov * Note that we need siglock even if ->exit_code == data and/or this 777b72c1869SOleg Nesterov * status was not reported yet, the new status must not be cleared by 778b72c1869SOleg Nesterov * wait_task_stopped() after resume. 779b72c1869SOleg Nesterov * 780b72c1869SOleg Nesterov * If data == 0 we do not care if wait_task_stopped() reports the old 781b72c1869SOleg Nesterov * status and clears the code too; this can't race with the tracee, it 782b72c1869SOleg Nesterov * takes siglock after resume. 783b72c1869SOleg Nesterov */ 784b72c1869SOleg Nesterov need_siglock = data && !thread_group_empty(current); 785b72c1869SOleg Nesterov if (need_siglock) 786b72c1869SOleg Nesterov spin_lock_irq(&child->sighand->siglock); 78736df29d7SRoland McGrath child->exit_code = data; 7880666fb51SOleg Nesterov wake_up_state(child, __TASK_TRACED); 789b72c1869SOleg Nesterov if (need_siglock) 790b72c1869SOleg Nesterov spin_unlock_irq(&child->sighand->siglock); 79136df29d7SRoland McGrath 79236df29d7SRoland McGrath return 0; 79336df29d7SRoland McGrath } 79436df29d7SRoland McGrath 7952225a122SSuresh Siddha #ifdef CONFIG_HAVE_ARCH_TRACEHOOK 7962225a122SSuresh Siddha 7972225a122SSuresh Siddha static const struct user_regset * 7982225a122SSuresh Siddha find_regset(const struct user_regset_view *view, unsigned int type) 7992225a122SSuresh Siddha { 8002225a122SSuresh Siddha const struct user_regset *regset; 8012225a122SSuresh Siddha int n; 8022225a122SSuresh Siddha 8032225a122SSuresh Siddha for (n = 0; n < view->n; ++n) { 8042225a122SSuresh Siddha regset = view->regsets + n; 8052225a122SSuresh Siddha if (regset->core_note_type == type) 8062225a122SSuresh Siddha return regset; 8072225a122SSuresh Siddha } 8082225a122SSuresh Siddha 8092225a122SSuresh Siddha return NULL; 8102225a122SSuresh Siddha } 8112225a122SSuresh Siddha 8122225a122SSuresh Siddha static int ptrace_regset(struct task_struct *task, int req, unsigned int type, 8132225a122SSuresh 
Siddha struct iovec *kiov) 8142225a122SSuresh Siddha { 8152225a122SSuresh Siddha const struct user_regset_view *view = task_user_regset_view(task); 8162225a122SSuresh Siddha const struct user_regset *regset = find_regset(view, type); 8172225a122SSuresh Siddha int regset_no; 8182225a122SSuresh Siddha 8192225a122SSuresh Siddha if (!regset || (kiov->iov_len % regset->size) != 0) 820c6a0dd7eSSuresh Siddha return -EINVAL; 8212225a122SSuresh Siddha 8222225a122SSuresh Siddha regset_no = regset - view->regsets; 8232225a122SSuresh Siddha kiov->iov_len = min(kiov->iov_len, 8242225a122SSuresh Siddha (__kernel_size_t) (regset->n * regset->size)); 8252225a122SSuresh Siddha 8262225a122SSuresh Siddha if (req == PTRACE_GETREGSET) 8272225a122SSuresh Siddha return copy_regset_to_user(task, view, regset_no, 0, 8282225a122SSuresh Siddha kiov->iov_len, kiov->iov_base); 8292225a122SSuresh Siddha else 8302225a122SSuresh Siddha return copy_regset_from_user(task, view, regset_no, 0, 8312225a122SSuresh Siddha kiov->iov_len, kiov->iov_base); 8322225a122SSuresh Siddha } 8332225a122SSuresh Siddha 834e8440c14SJosh Stone /* 835e8440c14SJosh Stone * This is declared in linux/regset.h and defined in machine-dependent 836e8440c14SJosh Stone * code. We put the export here, near the primary machine-neutral use, 837e8440c14SJosh Stone * to ensure no machine forgets it. 
838e8440c14SJosh Stone */ 839e8440c14SJosh Stone EXPORT_SYMBOL_GPL(task_user_regset_view); 8402225a122SSuresh Siddha #endif 8412225a122SSuresh Siddha 8421da177e4SLinus Torvalds int ptrace_request(struct task_struct *child, long request, 8434abf9869SNamhyung Kim unsigned long addr, unsigned long data) 8441da177e4SLinus Torvalds { 845fca26f26STejun Heo bool seized = child->ptrace & PT_SEIZED; 8461da177e4SLinus Torvalds int ret = -EIO; 847544b2c91STejun Heo siginfo_t siginfo, *si; 8489fed81dcSNamhyung Kim void __user *datavp = (void __user *) data; 8499fed81dcSNamhyung Kim unsigned long __user *datalp = datavp; 850fca26f26STejun Heo unsigned long flags; 8511da177e4SLinus Torvalds 8521da177e4SLinus Torvalds switch (request) { 85316c3e389SRoland McGrath case PTRACE_PEEKTEXT: 85416c3e389SRoland McGrath case PTRACE_PEEKDATA: 85516c3e389SRoland McGrath return generic_ptrace_peekdata(child, addr, data); 85616c3e389SRoland McGrath case PTRACE_POKETEXT: 85716c3e389SRoland McGrath case PTRACE_POKEDATA: 85816c3e389SRoland McGrath return generic_ptrace_pokedata(child, addr, data); 85916c3e389SRoland McGrath 8601da177e4SLinus Torvalds #ifdef PTRACE_OLDSETOPTIONS 8611da177e4SLinus Torvalds case PTRACE_OLDSETOPTIONS: 8621da177e4SLinus Torvalds #endif 8631da177e4SLinus Torvalds case PTRACE_SETOPTIONS: 8641da177e4SLinus Torvalds ret = ptrace_setoptions(child, data); 8651da177e4SLinus Torvalds break; 8661da177e4SLinus Torvalds case PTRACE_GETEVENTMSG: 8679fed81dcSNamhyung Kim ret = put_user(child->ptrace_message, datalp); 8681da177e4SLinus Torvalds break; 869e16b2781SRoland McGrath 87084c751bdSAndrey Vagin case PTRACE_PEEKSIGINFO: 87184c751bdSAndrey Vagin ret = ptrace_peek_siginfo(child, addr, data); 87284c751bdSAndrey Vagin break; 87384c751bdSAndrey Vagin 8741da177e4SLinus Torvalds case PTRACE_GETSIGINFO: 875e16b2781SRoland McGrath ret = ptrace_getsiginfo(child, &siginfo); 876e16b2781SRoland McGrath if (!ret) 8779fed81dcSNamhyung Kim ret = copy_siginfo_to_user(datavp, &siginfo); 
8781da177e4SLinus Torvalds break; 879e16b2781SRoland McGrath 8801da177e4SLinus Torvalds case PTRACE_SETSIGINFO: 8819fed81dcSNamhyung Kim if (copy_from_user(&siginfo, datavp, sizeof siginfo)) 882e16b2781SRoland McGrath ret = -EFAULT; 883e16b2781SRoland McGrath else 884e16b2781SRoland McGrath ret = ptrace_setsiginfo(child, &siginfo); 8851da177e4SLinus Torvalds break; 886e16b2781SRoland McGrath 88729000caeSAndrey Vagin case PTRACE_GETSIGMASK: 88829000caeSAndrey Vagin if (addr != sizeof(sigset_t)) { 88929000caeSAndrey Vagin ret = -EINVAL; 89029000caeSAndrey Vagin break; 89129000caeSAndrey Vagin } 89229000caeSAndrey Vagin 89329000caeSAndrey Vagin if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t))) 89429000caeSAndrey Vagin ret = -EFAULT; 89529000caeSAndrey Vagin else 89629000caeSAndrey Vagin ret = 0; 89729000caeSAndrey Vagin 89829000caeSAndrey Vagin break; 89929000caeSAndrey Vagin 90029000caeSAndrey Vagin case PTRACE_SETSIGMASK: { 90129000caeSAndrey Vagin sigset_t new_set; 90229000caeSAndrey Vagin 90329000caeSAndrey Vagin if (addr != sizeof(sigset_t)) { 90429000caeSAndrey Vagin ret = -EINVAL; 90529000caeSAndrey Vagin break; 90629000caeSAndrey Vagin } 90729000caeSAndrey Vagin 90829000caeSAndrey Vagin if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) { 90929000caeSAndrey Vagin ret = -EFAULT; 91029000caeSAndrey Vagin break; 91129000caeSAndrey Vagin } 91229000caeSAndrey Vagin 91329000caeSAndrey Vagin sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); 91429000caeSAndrey Vagin 91529000caeSAndrey Vagin /* 91629000caeSAndrey Vagin * Every thread does recalc_sigpending() after resume, so 91729000caeSAndrey Vagin * retarget_shared_pending() and recalc_sigpending() are not 91829000caeSAndrey Vagin * called here. 
91929000caeSAndrey Vagin */ 92029000caeSAndrey Vagin spin_lock_irq(&child->sighand->siglock); 92129000caeSAndrey Vagin child->blocked = new_set; 92229000caeSAndrey Vagin spin_unlock_irq(&child->sighand->siglock); 92329000caeSAndrey Vagin 92429000caeSAndrey Vagin ret = 0; 92529000caeSAndrey Vagin break; 92629000caeSAndrey Vagin } 92729000caeSAndrey Vagin 928fca26f26STejun Heo case PTRACE_INTERRUPT: 929fca26f26STejun Heo /* 930fca26f26STejun Heo * Stop tracee without any side-effect on signal or job 931fca26f26STejun Heo * control. At least one trap is guaranteed to happen 932fca26f26STejun Heo * after this request. If @child is already trapped, the 933fca26f26STejun Heo * current trap is not disturbed and another trap will 934fca26f26STejun Heo * happen after the current trap is ended with PTRACE_CONT. 935fca26f26STejun Heo * 936fca26f26STejun Heo * The actual trap might not be PTRACE_EVENT_STOP trap but 937fca26f26STejun Heo * the pending condition is cleared regardless. 938fca26f26STejun Heo */ 939fca26f26STejun Heo if (unlikely(!seized || !lock_task_sighand(child, &flags))) 940fca26f26STejun Heo break; 941fca26f26STejun Heo 942544b2c91STejun Heo /* 943544b2c91STejun Heo * INTERRUPT doesn't disturb existing trap sans one 944544b2c91STejun Heo * exception. If ptracer issued LISTEN for the current 945544b2c91STejun Heo * STOP, this INTERRUPT should clear LISTEN and re-trap 946544b2c91STejun Heo * tracee into STOP. 947544b2c91STejun Heo */ 948fca26f26STejun Heo if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP))) 949910ffdb1SOleg Nesterov ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); 950544b2c91STejun Heo 951544b2c91STejun Heo unlock_task_sighand(child, &flags); 952544b2c91STejun Heo ret = 0; 953544b2c91STejun Heo break; 954544b2c91STejun Heo 955544b2c91STejun Heo case PTRACE_LISTEN: 956544b2c91STejun Heo /* 957544b2c91STejun Heo * Listen for events. Tracee must be in STOP. 
It's not 958544b2c91STejun Heo * resumed per-se but is not considered to be in TRACED by 959544b2c91STejun Heo * wait(2) or ptrace(2). If an async event (e.g. group 960544b2c91STejun Heo * stop state change) happens, tracee will enter STOP trap 961544b2c91STejun Heo * again. Alternatively, ptracer can issue INTERRUPT to 962544b2c91STejun Heo * finish listening and re-trap tracee into STOP. 963544b2c91STejun Heo */ 964544b2c91STejun Heo if (unlikely(!seized || !lock_task_sighand(child, &flags))) 965544b2c91STejun Heo break; 966544b2c91STejun Heo 967544b2c91STejun Heo si = child->last_siginfo; 968f9d81f61SOleg Nesterov if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) { 969544b2c91STejun Heo child->jobctl |= JOBCTL_LISTENING; 970544b2c91STejun Heo /* 971f9d81f61SOleg Nesterov * If NOTIFY is set, it means event happened between 972f9d81f61SOleg Nesterov * start of this trap and now. Trigger re-trap. 973544b2c91STejun Heo */ 974544b2c91STejun Heo if (child->jobctl & JOBCTL_TRAP_NOTIFY) 975910ffdb1SOleg Nesterov ptrace_signal_wake_up(child, true); 976fca26f26STejun Heo ret = 0; 977f9d81f61SOleg Nesterov } 978f9d81f61SOleg Nesterov unlock_task_sighand(child, &flags); 979fca26f26STejun Heo break; 980fca26f26STejun Heo 9811bcf5482SAlexey Dobriyan case PTRACE_DETACH: /* detach a process that was attached. 
*/ 9821bcf5482SAlexey Dobriyan ret = ptrace_detach(child, data); 9831bcf5482SAlexey Dobriyan break; 98436df29d7SRoland McGrath 9859c1a1259SMike Frysinger #ifdef CONFIG_BINFMT_ELF_FDPIC 9869c1a1259SMike Frysinger case PTRACE_GETFDPIC: { 987e0129ef9SOleg Nesterov struct mm_struct *mm = get_task_mm(child); 9889c1a1259SMike Frysinger unsigned long tmp = 0; 9899c1a1259SMike Frysinger 990e0129ef9SOleg Nesterov ret = -ESRCH; 991e0129ef9SOleg Nesterov if (!mm) 992e0129ef9SOleg Nesterov break; 993e0129ef9SOleg Nesterov 9949c1a1259SMike Frysinger switch (addr) { 9959c1a1259SMike Frysinger case PTRACE_GETFDPIC_EXEC: 996e0129ef9SOleg Nesterov tmp = mm->context.exec_fdpic_loadmap; 9979c1a1259SMike Frysinger break; 9989c1a1259SMike Frysinger case PTRACE_GETFDPIC_INTERP: 999e0129ef9SOleg Nesterov tmp = mm->context.interp_fdpic_loadmap; 10009c1a1259SMike Frysinger break; 10019c1a1259SMike Frysinger default: 10029c1a1259SMike Frysinger break; 10039c1a1259SMike Frysinger } 1004e0129ef9SOleg Nesterov mmput(mm); 10059c1a1259SMike Frysinger 10069fed81dcSNamhyung Kim ret = put_user(tmp, datalp); 10079c1a1259SMike Frysinger break; 10089c1a1259SMike Frysinger } 10099c1a1259SMike Frysinger #endif 10109c1a1259SMike Frysinger 101136df29d7SRoland McGrath #ifdef PTRACE_SINGLESTEP 101236df29d7SRoland McGrath case PTRACE_SINGLESTEP: 101336df29d7SRoland McGrath #endif 10145b88abbfSRoland McGrath #ifdef PTRACE_SINGLEBLOCK 10155b88abbfSRoland McGrath case PTRACE_SINGLEBLOCK: 10165b88abbfSRoland McGrath #endif 101736df29d7SRoland McGrath #ifdef PTRACE_SYSEMU 101836df29d7SRoland McGrath case PTRACE_SYSEMU: 101936df29d7SRoland McGrath case PTRACE_SYSEMU_SINGLESTEP: 102036df29d7SRoland McGrath #endif 102136df29d7SRoland McGrath case PTRACE_SYSCALL: 102236df29d7SRoland McGrath case PTRACE_CONT: 102336df29d7SRoland McGrath return ptrace_resume(child, request, data); 102436df29d7SRoland McGrath 102536df29d7SRoland McGrath case PTRACE_KILL: 102636df29d7SRoland McGrath if (child->exit_state) /* already dead 
*/ 102736df29d7SRoland McGrath return 0; 102836df29d7SRoland McGrath return ptrace_resume(child, request, SIGKILL); 102936df29d7SRoland McGrath 10302225a122SSuresh Siddha #ifdef CONFIG_HAVE_ARCH_TRACEHOOK 10312225a122SSuresh Siddha case PTRACE_GETREGSET: 103229000caeSAndrey Vagin case PTRACE_SETREGSET: { 10332225a122SSuresh Siddha struct iovec kiov; 10349fed81dcSNamhyung Kim struct iovec __user *uiov = datavp; 10352225a122SSuresh Siddha 10362225a122SSuresh Siddha if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov))) 10372225a122SSuresh Siddha return -EFAULT; 10382225a122SSuresh Siddha 10392225a122SSuresh Siddha if (__get_user(kiov.iov_base, &uiov->iov_base) || 10402225a122SSuresh Siddha __get_user(kiov.iov_len, &uiov->iov_len)) 10412225a122SSuresh Siddha return -EFAULT; 10422225a122SSuresh Siddha 10432225a122SSuresh Siddha ret = ptrace_regset(child, request, addr, &kiov); 10442225a122SSuresh Siddha if (!ret) 10452225a122SSuresh Siddha ret = __put_user(kiov.iov_len, &uiov->iov_len); 10462225a122SSuresh Siddha break; 10472225a122SSuresh Siddha } 10482225a122SSuresh Siddha #endif 1049f8e529edSTycho Andersen 1050f8e529edSTycho Andersen case PTRACE_SECCOMP_GET_FILTER: 1051f8e529edSTycho Andersen ret = seccomp_get_filter(child, addr, datavp); 1052f8e529edSTycho Andersen break; 1053f8e529edSTycho Andersen 10541da177e4SLinus Torvalds default: 10551da177e4SLinus Torvalds break; 10561da177e4SLinus Torvalds } 10571da177e4SLinus Torvalds 10581da177e4SLinus Torvalds return ret; 10591da177e4SLinus Torvalds } 1060481bed45SChristoph Hellwig 10618053bdd5SOleg Nesterov static struct task_struct *ptrace_get_task_struct(pid_t pid) 10626b9c7ed8SChristoph Hellwig { 10636b9c7ed8SChristoph Hellwig struct task_struct *child; 10646b9c7ed8SChristoph Hellwig 10658053bdd5SOleg Nesterov rcu_read_lock(); 1066228ebcbeSPavel Emelyanov child = find_task_by_vpid(pid); 1067481bed45SChristoph Hellwig if (child) 1068481bed45SChristoph Hellwig get_task_struct(child); 10698053bdd5SOleg Nesterov 
rcu_read_unlock(); 1070f400e198SSukadev Bhattiprolu 1071481bed45SChristoph Hellwig if (!child) 10726b9c7ed8SChristoph Hellwig return ERR_PTR(-ESRCH); 10736b9c7ed8SChristoph Hellwig return child; 1074481bed45SChristoph Hellwig } 1075481bed45SChristoph Hellwig 10760ac15559SChristoph Hellwig #ifndef arch_ptrace_attach 10770ac15559SChristoph Hellwig #define arch_ptrace_attach(child) do { } while (0) 10780ac15559SChristoph Hellwig #endif 10790ac15559SChristoph Hellwig 10804abf9869SNamhyung Kim SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, 10814abf9869SNamhyung Kim unsigned long, data) 1082481bed45SChristoph Hellwig { 1083481bed45SChristoph Hellwig struct task_struct *child; 1084481bed45SChristoph Hellwig long ret; 1085481bed45SChristoph Hellwig 10866b9c7ed8SChristoph Hellwig if (request == PTRACE_TRACEME) { 10876b9c7ed8SChristoph Hellwig ret = ptrace_traceme(); 10886ea6dd93SHaavard Skinnemoen if (!ret) 10896ea6dd93SHaavard Skinnemoen arch_ptrace_attach(current); 1090481bed45SChristoph Hellwig goto out; 10916b9c7ed8SChristoph Hellwig } 10926b9c7ed8SChristoph Hellwig 10936b9c7ed8SChristoph Hellwig child = ptrace_get_task_struct(pid); 10946b9c7ed8SChristoph Hellwig if (IS_ERR(child)) { 10956b9c7ed8SChristoph Hellwig ret = PTR_ERR(child); 10966b9c7ed8SChristoph Hellwig goto out; 10976b9c7ed8SChristoph Hellwig } 1098481bed45SChristoph Hellwig 10993544d72aSTejun Heo if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { 1100aa9147c9SDenys Vlasenko ret = ptrace_attach(child, request, addr, data); 11010ac15559SChristoph Hellwig /* 11020ac15559SChristoph Hellwig * Some architectures need to do book-keeping after 11030ac15559SChristoph Hellwig * a ptrace attach. 
11040ac15559SChristoph Hellwig */ 11050ac15559SChristoph Hellwig if (!ret) 11060ac15559SChristoph Hellwig arch_ptrace_attach(child); 1107005f18dfSChristoph Hellwig goto out_put_task_struct; 1108481bed45SChristoph Hellwig } 1109481bed45SChristoph Hellwig 1110fca26f26STejun Heo ret = ptrace_check_attach(child, request == PTRACE_KILL || 1111fca26f26STejun Heo request == PTRACE_INTERRUPT); 1112481bed45SChristoph Hellwig if (ret < 0) 1113481bed45SChristoph Hellwig goto out_put_task_struct; 1114481bed45SChristoph Hellwig 1115481bed45SChristoph Hellwig ret = arch_ptrace(child, request, addr, data); 11169899d11fSOleg Nesterov if (ret || request != PTRACE_DETACH) 11179899d11fSOleg Nesterov ptrace_unfreeze_traced(child); 1118481bed45SChristoph Hellwig 1119481bed45SChristoph Hellwig out_put_task_struct: 1120481bed45SChristoph Hellwig put_task_struct(child); 1121481bed45SChristoph Hellwig out: 1122481bed45SChristoph Hellwig return ret; 1123481bed45SChristoph Hellwig } 112476647323SAlexey Dobriyan 11254abf9869SNamhyung Kim int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, 11264abf9869SNamhyung Kim unsigned long data) 112776647323SAlexey Dobriyan { 112876647323SAlexey Dobriyan unsigned long tmp; 112976647323SAlexey Dobriyan int copied; 113076647323SAlexey Dobriyan 1131f307ab6dSLorenzo Stoakes copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE); 113276647323SAlexey Dobriyan if (copied != sizeof(tmp)) 113376647323SAlexey Dobriyan return -EIO; 113476647323SAlexey Dobriyan return put_user(tmp, (unsigned long __user *)data); 113576647323SAlexey Dobriyan } 1136f284ce72SAlexey Dobriyan 11374abf9869SNamhyung Kim int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, 11384abf9869SNamhyung Kim unsigned long data) 1139f284ce72SAlexey Dobriyan { 1140f284ce72SAlexey Dobriyan int copied; 1141f284ce72SAlexey Dobriyan 1142f307ab6dSLorenzo Stoakes copied = access_process_vm(tsk, addr, &data, sizeof(data), 1143f307ab6dSLorenzo Stoakes 
FOLL_FORCE | FOLL_WRITE); 1144f284ce72SAlexey Dobriyan return (copied == sizeof(data)) ? 0 : -EIO; 1145f284ce72SAlexey Dobriyan } 1146032d82d9SRoland McGrath 114796b8936aSChristoph Hellwig #if defined CONFIG_COMPAT 1148032d82d9SRoland McGrath 1149032d82d9SRoland McGrath int compat_ptrace_request(struct task_struct *child, compat_long_t request, 1150032d82d9SRoland McGrath compat_ulong_t addr, compat_ulong_t data) 1151032d82d9SRoland McGrath { 1152032d82d9SRoland McGrath compat_ulong_t __user *datap = compat_ptr(data); 1153032d82d9SRoland McGrath compat_ulong_t word; 1154e16b2781SRoland McGrath siginfo_t siginfo; 1155032d82d9SRoland McGrath int ret; 1156032d82d9SRoland McGrath 1157032d82d9SRoland McGrath switch (request) { 1158032d82d9SRoland McGrath case PTRACE_PEEKTEXT: 1159032d82d9SRoland McGrath case PTRACE_PEEKDATA: 1160f307ab6dSLorenzo Stoakes ret = access_process_vm(child, addr, &word, sizeof(word), 1161f307ab6dSLorenzo Stoakes FOLL_FORCE); 1162032d82d9SRoland McGrath if (ret != sizeof(word)) 1163032d82d9SRoland McGrath ret = -EIO; 1164032d82d9SRoland McGrath else 1165032d82d9SRoland McGrath ret = put_user(word, datap); 1166032d82d9SRoland McGrath break; 1167032d82d9SRoland McGrath 1168032d82d9SRoland McGrath case PTRACE_POKETEXT: 1169032d82d9SRoland McGrath case PTRACE_POKEDATA: 1170f307ab6dSLorenzo Stoakes ret = access_process_vm(child, addr, &data, sizeof(data), 1171f307ab6dSLorenzo Stoakes FOLL_FORCE | FOLL_WRITE); 1172032d82d9SRoland McGrath ret = (ret != sizeof(data) ? 
-EIO : 0); 1173032d82d9SRoland McGrath break; 1174032d82d9SRoland McGrath 1175032d82d9SRoland McGrath case PTRACE_GETEVENTMSG: 1176032d82d9SRoland McGrath ret = put_user((compat_ulong_t) child->ptrace_message, datap); 1177032d82d9SRoland McGrath break; 1178032d82d9SRoland McGrath 1179e16b2781SRoland McGrath case PTRACE_GETSIGINFO: 1180e16b2781SRoland McGrath ret = ptrace_getsiginfo(child, &siginfo); 1181e16b2781SRoland McGrath if (!ret) 1182e16b2781SRoland McGrath ret = copy_siginfo_to_user32( 1183e16b2781SRoland McGrath (struct compat_siginfo __user *) datap, 1184e16b2781SRoland McGrath &siginfo); 1185e16b2781SRoland McGrath break; 1186e16b2781SRoland McGrath 1187e16b2781SRoland McGrath case PTRACE_SETSIGINFO: 1188e16b2781SRoland McGrath memset(&siginfo, 0, sizeof siginfo); 1189e16b2781SRoland McGrath if (copy_siginfo_from_user32( 1190e16b2781SRoland McGrath &siginfo, (struct compat_siginfo __user *) datap)) 1191e16b2781SRoland McGrath ret = -EFAULT; 1192e16b2781SRoland McGrath else 1193e16b2781SRoland McGrath ret = ptrace_setsiginfo(child, &siginfo); 1194e16b2781SRoland McGrath break; 11952225a122SSuresh Siddha #ifdef CONFIG_HAVE_ARCH_TRACEHOOK 11962225a122SSuresh Siddha case PTRACE_GETREGSET: 11972225a122SSuresh Siddha case PTRACE_SETREGSET: 11982225a122SSuresh Siddha { 11992225a122SSuresh Siddha struct iovec kiov; 12002225a122SSuresh Siddha struct compat_iovec __user *uiov = 12012225a122SSuresh Siddha (struct compat_iovec __user *) datap; 12022225a122SSuresh Siddha compat_uptr_t ptr; 12032225a122SSuresh Siddha compat_size_t len; 12042225a122SSuresh Siddha 12052225a122SSuresh Siddha if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov))) 12062225a122SSuresh Siddha return -EFAULT; 12072225a122SSuresh Siddha 12082225a122SSuresh Siddha if (__get_user(ptr, &uiov->iov_base) || 12092225a122SSuresh Siddha __get_user(len, &uiov->iov_len)) 12102225a122SSuresh Siddha return -EFAULT; 12112225a122SSuresh Siddha 12122225a122SSuresh Siddha kiov.iov_base = compat_ptr(ptr); 
12132225a122SSuresh Siddha kiov.iov_len = len; 12142225a122SSuresh Siddha 12152225a122SSuresh Siddha ret = ptrace_regset(child, request, addr, &kiov); 12162225a122SSuresh Siddha if (!ret) 12172225a122SSuresh Siddha ret = __put_user(kiov.iov_len, &uiov->iov_len); 12182225a122SSuresh Siddha break; 12192225a122SSuresh Siddha } 12202225a122SSuresh Siddha #endif 1221e16b2781SRoland McGrath 1222032d82d9SRoland McGrath default: 1223032d82d9SRoland McGrath ret = ptrace_request(child, request, addr, data); 1224032d82d9SRoland McGrath } 1225032d82d9SRoland McGrath 1226032d82d9SRoland McGrath return ret; 1227032d82d9SRoland McGrath } 1228c269f196SRoland McGrath 122962a6fa97SHeiko Carstens COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid, 123062a6fa97SHeiko Carstens compat_long_t, addr, compat_long_t, data) 1231c269f196SRoland McGrath { 1232c269f196SRoland McGrath struct task_struct *child; 1233c269f196SRoland McGrath long ret; 1234c269f196SRoland McGrath 1235c269f196SRoland McGrath if (request == PTRACE_TRACEME) { 1236c269f196SRoland McGrath ret = ptrace_traceme(); 1237c269f196SRoland McGrath goto out; 1238c269f196SRoland McGrath } 1239c269f196SRoland McGrath 1240c269f196SRoland McGrath child = ptrace_get_task_struct(pid); 1241c269f196SRoland McGrath if (IS_ERR(child)) { 1242c269f196SRoland McGrath ret = PTR_ERR(child); 1243c269f196SRoland McGrath goto out; 1244c269f196SRoland McGrath } 1245c269f196SRoland McGrath 12463544d72aSTejun Heo if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { 1247aa9147c9SDenys Vlasenko ret = ptrace_attach(child, request, addr, data); 1248c269f196SRoland McGrath /* 1249c269f196SRoland McGrath * Some architectures need to do book-keeping after 1250c269f196SRoland McGrath * a ptrace attach. 
1251c269f196SRoland McGrath */ 1252c269f196SRoland McGrath if (!ret) 1253c269f196SRoland McGrath arch_ptrace_attach(child); 1254c269f196SRoland McGrath goto out_put_task_struct; 1255c269f196SRoland McGrath } 1256c269f196SRoland McGrath 1257fca26f26STejun Heo ret = ptrace_check_attach(child, request == PTRACE_KILL || 1258fca26f26STejun Heo request == PTRACE_INTERRUPT); 12599899d11fSOleg Nesterov if (!ret) { 1260c269f196SRoland McGrath ret = compat_arch_ptrace(child, request, addr, data); 12619899d11fSOleg Nesterov if (ret || request != PTRACE_DETACH) 12629899d11fSOleg Nesterov ptrace_unfreeze_traced(child); 12639899d11fSOleg Nesterov } 1264c269f196SRoland McGrath 1265c269f196SRoland McGrath out_put_task_struct: 1266c269f196SRoland McGrath put_task_struct(child); 1267c269f196SRoland McGrath out: 1268c269f196SRoland McGrath return ret; 1269c269f196SRoland McGrath } 127096b8936aSChristoph Hellwig #endif /* CONFIG_COMPAT */ 1271