xref: /openbmc/linux/kernel/ptrace.c (revision 0d456bad)
1 /*
2  * linux/kernel/ptrace.c
3  *
4  * (C) Copyright 1999 Linus Torvalds
5  *
6  * Common interfaces for "ptrace()" which we do not want
7  * to continually duplicate across every architecture.
8  */
9 
10 #include <linux/capability.h>
11 #include <linux/export.h>
12 #include <linux/sched.h>
13 #include <linux/errno.h>
14 #include <linux/mm.h>
15 #include <linux/highmem.h>
16 #include <linux/pagemap.h>
17 #include <linux/ptrace.h>
18 #include <linux/security.h>
19 #include <linux/signal.h>
20 #include <linux/audit.h>
21 #include <linux/pid_namespace.h>
22 #include <linux/syscalls.h>
23 #include <linux/uaccess.h>
24 #include <linux/regset.h>
25 #include <linux/hw_breakpoint.h>
26 #include <linux/cn_proc.h>
27 
28 
29 static int ptrace_trapping_sleep_fn(void *flags)
30 {
31 	schedule();
32 	return 0;
33 }
34 
35 /*
36  * ptrace a task: make the debugger its new parent and
37  * move it to the ptrace list.
38  *
39  * Must be called with the tasklist lock write-held.
40  */
41 void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
42 {
43 	BUG_ON(!list_empty(&child->ptrace_entry));
44 	list_add(&child->ptrace_entry, &new_parent->ptraced);
45 	child->parent = new_parent;
46 }
47 
48 /**
49  * __ptrace_unlink - unlink ptracee and restore its execution state
50  * @child: ptracee to be unlinked
51  *
52  * Remove @child from the ptrace list, move it back to the original parent,
53  * and restore the execution state so that it conforms to the group stop
54  * state.
55  *
56  * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
57  * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
58  * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
59  * If the ptracer is exiting, the ptracee can be in any state.
60  *
61  * After detach, the ptracee should be in a state which conforms to the
62  * group stop.  If the group is stopped or in the process of stopping, the
63  * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
64  * up from TASK_TRACED.
65  *
66  * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
67  * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
68  * to but in the opposite direction of what happens while attaching to a
69  * stopped task.  However, in this direction, the intermediate RUNNING
70  * state is not hidden even from the current ptracer, and if it immediately
71  * re-attaches and performs a WNOHANG wait(2), it may fail.
72  *
73  * CONTEXT:
74  * write_lock_irq(tasklist_lock)
75  */
76 void __ptrace_unlink(struct task_struct *child)
77 {
78 	BUG_ON(!child->ptrace);
79 
80 	child->ptrace = 0;
81 	child->parent = child->real_parent;
82 	list_del_init(&child->ptrace_entry);
83 
84 	spin_lock(&child->sighand->siglock);
85 
86 	/*
87 	 * Clear all pending traps and TRAPPING.  TRAPPING should be
88 	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
89 	 */
90 	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
91 	task_clear_jobctl_trapping(child);
92 
93 	/*
94 	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
95 	 * @child isn't dead.
96 	 */
97 	if (!(child->flags & PF_EXITING) &&
98 	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
99 	     child->signal->group_stop_count)) {
100 		child->jobctl |= JOBCTL_STOP_PENDING;
101 
102 		/*
103 		 * This is only possible if this thread was cloned by the
104 		 * traced task running in the stopped group; set the signal
105 		 * for future reports.
106 		 * FIXME: we should change ptrace_init_task() to handle this
107 		 * case.
108 		 */
109 		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
110 			child->jobctl |= SIGSTOP;
111 	}
112 
113 	/*
114 	 * If a transition to TASK_STOPPED is pending or @child is in
115 	 * TASK_TRACED, kick @child in the butt.  Note that the wakeup
116 	 * should resume @child only if it is in TASK_TRACED; otherwise,
117 	 * we might unduly disrupt TASK_KILLABLE sleeps.
118 	 */
119 	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
120 		signal_wake_up(child, task_is_traced(child));
121 
122 	spin_unlock(&child->sighand->siglock);
123 }
124 
125 /**
126  * ptrace_check_attach - check whether ptracee is ready for ptrace operation
127  * @child: ptracee to check for
128  * @ignore_state: don't check whether @child is currently %TASK_TRACED
129  *
130  * Check whether @child is being ptraced by %current and ready for further
131  * ptrace operations.  If @ignore_state is %false, @child also should be in
132  * %TASK_TRACED state and on return the child is guaranteed to be traced
133  * and not executing.  If @ignore_state is %true, @child can be in any
134  * state.
135  *
136  * CONTEXT:
137  * Grabs and releases tasklist_lock and @child->sighand->siglock.
138  *
139  * RETURNS:
140  * 0 on success, -ESRCH if %child is not ready.
141  */
142 int ptrace_check_attach(struct task_struct *child, bool ignore_state)
143 {
144 	int ret = -ESRCH;
145 
146 	/*
147 	 * We take the read lock around both checks to close a possible
148 	 * race where someone else was tracing our child and detached
149 	 * between these two checks.  After this locked check, we are
150 	 * sure that this is our traced child, and since only we can
151 	 * change that, it's not changing right after this.
152 	 */
153 	read_lock(&tasklist_lock);
154 	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
155 		/*
156 		 * child->sighand can't be NULL, release_task()
157 		 * does ptrace_unlink() before __exit_signal().
158 		 */
159 		spin_lock_irq(&child->sighand->siglock);
160 		WARN_ON_ONCE(task_is_stopped(child));
161 		if (ignore_state || (task_is_traced(child) &&
162 				     !(child->jobctl & JOBCTL_LISTENING)))
163 			ret = 0;
164 		spin_unlock_irq(&child->sighand->siglock);
165 	}
166 	read_unlock(&tasklist_lock);
167 
168 	if (!ret && !ignore_state)
169 		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
170 
171 	/* All systems go.. */
172 	return ret;
173 }
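
/*
 * A minimal user-space sketch of the ordering ptrace_check_attach()
 * enforces: after PTRACE_ATTACH the tracer must wait for the tracee to
 * enter the traced stop before issuing requests that need a stopped
 * tracee, otherwise ptrace(2) can fail with ESRCH.  Illustration only;
 * the pid and address are placeholders supplied by the caller.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <errno.h>
#include <stdio.h>

static long peek_when_stopped(pid_t pid, void *addr)
{
	int status;
	long word;

	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
		return -1;
	/* Wait for the SIGSTOP-induced stop before touching the tracee. */
	if (waitpid(pid, &status, 0) != pid || !WIFSTOPPED(status))
		return -1;

	errno = 0;
	word = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
	if (word == -1 && errno)
		perror("PTRACE_PEEKDATA");

	ptrace(PTRACE_DETACH, pid, NULL, NULL);
	return word;
}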
174 
175 static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
176 {
177 	if (mode & PTRACE_MODE_NOAUDIT)
178 		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
179 	else
180 		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
181 }
182 
183 /* Returns 0 on success, -errno on denial. */
184 static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
185 {
186 	const struct cred *cred = current_cred(), *tcred;
187 
188 	/* May we inspect the given task?
189 	 * This check is used both for attaching with ptrace
190 	 * and for allowing access to sensitive information in /proc.
191 	 *
192 	 * ptrace_attach denies several cases that /proc allows
193 	 * because setting up the necessary parent/child relationship
194 	 * or halting the specified task is impossible.
195 	 */
196 	int dumpable = 0;
197 	/* Don't let security modules deny introspection */
198 	if (task == current)
199 		return 0;
200 	rcu_read_lock();
201 	tcred = __task_cred(task);
202 	if (uid_eq(cred->uid, tcred->euid) &&
203 	    uid_eq(cred->uid, tcred->suid) &&
204 	    uid_eq(cred->uid, tcred->uid)  &&
205 	    gid_eq(cred->gid, tcred->egid) &&
206 	    gid_eq(cred->gid, tcred->sgid) &&
207 	    gid_eq(cred->gid, tcred->gid))
208 		goto ok;
209 	if (ptrace_has_cap(tcred->user_ns, mode))
210 		goto ok;
211 	rcu_read_unlock();
212 	return -EPERM;
213 ok:
214 	rcu_read_unlock();
215 	smp_rmb();
216 	if (task->mm)
217 		dumpable = get_dumpable(task->mm);
218 	rcu_read_lock();
219 	if (!dumpable && !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
220 		rcu_read_unlock();
221 		return -EPERM;
222 	}
223 	rcu_read_unlock();
224 
225 	return security_ptrace_access_check(task, mode);
226 }
227 
228 bool ptrace_may_access(struct task_struct *task, unsigned int mode)
229 {
230 	int err;
231 	task_lock(task);
232 	err = __ptrace_may_access(task, mode);
233 	task_unlock(task);
234 	return !err;
235 }
236 
237 static int ptrace_attach(struct task_struct *task, long request,
238 			 unsigned long addr,
239 			 unsigned long flags)
240 {
241 	bool seize = (request == PTRACE_SEIZE);
242 	int retval;
243 
244 	retval = -EIO;
245 	if (seize) {
246 		if (addr != 0)
247 			goto out;
248 		if (flags & ~(unsigned long)PTRACE_O_MASK)
249 			goto out;
250 		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
251 	} else {
252 		flags = PT_PTRACED;
253 	}
254 
255 	audit_ptrace(task);
256 
257 	retval = -EPERM;
258 	if (unlikely(task->flags & PF_KTHREAD))
259 		goto out;
260 	if (same_thread_group(task, current))
261 		goto out;
262 
263 	/*
264 	 * Protect exec's credential calculations against our interference;
265 	 * SUID, SGID and LSM creds get determined differently
266 	 * under ptrace.
267 	 */
268 	retval = -ERESTARTNOINTR;
269 	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
270 		goto out;
271 
272 	task_lock(task);
273 	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
274 	task_unlock(task);
275 	if (retval)
276 		goto unlock_creds;
277 
278 	write_lock_irq(&tasklist_lock);
279 	retval = -EPERM;
280 	if (unlikely(task->exit_state))
281 		goto unlock_tasklist;
282 	if (task->ptrace)
283 		goto unlock_tasklist;
284 
285 	if (seize)
286 		flags |= PT_SEIZED;
287 	rcu_read_lock();
288 	if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
289 		flags |= PT_PTRACE_CAP;
290 	rcu_read_unlock();
291 	task->ptrace = flags;
292 
293 	__ptrace_link(task, current);
294 
295 	/* SEIZE doesn't trap tracee on attach */
296 	if (!seize)
297 		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
298 
299 	spin_lock(&task->sighand->siglock);
300 
301 	/*
302 	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
303 	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
304 	 * TRAPPING, and kick it so that it transitions to TRACED.  TRAPPING
305 	 * event which clears the group stop states happens.  We'll wait
306 	 * for the transition to complete before returning from this
307 	 * function.
308 	 *
309 	 * This hides STOPPED -> RUNNING -> TRACED transition from the
310 	 * attaching thread but a different thread in the same group can
311 	 * still observe the transient RUNNING state.  IOW, if another
312 	 * thread's WNOHANG wait(2) on the stopped tracee races against
313 	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
314 	 *
315 	 * The following task_is_stopped() test is safe as both transitions
316 	 * in and out of STOPPED are protected by siglock.
317 	 */
318 	if (task_is_stopped(task) &&
319 	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
320 		signal_wake_up(task, 1);
321 
322 	spin_unlock(&task->sighand->siglock);
323 
324 	retval = 0;
325 unlock_tasklist:
326 	write_unlock_irq(&tasklist_lock);
327 unlock_creds:
328 	mutex_unlock(&task->signal->cred_guard_mutex);
329 out:
330 	if (!retval) {
331 		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
332 			    ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
333 		proc_ptrace_connector(task, PTRACE_ATTACH);
334 	}
335 
336 	return retval;
337 }
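
/*
 * User-space sketch of the two attach flavours handled above.  With
 * PTRACE_ATTACH a SIGSTOP is sent, so the tracer should waitpid() for
 * the resulting stop; with PTRACE_SEIZE addr must be 0, PTRACE_O_*
 * options are passed directly in data, and the tracee keeps running.
 * Illustration only; assumes a libc that defines PTRACE_SEIZE.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

static int attach_classic(pid_t pid)
{
	int status;

	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
		return -1;
	/* Consume the attach-time stop before issuing other requests. */
	return waitpid(pid, &status, 0) == pid ? 0 : -1;
}

static int attach_seize(pid_t pid)
{
	/* No trap on attach; the options take effect immediately. */
	return ptrace(PTRACE_SEIZE, pid, NULL,
		      (void *)(long)(PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEEXIT));
}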
338 
339 /**
340  * ptrace_traceme  --  helper for PTRACE_TRACEME
341  *
342  * Performs checks and sets PT_PTRACED.
343  * Should be used by all ptrace implementations for PTRACE_TRACEME.
344  */
345 static int ptrace_traceme(void)
346 {
347 	int ret = -EPERM;
348 
349 	write_lock_irq(&tasklist_lock);
350 	/* Are we already being traced? */
351 	if (!current->ptrace) {
352 		ret = security_ptrace_traceme(current->parent);
353 		/*
354 		 * Check PF_EXITING to ensure ->real_parent has not passed
355 		 * exit_ptrace(). Otherwise we don't report the error but
356 		 * pretend ->real_parent untraces us right after return.
357 		 */
358 		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
359 			current->ptrace = PT_PTRACED;
360 			__ptrace_link(current, current->real_parent);
361 		}
362 	}
363 	write_unlock_irq(&tasklist_lock);
364 
365 	return ret;
366 }
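
/*
 * Typical user-space use of PTRACE_TRACEME: the child asks its real
 * parent to trace it before exec'ing the target, so the parent observes
 * a stop when the exec completes.  A sketch for illustration; argv is
 * whatever program the caller wants to run under tracing.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

static pid_t spawn_traced(char *const argv[])
{
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execvp(argv[0], argv);
		_exit(127);
	}
	if (pid > 0) {
		int status;

		/* The child stops with SIGTRAP once execvp() succeeds. */
		waitpid(pid, &status, 0);
	}
	return pid;
}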
367 
368 /*
369  * Called with irqs disabled; returns true if children should reap themselves.
370  */
371 static int ignoring_children(struct sighand_struct *sigh)
372 {
373 	int ret;
374 	spin_lock(&sigh->siglock);
375 	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
376 	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
377 	spin_unlock(&sigh->siglock);
378 	return ret;
379 }
380 
381 /*
382  * Called with tasklist_lock held for writing.
383  * Unlink a traced task, and clean it up if it was a traced zombie.
384  * Return true if it needs to be reaped with release_task().
385  * (We can't call release_task() here because we already hold tasklist_lock.)
386  *
387  * If it's a zombie, our attachedness prevented normal parent notification
388  * or self-reaping.  Do notification now if it would have happened earlier.
389  * If it should reap itself, return true.
390  *
391  * If it's our own child, there is no notification to do. But if our normal
392  * children self-reap, then this child was prevented by ptrace and we must
393  * reap it now; in that case we must also wake up sub-threads sleeping in
394  * do_wait().
395  */
396 static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
397 {
398 	bool dead;
399 
400 	__ptrace_unlink(p);
401 
402 	if (p->exit_state != EXIT_ZOMBIE)
403 		return false;
404 
405 	dead = !thread_group_leader(p);
406 
407 	if (!dead && thread_group_empty(p)) {
408 		if (!same_thread_group(p->real_parent, tracer))
409 			dead = do_notify_parent(p, p->exit_signal);
410 		else if (ignoring_children(tracer->sighand)) {
411 			__wake_up_parent(p, tracer);
412 			dead = true;
413 		}
414 	}
415 	/* Mark it as in the process of being reaped. */
416 	if (dead)
417 		p->exit_state = EXIT_DEAD;
418 	return dead;
419 }
420 
421 static int ptrace_detach(struct task_struct *child, unsigned int data)
422 {
423 	bool dead = false;
424 
425 	if (!valid_signal(data))
426 		return -EIO;
427 
428 	/* Architecture-specific hardware disable .. */
429 	ptrace_disable(child);
430 	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
431 
432 	write_lock_irq(&tasklist_lock);
433 	/*
434 	 * This child may already have been killed. Make sure de_thread() or
435 	 * our sub-thread doing do_wait() hasn't already called release_task().
436 	 */
437 	if (child->ptrace) {
438 		child->exit_code = data;
439 		dead = __ptrace_detach(current, child);
440 	}
441 	write_unlock_irq(&tasklist_lock);
442 
443 	proc_ptrace_connector(child, PTRACE_DETACH);
444 	if (unlikely(dead))
445 		release_task(child);
446 
447 	return 0;
448 }
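
/*
 * Matching user-space call, for illustration: PTRACE_DETACH resumes the
 * tracee, and a non-zero data argument (which must pass valid_signal()
 * above) is delivered to it as a signal on resume.  The tracee must be
 * in a ptrace stop for the request to be accepted.
 */
#include <sys/types.h>
#include <sys/ptrace.h>

static int detach_with_signal(pid_t pid, int sig)
{
	/* sig == 0 detaches without injecting any signal. */
	return ptrace(PTRACE_DETACH, pid, NULL, (void *)(long)sig) == -1 ? -1 : 0;
}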
449 
450 /*
451  * Detach all tasks we were using ptrace on. Called with tasklist held
452  * for writing, and returns with it held too. But note it can release
453  * and reacquire the lock.
454  */
455 void exit_ptrace(struct task_struct *tracer)
456 	__releases(&tasklist_lock)
457 	__acquires(&tasklist_lock)
458 {
459 	struct task_struct *p, *n;
460 	LIST_HEAD(ptrace_dead);
461 
462 	if (likely(list_empty(&tracer->ptraced)))
463 		return;
464 
465 	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
466 		if (unlikely(p->ptrace & PT_EXITKILL))
467 			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);
468 
469 		if (__ptrace_detach(tracer, p))
470 			list_add(&p->ptrace_entry, &ptrace_dead);
471 	}
472 
473 	write_unlock_irq(&tasklist_lock);
474 	BUG_ON(!list_empty(&tracer->ptraced));
475 
476 	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
477 		list_del_init(&p->ptrace_entry);
478 		release_task(p);
479 	}
480 
481 	write_lock_irq(&tasklist_lock);
482 }
483 
484 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
485 {
486 	int copied = 0;
487 
488 	while (len > 0) {
489 		char buf[128];
490 		int this_len, retval;
491 
492 		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
493 		retval = access_process_vm(tsk, src, buf, this_len, 0);
494 		if (!retval) {
495 			if (copied)
496 				break;
497 			return -EIO;
498 		}
499 		if (copy_to_user(dst, buf, retval))
500 			return -EFAULT;
501 		copied += retval;
502 		src += retval;
503 		dst += retval;
504 		len -= retval;
505 	}
506 	return copied;
507 }
508 
509 int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
510 {
511 	int copied = 0;
512 
513 	while (len > 0) {
514 		char buf[128];
515 		int this_len, retval;
516 
517 		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
518 		if (copy_from_user(buf, src, this_len))
519 			return -EFAULT;
520 		retval = access_process_vm(tsk, dst, buf, this_len, 1);
521 		if (!retval) {
522 			if (copied)
523 				break;
524 			return -EIO;
525 		}
526 		copied += retval;
527 		src += retval;
528 		dst += retval;
529 		len -= retval;
530 	}
531 	return copied;
532 }
533 
534 static int ptrace_setoptions(struct task_struct *child, unsigned long data)
535 {
536 	unsigned flags;
537 
538 	if (data & ~(unsigned long)PTRACE_O_MASK)
539 		return -EINVAL;
540 
541 	/* Avoid intermediate state when all opts are cleared */
542 	flags = child->ptrace;
543 	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
544 	flags |= (data << PT_OPT_FLAG_SHIFT);
545 	child->ptrace = flags;
546 
547 	return 0;
548 }
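
/*
 * User-space view of the option bits handled above, for illustration:
 * the tracer passes PTRACE_O_* flags in data, and the kernel stores
 * them shifted by PT_OPT_FLAG_SHIFT into child->ptrace in a single
 * assignment.  Bits outside PTRACE_O_MASK are rejected with -EINVAL.
 * The tracee is assumed to be attached and stopped already.
 */
#include <sys/types.h>
#include <sys/ptrace.h>

static int enable_common_options(pid_t pid)
{
	long opts = PTRACE_O_TRACESYSGOOD |	/* mark syscall stops with 0x80 */
		    PTRACE_O_TRACEFORK |	/* report fork() events */
		    PTRACE_O_TRACEEXEC;		/* report execve() events */

	return ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)opts) == -1 ? -1 : 0;
}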
549 
550 static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
551 {
552 	unsigned long flags;
553 	int error = -ESRCH;
554 
555 	if (lock_task_sighand(child, &flags)) {
556 		error = -EINVAL;
557 		if (likely(child->last_siginfo != NULL)) {
558 			*info = *child->last_siginfo;
559 			error = 0;
560 		}
561 		unlock_task_sighand(child, &flags);
562 	}
563 	return error;
564 }
565 
566 static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
567 {
568 	unsigned long flags;
569 	int error = -ESRCH;
570 
571 	if (lock_task_sighand(child, &flags)) {
572 		error = -EINVAL;
573 		if (likely(child->last_siginfo != NULL)) {
574 			*child->last_siginfo = *info;
575 			error = 0;
576 		}
577 		unlock_task_sighand(child, &flags);
578 	}
579 	return error;
580 }
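
/*
 * User-space counterpart, for illustration: PTRACE_GETSIGINFO copies
 * the siginfo that caused the current stop, and fails with EINVAL when
 * the tracee is not in a stop that has a last_siginfo (see above).
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <signal.h>
#include <stdio.h>

static void show_stop_reason(pid_t pid)
{
	siginfo_t si;

	if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si) == 0)
		printf("stopped by signal %d (si_code %d)\n",
		       si.si_signo, si.si_code);
}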
581 
582 
583 #ifdef PTRACE_SINGLESTEP
584 #define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
585 #else
586 #define is_singlestep(request)		0
587 #endif
588 
589 #ifdef PTRACE_SINGLEBLOCK
590 #define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
591 #else
592 #define is_singleblock(request)		0
593 #endif
594 
595 #ifdef PTRACE_SYSEMU
596 #define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
597 #else
598 #define is_sysemu_singlestep(request)	0
599 #endif
600 
601 static int ptrace_resume(struct task_struct *child, long request,
602 			 unsigned long data)
603 {
604 	if (!valid_signal(data))
605 		return -EIO;
606 
607 	if (request == PTRACE_SYSCALL)
608 		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
609 	else
610 		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
611 
612 #ifdef TIF_SYSCALL_EMU
613 	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
614 		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
615 	else
616 		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
617 #endif
618 
619 	if (is_singleblock(request)) {
620 		if (unlikely(!arch_has_block_step()))
621 			return -EIO;
622 		user_enable_block_step(child);
623 	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
624 		if (unlikely(!arch_has_single_step()))
625 			return -EIO;
626 		user_enable_single_step(child);
627 	} else {
628 		user_disable_single_step(child);
629 	}
630 
631 	child->exit_code = data;
632 	wake_up_state(child, __TASK_TRACED);
633 
634 	return 0;
635 }
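
/*
 * Sketch of the usual resume loop driven from user space (illustration
 * only; assumes the tracee is attached, currently stopped, and that
 * PTRACE_O_TRACESYSGOOD is set so syscall stops report SIGTRAP | 0x80).
 * The data argument of each resume request is the signal to inject,
 * 0 for none.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>

static void trace_syscalls(pid_t pid)
{
	int status;

	if (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) == -1)
		return;
	while (waitpid(pid, &status, 0) == pid && WIFSTOPPED(status)) {
		int sig = WSTOPSIG(status);

		/* Suppress trace traps, forward every other signal. */
		if (sig == SIGTRAP || sig == (SIGTRAP | 0x80))
			sig = 0;
		if (ptrace(PTRACE_SYSCALL, pid, NULL,
			   (void *)(long)sig) == -1)
			break;
	}
}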
636 
637 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
638 
639 static const struct user_regset *
640 find_regset(const struct user_regset_view *view, unsigned int type)
641 {
642 	const struct user_regset *regset;
643 	int n;
644 
645 	for (n = 0; n < view->n; ++n) {
646 		regset = view->regsets + n;
647 		if (regset->core_note_type == type)
648 			return regset;
649 	}
650 
651 	return NULL;
652 }
653 
654 static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
655 			 struct iovec *kiov)
656 {
657 	const struct user_regset_view *view = task_user_regset_view(task);
658 	const struct user_regset *regset = find_regset(view, type);
659 	int regset_no;
660 
661 	if (!regset || (kiov->iov_len % regset->size) != 0)
662 		return -EINVAL;
663 
664 	regset_no = regset - view->regsets;
665 	kiov->iov_len = min(kiov->iov_len,
666 			    (__kernel_size_t) (regset->n * regset->size));
667 
668 	if (req == PTRACE_GETREGSET)
669 		return copy_regset_to_user(task, view, regset_no, 0,
670 					   kiov->iov_len, kiov->iov_base);
671 	else
672 		return copy_regset_from_user(task, view, regset_no, 0,
673 					     kiov->iov_len, kiov->iov_base);
674 }
675 
676 #endif
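
/*
 * User-space sketch of PTRACE_GETREGSET as handled by ptrace_regset():
 * the regset type (NT_PRSTATUS here) goes in addr and an iovec in data;
 * the kernel clamps iov_len to the regset size and reports the amount
 * actually filled in back through the iovec.  Illustration only;
 * struct user_regs_struct is the x86 layout from <sys/user.h>.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <elf.h>

static int read_gp_regs(pid_t pid, struct user_regs_struct *regs)
{
	struct iovec iov = {
		.iov_base = regs,
		.iov_len  = sizeof(*regs),
	};

	return ptrace(PTRACE_GETREGSET, pid,
		      (void *)NT_PRSTATUS, &iov) == -1 ? -1 : 0;
}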
677 
678 int ptrace_request(struct task_struct *child, long request,
679 		   unsigned long addr, unsigned long data)
680 {
681 	bool seized = child->ptrace & PT_SEIZED;
682 	int ret = -EIO;
683 	siginfo_t siginfo, *si;
684 	void __user *datavp = (void __user *) data;
685 	unsigned long __user *datalp = datavp;
686 	unsigned long flags;
687 
688 	switch (request) {
689 	case PTRACE_PEEKTEXT:
690 	case PTRACE_PEEKDATA:
691 		return generic_ptrace_peekdata(child, addr, data);
692 	case PTRACE_POKETEXT:
693 	case PTRACE_POKEDATA:
694 		return generic_ptrace_pokedata(child, addr, data);
695 
696 #ifdef PTRACE_OLDSETOPTIONS
697 	case PTRACE_OLDSETOPTIONS:
698 #endif
699 	case PTRACE_SETOPTIONS:
700 		ret = ptrace_setoptions(child, data);
701 		break;
702 	case PTRACE_GETEVENTMSG:
703 		ret = put_user(child->ptrace_message, datalp);
704 		break;
705 
706 	case PTRACE_GETSIGINFO:
707 		ret = ptrace_getsiginfo(child, &siginfo);
708 		if (!ret)
709 			ret = copy_siginfo_to_user(datavp, &siginfo);
710 		break;
711 
712 	case PTRACE_SETSIGINFO:
713 		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
714 			ret = -EFAULT;
715 		else
716 			ret = ptrace_setsiginfo(child, &siginfo);
717 		break;
718 
719 	case PTRACE_INTERRUPT:
720 		/*
721 		 * Stop tracee without any side-effect on signal or job
722 		 * control.  At least one trap is guaranteed to happen
723 		 * after this request.  If @child is already trapped, the
724 		 * current trap is not disturbed and another trap will
725 		 * happen after the current trap is ended with PTRACE_CONT.
726 		 *
727 		 * The actual trap might not be a PTRACE_EVENT_STOP trap, but
728 		 * the pending condition is cleared regardless.
729 		 */
730 		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
731 			break;
732 
733 		/*
734 		 * INTERRUPT doesn't disturb an existing trap, with one
735 		 * exception.  If the ptracer issued LISTEN for the current
736 		 * STOP, this INTERRUPT should clear LISTEN and re-trap
737 		 * tracee into STOP.
738 		 */
739 		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
740 			signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
741 
742 		unlock_task_sighand(child, &flags);
743 		ret = 0;
744 		break;
745 
746 	case PTRACE_LISTEN:
747 		/*
748 		 * Listen for events.  Tracee must be in STOP.  It's not
749 		 * resumed per se, but it is not considered to be in TRACED by
750 		 * wait(2) or ptrace(2).  If an async event (e.g. group
751 		 * stop state change) happens, tracee will enter STOP trap
752 		 * again.  Alternatively, ptracer can issue INTERRUPT to
753 		 * finish listening and re-trap tracee into STOP.
754 		 */
755 		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
756 			break;
757 
758 		si = child->last_siginfo;
759 		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
760 			child->jobctl |= JOBCTL_LISTENING;
761 			/*
762 			 * If NOTIFY is set, an event happened between the
763 			 * start of this trap and now.  Trigger a re-trap.
764 			 */
765 			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
766 				signal_wake_up(child, true);
767 			ret = 0;
768 		}
769 		unlock_task_sighand(child, &flags);
770 		break;
771 
772 	case PTRACE_DETACH:	 /* detach a process that was attached. */
773 		ret = ptrace_detach(child, data);
774 		break;
775 
776 #ifdef CONFIG_BINFMT_ELF_FDPIC
777 	case PTRACE_GETFDPIC: {
778 		struct mm_struct *mm = get_task_mm(child);
779 		unsigned long tmp = 0;
780 
781 		ret = -ESRCH;
782 		if (!mm)
783 			break;
784 
785 		switch (addr) {
786 		case PTRACE_GETFDPIC_EXEC:
787 			tmp = mm->context.exec_fdpic_loadmap;
788 			break;
789 		case PTRACE_GETFDPIC_INTERP:
790 			tmp = mm->context.interp_fdpic_loadmap;
791 			break;
792 		default:
793 			break;
794 		}
795 		mmput(mm);
796 
797 		ret = put_user(tmp, datalp);
798 		break;
799 	}
800 #endif
801 
802 #ifdef PTRACE_SINGLESTEP
803 	case PTRACE_SINGLESTEP:
804 #endif
805 #ifdef PTRACE_SINGLEBLOCK
806 	case PTRACE_SINGLEBLOCK:
807 #endif
808 #ifdef PTRACE_SYSEMU
809 	case PTRACE_SYSEMU:
810 	case PTRACE_SYSEMU_SINGLESTEP:
811 #endif
812 	case PTRACE_SYSCALL:
813 	case PTRACE_CONT:
814 		return ptrace_resume(child, request, data);
815 
816 	case PTRACE_KILL:
817 		if (child->exit_state)	/* already dead */
818 			return 0;
819 		return ptrace_resume(child, request, SIGKILL);
820 
821 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
822 	case PTRACE_GETREGSET:
823 	case PTRACE_SETREGSET:
824 	{
825 		struct iovec kiov;
826 		struct iovec __user *uiov = datavp;
827 
828 		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
829 			return -EFAULT;
830 
831 		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
832 		    __get_user(kiov.iov_len, &uiov->iov_len))
833 			return -EFAULT;
834 
835 		ret = ptrace_regset(child, request, addr, &kiov);
836 		if (!ret)
837 			ret = __put_user(kiov.iov_len, &uiov->iov_len);
838 		break;
839 	}
840 #endif
841 	default:
842 		break;
843 	}
844 
845 	return ret;
846 }
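
/*
 * Sketch of PTRACE_INTERRUPT from user space (illustration only; both
 * PTRACE_INTERRUPT and PTRACE_LISTEN are accepted only for tracees
 * attached with PTRACE_SEIZE, as checked above, and the example assumes
 * a libc that defines PTRACE_EVENT_STOP).  INTERRUPT forces a trap
 * reported as a PTRACE_EVENT_STOP; LISTEN then lets a group-stopped
 * tracee report async events without being resumed.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

static int stop_seized_tracee(pid_t pid)
{
	int status;

	if (ptrace(PTRACE_INTERRUPT, pid, NULL, NULL) == -1)
		return -1;
	if (waitpid(pid, &status, 0) != pid || !WIFSTOPPED(status))
		return -1;
	/* For the forced trap, the event code lives in status >> 16. */
	return (status >> 16) == PTRACE_EVENT_STOP ? 0 : -1;
}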
847 
848 static struct task_struct *ptrace_get_task_struct(pid_t pid)
849 {
850 	struct task_struct *child;
851 
852 	rcu_read_lock();
853 	child = find_task_by_vpid(pid);
854 	if (child)
855 		get_task_struct(child);
856 	rcu_read_unlock();
857 
858 	if (!child)
859 		return ERR_PTR(-ESRCH);
860 	return child;
861 }
862 
863 #ifndef arch_ptrace_attach
864 #define arch_ptrace_attach(child)	do { } while (0)
865 #endif
866 
867 SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
868 		unsigned long, data)
869 {
870 	struct task_struct *child;
871 	long ret;
872 
873 	if (request == PTRACE_TRACEME) {
874 		ret = ptrace_traceme();
875 		if (!ret)
876 			arch_ptrace_attach(current);
877 		goto out;
878 	}
879 
880 	child = ptrace_get_task_struct(pid);
881 	if (IS_ERR(child)) {
882 		ret = PTR_ERR(child);
883 		goto out;
884 	}
885 
886 	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
887 		ret = ptrace_attach(child, request, addr, data);
888 		/*
889 		 * Some architectures need to do book-keeping after
890 		 * a ptrace attach.
891 		 */
892 		if (!ret)
893 			arch_ptrace_attach(child);
894 		goto out_put_task_struct;
895 	}
896 
897 	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
898 				  request == PTRACE_INTERRUPT);
899 	if (ret < 0)
900 		goto out_put_task_struct;
901 
902 	ret = arch_ptrace(child, request, addr, data);
903 
904  out_put_task_struct:
905 	put_task_struct(child);
906  out:
907 	return ret;
908 }
909 
910 int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
911 			    unsigned long data)
912 {
913 	unsigned long tmp;
914 	int copied;
915 
916 	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
917 	if (copied != sizeof(tmp))
918 		return -EIO;
919 	return put_user(tmp, (unsigned long __user *)data);
920 }
921 
922 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
923 			    unsigned long data)
924 {
925 	int copied;
926 
927 	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
928 	return (copied == sizeof(data)) ? 0 : -EIO;
929 }
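
/*
 * The two helpers above back PTRACE_PEEKDATA/PTRACE_POKEDATA.  A short
 * user-space sketch; note that PEEKDATA returns the word itself, so a
 * result of -1 is ambiguous and errno has to be checked explicitly.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <errno.h>

static int copy_remote_word(pid_t pid, void *addr, long *out)
{
	errno = 0;
	*out = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
	if (*out == -1 && errno)
		return -1;
	/* Write the same word back: a no-op round trip. */
	return ptrace(PTRACE_POKEDATA, pid, addr, (void *)*out) == -1 ? -1 : 0;
}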
930 
931 #if defined CONFIG_COMPAT
932 #include <linux/compat.h>
933 
934 int compat_ptrace_request(struct task_struct *child, compat_long_t request,
935 			  compat_ulong_t addr, compat_ulong_t data)
936 {
937 	compat_ulong_t __user *datap = compat_ptr(data);
938 	compat_ulong_t word;
939 	siginfo_t siginfo;
940 	int ret;
941 
942 	switch (request) {
943 	case PTRACE_PEEKTEXT:
944 	case PTRACE_PEEKDATA:
945 		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
946 		if (ret != sizeof(word))
947 			ret = -EIO;
948 		else
949 			ret = put_user(word, datap);
950 		break;
951 
952 	case PTRACE_POKETEXT:
953 	case PTRACE_POKEDATA:
954 		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
955 		ret = (ret != sizeof(data) ? -EIO : 0);
956 		break;
957 
958 	case PTRACE_GETEVENTMSG:
959 		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
960 		break;
961 
962 	case PTRACE_GETSIGINFO:
963 		ret = ptrace_getsiginfo(child, &siginfo);
964 		if (!ret)
965 			ret = copy_siginfo_to_user32(
966 				(struct compat_siginfo __user *) datap,
967 				&siginfo);
968 		break;
969 
970 	case PTRACE_SETSIGINFO:
971 		memset(&siginfo, 0, sizeof siginfo);
972 		if (copy_siginfo_from_user32(
973 			    &siginfo, (struct compat_siginfo __user *) datap))
974 			ret = -EFAULT;
975 		else
976 			ret = ptrace_setsiginfo(child, &siginfo);
977 		break;
978 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
979 	case PTRACE_GETREGSET:
980 	case PTRACE_SETREGSET:
981 	{
982 		struct iovec kiov;
983 		struct compat_iovec __user *uiov =
984 			(struct compat_iovec __user *) datap;
985 		compat_uptr_t ptr;
986 		compat_size_t len;
987 
988 		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
989 			return -EFAULT;
990 
991 		if (__get_user(ptr, &uiov->iov_base) ||
992 		    __get_user(len, &uiov->iov_len))
993 			return -EFAULT;
994 
995 		kiov.iov_base = compat_ptr(ptr);
996 		kiov.iov_len = len;
997 
998 		ret = ptrace_regset(child, request, addr, &kiov);
999 		if (!ret)
1000 			ret = __put_user(kiov.iov_len, &uiov->iov_len);
1001 		break;
1002 	}
1003 #endif
1004 
1005 	default:
1006 		ret = ptrace_request(child, request, addr, data);
1007 	}
1008 
1009 	return ret;
1010 }
1011 
1012 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
1013 				  compat_long_t addr, compat_long_t data)
1014 {
1015 	struct task_struct *child;
1016 	long ret;
1017 
1018 	if (request == PTRACE_TRACEME) {
1019 		ret = ptrace_traceme();
1020 		goto out;
1021 	}
1022 
1023 	child = ptrace_get_task_struct(pid);
1024 	if (IS_ERR(child)) {
1025 		ret = PTR_ERR(child);
1026 		goto out;
1027 	}
1028 
1029 	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
1030 		ret = ptrace_attach(child, request, addr, data);
1031 		/*
1032 		 * Some architectures need to do book-keeping after
1033 		 * a ptrace attach.
1034 		 */
1035 		if (!ret)
1036 			arch_ptrace_attach(child);
1037 		goto out_put_task_struct;
1038 	}
1039 
1040 	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1041 				  request == PTRACE_INTERRUPT);
1042 	if (!ret)
1043 		ret = compat_arch_ptrace(child, request, addr, data);
1044 
1045  out_put_task_struct:
1046 	put_task_struct(child);
1047  out:
1048 	return ret;
1049 }
1050 #endif	/* CONFIG_COMPAT */
1051 
1052 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1053 int ptrace_get_breakpoints(struct task_struct *tsk)
1054 {
1055 	if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt))
1056 		return 0;
1057 
1058 	return -1;
1059 }
1060 
1061 void ptrace_put_breakpoints(struct task_struct *tsk)
1062 {
1063 	if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt))
1064 		flush_ptrace_hw_breakpoint(tsk);
1065 }
1066 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1067