/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_list));
	if (child->parent == new_parent)
		return;
	list_add(&child->ptrace_list, &child->parent->ptrace_children);
	remove_parent(child);
	child->parent = new_parent;
	add_parent(child);
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL.  If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
void ptrace_untrace(struct task_struct *child)
{
	spin_lock(&child->sighand->siglock);
	if (task_is_traced(child)) {
		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
			__set_task_state(child, TASK_STOPPED);
		} else {
			signal_wake_up(child, 1);
		}
	}
	spin_unlock(&child->sighand->siglock);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	if (ptrace_reparented(child)) {
		list_del_init(&child->ptrace_list);
		remove_parent(child);
		child->parent = child->real_parent;
		add_parent(child);
	}

	if (task_is_traced(child))
		ptrace_untrace(child);
}

/*
 * Check that current has indeed attached to the child.  Unless @kill is
 * set, also require the child to be in (or moved into) TASK_TRACED and
 * wait for it to go fully inactive, so the caller can safely operate on it.
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
		ret = 0;
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		spin_lock_irq(&child->sighand->siglock);
		if (task_is_stopped(child))
			child->state = TASK_TRACED;
		else if (!task_is_traced(child) && !kill)
			ret = -ESRCH;
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!ret && !kill)
		wait_task_inactive(child);

	/* All systems go.. */
	return ret;
}

int __ptrace_may_attach(struct task_struct *task)
{
	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */
	int dumpable = 0;
	/* Don't let security modules deny introspection */
	if (task == current)
		return 0;
	if (((current->uid != task->euid) ||
	     (current->uid != task->suid) ||
	     (current->uid != task->uid) ||
	     (current->gid != task->egid) ||
	     (current->gid != task->sgid) ||
	     (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
		return -EPERM;
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	if (!dumpable && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	return security_ptrace(current, task);
}

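/*
 * Convenience wrapper around __ptrace_may_attach() that takes and drops
 * task_lock() itself.  Note the inverted return convention: non-zero if
 * access is allowed, zero if it is denied.
 */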
int ptrace_may_attach(struct task_struct *task)
{
	int err;
	task_lock(task);
	err = __ptrace_may_attach(task);
	task_unlock(task);
	return !err;
}

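/*
 * Attach to @task on behalf of current: refuse kernel threads and our own
 * thread group, check permissions, mark the task PT_PTRACED, reparent it
 * to the tracer via __ptrace_link() and queue a SIGSTOP so it stops for
 * the debugger.  Returns 0 or a negative errno.
 */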
int ptrace_attach(struct task_struct *task)
{
	int retval;
	unsigned long flags;

	audit_ptrace(task);

	retval = -EPERM;
	if (same_thread_group(task, current))
		goto out;

repeat:
	/*
	 * Nasty, nasty.
	 *
	 * We want to hold both the task-lock and the
	 * tasklist_lock for writing at the same time.
	 * But that's against the rules (tasklist_lock
	 * is taken for reading by interrupts on other
	 * cpu's that may have task_lock).
	 */
	task_lock(task);
	if (!write_trylock_irqsave(&tasklist_lock, flags)) {
		task_unlock(task);
		do {
			cpu_relax();
		} while (!write_can_lock(&tasklist_lock));
		goto repeat;
	}

	if (!task->mm)
		goto bad;
	/* the same process cannot be attached many times */
	if (task->ptrace & PT_PTRACED)
		goto bad;
	retval = __ptrace_may_attach(task);
	if (retval)
		goto bad;

	/* Go */
	task->ptrace |= PT_PTRACED;
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);

	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
bad:
	write_unlock_irqrestore(&tasklist_lock, flags);
	task_unlock(task);
out:
	return retval;
}

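/*
 * Core of detach: stash the signal the tracer wants delivered in
 * ->exit_code, give the child back to its real parent via
 * __ptrace_unlink() and, unless it is already a zombie, wake it so it
 * can act on the new state.  Caller must hold tasklist_lock for writing.
 */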
static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
{
	child->exit_code = data;
	/* .. re-parent .. */
	__ptrace_unlink(child);
	/* .. and wake it up. */
	if (child->exit_state != EXIT_ZOMBIE)
		wake_up_process(child);
}

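/*
 * PTRACE_DETACH: validate the signal to deliver, undo any architecture
 * specific single-step/syscall-trace state and then unlink the child
 * under tasklist_lock (which also protects against a concurrent
 * de_thread()->release_task()).
 */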
int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/* protect against de_thread()->release_task() */
	if (child->ptrace)
		__ptrace_detach(child, data);
	write_unlock_irq(&tasklist_lock);

	return 0;
}

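/*
 * Copy @len bytes from the traced task's address space at @src to the
 * tracer's buffer @dst, in chunks of up to 128 bytes.  Returns the number
 * of bytes copied, -EIO if nothing could be read, or -EFAULT if the
 * destination buffer is unwritable.
 */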
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

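/*
 * Mirror image of ptrace_readdata(): copy @len bytes from the tracer's
 * buffer @src into the traced task's address space at @dst.  Returns the
 * number of bytes written, or -EIO/-EFAULT on failure.
 */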
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

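/*
 * PTRACE_SETOPTIONS: translate the PTRACE_O_* bits supplied by the tracer
 * into the corresponding PT_* flags in child->ptrace.  Any bit outside
 * PTRACE_O_MASK makes the request fail with -EINVAL.
 */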
static int ptrace_setoptions(struct task_struct *child, long data)
{
	child->ptrace &= ~PT_TRACE_MASK;

	if (data & PTRACE_O_TRACESYSGOOD)
		child->ptrace |= PT_TRACESYSGOOD;

	if (data & PTRACE_O_TRACEFORK)
		child->ptrace |= PT_TRACE_FORK;

	if (data & PTRACE_O_TRACEVFORK)
		child->ptrace |= PT_TRACE_VFORK;

	if (data & PTRACE_O_TRACECLONE)
		child->ptrace |= PT_TRACE_CLONE;

	if (data & PTRACE_O_TRACEEXEC)
		child->ptrace |= PT_TRACE_EXEC;

	if (data & PTRACE_O_TRACEVFORKDONE)
		child->ptrace |= PT_TRACE_VFORK_DONE;

	if (data & PTRACE_O_TRACEEXIT)
		child->ptrace |= PT_TRACE_EXIT;

	return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}

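/*
 * PTRACE_GETSIGINFO helper: copy the siginfo that caused the current
 * ptrace stop (child->last_siginfo) into @info.  Fails with -EINVAL if
 * the child is not currently in such a stop.
 */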
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	int error = -ESRCH;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	return error;
}

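/*
 * PTRACE_SETSIGINFO helper: overwrite child->last_siginfo with @info so
 * that a different (or modified) signal is reported when the child
 * resumes.  Fails with -EINVAL if the child is not in a ptrace stop.
 */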
static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	int error = -ESRCH;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	return error;
}

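/*
 * Not every architecture implements the optional single-step, block-step
 * and syscall-emulation requests.  These helpers evaluate to 0 when the
 * corresponding PTRACE_* request is not defined, so ptrace_resume() can
 * test them unconditionally.
 */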
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

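/*
 * Common code for PTRACE_CONT, PTRACE_SYSCALL, PTRACE_KILL and the
 * optional stepping/sysemu requests: set or clear the syscall-trace and
 * syscall-emulation flags, program the hardware stepping state, record
 * the signal to deliver in ->exit_code and wake the child up.
 */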
static int ptrace_resume(struct task_struct *child, long request, long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	child->exit_code = data;
	wake_up_process(child);

	return 0;
}

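/*
 * Handle the ptrace requests that are common to all architectures.
 * arch_ptrace() normally falls back to this for anything it does not
 * implement itself.
 */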
int ptrace_request(struct task_struct *child, long request,
		   long addr, long data)
{
	int ret = -EIO;
	siginfo_t siginfo;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, (unsigned long __user *) data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user((siginfo_t __user *) data,
						   &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, (siginfo_t __user *) data,
				   sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

	default:
		break;
	}

	return ret;
}

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
int ptrace_traceme(void)
{
	int ret = -EPERM;

	/*
	 * Are we already being traced?
	 */
	task_lock(current);
	if (!(current->ptrace & PT_PTRACED)) {
		ret = security_ptrace(current->parent, current);
		/*
		 * Set the ptrace bit in the process ptrace flags.
		 */
		if (!ret)
			current->ptrace |= PT_PTRACED;
	}
	task_unlock(current);
	return ret;
}

/**
 * ptrace_get_task_struct  --  grab a task struct reference for ptrace
 * @pid:       process id to grab a task_struct reference of
 *
 * This function is a helper for ptrace implementations.  It looks up the
 * task by its virtual pid and takes a reference on the task_struct for
 * use by the actual ptrace implementation; no permission checking is
 * done here.
 *
 * Returns the task_struct for @pid or an ERR_PTR() on failure.
 */
struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	read_lock(&tasklist_lock);
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);

	read_unlock(&tasklist_lock);
	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

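/*
 * The ptrace(2) system call.  PTRACE_TRACEME and PTRACE_ATTACH are handled
 * here directly; every other request is checked with ptrace_check_attach()
 * and then passed to the architecture's arch_ptrace().
 */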
asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	long ret;

	/*
	 * This lock_kernel fixes a subtle race with suid exec
	 */
	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret < 0)
		goto out_put_task_struct;

 out_put_task_struct:
	put_task_struct(child);
 out:
	unlock_kernel();
	return ret;
}

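/*
 * Default PTRACE_PEEKTEXT/PEEKDATA implementation: read one word from the
 * child at @addr and store it at the user address @data.
 */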
int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
{
	unsigned long tmp;
	int copied;

	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

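/*
 * Default PTRACE_POKETEXT/POKEDATA implementation: write the word @data
 * into the child at @addr.
 */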
int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
{
	int copied;

	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
	return (copied == sizeof(data)) ? 0 : -EIO;
}

#if defined CONFIG_COMPAT && defined __ARCH_WANT_COMPAT_SYS_PTRACE
#include <linux/compat.h>

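/*
 * Compat counterpart of ptrace_request() for 32-bit tracers on 64-bit
 * kernels: peeks and pokes operate on compat-sized words and siginfo is
 * converted with the compat helpers; everything else is forwarded to
 * ptrace_request().
 */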
int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

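/*
 * Compat entry point for ptrace(2), mirroring sys_ptrace() above but
 * dispatching to compat_arch_ptrace() for the architecture-specific
 * requests.
 */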
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	/*
	 * This lock_kernel fixes a subtle race with suid exec
	 */
	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (!ret)
		ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	unlock_kernel();
	return ret;
}
#endif	/* CONFIG_COMPAT && __ARCH_WANT_COMPAT_SYS_PTRACE */