xref: /openbmc/linux/include/linux/ptrace.h (revision fdd12a80)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H

#include <linux/compiler.h>		/* For unlikely.  */
#include <linux/sched.h>		/* For struct task_struct.  */
#include <linux/sched/signal.h>		/* For send_sig(), same_thread_group(), etc. */
#include <linux/err.h>			/* For IS_ERR_VALUE.  */
#include <linux/bug.h>			/* For BUG_ON.  */
#include <linux/pid_namespace.h>	/* For task_active_pid_ns.  */
#include <uapi/linux/ptrace.h>
#include <linux/seccomp.h>

/* Add sp to seccomp_data; seccomp is a user API, so we don't want to modify it. */
struct syscall_info {
	__u64			sp;
	struct seccomp_data	data;
};

extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
			    void *buf, int len, unsigned int gup_flags);

/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace flags,
 * are simple.  When a task is running it owns its task->ptrace flags.
 * When a task is stopped, the ptracer owns task->ptrace.
 */

#define PT_SEIZED	0x00010000	/* SEIZE used, enable new behavior */
#define PT_PTRACED	0x00000001

#define PT_OPT_FLAG_SHIFT	3
/* PT_TRACE_* event enable flags */
#define PT_EVENT_FLAG(event)	(1 << (PT_OPT_FLAG_SHIFT + (event)))
#define PT_TRACESYSGOOD		PT_EVENT_FLAG(0)
#define PT_TRACE_FORK		PT_EVENT_FLAG(PTRACE_EVENT_FORK)
#define PT_TRACE_VFORK		PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
#define PT_TRACE_CLONE		PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
#define PT_TRACE_EXEC		PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
#define PT_TRACE_VFORK_DONE	PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
#define PT_TRACE_EXIT		PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
#define PT_TRACE_SECCOMP	PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)

#define PT_EXITKILL		(PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
#define PT_SUSPEND_SECCOMP	(PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
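
/*
 * Illustrative sketch: each PTRACE_O_* option bit is 1 << PTRACE_EVENT_*,
 * so shifting a user-supplied option mask by PT_OPT_FLAG_SHIFT yields the
 * PT_* event flags above, which is roughly what PTRACE_SETOPTIONS handling
 * does.  The local variables here are hypothetical.
 *
 *	unsigned long options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEEXIT;
 *	unsigned long flags = options << PT_OPT_FLAG_SHIFT;
 *
 *	// flags == PT_TRACE_FORK | PT_TRACE_EXIT, because
 *	// PT_TRACE_FORK == PT_EVENT_FLAG(PTRACE_EVENT_FORK)
 *	//               == 1 << (PT_OPT_FLAG_SHIFT + PTRACE_EVENT_FORK).
 */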

extern long arch_ptrace(struct task_struct *child, long request,
			unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_request(struct task_struct *child, long request,
			  unsigned long addr, unsigned long data);
extern int ptrace_notify(int exit_code, unsigned long message);
extern void __ptrace_link(struct task_struct *child,
			  struct task_struct *new_parent,
			  const struct cred *ptracer_cred);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ	0x01
#define PTRACE_MODE_ATTACH	0x02
#define PTRACE_MODE_NOAUDIT	0x04
#define PTRACE_MODE_FSCREDS	0x08
#define PTRACE_MODE_REALCREDS	0x10

/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)

/**
 * ptrace_may_access - check whether the caller is permitted to access
 * a target task.
 * @task: target task
 * @mode: selects type of access and caller credentials
 *
 * Returns true on success, false on denial.
 *
 * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
 * be set in @mode to specify whether the access was requested through
 * a filesystem syscall (should use effective capabilities and fsuid
 * of the caller) or through an explicit syscall such as
 * process_vm_writev or ptrace (and should use the real credentials).
 */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
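
/*
 * Illustrative sketch: a /proc-style read handler checks with filesystem
 * credentials, while an explicit syscall such as process_vm_readv() checks
 * with real credentials.  Error handling is abbreviated and the surrounding
 * callers are hypothetical.
 *
 *	// filesystem access path, e.g. a /proc/<pid>/ file
 *	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 *		return -EACCES;
 *
 *	// explicit syscall path, e.g. process_vm_readv()
 *	if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
 *		return -EPERM;
 */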

static inline int ptrace_reparented(struct task_struct *child)
{
	return !same_thread_group(child->real_parent, child->parent);
}

static inline void ptrace_unlink(struct task_struct *child)
{
	if (unlikely(child->ptrace))
		__ptrace_unlink(child);
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);

/**
 * ptrace_parent - return the task that is tracing the given task
 * @task: task to consider
 *
 * Returns %NULL if no one is tracing @task, or the &struct task_struct
 * pointer to its tracer.
 *
 * Must be called under rcu_read_lock().  The pointer returned might be kept
 * alive only by RCU.  During exec, this may be called with task_lock() held
 * on @task, still held from when check_unsafe_exec() was called.
 */
static inline struct task_struct *ptrace_parent(struct task_struct *task)
{
	if (unlikely(task->ptrace))
		return rcu_dereference(task->parent);
	return NULL;
}
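
/*
 * Illustrative sketch: reading the tracer's pid under RCU.  The local
 * variables are hypothetical, and the returned pointer is only valid
 * inside the RCU read-side section.
 *
 *	pid_t tracer_pid = 0;
 *	struct task_struct *tracer;
 *
 *	rcu_read_lock();
 *	tracer = ptrace_parent(task);
 *	if (tracer)
 *		tracer_pid = task_pid_nr(tracer);
 *	rcu_read_unlock();
 */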

/**
 * ptrace_event_enabled - test whether a ptrace event is enabled
 * @task: ptracee of interest
 * @event: %PTRACE_EVENT_* to test
 *
 * Test whether @event is enabled for ptracee @task.
 *
 * Returns %true if @event is enabled, %false otherwise.
 */
static inline bool ptrace_event_enabled(struct task_struct *task, int event)
{
	return task->ptrace & PT_EVENT_FLAG(event);
}

/**
 * ptrace_event - possibly stop for a ptrace event notification
 * @event:	%PTRACE_EVENT_* value to report
 * @message:	value for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @message
 * to the ptrace parent.
 *
 * Called without locks.
 */
static inline void ptrace_event(int event, unsigned long message)
{
	if (unlikely(ptrace_event_enabled(current, event))) {
		ptrace_notify((event << 8) | SIGTRAP, message);
	} else if (event == PTRACE_EVENT_EXEC) {
		/* legacy EXEC report via SIGTRAP */
		if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
			send_sig(SIGTRAP, current, 0);
	}
}

/**
 * ptrace_event_pid - possibly stop for a ptrace event notification
 * @event:	%PTRACE_EVENT_* value to report
 * @pid:	process identifier for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @pid
 * to the ptrace parent.  @pid is reported as the pid_t seen from the
 * ptrace parent's pid namespace.
 *
 * Called without locks.
 */
static inline void ptrace_event_pid(int event, struct pid *pid)
{
	/*
	 * FIXME: There's a potential race if a ptracer in a different pid
	 * namespace than parent attaches between computing message below and
	 * when we acquire tasklist_lock in ptrace_stop().  If this happens,
	 * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG.
	 */
	unsigned long message = 0;
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(rcu_dereference(current->parent));
	if (ns)
		message = pid_nr_ns(pid, ns);
	rcu_read_unlock();

	ptrace_event(event, message);
}
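
/*
 * Illustrative sketch: after a successful fork, the parent reports the
 * child's pid to its tracer roughly like this; the child variable is
 * hypothetical and error paths are omitted.
 *
 *	struct pid *child_pid = get_task_pid(child, PIDTYPE_PID);
 *
 *	ptrace_event_pid(PTRACE_EVENT_FORK, child_pid);
 *	put_pid(child_pid);
 */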

/**
 * ptrace_init_task - initialize ptrace state for a new child
 * @child:		new child task
 * @ptrace:		true if child should be ptrace'd by parent's tracer
 *
 * This is called immediately after adding @child to its parent's children
 * list.  @ptrace is false in the normal case, and true to ptrace @child.
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
	INIT_LIST_HEAD(&child->ptrace_entry);
	INIT_LIST_HEAD(&child->ptraced);
	child->jobctl = 0;
	child->ptrace = 0;
	child->parent = child->real_parent;

	if (unlikely(ptrace) && current->ptrace) {
		child->ptrace = current->ptrace;
		__ptrace_link(child, current->parent, current->ptracer_cred);

		if (child->ptrace & PT_SEIZED)
			task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
		else
			sigaddset(&child->pending.signal, SIGSTOP);
	} else {
		child->ptracer_cred = NULL;
	}
}
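
/*
 * Illustrative sketch: fork-time usage, loosely following copy_process().
 * The clone_flags, p and trace variables are simplified stand-ins for the
 * real locals; trace is nonzero when the parent's tracer asked to trace
 * the new child.
 *
 *	ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
 */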

/**
 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
 * @task:	task in %EXIT_DEAD state
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static inline void ptrace_release_task(struct task_struct *task)
{
	BUG_ON(!list_empty(&task->ptraced));
	ptrace_unlink(task);
	BUG_ON(!list_empty(&task->ptrace_entry));
}

#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not get
 * set.  On architectures which do not support a separate error flag, the macro
 * is a no-op and the spurious error condition needs to be filtered out by some
 * other means (e.g., at user level, by passing an extra argument to the
 * syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif
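
/*
 * Illustrative sketch: a syscall whose successful result can legitimately
 * look like a negative errno value tells the arch layer not to set the
 * error flag.  The syscall and its helpers below are hypothetical.
 *
 *	SYSCALL_DEFINE1(example_cookie, int, idx)
 *	{
 *		long cookie = lookup_cookie(idx);	// may legitimately be negative
 *
 *		if (!is_valid_cookie(cookie))
 *			return -EINVAL;
 *
 *		force_successful_syscall_return();
 *		return cookie;
 *	}
 */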

#ifndef is_syscall_success
/*
 * On most systems we can tell whether a syscall succeeded based on whether
 * the return value is an error value.  Some systems, such as ia64 and
 * powerpc, have different indicators of success/failure and must define
 * their own.
 */
#define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs))))
#endif
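
/*
 * Illustrative sketch: audit-style exit code that records whether the
 * syscall succeeded and what it returned.  The function name and the
 * record_syscall_result() sink are hypothetical.
 *
 *	void example_audit_syscall_exit(struct pt_regs *regs)
 *	{
 *		int success = is_syscall_success(regs);
 *		long retval = regs_return_value(regs);
 *
 *		record_syscall_result(success, retval);
 *	}
 */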

/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */

#ifndef arch_has_single_step
/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step()		(0)

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
	BUG();			/* This can never be called.  */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}
#else
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif	/* arch_has_single_step */

#ifndef arch_has_block_step
/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step()		(0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
	BUG();			/* This can never be called.  */
}
#else
extern void user_enable_block_step(struct task_struct *);
#endif	/* arch_has_block_step */
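
/*
 * Illustrative sketch: how a generic ptrace resume path chooses a stepping
 * mode before letting the tracee run, loosely following ptrace_resume() in
 * kernel/ptrace.c; the request checks are simplified and the local names
 * are stand-ins.
 *
 *	if (request == PTRACE_SINGLEBLOCK) {
 *		if (unlikely(!arch_has_block_step()))
 *			return -EIO;
 *		user_enable_block_step(child);
 *	} else if (request == PTRACE_SINGLESTEP) {
 *		if (unlikely(!arch_has_single_step()))
 *			return -EIO;
 *		user_enable_single_step(child);
 *	} else {
 *		user_disable_single_step(child);
 *	}
 */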

#ifdef ARCH_HAS_USER_SINGLE_STEP_REPORT
extern void user_single_step_report(struct pt_regs *regs);
#else
static inline void user_single_step_report(struct pt_regs *regs)
{
	kernel_siginfo_t info;
	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
#endif

#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop().  It can be
 * defined to a constant if arch_ptrace_stop() is never required, or always
 * is.  On machines where this makes sense, it should be defined to a quick
 * test to optimize out calling arch_ptrace_stop() when it would be
 * superfluous.  For example, if the thread has not been back to user mode
 * since the last stop, the thread state might indicate that nothing needs
 * to be done.
 *
 * This is guaranteed to be invoked once before a task stops for ptrace and
 * may include arch-specific operations necessary prior to a ptrace stop.
 */
#define arch_ptrace_stop_needed()	(0)
#endif

#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop()		do { } while (0)
#endif
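
/*
 * Illustrative sketch: the pattern ptrace_stop() in kernel/signal.c follows,
 * in simplified form.  The siglock is dropped around arch_ptrace_stop()
 * because that hook is allowed to block.
 *
 *	if (arch_ptrace_stop_needed()) {
 *		spin_unlock_irq(&current->sighand->siglock);
 *		arch_ptrace_stop();
 *		spin_lock_irq(&current->sighand->siglock);
 *	}
 */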

#ifndef current_pt_regs
#define current_pt_regs() task_pt_regs(current)
#endif

#ifndef current_user_stack_pointer
#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
#endif

#ifndef exception_ip
#define exception_ip(x) instruction_pointer(x)
#endif

extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
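
/*
 * Illustrative sketch: sampling which syscall a (typically stopped) task is
 * blocked in, the way /proc/<pid>/syscall does.  The task variable is
 * hypothetical and error handling is abbreviated.
 *
 *	struct syscall_info info;
 *
 *	if (task_current_syscall(task, &info) == 0)
 *		pr_info("syscall %d, sp 0x%llx\n", info.data.nr,
 *			(unsigned long long)info.sp);
 */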

extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);

/*
 * The ptrace report for syscall entry and exit looks identical.
 */
static inline int ptrace_report_syscall(unsigned long message)
{
	int ptrace = current->ptrace;
	int signr;

	if (!(ptrace & PT_PTRACED))
		return 0;

	signr = ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0),
			      message);

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (signr)
		send_sig(signr, current, 1);

	return fatal_signal_pending(current);
}

/**
 * ptrace_report_syscall_entry - task is about to attempt a system call
 * @regs:		user register state of current task
 *
 * This will be called if %SYSCALL_WORK_SYSCALL_TRACE or
 * %SYSCALL_WORK_SYSCALL_EMU have been set, when the current task has just
 * entered the kernel for a system call.  Full user register state is
 * available here.  Changing the values in @regs can affect the system
 * call number and arguments to be tried.  It is safe to block here,
 * preventing the system call from beginning.
 *
 * Returns zero normally, or nonzero if the calling arch code should abort
 * the system call.  That must prevent normal entry so no system call is
 * made.  If the current task ever returns to user mode after this, its register state
 * is unspecified, but should be something harmless like an %ENOSYS error
 * return.  It should preserve enough information so that syscall_rollback()
 * can work (see asm-generic/syscall.h).
 *
 * Called without locks, just after entering kernel mode.
 */
static inline __must_check int ptrace_report_syscall_entry(
	struct pt_regs *regs)
{
	return ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_ENTRY);
}

/**
 * ptrace_report_syscall_exit - task has just finished a system call
 * @regs:		user register state of current task
 * @step:		nonzero if simulating single-step or block-step
 *
 * This will be called if %SYSCALL_WORK_SYSCALL_TRACE has been set, when
 * the current task has just finished an attempted system call.  Full
 * user register state is available here.  It is safe to block here,
 * preventing signals from being processed.
 *
 * If @step is nonzero, this report is also in lieu of the normal
 * trap that would follow the system call instruction because
 * user_enable_block_step() or user_enable_single_step() was used.
 * In this case, %SYSCALL_WORK_SYSCALL_TRACE might not be set.
 *
 * Called without locks, just before checking for pending signals.
 */
static inline void ptrace_report_syscall_exit(struct pt_regs *regs, int step)
{
	if (step)
		user_single_step_report(regs);
	else
		ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_EXIT);
}
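
/*
 * Illustrative sketch: how a generic syscall entry/exit path wires these
 * reports in, loosely following kernel/entry/common.c; the work-flag tests
 * and the step variable are simplified.
 *
 *	// on entry, before dispatching the syscall
 *	if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
 *		if (ptrace_report_syscall_entry(regs))
 *			return -1L;	// tracer asked to skip the syscall
 *	}
 *
 *	// on exit, before returning to user mode
 *	if (work & SYSCALL_WORK_SYSCALL_TRACE)
 *		ptrace_report_syscall_exit(regs, step);
 */
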
#endif