#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H

#include <linux/compiler.h>	/* For __user */
#include <asm/ptrace-abi.h>
#include <asm/processor-flags.h>

#ifdef __KERNEL__
#include <asm/segment.h>
#include <asm/page_types.h>
#endif

#ifndef __ASSEMBLY__

#ifdef __i386__
/*
 * This struct defines the way the registers are stored on the
 * stack during a system call.
 */

#ifndef __KERNEL__

struct pt_regs {
	long ebx;
	long ecx;
	long edx;
	long esi;
	long edi;
	long ebp;
	long eax;
	int  xds;
	int  xes;
	int  xfs;
	int  xgs;
	long orig_eax;
	long eip;
	int  xcs;
	long eflags;
	long esp;
	int  xss;
};
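
/*
 * Illustrative sketch (not part of this header): a user-space tracer could
 * read this layout after a syscall stop.  The helper name and the direct use
 * of struct pt_regs are assumptions for the example, and the child is assumed
 * to be traced already; portable code usually uses struct user_regs_struct
 * from <sys/user.h>, whose i386 layout mirrors the struct above.
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <stdio.h>
 *
 *	static void dump_syscall_regs(pid_t child)
 *	{
 *		struct pt_regs regs;
 *
 *		waitpid(child, NULL, 0);
 *		ptrace(PTRACE_GETREGS, child, NULL, &regs);
 *		printf("nr=%ld arg0=%ld eip=%#lx\n",
 *		       regs.orig_eax, regs.ebx, regs.eip);
 *	}
 */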

#else /* __KERNEL__ */

struct pt_regs {
	unsigned long bx;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long bp;
	unsigned long ax;
	unsigned long ds;
	unsigned long es;
	unsigned long fs;
	unsigned long gs;
	unsigned long orig_ax;
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
};

#endif /* __KERNEL__ */

#else /* __i386__ */

#ifndef __KERNEL__

struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long rbp;
	unsigned long rbx;
/* arguments: non-interrupt/non-traced syscalls only save up to here */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long rax;
	unsigned long rcx;
	unsigned long rdx;
	unsigned long rsi;
	unsigned long rdi;
	unsigned long orig_rax;
/* end of arguments */
/* cpu exception frame or undefined */
	unsigned long rip;
	unsigned long cs;
	unsigned long eflags;
	unsigned long rsp;
	unsigned long ss;
/* top of stack page */
};

#else /* __KERNEL__ */

struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long bp;
	unsigned long bx;
/* arguments: non-interrupt/non-traced syscalls only save up to here */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long ax;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long orig_ax;
/* end of arguments */
/* cpu exception frame or undefined */
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
/* top of stack page */
};

#endif /* __KERNEL__ */
#endif /* !__i386__ */


#ifdef __KERNEL__

#include <linux/init.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt_types.h>
#endif

struct cpuinfo_x86;
struct task_struct;

extern unsigned long profile_pc(struct pt_regs *regs);
#define profile_pc profile_pc

extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
			 int error_code, int si_code);
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);

extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);

static inline unsigned long regs_return_value(struct pt_regs *regs)
{
	return regs->ax;
}
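
/*
 * Illustrative sketch: regs_return_value() is handy in return probes.  The
 * kretprobe handler below is a hypothetical example, not code from this
 * file; it assumes a kretprobe has been registered elsewhere.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		long ret = regs_return_value(regs);
 *
 *		pr_info("probed function returned %ld\n", ret);
 *		return 0;
 *	}
 */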

/*
 * user_mode_vm(regs) determines whether a register set came from user mode.
 * This is true if V8086 mode was enabled OR if the register set was from
 * protected mode with RPL-3 CS value.  This tricky test checks that with
 * one comparison.  Many places in the kernel can bypass this full check
 * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
 */
static inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
#else
	return !!(regs->cs & 3);
#endif
}

static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
		USER_RPL;
#else
	return user_mode(regs);
#endif
}
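
/*
 * Worked example of the single-comparison test above, assuming CONFIG_VM86,
 * where SEGMENT_RPL_MASK == 3, USER_RPL == 3 and X86_VM_MASK is
 * X86_EFLAGS_VM (0x20000):
 *
 *	kernel-mode trap:     (0 | 0)       == 0         -> 0 >= 3 is false
 *	user protected mode:  (3 | 0)       == 3         -> 3 >= 3 is true
 *	vm86 mode:            (x | 0x20000) >= 0x20000   -> always >= 3, true
 *
 * In vm86 mode CS holds a real-mode segment value, so its low bits are
 * meaningless; the VM flag alone makes the result exceed USER_RPL.
 */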

static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->flags & X86_VM_MASK);
#else
	return 0;	/* No V86 mode support in long mode */
#endif
}

#ifdef CONFIG_X86_64
static inline bool user_64bit_mode(struct pt_regs *regs)
{
#ifndef CONFIG_PARAVIRT
	/*
	 * On non-paravirt systems, this is the only long mode CPL 3
	 * selector.  We do not allow long mode selectors in the LDT.
	 */
	return regs->cs == __USER_CS;
#else
	/* Headers are too twisted for this to go in paravirt.h. */
	return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
#endif
}
#endif

/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
 * when it traps.  The previous stack will be directly underneath the saved
 * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
 *
 * This is valid only for kernel mode traps.
 */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (unsigned long)(&regs->sp);
#else
	return regs->sp;
#endif
}
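
/*
 * Rough picture of the comment above for a 32-bit kernel-mode trap (higher
 * addresses at the top): the hardware pushes only eflags/cs/eip, so the saved
 * frame ends at regs->flags and the interrupted kernel stack begins exactly
 * where the (unsaved) sp field would sit:
 *
 *	... interrupted kernel stack ...	<- kernel_stack_pointer(regs)
 *	flags
 *	cs
 *	ip
 *	orig_ax
 *	...
 *	bx					<- regs
 */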

#define GET_IP(regs) ((regs)->ip)
#define GET_FP(regs) ((regs)->bp)
#define GET_USP(regs) ((regs)->sp)

#include <asm-generic/ptrace.h>
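
/*
 * Note: <asm-generic/ptrace.h> builds the generic accessors from the GET_*
 * macros above, so e.g. instruction_pointer(regs) resolves to regs->ip and
 * user_stack_pointer(regs) to regs->sp:
 *
 *	unsigned long pc = instruction_pointer(regs);
 *	unsigned long fp = frame_pointer(regs);
 */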

/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ss))

/**
 * regs_get_register() - get register value from its offset
 * @regs:	pt_regs from which the register value is read.
 * @offset:	offset number of the register.
 *
 * regs_get_register() returns the value of a register.  @offset is the
 * offset of the register within the struct pt_regs pointed to by @regs.
 * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
 */
static inline unsigned long regs_get_register(struct pt_regs *regs,
					      unsigned int offset)
{
	if (unlikely(offset > MAX_REG_OFFSET))
		return 0;
	return *(unsigned long *)((unsigned long)regs + offset);
}
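
/*
 * Illustrative sketch: a hypothetical kprobe pre-handler that reads a
 * register by name.  The probe itself and the register name ("di", i.e. the
 * first argument register on x86_64) are assumptions for the example; the
 * offset lookup would normally be done once at setup time.
 *
 *	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		int off = regs_query_register_offset("di");
 *
 *		if (off >= 0)
 *			pr_info("arg0 = %lx\n", regs_get_register(regs, off));
 *		return 0;
 *	}
 */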

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel stack
 * page(s).  If @addr is within the kernel stack, it returns true.  If not,
 * returns false.
 */
static inline int regs_within_kernel_stack(struct pt_regs *regs,
					   unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1))  ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
 * specified by @regs.  If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
						      unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
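
/*
 * Illustrative sketch: peeking at the top of the interrupted kernel stack,
 * e.g. from a probe handler.  The helper and the entry count are assumptions
 * for the example; entries that fall outside the stack simply read as 0.
 *
 *	static void dump_stack_entries(struct pt_regs *regs)
 *	{
 *		unsigned int n;
 *
 *		for (n = 0; n < 4; n++)
 *			pr_info("stack[%u] = %lx\n", n,
 *				regs_get_kernel_stack_nth(regs, n));
 *	}
 */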

#define arch_has_single_step()	(1)
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step()	(1)
#else
#define arch_has_block_step()	(boot_cpu_data.x86 >= 6)
#endif

#define ARCH_HAS_USER_SINGLE_STEP_INFO

struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info, int can_allocate);

#endif /* __KERNEL__ */

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PTRACE_H */