/* arch/x86/include/asm/ptrace.h */
#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H

#include <linux/compiler.h>	/* For __user */
#include <asm/ptrace-abi.h>
#include <asm/processor-flags.h>

#ifdef __KERNEL__
#include <asm/segment.h>
#endif

#ifndef __ASSEMBLY__

#ifdef __i386__
/* this struct defines the way the registers are stored on the
   stack during a system call. */

#ifndef __KERNEL__

struct pt_regs {
	long ebx;
	long ecx;
	long edx;
	long esi;
	long edi;
	long ebp;
	long eax;
	int  xds;
	int  xes;
	int  xfs;
	int  xgs;
	long orig_eax;
	long eip;
	int  xcs;
	long eflags;
	long esp;
	int  xss;
};
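
/*
 * Usage sketch (not part of the original header): a 32-bit debugger or
 * strace-like tracer sees a stopped tracee's registers in this layout,
 * e.g.
 *
 *	struct pt_regs regs;
 *	ptrace(PTRACE_GETREGS, pid, NULL, &regs);
 *
 * where regs.orig_eax then holds the syscall number at entry and regs.eax
 * the return value once the syscall has completed.  In practice userspace
 * usually goes through the matching struct user_regs_struct from
 * <sys/user.h>, which mirrors this layout.
 */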

#else /* __KERNEL__ */

struct pt_regs {
	unsigned long bx;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long bp;
	unsigned long ax;
	unsigned long ds;
	unsigned long es;
	unsigned long fs;
	unsigned long gs;
	unsigned long orig_ax;
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
};
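
/*
 * Note: the in-kernel view drops the e/x prefixes from the register names
 * so that code shared between 32-bit and 64-bit x86 can use the same
 * member names (ax, ip, sp, flags, ...).
 */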

#endif /* __KERNEL__ */

#else /* __i386__ */

#ifndef __KERNEL__

struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long rbp;
	unsigned long rbx;
/* arguments: non-interrupt/non-tracing syscalls only save up to here */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long rax;
	unsigned long rcx;
	unsigned long rdx;
	unsigned long rsi;
	unsigned long rdi;
	unsigned long orig_rax;
/* end of arguments */
/* cpu exception frame or undefined */
	unsigned long rip;
	unsigned long cs;
	unsigned long eflags;
	unsigned long rsp;
	unsigned long ss;
/* top of stack page */
};
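
/*
 * For reference (x86-64 syscall convention): the syscall number arrives in
 * rax and is preserved here as orig_rax, the arguments arrive in rdi, rsi,
 * rdx, r10, r8 and r9, and the return value goes back out in rax.  A
 * ptrace-based tracer therefore reads the first argument of a stopped
 * syscall from the rdi slot of this structure.
 */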

#else /* __KERNEL__ */

struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long bp;
	unsigned long bx;
/* arguments: non-interrupt/non-tracing syscalls only save up to here */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long ax;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long orig_ax;
/* end of arguments */
/* cpu exception frame or undefined */
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
/* top of stack page */
};

#endif /* __KERNEL__ */
#endif /* !__i386__ */


#ifdef __KERNEL__

#include <linux/init.h>

struct cpuinfo_x86;
struct task_struct;

extern unsigned long profile_pc(struct pt_regs *regs);

extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
			 int error_code, int si_code);
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);

extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);

static inline unsigned long regs_return_value(struct pt_regs *regs)
{
	return regs->ax;
}
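
/*
 * Illustrative companion sketch (not from the original header): the number
 * of the syscall that entered the kernel is preserved in orig_ax, with -1
 * conventionally meaning "not inside a syscall", so a matching helper could
 * look like this:
 */
static inline long regs_syscall_number_example(struct pt_regs *regs)
{
	/*
	 * orig_ax survives even after ax has been overwritten by the
	 * return value.
	 */
	return regs->orig_ax;
}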

/*
 * user_mode_vm(regs) determines whether a register set came from user mode.
 * This is true if V8086 mode was enabled OR if the register set was from
 * protected mode with RPL-3 CS value.  This tricky test checks that with
 * one comparison.  Many places in the kernel can bypass this full check
 * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
 */
static inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
#else
	return !!(regs->cs & 3);
#endif
}

static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
		USER_RPL;
#else
	return user_mode(regs);
#endif
}
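
/*
 * Worked example of the one-comparison trick above, assuming the usual
 * definitions (SEGMENT_RPL_MASK == 0x3, USER_RPL == 3, X86_VM_MASK == the
 * EFLAGS VM bit, 0x00020000):
 *
 *   V8086 mode:     (cs & 0x3) | 0x00020000 >= 3  ->  user mode for any RPL
 *   protected mode: (cs & 0x3) | 0x0        >= 3  ->  user mode iff RPL == 3
 */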

static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->flags & X86_VM_MASK);
#else
	return 0;	/* No V86 mode support in long mode */
#endif
}

/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
 * when it traps.  The previous stack will be directly underneath the saved
 * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
 *
 * This is valid only for kernel mode traps.
 */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (unsigned long)(&regs->sp);
#else
	return regs->sp;
#endif
}
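
/*
 * Put differently: on a 32-bit kernel-mode trap the sp and ss members above
 * exist in the structure but were never filled in by the CPU, so &regs->sp
 * is the first address past the data that actually was saved, which is
 * exactly where the top of the interrupted kernel stack sits.
 */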

static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return regs->ip;
}

static inline unsigned long frame_pointer(struct pt_regs *regs)
{
	return regs->bp;
}

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return regs->sp;
}

/*
 * These are defined as per linux/ptrace.h, which see.
 */
#define arch_has_single_step()	(1)
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);

extern void user_enable_block_step(struct task_struct *);
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step()	(1)
#else
#define arch_has_block_step()	(boot_cpu_data.x86 >= 6)
#endif
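
/*
 * Background note: block stepping ("run until the next branch") is driven
 * by the BTF bit in the DEBUGCTL MSR, hence the dependency on
 * CONFIG_X86_DEBUGCTLMSR above and the family >= 6 fallback check.
 */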

struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info, int can_allocate);

#ifdef CONFIG_X86_PTRACE_BTS
extern void ptrace_bts_untrace(struct task_struct *tsk);

#define arch_ptrace_untrace(tsk)	ptrace_bts_untrace(tsk)
#endif /* CONFIG_X86_PTRACE_BTS */

#endif /* __KERNEL__ */

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PTRACE_H */