#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H

#include <linux/compiler.h>	/* For __user */
#include <asm/ptrace-abi.h>
#include <asm/processor-flags.h>

#ifdef __KERNEL__
#include <asm/segment.h>
#include <asm/page_types.h>
#endif

#ifndef __ASSEMBLY__

#ifdef __i386__
/* this struct defines the way the registers are stored on the
   stack during a system call. */

#ifndef __KERNEL__

/*
 * Userspace-visible (ABI) layout: field names carry the historic e-register
 * and x-segment prefixes.  Field order is part of the ptrace ABI — do not
 * reorder.
 */
struct pt_regs {
	long ebx;
	long ecx;
	long edx;
	long esi;
	long edi;
	long ebp;
	long eax;
	int xds;
	int xes;
	int xfs;
	int xgs;
	long orig_eax;
	long eip;
	int xcs;
	long eflags;
	long esp;
	int xss;
};

#else /* __KERNEL__ */

/*
 * In-kernel layout: same slots as the ABI struct above, but with the
 * unprefixed names shared with the 64-bit kernel struct below.
 */
struct pt_regs {
	unsigned long bx;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long bp;
	unsigned long ax;
	unsigned long ds;
	unsigned long es;
	unsigned long fs;
	unsigned long gs;
	unsigned long orig_ax;
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
};

#endif /* __KERNEL__ */

#else /* __i386__ */

#ifndef __KERNEL__

/* Userspace-visible (ABI) 64-bit layout: historic r-prefixed names. */
struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long rbp;
	unsigned long rbx;
/* arguments: non interrupts/non tracing syscalls only save up to here*/
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long rax;
	unsigned long rcx;
	unsigned long rdx;
	unsigned long rsi;
	unsigned long rdi;
	unsigned long orig_rax;
/* end of arguments */
/* cpu exception frame or undefined */
	unsigned long rip;
	unsigned long cs;
	unsigned long eflags;
	unsigned long rsp;
	unsigned long ss;
/* top of stack page */
};

#else /* __KERNEL__ */

/*
 * In-kernel 64-bit layout: identical slots to the ABI struct above, with
 * the unprefixed names (ip/sp/flags/ax/...) used by generic kernel code.
 */
struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long bp;
	unsigned long bx;
/* arguments: non interrupts/non tracing syscalls only save up to here*/
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long ax;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long orig_ax;
/* end of arguments */
/* cpu exception frame or undefined */
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
/* top of stack page */
};

#endif /* __KERNEL__ */
#endif /* !__i386__ */


#ifdef __KERNEL__

#include <linux/init.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt_types.h>
#endif

struct cpuinfo_x86;
struct task_struct;

extern unsigned long profile_pc(struct pt_regs *regs);
#define profile_pc profile_pc

extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
			 int error_code, int si_code);

extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);

/* Syscall/function return value lives in ax on both 32- and 64-bit. */
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
	return regs->ax;
}

/*
 * user_mode_vm(regs) determines whether a register set came from user mode.
 * This is true if V8086 mode was enabled OR if the register set was from
 * protected mode with RPL-3 CS value. This tricky test checks that with
 * one comparison. Many places in the kernel can bypass this full check
 * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
163 */ 164 static inline int user_mode(struct pt_regs *regs) 165 { 166 #ifdef CONFIG_X86_32 167 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL; 168 #else 169 return !!(regs->cs & 3); 170 #endif 171 } 172 173 static inline int user_mode_vm(struct pt_regs *regs) 174 { 175 #ifdef CONFIG_X86_32 176 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= 177 USER_RPL; 178 #else 179 return user_mode(regs); 180 #endif 181 } 182 183 static inline int v8086_mode(struct pt_regs *regs) 184 { 185 #ifdef CONFIG_X86_32 186 return (regs->flags & X86_VM_MASK); 187 #else 188 return 0; /* No V86 mode support in long mode */ 189 #endif 190 } 191 192 #ifdef CONFIG_X86_64 193 static inline bool user_64bit_mode(struct pt_regs *regs) 194 { 195 #ifndef CONFIG_PARAVIRT 196 /* 197 * On non-paravirt systems, this is the only long mode CPL 3 198 * selector. We do not allow long mode selectors in the LDT. 199 */ 200 return regs->cs == __USER_CS; 201 #else 202 /* Headers are too twisted for this to go in paravirt.h. */ 203 return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs; 204 #endif 205 } 206 #endif 207 208 /* 209 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode 210 * when it traps. The previous stack will be directly underneath the saved 211 * registers, and 'sp/ss' won't even have been saved. Thus the '®s->sp'. 212 * 213 * This is valid only for kernel mode traps. 
214 */ 215 static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) 216 { 217 #ifdef CONFIG_X86_32 218 return (unsigned long)(®s->sp); 219 #else 220 return regs->sp; 221 #endif 222 } 223 224 #define GET_IP(regs) ((regs)->ip) 225 #define GET_FP(regs) ((regs)->bp) 226 #define GET_USP(regs) ((regs)->sp) 227 228 #include <asm-generic/ptrace.h> 229 230 /* Query offset/name of register from its name/offset */ 231 extern int regs_query_register_offset(const char *name); 232 extern const char *regs_query_register_name(unsigned int offset); 233 #define MAX_REG_OFFSET (offsetof(struct pt_regs, ss)) 234 235 /** 236 * regs_get_register() - get register value from its offset 237 * @regs: pt_regs from which register value is gotten. 238 * @offset: offset number of the register. 239 * 240 * regs_get_register returns the value of a register. The @offset is the 241 * offset of the register in struct pt_regs address which specified by @regs. 242 * If @offset is bigger than MAX_REG_OFFSET, this returns 0. 243 */ 244 static inline unsigned long regs_get_register(struct pt_regs *regs, 245 unsigned int offset) 246 { 247 if (unlikely(offset > MAX_REG_OFFSET)) 248 return 0; 249 return *(unsigned long *)((unsigned long)regs + offset); 250 } 251 252 /** 253 * regs_within_kernel_stack() - check the address in the stack 254 * @regs: pt_regs which contains kernel stack pointer. 255 * @addr: address which is checked. 256 * 257 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). 258 * If @addr is within the kernel stack, it returns true. If not, returns false. 259 */ 260 static inline int regs_within_kernel_stack(struct pt_regs *regs, 261 unsigned long addr) 262 { 263 return ((addr & ~(THREAD_SIZE - 1)) == 264 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); 265 } 266 267 /** 268 * regs_get_kernel_stack_nth() - get Nth entry of the stack 269 * @regs: pt_regs which contains kernel stack pointer. 270 * @n: stack entry number. 
271 * 272 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which 273 * is specified by @regs. If the @n th entry is NOT in the kernel stack, 274 * this returns 0. 275 */ 276 static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, 277 unsigned int n) 278 { 279 unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); 280 addr += n; 281 if (regs_within_kernel_stack(regs, (unsigned long)addr)) 282 return *addr; 283 else 284 return 0; 285 } 286 287 #define arch_has_single_step() (1) 288 #ifdef CONFIG_X86_DEBUGCTLMSR 289 #define arch_has_block_step() (1) 290 #else 291 #define arch_has_block_step() (boot_cpu_data.x86 >= 6) 292 #endif 293 294 #define ARCH_HAS_USER_SINGLE_STEP_INFO 295 296 struct user_desc; 297 extern int do_get_thread_area(struct task_struct *p, int idx, 298 struct user_desc __user *info); 299 extern int do_set_thread_area(struct task_struct *p, int idx, 300 struct user_desc __user *info, int can_allocate); 301 302 #endif /* __KERNEL__ */ 303 304 #endif /* !__ASSEMBLY__ */ 305 306 #endif /* _ASM_X86_PTRACE_H */ 307