xref: /openbmc/linux/arch/arm/include/asm/ptrace.h (revision 2a598d0b)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  *  arch/arm/include/asm/ptrace.h
4  *
5  *  Copyright (C) 1996-2003 Russell King
6  */
7 #ifndef __ASM_ARM_PTRACE_H
8 #define __ASM_ARM_PTRACE_H
9 
10 #include <uapi/asm/ptrace.h>
11 
12 #ifndef __ASSEMBLY__
13 #include <linux/types.h>
14 
/*
 * Kernel-mode view of the saved user/exception register set.  The 18
 * slots are addressed through the ARM_* offset macros supplied by
 * <uapi/asm/ptrace.h> (r0-r15, cpsr and the original r0 — layout is
 * defined by that uapi header; confirm there before relying on it).
 */
struct pt_regs {
	unsigned long uregs[18];
};
18 
/*
 * Extended register frame for exceptions taken in SVC mode: the plain
 * pt_regs plus extra per-exception state that only kernel entries need.
 * pt_regs MUST stay the first member so to_svc_pt_regs() works.
 */
struct svc_pt_regs {
	struct pt_regs regs;
	u32 dacr;		/* saved DACR (domain access control) value */
};

/* Recover the enclosing svc_pt_regs from a pointer to its pt_regs member. */
#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
25 
/*
 * Frame came from user space: both 26- and 32-bit user modes have the
 * low four mode bits of CPSR clear, unlike any privileged mode.
 */
#define user_mode(regs)	\
	(((regs)->ARM_cpsr & 0xf) == 0)

/* Non-zero when the saved CPSR has the T (Thumb) bit set. */
#ifdef CONFIG_ARM_THUMB
#define thumb_mode(regs) \
	(((regs)->ARM_cpsr & PSR_T_BIT))
#else
#define thumb_mode(regs) (0)
#endif

/*
 * Instruction set state as a two-bit value: the J bit lands in bit 1
 * and the T bit in bit 0, giving 0 = ARM, 1 = Thumb, and J-bit states
 * above that.  V7-M cores only execute Thumb, hence the constant.
 */
#ifndef CONFIG_CPU_V7M
#define isa_mode(regs) \
	((((regs)->ARM_cpsr & PSR_J_BIT) >> (__ffs(PSR_J_BIT) - 1)) | \
	 (((regs)->ARM_cpsr & PSR_T_BIT) >> (__ffs(PSR_T_BIT))))
#else
#define isa_mode(regs) 1 /* Thumb */
#endif

/* Processor mode field (USR/FIQ/IRQ/SVC/...) of the saved CPSR. */
#define processor_mode(regs) \
	((regs)->ARM_cpsr & MODE_MASK)

/* IRQs were enabled in the trapped context (I bit clear). */
#define interrupts_enabled(regs) \
	(!((regs)->ARM_cpsr & PSR_I_BIT))

/* FIQs were enabled in the trapped context (F bit clear). */
#define fast_interrupts_enabled(regs) \
	(!((regs)->ARM_cpsr & PSR_F_BIT))
52 
/* Are the current registers suitable for user mode?
 * (used to maintain security in signal handlers)
 *
 * Returns 1 when @regs may safely be restored to user space.  As a side
 * effect the F (FIQ mask) and A (async abort mask) bits are always
 * cleared; on rejection the whole CPSR is additionally forced to a sane
 * user-mode value before returning 0.
 */
static inline int valid_user_regs(struct pt_regs *regs)
{
#ifndef CONFIG_CPU_V7M
	/* Capture the mode field before we start sanitising CPSR. */
	unsigned long mode = regs->ARM_cpsr & MODE_MASK;

	/*
	 * Always clear the F (FIQ) and A (delayed abort) bits
	 */
	regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);

	/* Acceptable only if IRQs are unmasked and the mode is user. */
	if ((regs->ARM_cpsr & PSR_I_BIT) == 0) {
		if (mode == USR_MODE)
			return 1;
		/* 26-bit user mode is valid only on CPUs advertising it. */
		if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE)
			return 1;
	}

	/*
	 * Force CPSR to something logical...
	 * Keep only the flag/status/extension fields, the T bit and the
	 * 32-bit-mode bit, then select USR_MODE unless the CPU can run
	 * 26-bit tasks.
	 */
	regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
	if (!(elf_hwcap & HWCAP_26BIT))
		regs->ARM_cpsr |= USR_MODE;

	return 0;
#else /* ifndef CONFIG_CPU_V7M */
	/* On V7-M every frame is considered valid. */
	return 1;
#endif
}
85 
86 static inline long regs_return_value(struct pt_regs *regs)
87 {
88 	return regs->ARM_r0;
89 }
90 
/* PC as saved in the trap frame (usable as an lvalue). */
#define instruction_pointer(regs)	(regs)->ARM_pc

/*
 * Frame pointer register: r7 under the Thumb-2 kernel ABI, the
 * traditional ARM fp register otherwise.
 */
#ifdef CONFIG_THUMB2_KERNEL
#define frame_pointer(regs) (regs)->ARM_r7
#else
#define frame_pointer(regs) (regs)->ARM_fp
#endif
98 
99 static inline void instruction_pointer_set(struct pt_regs *regs,
100 					   unsigned long val)
101 {
102 	instruction_pointer(regs) = val;
103 }
104 
/*
 * PC to credit in profiling samples.  On SMP the out-of-line version
 * can look past spinlock code; on UP the raw PC is good enough.
 */
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
#define profile_pc(regs) instruction_pointer(regs)
#endif

/* Condition-code field (top nibble) of an ARM instruction word. */
#define predicate(x)		((x) & 0xf0000000)
#define PREDICATE_ALWAYS	0xe0000000	/* AL: unconditional */

/*
 * True if instr is a 32-bit thumb instruction. This works if instr
 * is the first or only half-word of a thumb instruction. It also works
 * when instr holds all 32-bits of a wide thumb instruction if stored
 * in the form (first_half<<16)|(second_half)
 */
#define is_wide_instruction(instr)	((unsigned)(instr) >= 0xe800)

/*
 * kprobe-based event tracer support
 */
#include <linux/compiler.h>
/* Largest pt_regs byte offset that still names a register (ORIG_r0). */
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0))
127 
/* Register-name <-> pt_regs-offset translation, used by kprobe events. */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
/* Kernel-stack inspection helpers for argument fetching. */
extern bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr);
extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
					       unsigned int n);
133 
134 /**
135  * regs_get_register() - get register value from its offset
136  * @regs:	   pt_regs from which register value is gotten
137  * @offset:    offset number of the register.
138  *
139  * regs_get_register returns the value of a register whose offset from @regs.
140  * The @offset is the offset of the register in struct pt_regs.
141  * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
142  */
143 static inline unsigned long regs_get_register(struct pt_regs *regs,
144 					      unsigned int offset)
145 {
146 	if (unlikely(offset > MAX_REG_OFFSET))
147 		return 0;
148 	return *(unsigned long *)((unsigned long)regs + offset);
149 }
150 
151 /* Valid only for Kernel mode traps. */
152 static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
153 {
154 	return regs->ARM_sp;
155 }
156 
157 static inline unsigned long user_stack_pointer(struct pt_regs *regs)
158 {
159 	return regs->ARM_sp;
160 }
161 
/*
 * Trap frame of the current task.  ORing the stack pointer with
 * (THREAD_SIZE - 1) yields the address of the stack's last byte;
 * subtracting 7 lands 8 bytes below the top, and the final "- 1"
 * (pointer arithmetic) steps back sizeof(struct pt_regs), so the
 * frame sits at the top of the kernel stack below an 8-byte pad.
 */
#define current_pt_regs(void) ({ (struct pt_regs *)			\
		((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1;	\
})
165 
/* Store @rc into the saved r0, ARM's return-value register. */
static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
	regs->ARM_r0 = rc;
}
170 
/*
 * Update ITSTATE after normal execution of an IT block instruction.
 *
 * The 8 IT state bits are split into two parts in CPSR:
 *	ITSTATE<1:0> are in CPSR<26:25>
 *	ITSTATE<7:2> are in CPSR<15:10>
 *
 * Pure function: returns the advanced CPSR value, @cpsr is by value.
 */
static inline unsigned long it_advance(unsigned long cpsr)
{
	/* 0x06000400 selects ITSTATE<2:0> (CPSR<26:25> plus CPSR<10>). */
	if ((cpsr & 0x06000400) == 0) {
		/* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
		cpsr &= ~PSR_IT_MASK;
	} else {
		/* We need to shift left ITSTATE<4:0> */
		const unsigned long mask = 0x06001c00;  /* Mask ITSTATE<4:0> */
		unsigned long it = cpsr & mask;
		it <<= 1;
		it |= it >> (27 - 10);  /* Carry ITSTATE<2> to correct place */
		it &= mask;	/* drop bits shifted outside ITSTATE<4:0> */
		cpsr &= ~mask;
		cpsr |= it;
	}
	return cpsr;
}
195 
/* Syscall entry/exit tracing hooks (implemented in arch ptrace code). */
int syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_exit(struct pt_regs *regs);
198 
199 #endif /* __ASSEMBLY__ */
200 #endif
201