/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H

#define KERNEL_DS		UL(-1)
#define USER_DS			((UL(1) << VA_BITS) - 1)

/*
 * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
 * no point in shifting all network buffers by 2 bytes just to make some IP
 * header fields appear aligned in memory, potentially sacrificing some DMA
 * performance on some platforms.
 */
#define NET_IP_ALIGN	0

#ifndef __ASSEMBLY__

#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#include <vdso/processor.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/hw_breakpoint.h>
#include <asm/kasan.h>
#include <asm/lse.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pointer_auth.h>
#include <asm/ptrace.h>
#include <asm/types.h>

/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
 */

#define DEFAULT_MAP_WINDOW_64	(UL(1) << VA_BITS_MIN)
#define TASK_SIZE_64		(UL(1) << vabits_actual)
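
/*
 * Illustrative arithmetic (added note, not upstream text): on a 48-bit VA
 * configuration, VA_BITS_MIN and vabits_actual are both 48, so
 *
 *	DEFAULT_MAP_WINDOW_64 == UL(1) << 48 == 0x0001000000000000
 *	TASK_SIZE_64          == UL(1) << 48 == 0x0001000000000000
 *
 * On a CONFIG_ARM64_VA_BITS_52 kernel running on 52-bit capable hardware,
 * vabits_actual is 52, so TASK_SIZE_64 grows to UL(1) << 52 while
 * DEFAULT_MAP_WINDOW_64 keeps the 48-bit value; the arch_get_mmap_*()
 * helpers further down gate access to the extra range behind an explicit
 * mmap hint.
 */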

#ifdef CONFIG_COMPAT
#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
/*
 * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
 * by the compat vectors page.
 */
#define TASK_SIZE_32		UL(0x100000000)
#else
#define TASK_SIZE_32		(UL(0x100000000) - PAGE_SIZE)
#endif /* CONFIG_ARM64_64K_PAGES */
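/*
 * Worked example (added for illustration): with 4K pages TASK_SIZE_32 is
 * 0x100000000 - 0x1000 == 0xfffff000, leaving the top page unused; with
 * 64K pages plus kuser helpers, the top page starting at 0xffff0000 is the
 * compat vectors page, so the full 4GiB range is used.
 */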
#define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#define DEFAULT_MAP_WINDOW	(test_thread_flag(TIF_32BIT) ? \
				TASK_SIZE_32 : DEFAULT_MAP_WINDOW_64)
#else
#define TASK_SIZE		TASK_SIZE_64
#define DEFAULT_MAP_WINDOW	DEFAULT_MAP_WINDOW_64
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_ARM64_FORCE_52BIT
#define STACK_TOP_MAX		TASK_SIZE_64
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 4))
#else
#define STACK_TOP_MAX		DEFAULT_MAP_WINDOW_64
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(DEFAULT_MAP_WINDOW / 4))
#endif /* CONFIG_ARM64_FORCE_52BIT */

#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE	0xffff0000
#define STACK_TOP		(test_thread_flag(TIF_32BIT) ? \
				AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP		STACK_TOP_MAX
#endif /* CONFIG_COMPAT */

#ifndef CONFIG_ARM64_FORCE_52BIT
#define arch_get_mmap_end(addr) ((addr > DEFAULT_MAP_WINDOW) ? TASK_SIZE :\
				DEFAULT_MAP_WINDOW)

#define arch_get_mmap_base(addr, base) ((addr > DEFAULT_MAP_WINDOW) ? \
					base + TASK_SIZE - DEFAULT_MAP_WINDOW :\
					base)
#endif /* CONFIG_ARM64_FORCE_52BIT */
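
/*
 * Hedged sketch of the intent (the callers live in the generic mmap code,
 * not in this header): a task only receives mappings above
 * DEFAULT_MAP_WINDOW when it passes a hint above that boundary, e.g.
 *
 *	void *hint = (void *)0x0008000000000000UL;	// hypothetical 52-bit hint
 *	p = mmap(hint, len, PROT_READ | PROT_WRITE,
 *		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * With such a hint, arch_get_mmap_end() reports TASK_SIZE as the search
 * limit and arch_get_mmap_base() raises the top-down base accordingly;
 * otherwise the search stays within the default 48-bit window.
 */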

extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT	(arm64_dma_phys_limit - 1)

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* Have we suspended stepping by a debugger? */
	int			suspended_step;
	/* Allow breakpoints and watchpoints to be disabled for this thread. */
	int			bps_disabled;
	int			wps_disabled;
	/* Hardware breakpoints pinned to this task. */
	struct perf_event	*hbp_break[ARM_MAX_BRP];
	struct perf_event	*hbp_watch[ARM_MAX_WRP];
#endif
};

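/*
 * Callee-saved register context preserved by cpu_switch_to() (added
 * description): x19-x28 and the frame pointer are what the AArch64
 * procedure call standard requires a callee to preserve, so only these,
 * plus sp and the resume pc, need to survive a context switch.
 */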
struct cpu_context {
	unsigned long x19;
	unsigned long x20;
	unsigned long x21;
	unsigned long x22;
	unsigned long x23;
	unsigned long x24;
	unsigned long x25;
	unsigned long x26;
	unsigned long x27;
	unsigned long x28;
	unsigned long fp;
	unsigned long sp;
	unsigned long pc;
};

struct thread_struct {
	struct cpu_context	cpu_context;	/* cpu context */

	/*
	 * Whitelisted fields for hardened usercopy:
	 * Maintainers must ensure manually that this contains no
	 * implicit padding.
	 */
	struct {
		unsigned long	tp_value;	/* TLS register */
		unsigned long	tp2_value;
		struct user_fpsimd_state fpsimd_state;
	} uw;

	unsigned int		fpsimd_cpu;
	void			*sve_state;	/* SVE registers, if any */
	unsigned int		sve_vl;		/* SVE vector length */
	unsigned int		sve_vl_onexec;	/* SVE vl after next exec */
	unsigned long		fault_address;	/* fault info */
	unsigned long		fault_code;	/* ESR_EL1 value */
	struct debug_info	debug;		/* debugging */
#ifdef CONFIG_ARM64_PTR_AUTH
	struct ptrauth_keys	keys_user;
#endif
};

static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	/* Verify that there is no padding among the whitelisted fields: */
	BUILD_BUG_ON(sizeof_field(struct thread_struct, uw) !=
		     sizeof_field(struct thread_struct, uw.tp_value) +
		     sizeof_field(struct thread_struct, uw.tp2_value) +
		     sizeof_field(struct thread_struct, uw.fpsimd_state));

	*offset = offsetof(struct thread_struct, uw);
	*size = sizeof_field(struct thread_struct, uw);
}
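
/*
 * A hedged sketch of how the core consumes this (the real caller lives in
 * kernel/fork.c, not here): the offset/size pair is shifted by
 * offsetof(struct task_struct, thread) and handed to
 * kmem_cache_create_usercopy(), so hardened usercopy only permits
 * copy_{to,from}_user() against the uw region of a task_struct slab object.
 */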

#ifdef CONFIG_COMPAT
#define task_user_tls(t)						\
({									\
	unsigned long *__tls;						\
	if (is_compat_thread(task_thread_info(t)))			\
		__tls = &(t)->thread.uw.tp2_value;			\
	else								\
		__tls = &(t)->thread.uw.tp_value;			\
	__tls;								\
 })
#else
#define task_user_tls(t)	(&(t)->thread.uw.tp_value)
#endif

/* Sync TPIDR_EL0 back to thread_struct for current */
void tls_preserve_current_state(void);
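
/*
 * Illustrative use (a sketch of the save path, not a definition made here):
 *
 *	*task_user_tls(current) = read_sysreg(tpidr_el0);
 *
 * so a compat thread's TPIDR_EL0 value lands in uw.tp2_value while a
 * native thread's lands in uw.tp_value.
 */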

#define INIT_THREAD {				\
	.fpsimd_cpu = NR_CPUS,			\
}

static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
	memset(regs, 0, sizeof(*regs));
	forget_syscall(regs);
	regs->pc = pc;

	if (system_uses_irq_prio_masking())
		regs->pmr_save = GIC_PRIO_IRQON;
}

static inline void set_ssbs_bit(struct pt_regs *regs)
{
	regs->pstate |= PSR_SSBS_BIT;
}

static inline void set_compat_ssbs_bit(struct pt_regs *regs)
{
	regs->pstate |= PSR_AA32_SSBS_BIT;
}

static inline void start_thread(struct pt_regs *regs, unsigned long pc,
				unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = PSR_MODE_EL0t;

	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
		set_ssbs_bit(regs);

	regs->sp = sp;
}
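
/*
 * Hedged usage sketch (the real caller is the ELF loader, not this header):
 * after the new mm is set up, exec does roughly
 *
 *	start_thread(regs, elf_entry, bprm->p);
 *
 * which wipes the register frame, drops the task into EL0t, sets SSBS
 * unless the SSBD mitigation is forced on, and points sp at the new stack.
 */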

static inline bool is_ttbr0_addr(unsigned long addr)
{
	/* entry assembly clears tags for TTBR0 addrs */
	return addr < TASK_SIZE;
}

static inline bool is_ttbr1_addr(unsigned long addr)
{
	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
}
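
/*
 * Illustrative classification (an added sketch): fault handling code uses
 * this pair to tell user and kernel pointers apart, e.g.
 *
 *	if (is_ttbr0_addr(addr))
 *		handle_user_fault();		// hypothetical helper
 *	else if (is_ttbr1_addr(addr))
 *		handle_kernel_fault();		// hypothetical helper
 *
 * Addresses in the gap between the two ranges match neither and are
 * treated as invalid.
 */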

#ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
				       unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = PSR_AA32_MODE_USR;
	if (pc & 1)
		regs->pstate |= PSR_AA32_T_BIT;

#ifdef __AARCH64EB__
	regs->pstate |= PSR_AA32_E_BIT;
#endif

	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
		set_compat_ssbs_bit(regs);

	regs->compat_sp = sp;
}
#endif

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
					 struct task_struct *next);

#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
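
/*
 * Layout sketch (added description): the saved user register frame sits at
 * the very top of the kernel stack, so
 *
 *	task_stack_page(p)				lowest address
 *	...
 *	task_stack_page(p) + THREAD_SIZE
 *		- sizeof(struct pt_regs)		<- task_pt_regs(p)
 *	task_stack_page(p) + THREAD_SIZE		stack top
 *
 * KSTK_EIP() and KSTK_ESP() below simply read fields out of that frame.
 */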

#define KSTK_EIP(tsk)	((unsigned long)task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)	user_stack_pointer(task_pt_regs(tsk))

/*
 * Prefetching support
 */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
	asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
	asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void spin_lock_prefetch(const void *ptr)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
		     "prfm pstl1strm, %a0",
		     "nop") : : "p" (ptr));
}
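
/*
 * Hedged example (hypothetical caller, not taken from this file): a loop
 * walking a list might overlap the next load with current work,
 *
 *	prefetch(node->next);
 *	process(node);				// hypothetical work
 *
 * prefetchw() is the store-side equivalent, and spin_lock_prefetch()
 * degrades to a plain nop when LSE atomics are patched in (the second
 * ARM64_LSE_ATOMIC_INSN alternative above).
 */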

extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);

/*
 * Not at the top of the file due to a direct #include cycle between
 * <asm/fpsimd.h> and <asm/processor.h>.  Deferring this #include
 * ensures that contents of processor.h are visible to fpsimd.h even if
 * processor.h is included first.
 *
 * These prctl helpers are the only things in this file that require
 * fpsimd.h.  The core code expects them to be in this header.
 */
#include <asm/fpsimd.h>

/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg)	sve_set_current_vl(arg)
#define SVE_GET_VL()	sve_get_current_vl()
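
/*
 * Userspace view (illustrative, assuming the standard prctl() interface):
 *
 *	prctl(PR_SVE_SET_VL, 32);	// request a 256-bit (32-byte) VL
 *	vl = prctl(PR_SVE_GET_VL);	// flags | current VL in bytes
 *
 * The kernel side of both calls lands in sve_set_current_vl() and
 * sve_get_current_vl() through the macros above.
 */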

/* PR_PAC_RESET_KEYS prctl */
#define PAC_RESET_KEYS(tsk, arg)	ptrauth_prctl_reset_keys(tsk, arg)

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
long set_tagged_addr_ctrl(unsigned long arg);
long get_tagged_addr_ctrl(void);
#define SET_TAGGED_ADDR_CTRL(arg)	set_tagged_addr_ctrl(arg)
#define GET_TAGGED_ADDR_CTRL()		get_tagged_addr_ctrl()
#endif
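
/*
 * Userspace view (illustrative): a process opts in to passing tagged
 * pointers to syscalls with
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);
 *
 * which reaches set_tagged_addr_ctrl() through the macro above.
 */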

/*
 * For CONFIG_GCC_PLUGIN_STACKLEAK
 *
 * These need to be macros because otherwise we get stuck in a nightmare
 * of header definitions for the use of task_stack_page.
 */

#define current_top_of_stack()							\
({										\
	struct stack_info _info;						\
	BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info));	\
	_info.high;								\
})
#define on_thread_stack()	(on_task_stack(current, current_stack_pointer, NULL))
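
/*
 * Hedged note (added): the stackleak erase pass is the intended consumer,
 * using current_top_of_stack() as the upper bound of the region it poisons
 * and on_thread_stack() to pick the right boundary when it runs on the
 * task stack itself.
 */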

#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */