/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H

#define KERNEL_DS		UL(-1)
#define USER_DS			((UL(1) << VA_BITS) - 1)

/*
 * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
 * no point in shifting all network buffers by 2 bytes just to make some IP
 * header fields appear aligned in memory, potentially sacrificing some DMA
 * performance on some platforms.
 */
#define NET_IP_ALIGN	0

#ifndef __ASSEMBLY__

#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#include <vdso/processor.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/hw_breakpoint.h>
#include <asm/kasan.h>
#include <asm/lse.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pointer_auth.h>
#include <asm/ptrace.h>
#include <asm/spectre.h>
#include <asm/types.h>

/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
 */

#define DEFAULT_MAP_WINDOW_64	(UL(1) << VA_BITS_MIN)
#define TASK_SIZE_64		(UL(1) << vabits_actual)

#ifdef CONFIG_COMPAT
#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
/*
 * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
 * by the compat vectors page.
 */
#define TASK_SIZE_32		UL(0x100000000)
#else
#define TASK_SIZE_32		(UL(0x100000000) - PAGE_SIZE)
#endif /* CONFIG_ARM64_64K_PAGES */
#define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#define DEFAULT_MAP_WINDOW	(test_thread_flag(TIF_32BIT) ? \
				TASK_SIZE_32 : DEFAULT_MAP_WINDOW_64)
#else
#define TASK_SIZE		TASK_SIZE_64
#define DEFAULT_MAP_WINDOW	DEFAULT_MAP_WINDOW_64
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_ARM64_FORCE_52BIT
#define STACK_TOP_MAX		TASK_SIZE_64
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 4))
#else
#define STACK_TOP_MAX		DEFAULT_MAP_WINDOW_64
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(DEFAULT_MAP_WINDOW / 4))
#endif /* CONFIG_ARM64_FORCE_52BIT */

#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE	0xffff0000
#define STACK_TOP		(test_thread_flag(TIF_32BIT) ? \
				AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP		STACK_TOP_MAX
#endif /* CONFIG_COMPAT */

#ifndef CONFIG_ARM64_FORCE_52BIT
#define arch_get_mmap_end(addr) ((addr > DEFAULT_MAP_WINDOW) ? TASK_SIZE :\
				DEFAULT_MAP_WINDOW)

#define arch_get_mmap_base(addr, base) ((addr > DEFAULT_MAP_WINDOW) ? \
					base + TASK_SIZE - DEFAULT_MAP_WINDOW :\
					base)
#endif /* CONFIG_ARM64_FORCE_52BIT */
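
/*
 * Illustrative sketch only (the hint addresses below are assumptions, not
 * part of this header): on a 52-bit VA kernel, arch_get_mmap_end() above
 * keeps mmap() results inside the 48-bit DEFAULT_MAP_WINDOW unless userspace
 * passes a hint address beyond it, e.g. from userspace:
 *
 *	mmap(NULL, len, prot, flags, fd, 0);                 // stays <= 48-bit VA
 *	mmap((void *)(1UL << 50), len, prot, flags, fd, 0);  // may use up to TASK_SIZE
 *
 * See Documentation/arm64/memory.rst for the full policy.
 */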

extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT	(arm64_dma_phys_limit - 1)

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* Have we suspended stepping by a debugger? */
	int			suspended_step;
	/* Allow breakpoints and watchpoints to be disabled for this thread. */
	int			bps_disabled;
	int			wps_disabled;
	/* Hardware breakpoints pinned to this task. */
	struct perf_event	*hbp_break[ARM_MAX_BRP];
	struct perf_event	*hbp_watch[ARM_MAX_WRP];
#endif
};

struct cpu_context {
	unsigned long	x19;
	unsigned long	x20;
	unsigned long	x21;
	unsigned long	x22;
	unsigned long	x23;
	unsigned long	x24;
	unsigned long	x25;
	unsigned long	x26;
	unsigned long	x27;
	unsigned long	x28;
	unsigned long	fp;
	unsigned long	sp;
	unsigned long	pc;
};

struct thread_struct {
	struct cpu_context	cpu_context;	/* cpu context */

	/*
	 * Whitelisted fields for hardened usercopy:
	 * Maintainers must ensure manually that this contains no
	 * implicit padding.
	 */
	struct {
		unsigned long	tp_value;	/* TLS register */
		unsigned long	tp2_value;
		struct user_fpsimd_state fpsimd_state;
	} uw;

	unsigned int		fpsimd_cpu;
	void			*sve_state;	/* SVE registers, if any */
	unsigned int		sve_vl;		/* SVE vector length */
	unsigned int		sve_vl_onexec;	/* SVE vl after next exec */
	unsigned long		fault_address;	/* fault info */
	unsigned long		fault_code;	/* ESR_EL1 value */
	struct debug_info	debug;		/* debugging */
#ifdef CONFIG_ARM64_PTR_AUTH
	struct ptrauth_keys_user	keys_user;
	struct ptrauth_keys_kernel	keys_kernel;
#endif
#ifdef CONFIG_ARM64_MTE
	u64			sctlr_tcf0;
	u64			gcr_user_incl;
#endif
};

static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	/* Verify that there is no padding among the whitelisted fields: */
	BUILD_BUG_ON(sizeof_field(struct thread_struct, uw) !=
		     sizeof_field(struct thread_struct, uw.tp_value) +
		     sizeof_field(struct thread_struct, uw.tp2_value) +
		     sizeof_field(struct thread_struct, uw.fpsimd_state));

	*offset = offsetof(struct thread_struct, uw);
	*size = sizeof_field(struct thread_struct, uw);
}

#ifdef CONFIG_COMPAT
#define task_user_tls(t)						\
({									\
	unsigned long *__tls;						\
	if (is_compat_thread(task_thread_info(t)))			\
		__tls = &(t)->thread.uw.tp2_value;			\
	else								\
		__tls = &(t)->thread.uw.tp_value;			\
	__tls;								\
})
#else
#define task_user_tls(t)	(&(t)->thread.uw.tp_value)
#endif

/* Sync TPIDR_EL0 back to thread_struct for current */
void tls_preserve_current_state(void);

#define INIT_THREAD {				\
	.fpsimd_cpu = NR_CPUS,			\
}

static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
	memset(regs, 0, sizeof(*regs));
	forget_syscall(regs);
	regs->pc = pc;

	if (system_uses_irq_prio_masking())
		regs->pmr_save = GIC_PRIO_IRQON;
}

static inline void start_thread(struct pt_regs *regs, unsigned long pc,
				unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = PSR_MODE_EL0t;
	spectre_v4_enable_task_mitigation(current);
	regs->sp = sp;
}

#ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
				       unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = PSR_AA32_MODE_USR;
	if (pc & 1)
		regs->pstate |= PSR_AA32_T_BIT;

#ifdef __AARCH64EB__
	regs->pstate |= PSR_AA32_E_BIT;
#endif

	spectre_v4_enable_task_mitigation(current);
	regs->compat_sp = sp;
}
#endif
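
/*
 * Illustrative sketch only (not code from this header): the ELF loader calls
 * start_thread() once the new user image has been mapped, roughly
 *
 *	start_thread(current_pt_regs(), elf_entry, stack_top);
 *
 * and compat (AArch32) images go through compat_start_thread() instead,
 * where bit 0 of the entry point selects Thumb mode via PSR_AA32_T_BIT.
 * The argument names above are placeholders, not the binfmt_elf variables.
 */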

static inline bool is_ttbr0_addr(unsigned long addr)
{
	/* entry assembly clears tags for TTBR0 addrs */
	return addr < TASK_SIZE;
}

static inline bool is_ttbr1_addr(unsigned long addr)
{
	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
}

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
					 struct task_struct *next);

#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	((unsigned long)task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)	user_stack_pointer(task_pt_regs(tsk))

/*
 * Prefetching support
 */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
	asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
	asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void spin_lock_prefetch(const void *ptr)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
		     "prfm pstl1strm, %a0",
		     "nop") : : "p" (ptr));
}

extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);

/*
 * Not at the top of the file due to a direct #include cycle between
 * <asm/fpsimd.h> and <asm/processor.h>. Deferring this #include
 * ensures that contents of processor.h are visible to fpsimd.h even if
 * processor.h is included first.
 *
 * These prctl helpers are the only things in this file that require
 * fpsimd.h. The core code expects them to be in this header.
 */
#include <asm/fpsimd.h>

/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg)	sve_set_current_vl(arg)
#define SVE_GET_VL()	sve_get_current_vl()

/* PR_PAC_RESET_KEYS prctl */
#define PAC_RESET_KEYS(tsk, arg)	ptrauth_prctl_reset_keys(tsk, arg)

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg);
long get_tagged_addr_ctrl(struct task_struct *task);
#define SET_TAGGED_ADDR_CTRL(arg)	set_tagged_addr_ctrl(current, arg)
#define GET_TAGGED_ADDR_CTRL()		get_tagged_addr_ctrl(current)
#endif

/*
 * For CONFIG_GCC_PLUGIN_STACKLEAK
 *
 * These need to be macros because otherwise we get stuck in a nightmare
 * of header definitions for the use of task_stack_page.
 */

#define current_top_of_stack()							\
({										\
	struct stack_info _info;						\
	BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info));	\
	_info.high;								\
})
#define on_thread_stack()	(on_task_stack(current, current_stack_pointer, NULL))

#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */
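
/*
 * Usage note, illustrative only (userspace code, not part of this header):
 * the PR_{SET,GET}_TAGGED_ADDR_CTRL hooks above back the tagged address ABI
 * prctl(), which a process enables roughly like this:
 *
 *	#include <sys/prctl.h>
 *
 *	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
 *		perror("PR_SET_TAGGED_ADDR_CTRL");
 *
 * After this, syscall arguments may carry non-zero top-byte tags; see
 * Documentation/arm64/tagged-address-abi.rst.
 */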