/*
 * Based on arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})

#ifdef __KERNEL__

#include <linux/string.h>

#include <asm/fpsimd.h>
#include <asm/hw_breakpoint.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/types.h>

#ifdef __KERNEL__
#define STACK_TOP_MAX		TASK_SIZE_64
#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE	0xffff0000
#define STACK_TOP		(test_thread_flag(TIF_32BIT) ? \
				AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP		STACK_TOP_MAX
#endif /* CONFIG_COMPAT */

extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT	(arm64_dma_phys_limit - 1)
#endif /* __KERNEL__ */

struct debug_info {
	/* Have we suspended stepping by a debugger? */
	int			suspended_step;
	/* Allow breakpoints and watchpoints to be disabled for this thread. */
	int			bps_disabled;
	int			wps_disabled;
	/* Hardware breakpoints pinned to this task. */
	struct perf_event	*hbp_break[ARM_MAX_BRP];
	struct perf_event	*hbp_watch[ARM_MAX_WRP];
};

struct cpu_context {
	unsigned long	x19;
	unsigned long	x20;
	unsigned long	x21;
	unsigned long	x22;
	unsigned long	x23;
	unsigned long	x24;
	unsigned long	x25;
	unsigned long	x26;
	unsigned long	x27;
	unsigned long	x28;
	unsigned long	fp;
	unsigned long	sp;
	unsigned long	pc;
};

struct thread_struct {
	struct cpu_context	cpu_context;	/* cpu context */
	unsigned long		tp_value;	/* TLS register */
#ifdef CONFIG_COMPAT
	unsigned long		tp2_value;
#endif
	struct fpsimd_state	fpsimd_state;
	unsigned long		fault_address;	/* fault info */
	unsigned long		fault_code;	/* ESR_EL1 value */
	struct debug_info	debug;		/* debugging */
};

#ifdef CONFIG_COMPAT
#define task_user_tls(t)						\
({									\
	unsigned long *__tls;						\
	if (is_compat_thread(task_thread_info(t)))			\
		__tls = &(t)->thread.tp2_value;				\
	else								\
		__tls = &(t)->thread.tp_value;				\
	__tls;								\
})
#else
#define task_user_tls(t)	(&(t)->thread.tp_value)
#endif

#define INIT_THREAD  {	}

static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
	memset(regs, 0, sizeof(*regs));
	regs->syscallno = ~0UL;
	regs->pc = pc;
}

static inline void start_thread(struct pt_regs *regs, unsigned long pc,
				unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = PSR_MODE_EL0t;
	regs->sp = sp;
}

#ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
				       unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = COMPAT_PSR_MODE_USR;
	if (pc & 1)
		regs->pstate |= COMPAT_PSR_T_BIT;

#ifdef __AARCH64EB__
	regs->pstate |= COMPAT_PSR_E_BIT;
#endif

	regs->compat_sp = sp;
}
#endif

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

static inline void cpu_relax(void)
{
	asm volatile("yield" ::: "memory");
}

#define cpu_relax_lowlatency()	cpu_relax()

/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
					 struct task_struct *next);

#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	((unsigned long)task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)	user_stack_pointer(task_pt_regs(tsk))

/*
 * Prefetching support
 */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
	asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
	asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#endif

void cpu_enable_pan(void);

#endif /* __ASM_PROCESSOR_H */