// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <linux/arm_sdei.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>

#include <asm/alternative.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/vmap_stack.h>

unsigned long sdei_exit_mode;

/*
 * VMAP'd stacks check for stack overflow on exception using sp as a scratch
 * register, meaning SDEI has to switch to its own stack. We need two stacks
 * as a critical event may interrupt a normal event that has just taken a
 * synchronous exception, and is using sp as a scratch register. For a
 * critical event interrupting a normal event, we can't reliably tell if we
 * were on the sdei stack.
 * For now, we allocate stacks when the driver is probed.
 */
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);

#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
#endif

static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = per_cpu(*ptr, cpu);
	if (p) {
		per_cpu(*ptr, cpu) = NULL;
		vfree(p);
	}
}

static void free_sdei_stacks(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
		_free_sdei_stack(&sdei_stack_critical_ptr, cpu);
	}
}

static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
	if (!p)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = p;

	return 0;
}

static int init_sdei_stacks(void)
{
	int cpu;
	int err = 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_stacks();

	return err;
}

static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_SDEI_NORMAL;
	}

	return true;
}

static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_SDEI_CRITICAL;
	}

	return true;
}

bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
{
	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return false;

	if (on_sdei_critical_stack(sp, info))
		return true;

	if (on_sdei_normal_stack(sp, info))
		return true;

	return false;
}
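
/*
 * A minimal sketch (assumed, not part of this file) of how a caller such
 * as the arm64 unwinder might consult _on_sdei_stack(). The wrapper name
 * and the in_nmi() gate are illustrative: the SDEI stacks can only be
 * live while handling an event, so an sp that merely falls within their
 * address range should not be claimed from ordinary context.
 *
 *	static inline bool on_sdei_stack(unsigned long sp,
 *					 struct stack_info *info)
 *	{
 *		if (in_nmi())
 *			return _on_sdei_stack(sp, info);
 *
 *		return false;
 *	}
 */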

unsigned long sdei_arch_get_entry_point(int conduit)
{
	/*
	 * SDEI works between adjacent exception levels. If we booted at EL1 we
	 * assume a hypervisor is marshalling events. If we booted at EL2 and
	 * dropped to EL1 because we don't support VHE, then we can't support
	 * SDEI.
	 */
	if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_err("Not supported on this hardware/boot configuration\n");
		return 0;
	}

	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		if (init_sdei_stacks())
			return 0;
	}

	sdei_exit_mode = (conduit == CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	if (arm64_kernel_unmapped_at_el0()) {
		unsigned long offset;

		offset = (unsigned long)__sdei_asm_entry_trampoline -
			 (unsigned long)__entry_tramp_text_start;
		return TRAMP_VALIAS + offset;
	} else
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
		return (unsigned long)__sdei_asm_handler;
}

/*
 * __sdei_handler() returns one of:
 * SDEI_EV_HANDLED -  success, return to the interrupted context.
 * SDEI_EV_FAILED  -  failure, return this error code to firmware.
 * virtual-address -  success, return to this address.
 */
static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
					     struct sdei_registered_event *arg)
{
	u32 mode;
	int i, err = 0;
	int clobbered_registers = 4;
	u64 elr = read_sysreg(elr_el1);
	u32 kernel_mode = read_sysreg(CurrentEL) | 1;	/* +SPSel */
	unsigned long vbar = read_sysreg(vbar_el1);

	/* The kpti entry trampoline clobbers one extra register */
	if (arm64_kernel_unmapped_at_el0())
		clobbered_registers++;

	/* Retrieve the missing register values */
	for (i = 0; i < clobbered_registers; i++) {
		/* from within the handler, this call always succeeds */
		sdei_api_event_context(i, &regs->regs[i]);
	}

	/*
	 * We didn't take an exception to get here, so set PAN by hand. UAO
	 * will be cleared by sdei_event_handler()'s set_fs(USER_DS) call.
	 */
	__uaccess_enable_hw_pan();

	err = sdei_event_handler(regs, arg);
	if (err)
		return SDEI_EV_FAILED;

	if (elr != read_sysreg(elr_el1)) {
		/*
		 * We took a synchronous exception from the SDEI handler.
		 * This could deadlock, and if you interrupt KVM it will
		 * hyp-panic instead.
		 */
		pr_warn("unsafe: exception during handler\n");
	}

	mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);

	/*
	 * If we interrupted the kernel with interrupts masked, we always go
	 * back to wherever we came from.
	 */
	if (mode == kernel_mode && !interrupts_enabled(regs))
		return SDEI_EV_HANDLED;

	/*
	 * Otherwise, we pretend this was an IRQ. This lets user space tasks
	 * receive signals before we return to them, and lets KVM invoke its
	 * world switch to do the same.
	 *
	 * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
	 * address'.
	 */
	if (mode == kernel_mode)
		return vbar + 0x280;
	else if (mode & PSR_MODE32_BIT)
		return vbar + 0x680;

	return vbar + 0x480;
}
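
/*
 * For reference, the IRQ vector offsets used above come from the AArch64
 * vector table layout (DDI0487B.a Table D1-7); each table entry is 0x80
 * bytes:
 *
 *	vbar + 0x280 :	Current EL with SPx, IRQ  (we interrupted the kernel)
 *	vbar + 0x480 :	Lower EL using AArch64, IRQ
 *	vbar + 0x680 :	Lower EL using AArch32, IRQ
 *
 * Handing one of these addresses back to firmware makes the interrupted
 * context resume as if it had just taken an IRQ from that state.
 */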

asmlinkage __kprobes notrace unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;
	bool do_nmi_exit = false;

	/*
	 * nmi_enter() deals with printk() re-entrance and use of RCU when
	 * RCU believed this CPU was idle. Because critical events can
	 * interrupt normal events, we may already be in_nmi().
	 */
	if (!in_nmi()) {
		nmi_enter();
		do_nmi_exit = true;
	}

	ret = _sdei_handler(regs, arg);

	if (do_nmi_exit)
		nmi_exit();

	return ret;
}
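
/*
 * A hedged usage sketch (illustrative only, not part of this file): a
 * driver registering an event with the SDEI core via the driver API in
 * <linux/arm_sdei.h>. The event number 42 and the callback body are made
 * up. The callback is ultimately invoked from __sdei_handler() above, in
 * NMI-like context, so it must not sleep or take locks shared with
 * interruptible code.
 *
 *	static int my_sdei_cb(u32 event, struct pt_regs *regs, void *arg)
 *	{
 *		pr_crit("sdei event %u fired\n", event);
 *		return 0;
 *	}
 *
 *	static int my_driver_probe(void)
 *	{
 *		int err = sdei_event_register(42, my_sdei_cb, NULL);
 *
 *		if (!err)
 *			err = sdei_event_enable(42);
 *		return err;
 *	}
 */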