xref: /openbmc/linux/arch/arm64/kernel/sdei.c (revision c4a11bf4)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <linux/arm-smccc.h>
#include <linux/arm_sdei.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched/task_stack.h>
#include <linux/scs.h>
#include <linux/uaccess.h>

#include <asm/alternative.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/vmap_stack.h>

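/* Read by the SDEI asm exit path to decide whether to return via HVC or SMC. */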
unsigned long sdei_exit_mode;

/*
 * VMAP'd stacks check for stack overflow on exception using sp as a scratch
 * register, so SDEI has to switch to its own stack. We need two stacks as a
 * critical event may interrupt a normal event that has just taken a
 * synchronous exception, and is using sp as a scratch register. For a
 * critical event interrupting a normal event, we can't reliably tell if we
 * were on the sdei stack.
 * For now, we allocate stacks when the driver is probed.
 */
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);

#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
#endif

DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);

#ifdef CONFIG_SHADOW_CALL_STACK
DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
#endif
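
/*
 * The unconditional DECLARE_PER_CPU() declarations above let the code below
 * reference these symbols even when the matching config option is off; the
 * IS_ENABLED() early returns then make every such reference dead code, so
 * the missing DEFINE_PER_CPU() storage is never linked against. A minimal
 * sketch of the pattern, with illustrative names only:
 *
 *	DECLARE_PER_CPU(unsigned long *, foo_ptr);	// always declared
 *	#ifdef CONFIG_FOO
 *	DEFINE_PER_CPU(unsigned long *, foo_ptr);	// storage only if set
 *	#endif
 *
 *	static void reset_foo(int cpu)
 *	{
 *		if (!IS_ENABLED(CONFIG_FOO))
 *			return;		// reference below is compiled out
 *		per_cpu(foo_ptr, cpu) = NULL;
 *	}
 */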

static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = per_cpu(*ptr, cpu);
	if (p) {
		per_cpu(*ptr, cpu) = NULL;
		vfree(p);
	}
}

static void free_sdei_stacks(void)
{
	int cpu;

	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return;

	for_each_possible_cpu(cpu) {
		_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
		_free_sdei_stack(&sdei_stack_critical_ptr, cpu);
	}
}

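/*
 * Each SDEI stack is a separate vmalloc allocation; vmalloc areas are
 * separated by unmapped guard pages, so overflowing one of these stacks
 * faults instead of silently corrupting a neighbouring allocation (the
 * property VMAP_STACK relies on).
 */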
static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
	if (!p)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = p;

	return 0;
}

static int init_sdei_stacks(void)
{
	int cpu;
	int err = 0;

	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
		if (err)
			break;
	}

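	/* On failure, free what we did allocate; NULL slots are skipped. */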
	if (err)
		free_sdei_stacks();

	return err;
}

static void _free_sdei_scs(unsigned long * __percpu *ptr, int cpu)
{
	void *s;

	s = per_cpu(*ptr, cpu);
	if (s) {
		per_cpu(*ptr, cpu) = NULL;
		scs_free(s);
	}
}

static void free_sdei_scs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		_free_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
		_free_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
	}
}

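/*
 * With CONFIG_SHADOW_CALL_STACK, return addresses are also kept on a
 * separate shadow call stack (reached through x18 on arm64), so SDEI
 * events need their own per-cpu shadow stacks for the same nesting
 * reasons as the regular stacks above.
 */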
static int _init_sdei_scs(unsigned long * __percpu *ptr, int cpu)
{
	void *s;

	s = scs_alloc(cpu_to_node(cpu));
	if (!s)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = s;

	return 0;
}

static int init_sdei_scs(void)
{
	int cpu;
	int err = 0;

	if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
		return 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_scs();

	return err;
}

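/*
 * The two helpers below reduce to an interval check. A minimal sketch of
 * what on_stack() does (the real helper also records the range in @info and
 * rejects sp + size wrapping past zero):
 *
 *	if (!low || sp < low || sp + size > high)
 *		return false;	// not on this stack
 *	return true;
 *
 * where [low, high) spans the SDEI_STACK_SIZE bytes of the stack.
 */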
static bool on_sdei_normal_stack(unsigned long sp, unsigned long size,
				 struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info);
}

static bool on_sdei_critical_stack(unsigned long sp, unsigned long size,
				   struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info);
}

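/*
 * Called by the unwinder (via on_accessible_stack()) to decide whether @sp
 * lies on one of this CPU's SDEI stacks. The stacks are separate vmalloc
 * allocations, so at most one of the checks below can match.
 */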
bool _on_sdei_stack(unsigned long sp, unsigned long size, struct stack_info *info)
{
	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return false;

	if (on_sdei_critical_stack(sp, size, info))
		return true;

	if (on_sdei_normal_stack(sp, size, info))
		return true;

	return false;
}

unsigned long sdei_arch_get_entry_point(int conduit)
{
	/*
	 * SDEI works between adjacent exception levels. If we booted at EL1 we
	 * assume a hypervisor is marshalling events. If we booted at EL2 and
	 * dropped to EL1 because we don't support VHE, then we can't support
	 * SDEI.
	 */
	if (is_hyp_nvhe()) {
		pr_err("Not supported on this hardware/boot configuration\n");
		goto out_err;
	}

	if (init_sdei_stacks())
		goto out_err;

	if (init_sdei_scs())
		goto out_err_free_stacks;

	sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC :
							  SDEI_EXIT_SMC;

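	/*
	 * With KPTI, an event can be taken while the kernel is unmapped
	 * (e.g. while userspace was running), so the entry point handed to
	 * firmware must live in the always-mapped trampoline text, reached
	 * through its TRAMP_VALIAS alias. Worked illustration, with made-up
	 * addresses: if __entry_tramp_text_start were 0xffff800010a90000 and
	 * __sdei_asm_entry_trampoline were 0xffff800010a90400, firmware
	 * would be handed TRAMP_VALIAS + 0x400.
	 */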
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	if (arm64_kernel_unmapped_at_el0()) {
		unsigned long offset;

		offset = (unsigned long)__sdei_asm_entry_trampoline -
			 (unsigned long)__entry_tramp_text_start;
		return TRAMP_VALIAS + offset;
	} else
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
		return (unsigned long)__sdei_asm_handler;

out_err_free_stacks:
	free_sdei_stacks();
out_err:
	return 0;
}

/*
 * do_sdei_event() returns one of:
 *  SDEI_EV_HANDLED -  success, return to the interrupted context.
 *  SDEI_EV_FAILED  -  failure, return this error code to firmware.
 *  virtual-address -  success, return to this address.
 */
unsigned long __kprobes do_sdei_event(struct pt_regs *regs,
				      struct sdei_registered_event *arg)
{
	u32 mode;
	int i, err = 0;
	int clobbered_registers = 4;
	u64 elr = read_sysreg(elr_el1);
	u32 kernel_mode = read_sysreg(CurrentEL) | 1;	/* +SPSel */
	unsigned long vbar = read_sysreg(vbar_el1);

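	/*
	 * Firmware hands over the event parameters in x0-x3, and the KPTI
	 * entry trampoline additionally uses x4 as scratch, so the
	 * interrupted context's values of those registers must be read back
	 * from firmware below.
	 */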
	if (arm64_kernel_unmapped_at_el0())
		clobbered_registers++;

	/* Retrieve the missing register values */
	for (i = 0; i < clobbered_registers; i++) {
		/* from within the handler, this call always succeeds */
		sdei_api_event_context(i, &regs->regs[i]);
	}

	err = sdei_event_handler(regs, arg);
	if (err)
		return SDEI_EV_FAILED;

	if (elr != read_sysreg(elr_el1)) {
		/*
		 * We took a synchronous exception from the SDEI handler.
		 * This could deadlock, and if you interrupt KVM it will
		 * hyp-panic instead.
		 */
		pr_warn("unsafe: exception during handler\n");
	}

	mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);

	/*
	 * If we interrupted the kernel with interrupts masked, we always go
	 * back to wherever we came from.
	 */
	if (mode == kernel_mode && !interrupts_enabled(regs))
		return SDEI_EV_HANDLED;

	/*
	 * Otherwise, we pretend this was an IRQ. This lets user space tasks
	 * receive signals before we return to them, and lets KVM invoke its
	 * world switch to do the same.
	 *
	 * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
	 * address'.
	 */
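	/*
	 * From that table (offsets relative to VBAR_EL1): 0x280 is the IRQ
	 * vector for "current EL with SPx", 0x480 the IRQ vector for "lower
	 * EL using AArch64", and 0x680 the IRQ vector for "lower EL using
	 * AArch32".
	 */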
	if (mode == kernel_mode)
		return vbar + 0x280;
	else if (mode & PSR_MODE32_BIT)
		return vbar + 0x680;

	return vbar + 0x480;
}