xref: /openbmc/linux/arch/arm64/kernel/sdei.c (revision 74be2d3b)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <linux/arm-smccc.h>
#include <linux/arm_sdei.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>

#include <asm/alternative.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/vmap_stack.h>

unsigned long sdei_exit_mode;

/*
 * With VMAP'd stacks, the exception entry code checks for stack overflow
 * using sp as a scratch register, so SDEI has to switch to its own stack.
 * We need two stacks: a critical event may interrupt a normal event that
 * has just taken a synchronous exception and is still using sp as the
 * scratch register, in which case we can't reliably tell whether we were
 * already on the SDEI stack.
 * For now, we allocate the stacks when the driver is probed.
 */
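/*
 * The declarations are unconditional so that IS_ENABLED()-guarded readers
 * such as _on_sdei_stack() compile even without CONFIG_VMAP_STACK; the
 * per-cpu storage itself is only defined when the stacks can be allocated.
 */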
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);

#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
#endif

static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = per_cpu(*ptr, cpu);
	if (p) {
		per_cpu(*ptr, cpu) = NULL;
		vfree(p);
	}
}

static void free_sdei_stacks(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
		_free_sdei_stack(&sdei_stack_critical_ptr, cpu);
	}
}

static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
	if (!p)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = p;

	return 0;
}

static int init_sdei_stacks(void)
{
	int cpu;
	int err = 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_stacks();

	return err;
}

static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	return on_stack(sp, low, high, STACK_TYPE_SDEI_NORMAL, info);
}

static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	return on_stack(sp, low, high, STACK_TYPE_SDEI_CRITICAL, info);
}
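
/*
 * Note that on_stack() treats the range as [low, high): sp must be at or
 * above the stack base and strictly below the top, as the stack grows
 * down from 'high'.
 */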

bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
{
	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return false;

	if (on_sdei_critical_stack(sp, info))
		return true;

	if (on_sdei_normal_stack(sp, info))
		return true;

	return false;
}
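
/*
 * For context, a minimal sketch of how an unwinder consults the check
 * above, loosely modelled on the on_sdei_stack() wrapper in
 * <asm/stacktrace.h>; illustrative, not a verbatim copy. The SDEI stacks
 * are only plausible while handling an NMI-like event:
 *
 *	static bool on_sdei_stack(unsigned long sp, struct stack_info *info)
 *	{
 *		if (in_nmi())
 *			return _on_sdei_stack(sp, info);
 *
 *		return false;
 *	}
 */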

unsigned long sdei_arch_get_entry_point(int conduit)
{
	/*
	 * SDEI works between adjacent exception levels. If we booted at EL1 we
	 * assume a hypervisor is marshalling events. If we booted at EL2 and
	 * dropped to EL1 because we don't support VHE, then we can't support
	 * SDEI.
	 */
	if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_err("Not supported on this hardware/boot configuration\n");
		return 0;
	}

	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		if (init_sdei_stacks())
			return 0;
	}

	sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	if (arm64_kernel_unmapped_at_el0()) {
		unsigned long offset;

		offset = (unsigned long)__sdei_asm_entry_trampoline -
			 (unsigned long)__entry_tramp_text_start;
		return TRAMP_VALIAS + offset;
	} else
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
		return (unsigned long)__sdei_asm_handler;
}
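
/*
 * Worked example for the KPTI case above, with made-up numbers: if
 * __sdei_asm_entry_trampoline sits 0x400 bytes past
 * __entry_tramp_text_start, firmware is handed TRAMP_VALIAS + 0x400,
 * i.e. the alias of the trampoline text that remains mapped while the
 * rest of the kernel is hidden from the EL0 page tables.
 */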

/*
 * __sdei_handler() returns one of:
 *  SDEI_EV_HANDLED -  success, return to the interrupted context.
 *  SDEI_EV_FAILED  -  failure, return this error code to firmware.
 *  virtual-address -  success, return to this address.
 */
static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
					     struct sdei_registered_event *arg)
{
	u32 mode;
	int i, err = 0;
	int clobbered_registers = 4;
	u64 elr = read_sysreg(elr_el1);
	u32 kernel_mode = read_sysreg(CurrentEL) | 1;	/* +SPSel */
	unsigned long vbar = read_sysreg(vbar_el1);

	/* The entry trampoline clobbers an extra register when KPTI is in use */
	if (arm64_kernel_unmapped_at_el0())
		clobbered_registers++;

	/* Retrieve the missing registers' values */
	for (i = 0; i < clobbered_registers; i++) {
		/* from within the handler, this call always succeeds */
		sdei_api_event_context(i, &regs->regs[i]);
	}

	/*
	 * We didn't take an exception to get here, so set PAN. UAO will be
	 * cleared by sdei_event_handler()'s set_fs(USER_DS) call.
	 */
	__uaccess_enable_hw_pan();

	err = sdei_event_handler(regs, arg);
	if (err)
		return SDEI_EV_FAILED;

	if (elr != read_sysreg(elr_el1)) {
		/*
		 * We took a synchronous exception from the SDEI handler.
		 * This could deadlock, and if you interrupt KVM it will
		 * hyp-panic instead.
		 */
		pr_warn("unsafe: exception during handler\n");
	}

	mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);

	/*
	 * If we interrupted the kernel with interrupts masked, we always go
	 * back to wherever we came from.
	 */
	if (mode == kernel_mode && !interrupts_enabled(regs))
		return SDEI_EV_HANDLED;

	/*
	 * Otherwise, we pretend this was an IRQ. This lets user space tasks
	 * receive signals before we return to them, and lets KVM invoke its
	 * world switch to do the same.
	 *
	 * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
	 * address'.
	 */
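	/*
	 * From that table: 0x280 is the IRQ vector for "current EL with SPx",
	 * 0x480 the IRQ vector for "lower EL using AArch64", and 0x680 the
	 * IRQ vector for "lower EL using AArch32".
	 */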
	if (mode == kernel_mode)
		return vbar + 0x280;
	else if (mode & PSR_MODE32_BIT)
		return vbar + 0x680;

	return vbar + 0x480;
}

asmlinkage __kprobes notrace unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	/* Events are NMI-like and may have interrupted irqs-masked code */
	nmi_enter();

	ret = _sdei_handler(regs, arg);

	nmi_exit();

	return ret;
}
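
/*
 * For context, a minimal sketch of the consumer side. The event number (1)
 * and callback are illustrative assumptions, not part of this file; the
 * callback ultimately runs in the NMI-like context set up by
 * __sdei_handler() above:
 *
 *	static int example_sdei_cb(u32 event, struct pt_regs *regs, void *arg)
 *	{
 *		pr_info("SDEI event %u\n", event);
 *		return 0;
 *	}
 *
 *	err = sdei_event_register(1, example_sdei_cb, NULL);
 *	if (!err)
 *		err = sdei_event_enable(1);
 */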
239