1f5df2696SJames Morse // SPDX-License-Identifier: GPL-2.0
2f5df2696SJames Morse // Copyright (C) 2017 Arm Ltd.
3f5df2696SJames Morse #define pr_fmt(fmt) "sdei: " fmt
4f5df2696SJames Morse
5e6ea4651SMark Rutland #include <linux/arm-smccc.h>
6f5df2696SJames Morse #include <linux/arm_sdei.h>
7f5df2696SJames Morse #include <linux/hardirq.h>
8f5df2696SJames Morse #include <linux/irqflags.h>
9f5df2696SJames Morse #include <linux/sched/task_stack.h>
10ac20ffbbSSami Tolvanen #include <linux/scs.h>
11f5df2696SJames Morse #include <linux/uaccess.h>
12f5df2696SJames Morse
13f5df2696SJames Morse #include <asm/alternative.h>
14f0cd5ac1SMark Rutland #include <asm/exception.h>
15f5df2696SJames Morse #include <asm/kprobes.h>
1679e9aa59SJames Morse #include <asm/mmu.h>
17f5df2696SJames Morse #include <asm/ptrace.h>
1879e9aa59SJames Morse #include <asm/sections.h>
198a1ccfbcSLaura Abbott #include <asm/stacktrace.h>
20f5df2696SJames Morse #include <asm/sysreg.h>
21f5df2696SJames Morse #include <asm/vmap_stack.h>
22f5df2696SJames Morse
23f5df2696SJames Morse unsigned long sdei_exit_mode;
24f5df2696SJames Morse
/*
 * VMAP'd stacks check for stack overflow on exception using sp as a scratch
 * register, meaning SDEI has to switch to its own stack. We need two stacks as
 * a critical event may interrupt a normal event that has just taken a
 * synchronous exception, and is using sp as scratch register. For a critical
 * event interrupting a normal event, we can't reliably tell if we were on the
 * SDEI stack.
 * For now, we allocate stacks when the driver is probed.
 */
34f5df2696SJames Morse DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
35f5df2696SJames Morse DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
36f5df2696SJames Morse
37f5df2696SJames Morse #ifdef CONFIG_VMAP_STACK
38f5df2696SJames Morse DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
39f5df2696SJames Morse DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
40f5df2696SJames Morse #endif
41f5df2696SJames Morse
42ac20ffbbSSami Tolvanen DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
43ac20ffbbSSami Tolvanen DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
44ac20ffbbSSami Tolvanen
45ac20ffbbSSami Tolvanen #ifdef CONFIG_SHADOW_CALL_STACK
46ac20ffbbSSami Tolvanen DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
47ac20ffbbSSami Tolvanen DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
48ac20ffbbSSami Tolvanen #endif
49ac20ffbbSSami Tolvanen
50*5cd474e5SD Scott Phillips DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event);
51*5cd474e5SD Scott Phillips DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event);
52*5cd474e5SD Scott Phillips
/* Release one CPU's SDEI stack and clear the per-cpu pointer. */
static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *stack = per_cpu(*ptr, cpu);

	if (!stack)
		return;

	per_cpu(*ptr, cpu) = NULL;
	vfree(stack);
}
63f5df2696SJames Morse
free_sdei_stacks(void)64f5df2696SJames Morse static void free_sdei_stacks(void)
65f5df2696SJames Morse {
66f5df2696SJames Morse int cpu;
67f5df2696SJames Morse
68eec3bf68SWill Deacon if (!IS_ENABLED(CONFIG_VMAP_STACK))
69eec3bf68SWill Deacon return;
70eec3bf68SWill Deacon
71f5df2696SJames Morse for_each_possible_cpu(cpu) {
72f5df2696SJames Morse _free_sdei_stack(&sdei_stack_normal_ptr, cpu);
73f5df2696SJames Morse _free_sdei_stack(&sdei_stack_critical_ptr, cpu);
74f5df2696SJames Morse }
75f5df2696SJames Morse }
76f5df2696SJames Morse
/*
 * Allocate one CPU's SDEI stack from its local NUMA node and publish it
 * through the per-cpu pointer. Returns 0 on success, -ENOMEM otherwise.
 */
static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *stack = arch_alloc_vmap_stack(SDEI_STACK_SIZE,
						     cpu_to_node(cpu));

	if (!stack)
		return -ENOMEM;

	per_cpu(*ptr, cpu) = stack;
	return 0;
}
88f5df2696SJames Morse
init_sdei_stacks(void)89f5df2696SJames Morse static int init_sdei_stacks(void)
90f5df2696SJames Morse {
91f5df2696SJames Morse int cpu;
92f5df2696SJames Morse int err = 0;
93f5df2696SJames Morse
94eec3bf68SWill Deacon if (!IS_ENABLED(CONFIG_VMAP_STACK))
95eec3bf68SWill Deacon return 0;
96eec3bf68SWill Deacon
97f5df2696SJames Morse for_each_possible_cpu(cpu) {
98f5df2696SJames Morse err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
99f5df2696SJames Morse if (err)
100f5df2696SJames Morse break;
101f5df2696SJames Morse err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
102f5df2696SJames Morse if (err)
103f5df2696SJames Morse break;
104f5df2696SJames Morse }
105f5df2696SJames Morse
106f5df2696SJames Morse if (err)
107f5df2696SJames Morse free_sdei_stacks();
108f5df2696SJames Morse
109f5df2696SJames Morse return err;
110f5df2696SJames Morse }
111f5df2696SJames Morse
/* Release one CPU's SDEI shadow call stack and clear the per-cpu pointer. */
static void _free_sdei_scs(unsigned long * __percpu *ptr, int cpu)
{
	void *scs = per_cpu(*ptr, cpu);

	if (!scs)
		return;

	per_cpu(*ptr, cpu) = NULL;
	scs_free(scs);
}
122ac20ffbbSSami Tolvanen
free_sdei_scs(void)123ac20ffbbSSami Tolvanen static void free_sdei_scs(void)
124ac20ffbbSSami Tolvanen {
125ac20ffbbSSami Tolvanen int cpu;
126ac20ffbbSSami Tolvanen
127ac20ffbbSSami Tolvanen for_each_possible_cpu(cpu) {
128ac20ffbbSSami Tolvanen _free_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
129ac20ffbbSSami Tolvanen _free_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
130ac20ffbbSSami Tolvanen }
131ac20ffbbSSami Tolvanen }
132ac20ffbbSSami Tolvanen
/*
 * Allocate one CPU's SDEI shadow call stack from its local NUMA node and
 * publish it through the per-cpu pointer. Returns 0 or -ENOMEM.
 */
static int _init_sdei_scs(unsigned long * __percpu *ptr, int cpu)
{
	void *scs = scs_alloc(cpu_to_node(cpu));

	if (!scs)
		return -ENOMEM;

	per_cpu(*ptr, cpu) = scs;
	return 0;
}
144ac20ffbbSSami Tolvanen
init_sdei_scs(void)145ac20ffbbSSami Tolvanen static int init_sdei_scs(void)
146ac20ffbbSSami Tolvanen {
147ac20ffbbSSami Tolvanen int cpu;
148ac20ffbbSSami Tolvanen int err = 0;
149ac20ffbbSSami Tolvanen
1503b619e22SArd Biesheuvel if (!scs_is_enabled())
151eec3bf68SWill Deacon return 0;
152eec3bf68SWill Deacon
153ac20ffbbSSami Tolvanen for_each_possible_cpu(cpu) {
154ac20ffbbSSami Tolvanen err = _init_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
155ac20ffbbSSami Tolvanen if (err)
156ac20ffbbSSami Tolvanen break;
157ac20ffbbSSami Tolvanen err = _init_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
158ac20ffbbSSami Tolvanen if (err)
159ac20ffbbSSami Tolvanen break;
160ac20ffbbSSami Tolvanen }
161ac20ffbbSSami Tolvanen
162ac20ffbbSSami Tolvanen if (err)
163ac20ffbbSSami Tolvanen free_sdei_scs();
164ac20ffbbSSami Tolvanen
165ac20ffbbSSami Tolvanen return err;
166ac20ffbbSSami Tolvanen }
167ac20ffbbSSami Tolvanen
/*
 * Pick the entry point address firmware should branch to when delivering an
 * SDEI event, allocating the per-cpu SDEI stacks (and shadow call stacks)
 * on the way. @conduit selects whether we exit back to firmware via HVC or
 * SMC. Returns 0 when SDEI cannot be supported or allocation fails.
 */
unsigned long sdei_arch_get_entry_point(int conduit)
{
	/*
	 * SDEI works between adjacent exception levels. If we booted at EL1 we
	 * assume a hypervisor is marshalling events. If we booted at EL2 and
	 * dropped to EL1 because we don't support VHE, then we can't support
	 * SDEI.
	 */
	if (is_hyp_nvhe()) {
		pr_err("Not supported on this hardware/boot configuration\n");
		goto out_err;
	}

	if (init_sdei_stacks())
		goto out_err;

	if (init_sdei_scs())
		goto out_err_free_stacks;

	sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	if (arm64_kernel_unmapped_at_el0()) {
		unsigned long offset;

		/*
		 * With KPTI, hand firmware the trampoline alias of the entry
		 * point rather than the direct-mapped handler address.
		 */
		offset = (unsigned long)__sdei_asm_entry_trampoline -
			 (unsigned long)__entry_tramp_text_start;
		return TRAMP_VALIAS + offset;
	} else
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
		return (unsigned long)__sdei_asm_handler;

out_err_free_stacks:
	free_sdei_stacks();
out_err:
	return 0;
}
205f5df2696SJames Morse
/*
 * do_sdei_event() returns one of:
 *  SDEI_EV_HANDLED -  success, return to the interrupted context.
 *  SDEI_EV_FAILED  -  failure, return this error code to firmware.
 *  virtual-address -  success, return to this address.
 */
unsigned long __kprobes do_sdei_event(struct pt_regs *regs,
				      struct sdei_registered_event *arg)
{
	u32 mode;
	int i, err = 0;
	/*
	 * The firmware entry sequence clobbers the first four GPRs; they are
	 * recovered via SDEI_EVENT_CONTEXT calls below.
	 */
	int clobbered_registers = 4;
	u64 elr = read_sysreg(elr_el1);
	u32 kernel_mode = read_sysreg(CurrentEL) | 1;	/* +SPSel */
	unsigned long vbar = read_sysreg(vbar_el1);

	/* The KPTI trampoline entry path clobbers one additional register. */
	if (arm64_kernel_unmapped_at_el0())
		clobbered_registers++;

	/* Retrieve the missing registers values */
	for (i = 0; i < clobbered_registers; i++) {
		/* from within the handler, this call always succeeds */
		sdei_api_event_context(i, &regs->regs[i]);
	}

	err = sdei_event_handler(regs, arg);
	if (err)
		return SDEI_EV_FAILED;

	/* elr_el1 changing under us means an exception was taken meanwhile. */
	if (elr != read_sysreg(elr_el1)) {
		/*
		 * We took a synchronous exception from the SDEI handler.
		 * This could deadlock, and if you interrupt KVM it will
		 * hyp-panic instead.
		 */
		pr_warn("unsafe: exception during handler\n");
	}

	mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);

	/*
	 * If we interrupted the kernel with interrupts masked, we always go
	 * back to wherever we came from.
	 */
	if (mode == kernel_mode && !interrupts_enabled(regs))
		return SDEI_EV_HANDLED;

	/*
	 * Otherwise, we pretend this was an IRQ. This lets user space tasks
	 * receive signals before we return to them, and KVM to invoke its
	 * world switch to do the same.
	 *
	 * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
	 * address'.
	 */
	if (mode == kernel_mode)
		return vbar + 0x280;	/* current EL, SPx: IRQ vector */
	else if (mode & PSR_MODE32_BIT)
		return vbar + 0x680;	/* lower EL, AArch32: IRQ vector */

	return vbar + 0x480;		/* lower EL, AArch64: IRQ vector */
}
268