// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/irq_regs.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		ct_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}

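/*
 * On top of the generic entry handling, check for any asynchronous MTE tag
 * check faults pending from before this exception, and clear PSTATE.TCO
 * (set by the hardware on exception entry) so that kernel accesses are
 * tag-checked where MTE is in use.
 */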
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	__enter_from_kernel_mode(regs);
	mte_check_tfsr_entry();
	mte_disable_tco_entry(current);
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			ct_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			ct_irq_exit();
	}
}

static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	mte_check_tfsr_exit();
	__exit_to_kernel_mode(regs);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
	mte_disable_tco_entry(current);
}

static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode();
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

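/*
 * All DAIF exceptions are masked before the final read of the thread flags
 * so that no new work can be queued between checking the flags and
 * returning to userspace.
 */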
static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long flags;

	local_daif_mask();

	flags = read_thread_flags();
	if (unlikely(flags & _TIF_WORK_MASK))
		do_notify_resume(regs, flags);

	lockdep_sys_exit();
}

static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode_prepare(regs);
	mte_check_tfsr_exit();
	__exit_to_user_mode();
}

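/*
 * Called from the ret_to_user path in entry.S, immediately before the final
 * register restore and ERET to userspace.
 */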
asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode(regs);
}

/*
 * Handle IRQ/context state management when entering an NMI from user/kernel
 * mode. Before this function is called it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	ct_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

/*
 * Handle IRQ/context state management when exiting an NMI from user/kernel
 * mode. After this function returns it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	ct_nmi_enter();

	trace_hardirqs_off_finish();
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

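/*
 * With CONFIG_PREEMPT_DYNAMIC, whether we preempt on IRQ exit is selected
 * at boot time via a static key; otherwise it follows CONFIG_PREEMPTION.
 */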
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
#define need_irq_preemption() \
	(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
#else
#define need_irq_preemption()	(IS_ENABLED(CONFIG_PREEMPTION))
#endif

static void __sched arm64_preempt_schedule_irq(void)
{
	if (!need_irq_preemption())
		return;

	/*
	 * Note: thread_info::preempt_count includes both thread_info::count
	 * and thread_info::need_resched, and is not equivalent to
	 * preempt_count().
	 */
	if (READ_ONCE(current_thread_info()->preempt_count) != 0)
		return;

	/*
	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
	 * DAIF we must have handled an NMI, so skip preemption.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return;

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}

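/*
 * Run the handler on the per-CPU IRQ stack when still on a task stack;
 * exceptions taken while already off the task stack are handled in place.
 */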
static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);

	set_irq_regs(old_regs);
}

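/* The root IRQ/FIQ handlers, installed via set_handle_irq()/set_handle_fiq(). */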
extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned long esr)
{
	arm64_enter_nmi(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	panic("Unhandled exception");
}

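/*
 * Generate a panicking stub handler for an exception vector which should
 * never be taken (e.g. the EL1t vectors, or the 32-bit EL0 vectors when
 * CONFIG_COMPAT is disabled).
 */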
#define UNHANDLED(el, regsize, vector)							\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
{											\
	const char *desc = #regsize "-bit " #el " " #vector;				\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));				\
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

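/*
 * On affected Cortex-A76 CPUs, erratum 1463225 means that software stepping
 * an SVC can prevent interrupt recognition. Work around this by taking a
 * dummy single-step exception from kernel context (by setting
 * MDSCR_EL1.SS/KDE and unmasking debug exceptions) before handling the
 * syscall.
 */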
static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u32 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
	write_sysreg(val, mdscr_el1);
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static __always_inline bool
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

/*
 * As per the ABI, exit SME streaming mode and clear the SVE state not
 * shared with FPSIMD on syscall entry.
 */
static inline void fp_user_discard(void)
{
	/*
	 * If SME is active then exit streaming mode.  If ZA is active
	 * then flush the SVE registers but leave userspace access to
	 * both SVE and SME enabled, otherwise disable SME for the
	 * task and fall through to disabling SVE too.  This means
	 * that after a syscall we never have any streaming mode
	 * register state to track; if this changes, the KVM code will
	 * need updating.
	 */
	if (system_supports_sme())
		sme_smstop_sm();

	if (!system_supports_sve())
		return;

	if (test_thread_flag(TIF_SVE)) {
		unsigned int sve_vq_minus_one;

		sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
		sve_flush_live(true, sve_vq_minus_one);
	}
}

UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_undef(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_bti(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs))
		do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_fpac(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el1_bti(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		__panic_unhandled(regs, "64-bit el1h sync", esr);
	}
}

static __always_inline void __el1_pnmi(struct pt_regs *regs,
				       void (*handler)(struct pt_regs *))
{
	arm64_enter_nmi(regs);
	do_interrupt_handler(regs, handler);
	arm64_exit_nmi(regs);
}

static __always_inline void __el1_irq(struct pt_regs *regs,
				      void (*handler)(struct pt_regs *))
{
	enter_from_kernel_mode(regs);

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	arm64_preempt_schedule_irq();

	exit_to_kernel_mode(regs);
}
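
/*
 * Unmask debug and SError exceptions while keeping IRQ/FIQ masked. With
 * pseudo-NMI support, an interrupt taken while interrupts were logically
 * masked must be an NMI, and is handled as such.
 */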
static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		__el1_pnmi(regs, handler);
	else
		__el1_irq(regs, handler);
}

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sme_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_sys(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_undef(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_bti(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_mops(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	fp_user_discard();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_fpac(regs, esr);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_SME:
		el0_sme_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_MOPS:
		el0_mops(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	enter_from_user_mode(regs);

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

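	/*
	 * A user PC is expected to be a TTBR0 address; bit 55 being set
	 * means the interrupted context was executing from a TTBR1 (kernel)
	 * address, so apply branch predictor hardening, as for instruction
	 * aborts from kernel addresses.
	 */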
	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	exit_to_user_mode(regs);
}

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_cp15(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc_compat(regs);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_VMAP_STACK
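/*
 * Called from the entry code when the stack pointer was found to be bogus
 * (e.g. after a kernel stack overflow); runs on the per-CPU overflow stack
 * and never returns.
 */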
asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_nmi(regs);
	panic_bad_stack(regs, esr, far);
}
#endif /* CONFIG_VMAP_STACK */

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	/*
	 * We didn't take an exception to get here, so the HW hasn't
	 * set/cleared bits in PSTATE that we may rely on.
	 *
	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
	 * whether PSTATE bits are inherited unchanged or generated from
	 * scratch, and the TF-A implementation always clears PAN and always
	 * clears UAO. There are no other known implementations.
	 *
	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
	 * PSTATE is modified upon architectural exceptions, and so PAN is
	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
	 * cleared.
	 *
	 * We must explicitly reset PAN to the expected state, including
	 * clearing it when the host isn't using it, in case a VM had it set.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);

	arm64_enter_nmi(regs);
	ret = do_sdei_event(regs, arg);
	arm64_exit_nmi(regs);

	return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */