xref: /openbmc/linux/arch/arm64/kernel/entry-common.c (revision 405db98b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Exception handling code
4  *
5  * Copyright (C) 2019 ARM Ltd.
6  */
7 
8 #include <linux/context_tracking.h>
9 #include <linux/linkage.h>
10 #include <linux/lockdep.h>
11 #include <linux/ptrace.h>
12 #include <linux/sched.h>
13 #include <linux/sched/debug.h>
14 #include <linux/thread_info.h>
15 
16 #include <asm/cpufeature.h>
17 #include <asm/daifflags.h>
18 #include <asm/esr.h>
19 #include <asm/exception.h>
20 #include <asm/irq_regs.h>
21 #include <asm/kprobes.h>
22 #include <asm/mmu.h>
23 #include <asm/processor.h>
24 #include <asm/sdei.h>
25 #include <asm/stacktrace.h>
26 #include <asm/sysreg.h>
27 #include <asm/system_misc.h>
28 
29 /*
30  * Handle IRQ/context state management when entering from kernel mode.
31  * Before this function is called it is not safe to call regular kernel code,
32  * instrumentable code, or any code which may trigger an exception.
33  *
34  * This is intended to match the logic in irqentry_enter(), handling the kernel
35  * mode transitions only.
36  */
37 static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
38 {
39 	regs->exit_rcu = false;
40 
41 	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
42 		lockdep_hardirqs_off(CALLER_ADDR0);
43 		rcu_irq_enter();
44 		trace_hardirqs_off_finish();
45 
46 		regs->exit_rcu = true;
47 		return;
48 	}
49 
50 	lockdep_hardirqs_off(CALLER_ADDR0);
51 	rcu_irq_enter_check_tick();
52 	trace_hardirqs_off_finish();
53 }
54 
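/*
 * noinstr wrapper: on top of the generic state management above, check
 * TFSR_EL1 for asynchronous MTE tag check faults accumulated in kernel mode.
 */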
55 static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
56 {
57 	__enter_from_kernel_mode(regs);
58 	mte_check_tfsr_entry();
59 }
60 
61 /*
62  * Handle IRQ/context state management when exiting to kernel mode.
63  * After this function returns it is not safe to call regular kernel code,
64  * instrumentable code, or any code which may trigger an exception.
65  *
66  * This is intended to match the logic in irqentry_exit(), handling the kernel
67  * mode transitions only, and with preemption handled elsewhere.
68  */
69 static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
70 {
71 	lockdep_assert_irqs_disabled();
72 
73 	if (interrupts_enabled(regs)) {
74 		if (regs->exit_rcu) {
75 			trace_hardirqs_on_prepare();
76 			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
77 			rcu_irq_exit();
78 			lockdep_hardirqs_on(CALLER_ADDR0);
79 			return;
80 		}
81 
82 		trace_hardirqs_on();
83 	} else {
84 		if (regs->exit_rcu)
85 			rcu_irq_exit();
86 	}
87 }
88 
89 static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
90 {
91 	mte_check_tfsr_exit();
92 	__exit_to_kernel_mode(regs);
93 }
94 
95 /*
96  * Handle IRQ/context state management when entering from user mode.
97  * Before this function is called it is not safe to call regular kernel code,
98  * instrumentable code, or any code which may trigger an exception.
99  */
100 static __always_inline void __enter_from_user_mode(void)
101 {
102 	lockdep_hardirqs_off(CALLER_ADDR0);
103 	CT_WARN_ON(ct_state() != CONTEXT_USER);
104 	user_exit_irqoff();
105 	trace_hardirqs_off_finish();
106 }
107 
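/* Note: regs is currently unused here; it is passed so callers mirror exit_to_user_mode(). */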
108 static __always_inline void enter_from_user_mode(struct pt_regs *regs)
109 {
110 	__enter_from_user_mode();
111 }
112 
113 /*
114  * Handle IRQ/context state management when exiting to user mode.
115  * After this function returns it is not safe to call regular kernel code,
116  * instrumentable code, or any code which may trigger an exception.
117  */
118 static __always_inline void __exit_to_user_mode(void)
119 {
120 	trace_hardirqs_on_prepare();
121 	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
122 	user_enter_irqoff();
123 	lockdep_hardirqs_on(CALLER_ADDR0);
124 }
125 
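/*
 * Mask all DAIF exceptions, then handle any pending work (signal delivery,
 * rescheduling, etc.) flagged in thread_info before the final return to
 * userspace.
 */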
126 static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
127 {
128 	unsigned long flags;
129 
130 	local_daif_mask();
131 
132 	flags = READ_ONCE(current_thread_info()->flags);
133 	if (unlikely(flags & _TIF_WORK_MASK))
134 		do_notify_resume(regs, flags);
135 }
136 
137 static __always_inline void exit_to_user_mode(struct pt_regs *regs)
138 {
139 	prepare_exit_to_user_mode(regs);
140 	mte_check_tfsr_exit();
141 	__exit_to_user_mode();
142 }
143 
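/* Called from the entry assembly immediately before the final return to EL0. */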
144 asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
145 {
146 	exit_to_user_mode(regs);
147 }
148 
149 /*
150  * Handle IRQ/context state management when entering an NMI from user/kernel
151  * mode. Before this function is called it is not safe to call regular kernel
152  * code, instrumentable code, or any code which may trigger an exception.
153  */
154 static void noinstr arm64_enter_nmi(struct pt_regs *regs)
155 {
156 	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
157 
158 	__nmi_enter();
159 	lockdep_hardirqs_off(CALLER_ADDR0);
160 	lockdep_hardirq_enter();
161 	rcu_nmi_enter();
162 
163 	trace_hardirqs_off_finish();
164 	ftrace_nmi_enter();
165 }
166 
167 /*
168  * Handle IRQ/context state management when exiting an NMI from user/kernel
169  * mode. After this function returns it is not safe to call regular kernel
170  * code, instrumentable code, or any code which may trigger an exception.
171  */
172 static void noinstr arm64_exit_nmi(struct pt_regs *regs)
173 {
174 	bool restore = regs->lockdep_hardirqs;
175 
176 	ftrace_nmi_exit();
177 	if (restore) {
178 		trace_hardirqs_on_prepare();
179 		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
180 	}
181 
182 	rcu_nmi_exit();
183 	lockdep_hardirq_exit();
184 	if (restore)
185 		lockdep_hardirqs_on(CALLER_ADDR0);
186 	__nmi_exit();
187 }
188 
189 /*
190  * Handle IRQ/context state management when entering a debug exception from
191  * kernel mode. Before this function is called it is not safe to call regular
192  * kernel code, instrumentable code, or any code which may trigger an exception.
193  */
194 static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
195 {
196 	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
197 
198 	lockdep_hardirqs_off(CALLER_ADDR0);
199 	rcu_nmi_enter();
200 
201 	trace_hardirqs_off_finish();
202 }
203 
204 /*
205  * Handle IRQ/context state management when exiting a debug exception from
206  * kernel mode. After this function returns it is not safe to call regular
207  * kernel code, instrumentable code, or any code which may trigger an exception.
208  */
209 static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
210 {
211 	bool restore = regs->lockdep_hardirqs;
212 
213 	if (restore) {
214 		trace_hardirqs_on_prepare();
215 		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
216 	}
217 
218 	rcu_nmi_exit();
219 	if (restore)
220 		lockdep_hardirqs_on(CALLER_ADDR0);
221 }
222 
223 static void __sched arm64_preempt_schedule_irq(void)
224 {
225 	lockdep_assert_irqs_disabled();
226 
227 	/*
228 	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
229 	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
230 	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
231 	 * DAIF we must have handled an NMI, so skip preemption.
232 	 */
233 	if (system_uses_irq_prio_masking() && read_sysreg(daif))
234 		return;
235 
236 	/*
237 	 * Preempting a task from an IRQ means we leave copies of PSTATE
238 	 * on the stack. cpufeature's enable calls may modify PSTATE, but
239 	 * resuming one of these preempted tasks would undo those changes.
240 	 *
241 	 * Only allow a task to be preempted once cpufeatures have been
242 	 * enabled.
243 	 */
244 	if (system_capabilities_finalized())
245 		preempt_schedule_irq();
246 }
247 
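/*
 * Run the IRQ/FIQ handler on the per-CPU IRQ stack if we were interrupted on
 * the task stack; otherwise (e.g. we are already on the IRQ or overflow
 * stack) invoke it directly on the current stack.
 */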
248 static void do_interrupt_handler(struct pt_regs *regs,
249 				 void (*handler)(struct pt_regs *))
250 {
251 	struct pt_regs *old_regs = set_irq_regs(regs);
252 
253 	if (on_thread_stack())
254 		call_on_irq_stack(regs, handler);
255 	else
256 		handler(regs);
257 
258 	set_irq_regs(old_regs);
259 }
260 
261 extern void (*handle_arch_irq)(struct pt_regs *);
262 extern void (*handle_arch_fiq)(struct pt_regs *);
263 
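/*
 * Treat unhandled vectors as NMI-like so the state management is valid
 * regardless of the context we interrupted, then report and panic.
 */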
264 static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
265 				      unsigned int esr)
266 {
267 	arm64_enter_nmi(regs);
268 
269 	console_verbose();
270 
271 	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n",
272 		vector, smp_processor_id(), esr,
273 		esr_get_class_string(esr));
274 
275 	__show_regs(regs);
276 	panic("Unhandled exception");
277 }
278 
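/* Generate a panicking stub handler for vectors we never expect to take. */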
279 #define UNHANDLED(el, regsize, vector)							\
280 asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
281 {											\
282 	const char *desc = #regsize "-bit " #el " " #vector;				\
283 	__panic_unhandled(regs, desc, read_sysreg(esr_el1));				\
284 }
285 
286 #ifdef CONFIG_ARM64_ERRATUM_1463225
287 static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
288 
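/*
 * Erratum 1463225 workaround: when single-stepping an SVC, deliberately take
 * a single-step exception from kernel mode (with MDSCR_EL1.SS/KDE set and
 * PSTATE.D cleared) before handling the syscall, so that interrupts are
 * recognised again on the syscall path.
 */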
289 static void cortex_a76_erratum_1463225_svc_handler(void)
290 {
291 	u32 reg, val;
292 
293 	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
294 		return;
295 
296 	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
297 		return;
298 
299 	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
300 	reg = read_sysreg(mdscr_el1);
301 	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
302 	write_sysreg(val, mdscr_el1);
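	/* Unmask debug exceptions (PSTATE.D) so the step exception is taken here */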
303 	asm volatile("msr daifclr, #8");
304 	isb();
305 
306 	/* We will have taken a single-step exception by this point */
307 
308 	write_sysreg(reg, mdscr_el1);
309 	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
310 }
311 
312 static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
313 {
314 	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
315 		return false;
316 
317 	/*
318 	 * We've taken a dummy step exception from the kernel to ensure
319 	 * that interrupts are re-enabled on the syscall path. Return back
320 	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
321 	 * masked so that we can safely restore the mdscr and get on with
322 	 * handling the syscall.
323 	 */
324 	regs->pstate |= PSR_D_BIT;
325 	return true;
326 }
327 #else /* CONFIG_ARM64_ERRATUM_1463225 */
328 static void cortex_a76_erratum_1463225_svc_handler(void) { }
329 static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
330 {
331 	return false;
332 }
333 #endif /* CONFIG_ARM64_ERRATUM_1463225 */
334 
335 UNHANDLED(el1t, 64, sync)
336 UNHANDLED(el1t, 64, irq)
337 UNHANDLED(el1t, 64, fiq)
338 UNHANDLED(el1t, 64, error)
339 
340 static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
341 {
342 	unsigned long far = read_sysreg(far_el1);
343 
344 	enter_from_kernel_mode(regs);
345 	local_daif_inherit(regs);
346 	do_mem_abort(far, esr, regs);
347 	local_daif_mask();
348 	exit_to_kernel_mode(regs);
349 }
350 
351 static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
352 {
353 	unsigned long far = read_sysreg(far_el1);
354 
355 	enter_from_kernel_mode(regs);
356 	local_daif_inherit(regs);
357 	do_sp_pc_abort(far, esr, regs);
358 	local_daif_mask();
359 	exit_to_kernel_mode(regs);
360 }
361 
362 static void noinstr el1_undef(struct pt_regs *regs)
363 {
364 	enter_from_kernel_mode(regs);
365 	local_daif_inherit(regs);
366 	do_undefinstr(regs);
367 	local_daif_mask();
368 	exit_to_kernel_mode(regs);
369 }
370 
371 static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
372 {
373 	unsigned long far = read_sysreg(far_el1);
374 
375 	arm64_enter_el1_dbg(regs);
376 	if (!cortex_a76_erratum_1463225_debug_handler(regs))
377 		do_debug_exception(far, esr, regs);
378 	arm64_exit_el1_dbg(regs);
379 }
380 
381 static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
382 {
383 	enter_from_kernel_mode(regs);
384 	local_daif_inherit(regs);
385 	do_ptrauth_fault(regs, esr);
386 	local_daif_mask();
387 	exit_to_kernel_mode(regs);
388 }
389 
390 asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
391 {
392 	unsigned long esr = read_sysreg(esr_el1);
393 
394 	switch (ESR_ELx_EC(esr)) {
395 	case ESR_ELx_EC_DABT_CUR:
396 	case ESR_ELx_EC_IABT_CUR:
397 		el1_abort(regs, esr);
398 		break;
399 	/*
400 	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
401 	 * recursive exception when trying to push the initial pt_regs.
402 	 */
403 	case ESR_ELx_EC_PC_ALIGN:
404 		el1_pc(regs, esr);
405 		break;
406 	case ESR_ELx_EC_SYS64:
407 	case ESR_ELx_EC_UNKNOWN:
408 		el1_undef(regs);
409 		break;
410 	case ESR_ELx_EC_BREAKPT_CUR:
411 	case ESR_ELx_EC_SOFTSTP_CUR:
412 	case ESR_ELx_EC_WATCHPT_CUR:
413 	case ESR_ELx_EC_BRK64:
414 		el1_dbg(regs, esr);
415 		break;
416 	case ESR_ELx_EC_FPAC:
417 		el1_fpac(regs, esr);
418 		break;
419 	default:
420 		__panic_unhandled(regs, "64-bit el1h sync", esr);
421 	}
422 }
423 
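/*
 * An interrupt taken while the interrupted context had IRQs masked (PSTATE.I
 * or the PMR) can only be a pseudo-NMI; account for it with NMI entry/exit.
 */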
424 static __always_inline void __el1_pnmi(struct pt_regs *regs,
425 				       void (*handler)(struct pt_regs *))
426 {
427 	arm64_enter_nmi(regs);
428 	do_interrupt_handler(regs, handler);
429 	arm64_exit_nmi(regs);
430 }
431 
432 static __always_inline void __el1_irq(struct pt_regs *regs,
433 				      void (*handler)(struct pt_regs *))
434 {
435 	enter_from_kernel_mode(regs);
436 
437 	irq_enter_rcu();
438 	do_interrupt_handler(regs, handler);
439 	irq_exit_rcu();
440 
441 	/*
442 	 * Note: thread_info::preempt_count includes both thread_info::count
443 	 * and thread_info::need_resched, and is not equivalent to
444 	 * preempt_count().
445 	 */
446 	if (IS_ENABLED(CONFIG_PREEMPTION) &&
447 	    READ_ONCE(current_thread_info()->preempt_count) == 0)
448 		arm64_preempt_schedule_irq();
449 
450 	exit_to_kernel_mode(regs);
451 }
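
/*
 * EL1 interrupt dispatch: unmask debug and SError while keeping IRQ/FIQ
 * masked, then route down the pNMI path or the normal IRQ path depending on
 * whether the interrupted context had interrupts enabled.
 */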
452 static void noinstr el1_interrupt(struct pt_regs *regs,
453 				  void (*handler)(struct pt_regs *))
454 {
455 	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
456 
457 	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
458 		__el1_pnmi(regs, handler);
459 	else
460 		__el1_irq(regs, handler);
461 }
462 
463 asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
464 {
465 	el1_interrupt(regs, handle_arch_irq);
466 }
467 
468 asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
469 {
470 	el1_interrupt(regs, handle_arch_fiq);
471 }
472 
473 asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
474 {
475 	unsigned long esr = read_sysreg(esr_el1);
476 
477 	local_daif_restore(DAIF_ERRCTX);
478 	arm64_enter_nmi(regs);
479 	do_serror(regs, esr);
480 	arm64_exit_nmi(regs);
481 }
482 
483 static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
484 {
485 	unsigned long far = read_sysreg(far_el1);
486 
487 	enter_from_user_mode(regs);
488 	local_daif_restore(DAIF_PROCCTX);
489 	do_mem_abort(far, esr, regs);
490 	exit_to_user_mode(regs);
491 }
492 
493 static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
494 {
495 	unsigned long far = read_sysreg(far_el1);
496 
497 	/*
498 	 * We've taken an instruction abort from userspace and not yet
499 	 * re-enabled IRQs. If the address is a kernel address, apply
500 	 * BP hardening prior to enabling IRQs and pre-emption.
501 	 */
502 	if (!is_ttbr0_addr(far))
503 		arm64_apply_bp_hardening();
504 
505 	enter_from_user_mode(regs);
506 	local_daif_restore(DAIF_PROCCTX);
507 	do_mem_abort(far, esr, regs);
508 	exit_to_user_mode(regs);
509 }
510 
511 static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
512 {
513 	enter_from_user_mode(regs);
514 	local_daif_restore(DAIF_PROCCTX);
515 	do_fpsimd_acc(esr, regs);
516 	exit_to_user_mode(regs);
517 }
518 
519 static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
520 {
521 	enter_from_user_mode(regs);
522 	local_daif_restore(DAIF_PROCCTX);
523 	do_sve_acc(esr, regs);
524 	exit_to_user_mode(regs);
525 }
526 
527 static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
528 {
529 	enter_from_user_mode(regs);
530 	local_daif_restore(DAIF_PROCCTX);
531 	do_fpsimd_exc(esr, regs);
532 	exit_to_user_mode(regs);
533 }
534 
535 static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
536 {
537 	enter_from_user_mode(regs);
538 	local_daif_restore(DAIF_PROCCTX);
539 	do_sysinstr(esr, regs);
540 	exit_to_user_mode(regs);
541 }
542 
543 static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
544 {
545 	unsigned long far = read_sysreg(far_el1);
546 
547 	if (!is_ttbr0_addr(instruction_pointer(regs)))
548 		arm64_apply_bp_hardening();
549 
550 	enter_from_user_mode(regs);
551 	local_daif_restore(DAIF_PROCCTX);
552 	do_sp_pc_abort(far, esr, regs);
553 	exit_to_user_mode(regs);
554 }
555 
556 static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
557 {
558 	enter_from_user_mode(regs);
559 	local_daif_restore(DAIF_PROCCTX);
560 	do_sp_pc_abort(regs->sp, esr, regs);
561 	exit_to_user_mode(regs);
562 }
563 
564 static void noinstr el0_undef(struct pt_regs *regs)
565 {
566 	enter_from_user_mode(regs);
567 	local_daif_restore(DAIF_PROCCTX);
568 	do_undefinstr(regs);
569 	exit_to_user_mode(regs);
570 }
571 
572 static void noinstr el0_bti(struct pt_regs *regs)
573 {
574 	enter_from_user_mode(regs);
575 	local_daif_restore(DAIF_PROCCTX);
576 	do_bti(regs);
577 	exit_to_user_mode(regs);
578 }
579 
580 static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
581 {
582 	enter_from_user_mode(regs);
583 	local_daif_restore(DAIF_PROCCTX);
584 	bad_el0_sync(regs, 0, esr);
585 	exit_to_user_mode(regs);
586 }
587 
588 static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
589 {
590 	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
591 	unsigned long far = read_sysreg(far_el1);
592 
593 	enter_from_user_mode(regs);
594 	do_debug_exception(far, esr, regs);
595 	local_daif_restore(DAIF_PROCCTX);
596 	exit_to_user_mode(regs);
597 }
598 
599 static void noinstr el0_svc(struct pt_regs *regs)
600 {
601 	enter_from_user_mode(regs);
602 	cortex_a76_erratum_1463225_svc_handler();
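	/* Unlike the other EL0 handlers, DAIF is restored later, on the syscall path */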
603 	do_el0_svc(regs);
604 	exit_to_user_mode(regs);
605 }
606 
607 static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
608 {
609 	enter_from_user_mode(regs);
610 	local_daif_restore(DAIF_PROCCTX);
611 	do_ptrauth_fault(regs, esr);
612 	exit_to_user_mode(regs);
613 }
614 
615 asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
616 {
617 	unsigned long esr = read_sysreg(esr_el1);
618 
619 	switch (ESR_ELx_EC(esr)) {
620 	case ESR_ELx_EC_SVC64:
621 		el0_svc(regs);
622 		break;
623 	case ESR_ELx_EC_DABT_LOW:
624 		el0_da(regs, esr);
625 		break;
626 	case ESR_ELx_EC_IABT_LOW:
627 		el0_ia(regs, esr);
628 		break;
629 	case ESR_ELx_EC_FP_ASIMD:
630 		el0_fpsimd_acc(regs, esr);
631 		break;
632 	case ESR_ELx_EC_SVE:
633 		el0_sve_acc(regs, esr);
634 		break;
635 	case ESR_ELx_EC_FP_EXC64:
636 		el0_fpsimd_exc(regs, esr);
637 		break;
638 	case ESR_ELx_EC_SYS64:
639 	case ESR_ELx_EC_WFx:
640 		el0_sys(regs, esr);
641 		break;
642 	case ESR_ELx_EC_SP_ALIGN:
643 		el0_sp(regs, esr);
644 		break;
645 	case ESR_ELx_EC_PC_ALIGN:
646 		el0_pc(regs, esr);
647 		break;
648 	case ESR_ELx_EC_UNKNOWN:
649 		el0_undef(regs);
650 		break;
651 	case ESR_ELx_EC_BTI:
652 		el0_bti(regs);
653 		break;
654 	case ESR_ELx_EC_BREAKPT_LOW:
655 	case ESR_ELx_EC_SOFTSTP_LOW:
656 	case ESR_ELx_EC_WATCHPT_LOW:
657 	case ESR_ELx_EC_BRK64:
658 		el0_dbg(regs, esr);
659 		break;
660 	case ESR_ELx_EC_FPAC:
661 		el0_fpac(regs, esr);
662 		break;
663 	default:
664 		el0_inv(regs, esr);
665 	}
666 }
667 
668 static void noinstr el0_interrupt(struct pt_regs *regs,
669 				  void (*handler)(struct pt_regs *))
670 {
671 	enter_from_user_mode(regs);
672 
673 	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
674 
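	/*
	 * VA bit 55 selects the TTBR: a PC with bit 55 set is a TTBR1 (kernel)
	 * address, so apply BP hardening as we do for kernel-address faults
	 * taken from EL0.
	 */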
675 	if (regs->pc & BIT(55))
676 		arm64_apply_bp_hardening();
677 
678 	irq_enter_rcu();
679 	do_interrupt_handler(regs, handler);
680 	irq_exit_rcu();
681 
682 	exit_to_user_mode(regs);
683 }
684 
685 static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
686 {
687 	el0_interrupt(regs, handle_arch_irq);
688 }
689 
690 asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
691 {
692 	__el0_irq_handler_common(regs);
693 }
694 
695 static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
696 {
697 	el0_interrupt(regs, handle_arch_fiq);
698 }
699 
700 asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
701 {
702 	__el0_fiq_handler_common(regs);
703 }
704 
705 static void noinstr __el0_error_handler_common(struct pt_regs *regs)
706 {
707 	unsigned long esr = read_sysreg(esr_el1);
708 
709 	enter_from_user_mode(regs);
710 	local_daif_restore(DAIF_ERRCTX);
711 	arm64_enter_nmi(regs);
712 	do_serror(regs, esr);
713 	arm64_exit_nmi(regs);
714 	local_daif_restore(DAIF_PROCCTX);
715 	exit_to_user_mode(regs);
716 }
717 
718 asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
719 {
720 	__el0_error_handler_common(regs);
721 }
722 
723 #ifdef CONFIG_COMPAT
724 static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
725 {
726 	enter_from_user_mode(regs);
727 	local_daif_restore(DAIF_PROCCTX);
728 	do_cp15instr(esr, regs);
729 	exit_to_user_mode(regs);
730 }
731 
732 static void noinstr el0_svc_compat(struct pt_regs *regs)
733 {
734 	enter_from_user_mode(regs);
735 	cortex_a76_erratum_1463225_svc_handler();
736 	do_el0_svc_compat(regs);
737 	exit_to_user_mode(regs);
738 }
739 
740 asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
741 {
742 	unsigned long esr = read_sysreg(esr_el1);
743 
744 	switch (ESR_ELx_EC(esr)) {
745 	case ESR_ELx_EC_SVC32:
746 		el0_svc_compat(regs);
747 		break;
748 	case ESR_ELx_EC_DABT_LOW:
749 		el0_da(regs, esr);
750 		break;
751 	case ESR_ELx_EC_IABT_LOW:
752 		el0_ia(regs, esr);
753 		break;
754 	case ESR_ELx_EC_FP_ASIMD:
755 		el0_fpsimd_acc(regs, esr);
756 		break;
757 	case ESR_ELx_EC_FP_EXC32:
758 		el0_fpsimd_exc(regs, esr);
759 		break;
760 	case ESR_ELx_EC_PC_ALIGN:
761 		el0_pc(regs, esr);
762 		break;
763 	case ESR_ELx_EC_UNKNOWN:
764 	case ESR_ELx_EC_CP14_MR:
765 	case ESR_ELx_EC_CP14_LS:
766 	case ESR_ELx_EC_CP14_64:
767 		el0_undef(regs);
768 		break;
769 	case ESR_ELx_EC_CP15_32:
770 	case ESR_ELx_EC_CP15_64:
771 		el0_cp15(regs, esr);
772 		break;
773 	case ESR_ELx_EC_BREAKPT_LOW:
774 	case ESR_ELx_EC_SOFTSTP_LOW:
775 	case ESR_ELx_EC_WATCHPT_LOW:
776 	case ESR_ELx_EC_BKPT32:
777 		el0_dbg(regs, esr);
778 		break;
779 	default:
780 		el0_inv(regs, esr);
781 	}
782 }
783 
784 asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
785 {
786 	__el0_irq_handler_common(regs);
787 }
788 
789 asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
790 {
791 	__el0_fiq_handler_common(regs);
792 }
793 
794 asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
795 {
796 	__el0_error_handler_common(regs);
797 }
798 #else /* CONFIG_COMPAT */
799 UNHANDLED(el0t, 32, sync)
800 UNHANDLED(el0t, 32, irq)
801 UNHANDLED(el0t, 32, fiq)
802 UNHANDLED(el0t, 32, error)
803 #endif /* CONFIG_COMPAT */
804 
805 #ifdef CONFIG_VMAP_STACK
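/*
 * Called on the overflow stack when a kernel stack overflow is detected;
 * report the bad stack and panic.
 */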
806 asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
807 {
808 	unsigned int esr = read_sysreg(esr_el1);
809 	unsigned long far = read_sysreg(far_el1);
810 
811 	arm64_enter_nmi(regs);
812 	panic_bad_stack(regs, esr, far);
813 }
814 #endif /* CONFIG_VMAP_STACK */
815 
816 #ifdef CONFIG_ARM_SDE_INTERFACE
817 asmlinkage noinstr unsigned long
818 __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
819 {
820 	unsigned long ret;
821 
822 	/*
823 	 * We didn't take an exception to get here, so the HW hasn't
824 	 * set/cleared bits in PSTATE that we may rely on.
825 	 *
826 	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
827 	 * whether PSTATE bits are inherited unchanged or generated from
828 	 * scratch, and the TF-A implementation always clears PAN and always
829 	 * clears UAO. There are no other known implementations.
830 	 *
831 	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
832 	 * PSTATE is modified upon architectural exceptions, and so PAN is
833 	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
834 	 * cleared.
835 	 *
836 	 * We must explicitly reset PAN to the expected state, including
837 	 * clearing it when the host isn't using it, in case a VM had it set.
838 	 */
839 	if (system_uses_hw_pan())
840 		set_pstate_pan(1);
841 	else if (cpu_has_pan())
842 		set_pstate_pan(0);
843 
844 	arm64_enter_nmi(regs);
845 	ret = do_sdei_event(regs, arg);
846 	arm64_exit_nmi(regs);
847 
848 	return ret;
849 }
850 #endif /* CONFIG_ARM_SDE_INTERFACE */
851