xref: /openbmc/linux/arch/arm64/kernel/entry-common.c (revision aa0dc6a7)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Exception handling code
4  *
5  * Copyright (C) 2019 ARM Ltd.
6  */
7 
8 #include <linux/context_tracking.h>
9 #include <linux/linkage.h>
10 #include <linux/lockdep.h>
11 #include <linux/ptrace.h>
12 #include <linux/sched.h>
13 #include <linux/sched/debug.h>
14 #include <linux/thread_info.h>
15 
16 #include <asm/cpufeature.h>
17 #include <asm/daifflags.h>
18 #include <asm/esr.h>
19 #include <asm/exception.h>
20 #include <asm/kprobes.h>
21 #include <asm/mmu.h>
22 #include <asm/processor.h>
23 #include <asm/sdei.h>
24 #include <asm/stacktrace.h>
25 #include <asm/sysreg.h>
26 #include <asm/system_misc.h>
27 
28 /*
29  * This is intended to match the logic in irqentry_enter(), handling the kernel
30  * mode transitions only.
31  */
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	/*
	 * An exception taken from the idle task may have interrupted an RCU
	 * extended quiescent state. Tell RCU we're back, and record in
	 * pt_regs that the matching exit path must call rcu_irq_exit().
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	/* Ordinary kernel context: RCU is already watching. */
	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();

	mte_check_tfsr_entry();
}
51 
52 /*
53  * This is intended to match the logic in irqentry_exit(), handling the kernel
54  * mode transitions only, and with preemption handled elsewhere.
55  */
static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	mte_check_tfsr_exit();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			/*
			 * Entry came from an RCU-idle context; undo the
			 * rcu_irq_enter() performed by enter_from_kernel_mode().
			 */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		/* Returning with IRQs still masked; only balance RCU if needed. */
		if (regs->exit_rcu)
			rcu_irq_exit();
	}
}
77 
/*
 * NMI-like entry accounting: stash the current lockdep hardirq state in
 * pt_regs (so arm64_exit_nmi() can restore it), then perform the usual
 * nmi_enter() sequence for lockdep, RCU, tracing and ftrace.
 */
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}
90 
/*
 * Undo arm64_enter_nmi() in reverse order, restoring the lockdep hardirq
 * state that was saved in pt_regs on entry.
 */
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}
107 
108 static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
109 {
110 	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
111 		arm64_enter_nmi(regs);
112 	else
113 		enter_from_kernel_mode(regs);
114 }
115 
116 static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
117 {
118 	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
119 		arm64_exit_nmi(regs);
120 	else
121 		exit_to_kernel_mode(regs);
122 }
123 
/*
 * Attempt an IRQ-return preemption, unless we appear to be handling an NMI
 * or cpufeature enablement has not yet completed.
 */
static void __sched arm64_preempt_schedule_irq(void)
{
	lockdep_assert_irqs_disabled();

	/*
	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
	 * DAIF we must have handled an NMI, so skip preemption.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return;

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}
148 
/*
 * Invoke @handler for the interrupt described by @regs, switching onto the
 * per-CPU IRQ stack when we are currently on the task's thread stack.
 */
static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	if (!on_thread_stack()) {
		handler(regs);
		return;
	}

	call_on_irq_stack(regs, handler);
}
157 
158 extern void (*handle_arch_irq)(struct pt_regs *);
159 extern void (*handle_arch_fiq)(struct pt_regs *);
160 
/*
 * Report an exception taken from a vector we cannot handle, then panic.
 * Entered under NMI accounting since we may arrive here from any context.
 * Does not return.
 */
static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned int esr)
{
	arm64_enter_nmi(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	panic("Unhandled exception");
}
175 
/*
 * Generate an el<el>_<regsize>_<vector>_handler() that panics via
 * __panic_unhandled(), for vectors that should never be taken.
 */
#define UNHANDLED(el, regsize, vector)							\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
{											\
	const char *desc = #regsize "-bit " #el " " #vector;				\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));				\
}
182 
#ifdef CONFIG_ARM64_ERRATUM_1463225
/* Non-zero while the erratum 1463225 SVC workaround is in flight on this CPU. */
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

/*
 * Cortex-A76 erratum 1463225 workaround, SVC side: when single-stepping a
 * syscall, force a dummy single-step exception (by setting MDSCR_EL1.SS/KDE
 * and unmasking debug exceptions) before handling the syscall, then restore
 * the original MDSCR_EL1 value.
 */
static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u32 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
	write_sysreg(val, mdscr_el1);
	asm volatile("msr daifclr, #8");	/* unmask debug exceptions (PSTATE.D) */
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

/*
 * Debug-exception side of the workaround: swallow the dummy step exception
 * triggered above. Returns true iff the exception belongs to the workaround
 * and needs no further handling.
 */
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */
231 
/* Exceptions taken from EL1 while on SP_EL0 (EL1t) are unexpected; panic. */
UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)
236 
/*
 * EL1 data/instruction abort. FAR_EL1 is read up front, before a later
 * exception could clobber it, then DAIF is inherited from the interrupted
 * context for the duration of the fault handling.
 */
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}
247 
/* EL1 PC alignment fault; FAR_EL1 is captured before unmasking. */
static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}
258 
/* Undefined instruction (or trapped sysreg access) at EL1. */
static void noinstr el1_undef(struct pt_regs *regs)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_undefinstr(regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}
267 
/*
 * EL1 debug exception entry: NMI-like lockdep/RCU accounting (saving the
 * prior lockdep hardirq state in pt_regs), but without the full
 * __nmi_enter()/ftrace NMI bookkeeping used by arm64_enter_nmi().
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
}
277 
/*
 * Undo arm64_enter_el1_dbg(), restoring the saved lockdep hardirq state.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}
291 
/*
 * EL1 debug exception (breakpoint/step/watchpoint/BRK), filtered through the
 * Cortex-A76 erratum 1463225 workaround first.
 */
static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs))
		do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}
301 
/* Pointer authentication failure (FPAC) at EL1. */
static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_ptrauth_fault(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}
310 
/*
 * Synchronous exception taken from EL1 with SP_EL1 (EL1h): dispatch on the
 * ESR_EL1 exception class; anything unrecognised panics.
 */
asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		__panic_unhandled(regs, "64-bit el1h sync", esr);
	}
}
344 
/*
 * Common EL1 IRQ/FIQ handling: set DAIF to the no-IRQ process context value,
 * account for IRQ-vs-NMI entry, run @handler on the appropriate stack, and
 * optionally preempt on the way out.
 */
static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	enter_el1_irq_or_nmi(regs);
	do_interrupt_handler(regs, handler);

	/*
	 * Note: thread_info::preempt_count includes both thread_info::count
	 * and thread_info::need_resched, and is not equivalent to
	 * preempt_count().
	 */
	if (IS_ENABLED(CONFIG_PREEMPTION) &&
	    READ_ONCE(current_thread_info()->preempt_count) == 0)
		arm64_preempt_schedule_irq();

	exit_el1_irq_or_nmi(regs);
}
364 
/* IRQ taken from EL1h. */
asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_irq);
}
369 
/* FIQ taken from EL1h. */
asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_fiq);
}
374 
/* SError taken from EL1h; handled under NMI accounting with DAIF at ERRCTX. */
asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
}
384 
/*
 * Entered with IRQs masked after an exception from EL0: inform lockdep,
 * context tracking (user exit) and tracing, in that order.
 */
asmlinkage void noinstr enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
}
392 
/*
 * Mirror of enter_from_user_mode(), run just before returning to EL0:
 * check MTE tag-fault state, then re-enable tracing/lockdep and re-enter
 * user context tracking.
 */
asmlinkage void noinstr exit_to_user_mode(void)
{
	mte_check_tfsr_exit();

	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
402 
/* EL0 data abort; FAR_EL1 is captured before anything can clobber it. */
static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
}
411 
/* EL0 instruction abort. */
static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
}
428 
/* FP/ASIMD access trap from EL0. */
static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
}
435 
/* SVE access trap from EL0. */
static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
}
442 
/* FP/ASIMD exception from EL0. */
static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
}
449 
/* Trapped system instruction (or WFx, per the dispatcher) from EL0. */
static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sysinstr(esr, regs);
}
456 
/* EL0 PC alignment fault. */
static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/* Faulting PC in the kernel half: apply BP hardening before unmasking. */
	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
}
468 
/* EL0 SP alignment fault; the faulting address is the user SP itself. */
static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
}
475 
/* Undefined instruction from EL0. */
static void noinstr el0_undef(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_undefinstr(regs);
}
482 
/* Branch Target Identification fault from EL0. */
static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_bti(regs);
}
489 
/* Unrecognised EL0 synchronous exception class; deliver a fatal signal. */
static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
}
496 
/*
 * EL0 debug exception. Note that do_debug_exception() runs before DAIF is
 * restored, i.e. with debug exceptions and IRQs still masked.
 */
static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode();
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX);
}
506 
/* 64-bit syscall entry (SVC), with the erratum 1463225 workaround applied. */
static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode();
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc(regs);
}
513 
/* Pointer authentication failure (FPAC) from EL0. */
static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_ptrauth_fault(regs, esr);
}
520 
/*
 * Synchronous exception from 64-bit EL0: dispatch on the ESR_EL1 exception
 * class; unrecognised classes fall through to el0_inv().
 */
asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}
573 
/*
 * Common EL0 IRQ/FIQ handling: perform user-mode entry accounting, mask IRQs
 * via DAIF, then run @handler on the appropriate stack.
 */
static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	enter_from_user_mode();

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	/*
	 * PC bit 55 set means a kernel-half (TTBR1) address, which userspace
	 * should never be executing from; apply BP hardening.
	 */
	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	do_interrupt_handler(regs, handler);
}
586 
/* Shared by the 64-bit and 32-bit EL0 IRQ vectors. */
static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_irq);
}
591 
/* IRQ from 64-bit EL0. */
asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}
596 
/* Shared by the 64-bit and 32-bit EL0 FIQ vectors. */
static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_fiq);
}
601 
/* FIQ from 64-bit EL0. */
asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}
606 
607 static void __el0_error_handler_common(struct pt_regs *regs)
608 {
609 	unsigned long esr = read_sysreg(esr_el1);
610 
611 	enter_from_user_mode();
612 	local_daif_restore(DAIF_ERRCTX);
613 	arm64_enter_nmi(regs);
614 	do_serror(regs, esr);
615 	arm64_exit_nmi(regs);
616 	local_daif_restore(DAIF_PROCCTX);
617 }
618 
/* SError from 64-bit EL0. */
asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
623 
624 #ifdef CONFIG_COMPAT
/* Trapped CP15 access from 32-bit EL0. */
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_cp15instr(esr, regs);
}
631 
/* 32-bit syscall entry (SVC), with the erratum 1463225 workaround applied. */
static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	enter_from_user_mode();
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc_compat(regs);
}
638 
/*
 * Synchronous exception from 32-bit EL0: dispatch on the ESR_EL1 exception
 * class; unrecognised classes fall through to el0_inv().
 */
asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}
682 
/* IRQ from 32-bit EL0. */
asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}
687 
/* FIQ from 32-bit EL0. */
asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}
692 
/* SError from 32-bit EL0. */
asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
697 #else /* CONFIG_COMPAT */
/* Without CONFIG_COMPAT, any exception from 32-bit EL0 is unexpected; panic. */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
702 #endif /* CONFIG_COMPAT */
703 
704 #ifdef CONFIG_VMAP_STACK
/*
 * Report a kernel stack overflow under NMI accounting, then panic via
 * panic_bad_stack(). Does not return.
 */
asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
{
	unsigned int esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_nmi(regs);
	panic_bad_stack(regs, esr, far);
}
713 #endif /* CONFIG_VMAP_STACK */
714 
715 #ifdef CONFIG_ARM_SDE_INTERFACE
/*
 * Entry point for SDEI events, called from firmware rather than via an
 * architectural exception: normalise PSTATE.PAN, then run the event under
 * NMI accounting. Returns the value produced by do_sdei_event().
 */
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	/*
	 * We didn't take an exception to get here, so the HW hasn't
	 * set/cleared bits in PSTATE that we may rely on.
	 *
	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
	 * whether PSTATE bits are inherited unchanged or generated from
	 * scratch, and the TF-A implementation always clears PAN and always
	 * clears UAO. There are no other known implementations.
	 *
	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
	 * PSTATE is modified upon architectural exceptions, and so PAN is
	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
	 * cleared.
	 *
	 * We must explicitly reset PAN to the expected state, including
	 * clearing it when the host isn't using it, in case a VM had it set.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);

	arm64_enter_nmi(regs);
	ret = do_sdei_event(regs, arg);
	arm64_exit_nmi(regs);

	return ret;
}
749 #endif /* CONFIG_ARM_SDE_INTERFACE */
750