// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/irq_regs.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		ct_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}

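/*
 * In addition to the generic kernel-mode entry logic, this performs MTE
 * housekeeping: mte_check_tfsr_entry() checks TFSR_EL1 for asynchronous
 * tag-check faults, and mte_disable_tco_entry() clears PSTATE.TCO where
 * needed so that tag checking is re-enabled for kernel accesses.
 */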
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	__enter_from_kernel_mode(regs);
	mte_check_tfsr_entry();
	mte_disable_tco_entry(current);
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			ct_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			ct_irq_exit();
	}
}

static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	mte_check_tfsr_exit();
	__exit_to_kernel_mode(regs);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
	mte_disable_tco_entry(current);
}

static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode();
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

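/*
 * All DAIF exceptions are masked before the pending work flags are checked,
 * so the check below cannot race with an interrupt flagging new work between
 * the final check and the eventual return to EL0.
 */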
static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long flags;

	local_daif_mask();

	flags = read_thread_flags();
	if (unlikely(flags & _TIF_WORK_MASK))
		do_notify_resume(regs, flags);

	lockdep_sys_exit();
}

static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode_prepare(regs);
	mte_check_tfsr_exit();
	__exit_to_user_mode();
}

asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode(regs);
}

/*
 * Handle IRQ/context state management when entering an NMI from user/kernel
 * mode. Before this function is called it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	ct_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

/*
 * Handle IRQ/context state management when exiting an NMI from user/kernel
 * mode. After this function returns it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	ct_nmi_enter();

	trace_hardirqs_off_finish();
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
#define need_irq_preemption() \
	(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
#else
#define need_irq_preemption()	(IS_ENABLED(CONFIG_PREEMPTION))
#endif
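
/*
 * With CONFIG_PREEMPT_DYNAMIC the static key above tracks the preemption
 * model selected at boot (e.g. via the "preempt=" command line option), so
 * need_irq_preemption() is effectively patched at runtime rather than being
 * fixed at build time.
 */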

static void __sched arm64_preempt_schedule_irq(void)
{
	if (!need_irq_preemption())
		return;

	/*
	 * Note: thread_info::preempt_count includes both thread_info::count
	 * and thread_info::need_resched, and is not equivalent to
	 * preempt_count().
	 */
	if (READ_ONCE(current_thread_info()->preempt_count) != 0)
		return;

	/*
	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
	 * DAIF we must have handled an NMI, so skip preemption.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return;

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}

static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);

	set_irq_regs(old_regs);
}
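
/*
 * When the exception was taken on the task stack, call_on_irq_stack() runs
 * the handler on the per-CPU IRQ stack instead, so deep interrupt handling
 * doesn't eat into the task stack. If we're already off the task stack
 * (e.g. a nested exception taken during IRQ handling), the handler runs on
 * the current stack.
 */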

extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned long esr)
{
	arm64_enter_nmi(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	panic("Unhandled exception");
}

#define UNHANDLED(el, regsize, vector)							\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
{											\
	const char *desc = #regsize "-bit " #el " " #vector;				\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));				\
}
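
/*
 * For example, UNHANDLED(el1t, 64, sync) expands to an el1t_64_sync_handler()
 * that panics with the description "64-bit el1t sync".
 */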

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

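/*
 * Cortex-A76 erratum 1463225: single-stepping an SVC instruction can prevent
 * interrupts from being recognised on the affected cores. The workaround
 * below forces a dummy single-step exception from the kernel's SVC path
 * ("msr daifclr, #8" unmasks debug exceptions by clearing PSTATE.D) so that
 * the step state machine advances and interrupts are recognised again; the
 * per-CPU flag above marks the window in which that dummy exception is
 * expected.
 */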
static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u32 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
	write_sysreg(val, mdscr_el1);
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static __always_inline bool
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

/*
 * As per the ABI, exit SME streaming mode and clear the SVE state not
 * shared with FPSIMD on syscall entry.
 */
static inline void fp_user_discard(void)
{
	/*
	 * If SME is active then exit streaming mode. If ZA is active
	 * then flush the SVE registers but leave userspace access to
	 * both SVE and SME enabled, otherwise disable SME for the
	 * task and fall through to disabling SVE too. This means
	 * that after a syscall we never have any streaming mode
	 * register state to track, if this changes the KVM code will
	 * need updating.
	 */
	if (system_supports_sme())
		sme_smstop_sm();

	if (!system_supports_sve())
		return;

	if (test_thread_flag(TIF_SVE)) {
		unsigned int sve_vq_minus_one;

		sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
		sve_flush_live(true, sve_vq_minus_one);
	}
}

UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_undef(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_bti(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs))
		do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_fpac(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el1_bti(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		__panic_unhandled(regs, "64-bit el1h sync", esr);
	}
}

static __always_inline void __el1_pnmi(struct pt_regs *regs,
				       void (*handler)(struct pt_regs *))
{
	arm64_enter_nmi(regs);
	do_interrupt_handler(regs, handler);
	arm64_exit_nmi(regs);
}

static __always_inline void __el1_irq(struct pt_regs *regs,
				      void (*handler)(struct pt_regs *))
{
	enter_from_kernel_mode(regs);

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	arm64_preempt_schedule_irq();

	exit_to_kernel_mode(regs);
}
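
/*
 * With pseudo-NMI support (GIC priority masking), an interrupt taken while
 * the interrupted context had IRQs masked can only be a pseudo-NMI, so it is
 * routed through the NMI entry/exit path rather than the normal IRQ path.
 */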
static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		__el1_pnmi(regs, handler);
	else
		__el1_irq(regs, handler);
}

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sme_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_sys(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_undef(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_bti(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_mops(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	fp_user_discard();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_fpac(regs, esr);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_SME:
		el0_sme_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_MOPS:
		el0_mops(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	enter_from_user_mode(regs);

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	exit_to_user_mode(regs);
}

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_cp15(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc_compat(regs);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_VMAP_STACK
asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_nmi(regs);
	panic_bad_stack(regs, esr, far);
}
#endif /* CONFIG_VMAP_STACK */

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	/*
	 * We didn't take an exception to get here, so the HW hasn't
	 * set/cleared bits in PSTATE that we may rely on.
	 *
	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
	 * whether PSTATE bits are inherited unchanged or generated from
	 * scratch, and the TF-A implementation always clears PAN and always
	 * clears UAO. There are no other known implementations.
	 *
	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
	 * PSTATE is modified upon architectural exceptions, and so PAN is
	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
	 * cleared.
	 *
	 * We must explicitly reset PAN to the expected state, including
	 * clearing it when the host isn't using it, in case a VM had it set.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);

	arm64_enter_nmi(regs);
	ret = do_sdei_event(regs, arg);
	arm64_exit_nmi(regs);

	return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */