// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>

/*
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();

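	/* Check TFSR_EL1 for asynchronous MTE tag check faults taken in EL1. */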
	mte_check_tfsr_entry();
}

/*
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	mte_check_tfsr_exit();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			rcu_irq_exit();
	}
}

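/*
 * NMI entry bookkeeping: save the current lockdep hardirq state so it can be
 * restored on exit, then tell lockdep, RCU and ftrace that we are in NMI
 * context with hardirqs disabled.
 */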
void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

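/*
 * Undo arm64_enter_nmi(), restoring the lockdep hardirq state that was saved
 * on entry.
 */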
void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

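/*
 * With pseudo-NMIs (CONFIG_ARM64_PSEUDO_NMI), an IRQ taken while the
 * interrupted context had interrupts masked can only be a pseudo-NMI, so
 * treat it as NMI entry; otherwise this is a regular kernel-mode IRQ.
 */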
asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_enter_nmi(regs);
	else
		enter_from_kernel_mode(regs);
}

asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_exit_nmi(regs);
	else
		exit_to_kernel_mode(regs);
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

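/*
 * Cortex-A76 erratum 1463225 workaround: when single-stepping an SVC,
 * deliberately take a dummy single-step exception from the kernel (with
 * MDSCR_EL1.SS and KDE set and debug exceptions unmasked) before handling
 * the syscall, then restore MDSCR_EL1. The per-CPU flag tells the debug
 * handler below that the step exception was expected.
 */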
static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u32 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
	write_sysreg(val, mdscr_el1);
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

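/*
 * The EL1 synchronous exception handlers below follow a common pattern:
 * perform the kernel-mode entry bookkeeping, inherit the interrupted
 * context's DAIF flags, run the handler, then mask DAIF again before the
 * exit bookkeeping and the return to the interrupted context.
 */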
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_undefinstr(regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	bad_mode(regs, 0, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

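/*
 * Debug exceptions from EL1 are NMI-like: they can be taken from almost any
 * context, so only a trimmed-down version of the arm64_enter_nmi() /
 * arm64_exit_nmi() bookkeeping is performed.
 */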
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
}

static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * The CPU masked interrupts, and we are leaving them masked during
	 * do_debug_exception(). Update PMR as if we had called
	 * local_daif_mask().
	 */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs))
		do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_ptrauth_fault(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

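/*
 * Top-level dispatcher for synchronous exceptions taken from EL1: decode the
 * exception class from ESR_EL1 and hand off to the matching handler above.
 */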
asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		el1_inv(regs, esr);
	}
}

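/*
 * Context-tracking transitions for exceptions taken from EL0: tell lockdep
 * that hardirqs are off and switch the context-tracking state out of (or,
 * on exit, back into) user mode. Both are called from the assembly entry
 * and return paths.
 */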
asmlinkage void noinstr enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
}

asmlinkage void noinstr exit_to_user_mode(void)
{
	mte_check_tfsr_exit();

	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

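/*
 * EL0 synchronous exception handlers: after the user-mode entry bookkeeping,
 * restore DAIF to the usual process-context state (interrupts enabled) and
 * run the handler. The eventual return to userspace goes through
 * exit_to_user_mode() separately.
 */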
static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sysinstr(esr, regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
}

static void noinstr el0_undef(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_undefinstr(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_bti(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

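	/*
	 * As in el1_dbg(), interrupts stay masked across
	 * do_debug_exception(); update PMR as if we had called
	 * local_daif_mask().
	 */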
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	enter_from_user_mode();
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
}

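/*
 * Unlike the other EL0 handlers, el0_svc() does not restore DAIF here; the
 * syscall path re-enables interrupts itself once the erratum 1463225
 * workaround has run.
 */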
static void noinstr el0_svc(struct pt_regs *regs)
{
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	enter_from_user_mode();
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_ptrauth_fault(regs, esr);
}

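/*
 * Top-level dispatcher for synchronous exceptions taken from 64-bit EL0,
 * keyed on the ESR_EL1 exception class.
 */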
asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_cp15instr(esr, regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	enter_from_user_mode();
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc_compat(regs);
}

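/*
 * As el0_sync_handler(), but for synchronous exceptions taken from 32-bit
 * (AArch32 compat) EL0, including the AArch32 coprocessor traps.
 */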
asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}
#endif /* CONFIG_COMPAT */