xref: /openbmc/linux/arch/arm64/kernel/entry-common.c (revision ca48739e)
// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>

/*
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}

/*
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			rcu_irq_exit();
	}
}

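/*
 * NMI entry accounting: stash the lockdep hardirqs state so that it can be
 * restored in arm64_exit_nmi(), then inform lockdep, RCU and ftrace that we
 * are entering NMI context.
 */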
void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

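/*
 * Undo arm64_enter_nmi() in reverse order, restoring the lockdep hardirqs
 * state that was live when the NMI was taken.
 */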
void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

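/*
 * When pseudo-NMIs are in use, an "IRQ" taken while the interrupted context
 * had interrupts masked can only be a priority-masked NMI, so give it
 * NMI-style accounting; otherwise treat it as a regular kernel-mode IRQ.
 */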
asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_enter_nmi(regs);
	else
		enter_from_kernel_mode(regs);
}

asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_exit_nmi(regs);
	else
		exit_to_kernel_mode(regs);
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

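/*
 * Workaround for Cortex-A76 erratum 1463225 (software step might prevent
 * interrupt recognition): when single-stepping into a syscall on an affected
 * CPU, deliberately take a dummy step exception from kernel mode by setting
 * MDSCR_EL1.{SS,KDE} and unmasking debug exceptions (PSTATE.D), then restore
 * MDSCR_EL1 once that exception has been handled.
 */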
static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u32 reg, val;

	if (likely(!test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (likely(!this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
	write_sysreg(val, mdscr_el1);
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return to
	 * cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

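/*
 * EL1 synchronous exception handlers. Each reads any fault registers before
 * they can be clobbered, performs the kernel-mode entry accounting, and
 * inherits the interrupted context's DAIF flags around the actual handler.
 */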
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_undefinstr(regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	bad_mode(regs, 0, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

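/*
 * Debug exceptions can be taken almost anywhere, including midway through
 * the kernel entry/exit paths, so they get NMI-style RCU and lockdep
 * accounting instead of the usual kernel-mode transitions.
 */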
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
}

static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * The CPU masked interrupts, and we are leaving them masked during
	 * do_debug_exception(). Update PMR as if we had called
	 * local_daif_mask().
	 */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs))
		do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_ptrauth_fault(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

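/*
 * Dispatcher for synchronous exceptions taken from EL1: decode the exception
 * class from ESR_EL1 and hand off to the matching handler.
 */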
asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		el1_inv(regs, esr);
	}
}

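/*
 * Transitions to and from user mode: context tracking and lockdep must be
 * updated with instrumentation disabled, before any instrumentable code runs
 * on entry and after the last of it on exit.
 */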
asmlinkage void noinstr enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
}

asmlinkage void noinstr exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

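/*
 * EL0 synchronous exception handlers. Unlike their EL1 counterparts, most of
 * these re-enable interrupts and debug exceptions via DAIF_PROCCTX once the
 * entry accounting is complete, since the exception came from userspace.
 */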
static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sysinstr(esr, regs);
}

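/*
 * As in el0_ia(): if userspace faulted with a PC that is a kernel address,
 * apply BP hardening before IRQs are enabled.
 */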
static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
}

static void noinstr el0_undef(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_undefinstr(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_bti(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	enter_from_user_mode();
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
}

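/*
 * SVC from AArch64 userspace. PMR is updated to match the masked-interrupt
 * state the CPU left us in; unlike the fault handlers above, DAIF is only
 * restored later, in the syscall handling path.
 */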
static void noinstr el0_svc(struct pt_regs *regs)
{
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	enter_from_user_mode();
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_ptrauth_fault(regs, esr);
}

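/*
 * Dispatcher for synchronous exceptions taken from AArch64 EL0.
 */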
asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_cp15instr(esr, regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	enter_from_user_mode();
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc_compat(regs);
}

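/*
 * Dispatcher for synchronous exceptions taken from AArch32 EL0. CP14
 * accesses are treated as undefined, while CP15 accesses may be emulated
 * by do_cp15instr().
 */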
asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}
#endif /* CONFIG_COMPAT */