xref: /openbmc/linux/arch/arm64/kernel/entry-common.c (revision 276e552e)
// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>

/*
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

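	/*
	 * If this exception interrupted the idle task, RCU may not be
	 * watching; take the rcu_irq_enter() path (cf. irqentry_enter())
	 * and record that in pt_regs so exit_to_kernel_mode() can undo it.
	 */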
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();

	mte_check_tfsr_entry();
}

/*
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	mte_check_tfsr_exit();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			rcu_irq_exit();
	}
}

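/*
 * NMI-like entry accounting, broadly mirroring the generic
 * irqentry_nmi_enter(): lockdep, hardirq tracking and RCU must all be
 * informed before any instrumentable code runs. The interrupted
 * context's lockdep hardirq state is stashed in pt_regs so that
 * arm64_exit_nmi() can restore it.
 */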
void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

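/*
 * Mirror of arm64_enter_nmi(), unwinding the accounting in the
 * opposite order.
 */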
void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

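/*
 * With pseudo-NMI support (CONFIG_ARM64_PSEUDO_NMI), normal IRQs are
 * masked via the GIC priority mask while PSTATE.I stays clear, so an
 * interrupt taken while the interrupted context shows interrupts as
 * masked in pt_regs can only be an NMI, and is accounted as such.
 * Otherwise this is an ordinary IRQ from kernel mode;
 * exit_el1_irq_or_nmi() takes the matching exit path.
 */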
asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_enter_nmi(regs);
	else
		enter_from_kernel_mode(regs);
}

asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_exit_nmi(regs);
	else
		exit_to_kernel_mode(regs);
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

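/*
 * Cortex-A76 erratum 1463225: software-stepping an SVC can leave the
 * CPU in a state where interrupts are not recognised on the syscall
 * path. Work around it by taking a dummy single-step exception from
 * kernel mode before handling the syscall proper: enable kernel-mode
 * single-step in MDSCR_EL1, unmask debug exceptions ("msr daifclr,
 * #8"), and let the per-CPU flag tell the debug handler below to
 * swallow the resulting step exception.
 */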
static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u32 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
	write_sysreg(val, mdscr_el1);
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

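/*
 * Returns true if this debug exception is the dummy step exception
 * generated by the workaround above, in which case it must not reach
 * the ordinary debug machinery. Setting PSR_D_BIT in the saved pstate
 * returns to the workaround with debug exceptions masked.
 */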
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

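/*
 * The EL1 handlers below share a common shape: read any fault address
 * from FAR_EL1 first (before it can be clobbered by a later exception),
 * do the kernel-mode entry accounting, inherit the interrupted
 * context's DAIF flags so the handler runs with the same masking, and
 * re-mask everything before the exit accounting.
 */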
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_undefinstr(regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	bad_mode(regs, 0, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

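/*
 * Debug exceptions from EL1 are treated as NMI-like: they can be taken
 * in contexts where RCU is not watching (e.g. from idle), so use the
 * rcu_nmi_enter()/rcu_nmi_exit() pair rather than the ordinary
 * kernel-mode entry accounting, and preserve the lockdep hardirq state
 * as arm64_enter_nmi() does.
 */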
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
}

static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs))
		do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_ptrauth_fault(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

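/*
 * Dispatch synchronous exceptions taken from EL1 on the exception
 * class (EC) field of ESR_EL1, i.e. ESR_ELx_EC(esr) == (esr >> 26) & 0x3f.
 * Anything without an explicit handler is fatal via el1_inv().
 */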
asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		el1_inv(regs, esr);
	}
}

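/*
 * Entry accounting for exceptions taken from EL0. There is no
 * conditional RCU handling here: user_exit_irqoff() does the context
 * tracking, which moves RCU out of the user extended quiescent state.
 */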
asmlinkage void noinstr enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
}

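/*
 * Mirror of enter_from_user_mode(): check for asynchronous MTE
 * tag-check faults, then hand hardirq tracking back and re-enter the
 * user context. Called from the entry assembly on the return path.
 */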
asmlinkage void noinstr exit_to_user_mode(void)
{
	mte_check_tfsr_exit();

	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

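/*
 * The EL0 handlers below are entered with all DAIF exceptions masked.
 * After the user-mode entry accounting they restore the normal
 * process-context masking (DAIF_PROCCTX), which re-enables interrupts,
 * before calling the real handler. There is no explicit exit step
 * here; the return to userspace goes through exit_to_user_mode() from
 * the entry assembly.
 */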
static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sysinstr(esr, regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
}

static void noinstr el0_undef(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_undefinstr(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_bti(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1; otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode();
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
}

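/*
 * Unlike the other EL0 handlers, el0_svc() does not unmask DAIF here:
 * the erratum workaround must run with debug exceptions in a known
 * state, and interrupts are re-enabled further down the syscall path
 * instead.
 */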
static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode();
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_ptrauth_fault(regs, esr);
}

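/*
 * Dispatch synchronous exceptions taken from 64-bit EL0 on the
 * exception class in ESR_EL1. Unrecognised classes are fatal for the
 * task via el0_inv() -> bad_el0_sync().
 */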
asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_cp15instr(esr, regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	enter_from_user_mode();
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc_compat(regs);
}

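/*
 * Dispatch synchronous exceptions taken from 32-bit (AArch32) EL0.
 * The compat-only classes (SVC32, CP14/CP15 accesses, BKPT32) are
 * handled here; everything else shares the 64-bit handlers above.
 */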
asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}
#endif /* CONFIG_COMPAT */