xref: /openbmc/linux/arch/arm64/kernel/entry-common.c (revision f5ad1c74)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Exception handling code
4  *
5  * Copyright (C) 2019 ARM Ltd.
6  */
7 
8 #include <linux/context_tracking.h>
9 #include <linux/ptrace.h>
10 #include <linux/thread_info.h>
11 
12 #include <asm/cpufeature.h>
13 #include <asm/daifflags.h>
14 #include <asm/esr.h>
15 #include <asm/exception.h>
16 #include <asm/kprobes.h>
17 #include <asm/mmu.h>
18 #include <asm/sysreg.h>
19 
20 /*
21  * This is intended to match the logic in irqentry_enter(), handling the kernel
22  * mode transitions only.
23  */
24 static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
25 {
26 	regs->exit_rcu = false;
27 
	/*
	 * An exception taken from the idle task may have interrupted an
	 * RCU-idle period: tell RCU we are back in the kernel and record in
	 * regs->exit_rcu that exit_to_kernel_mode() must balance this with
	 * rcu_irq_exit(). With CONFIG_TINY_RCU there is no idle tracking,
	 * so this path is compiled out.
	 */
28 	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
29 		lockdep_hardirqs_off(CALLER_ADDR0);
30 		rcu_irq_enter();
31 		trace_hardirqs_off_finish();
32 
33 		regs->exit_rcu = true;
34 		return;
35 	}
36 
	/*
	 * Common case: RCU is already watching. Inform lockdep that IRQs are
	 * masked, give RCU a chance to enable the tick, then tracing last.
	 */
37 	lockdep_hardirqs_off(CALLER_ADDR0);
38 	rcu_irq_enter_check_tick();
39 	trace_hardirqs_off_finish();
40 }
41 
42 /*
43  * This is intended to match the logic in irqentry_exit(), handling the kernel
44  * mode transitions only, and with preemption handled elsewhere.
45  */
46 static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
47 {
48 	lockdep_assert_irqs_disabled();
49 
50 	if (interrupts_enabled(regs)) {
		/*
		 * Returning to a context that had IRQs enabled. If entry had
		 * to wake RCU (exit_rcu set by enter_from_kernel_mode()),
		 * undo that and restore lockdep/tracing state in the inverse
		 * of the entry order.
		 */
51 		if (regs->exit_rcu) {
52 			trace_hardirqs_on_prepare();
53 			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
54 			rcu_irq_exit();
55 			lockdep_hardirqs_on(CALLER_ADDR0);
56 			return;
57 		}
58 
59 		trace_hardirqs_on();
60 	} else {
		/*
		 * Interrupted context had IRQs masked; leave lockdep/tracing
		 * state alone and only balance the RCU entry if needed.
		 */
61 		if (regs->exit_rcu)
62 			rcu_irq_exit();
63 	}
64 }
65 
/*
 * NMI entry bookkeeping: snapshot lockdep's hardirq state in pt_regs so
 * arm64_exit_nmi() can restore it, then enter NMI context for nesting
 * accounting, lockdep, RCU, tracing and ftrace — in that order.
 */
66 void noinstr arm64_enter_nmi(struct pt_regs *regs)
67 {
68 	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
69 
70 	__nmi_enter();
71 	lockdep_hardirqs_off(CALLER_ADDR0);
72 	lockdep_hardirq_enter();
73 	rcu_nmi_enter();
74 
75 	trace_hardirqs_off_finish();
76 	ftrace_nmi_enter();
77 }
78 
/*
 * Undo arm64_enter_nmi() in reverse order, restoring the lockdep hardirq
 * state that was live when the NMI was taken (saved in regs by entry).
 */
79 void noinstr arm64_exit_nmi(struct pt_regs *regs)
80 {
81 	bool restore = regs->lockdep_hardirqs;
82 
83 	ftrace_nmi_exit();
	/* Only re-mark hardirqs as enabled if they were enabled at entry. */
84 	if (restore) {
85 		trace_hardirqs_on_prepare();
86 		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
87 	}
88 
89 	rcu_nmi_exit();
90 	lockdep_hardirq_exit();
91 	if (restore)
92 		lockdep_hardirqs_on(CALLER_ADDR0);
93 	__nmi_exit();
94 }
95 
/*
 * With pseudo-NMI support, an "IRQ" taken while the interrupted context had
 * interrupts masked can only be a pseudo-NMI; treat it with NMI entry
 * semantics. Otherwise it is an ordinary kernel-mode interrupt.
 */
96 asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
97 {
98 	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
99 		arm64_enter_nmi(regs);
100 	else
101 		enter_from_kernel_mode(regs);
102 }
103 
/*
 * Mirror of enter_el1_irq_or_nmi(): the same predicate must select the
 * matching exit path so NMI and IRQ bookkeeping stay balanced.
 */
104 asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
105 {
106 	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
107 		arm64_exit_nmi(regs);
108 	else
109 		exit_to_kernel_mode(regs);
110 }
111 
/*
 * EL1 data/instruction abort: capture FAR_EL1 first (a later exception
 * would clobber it), then enter kernel mode, unmask the DAIF bits the
 * interrupted context had unmasked, and hand off to the fault handler.
 */
112 static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
113 {
114 	unsigned long far = read_sysreg(far_el1);
115 
116 	enter_from_kernel_mode(regs);
117 	local_daif_inherit(regs);
118 	do_mem_abort(far, esr, regs);
119 	local_daif_mask();
120 	exit_to_kernel_mode(regs);
121 }
122 
/*
 * EL1 PC alignment fault: same enter/inherit/handle/mask/exit sequence as
 * el1_abort(), dispatching to the SP/PC abort handler.
 */
123 static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
124 {
125 	unsigned long far = read_sysreg(far_el1);
126 
127 	enter_from_kernel_mode(regs);
128 	local_daif_inherit(regs);
129 	do_sp_pc_abort(far, esr, regs);
130 	local_daif_mask();
131 	exit_to_kernel_mode(regs);
132 }
133 
/* EL1 undefined instruction (and unhandled SYS64) trap. */
134 static void noinstr el1_undef(struct pt_regs *regs)
135 {
136 	enter_from_kernel_mode(regs);
137 	local_daif_inherit(regs);
138 	do_undefinstr(regs);
139 	local_daif_mask();
140 	exit_to_kernel_mode(regs);
141 }
142 
/* Catch-all for EL1 exception classes with no dedicated handler. */
143 static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
144 {
145 	enter_from_kernel_mode(regs);
146 	local_daif_inherit(regs);
147 	bad_mode(regs, 0, esr);
148 	local_daif_mask();
149 	exit_to_kernel_mode(regs);
150 }
151 
/*
 * EL1 debug exception entry: like arm64_enter_nmi() but without the NMI
 * nesting/ftrace accounting — debug exceptions can interrupt almost any
 * context, so use rcu_nmi_enter() and save lockdep state for exit.
 */
152 static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
153 {
154 	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
155 
156 	lockdep_hardirqs_off(CALLER_ADDR0);
157 	rcu_nmi_enter();
158 
159 	trace_hardirqs_off_finish();
160 }
161 
/*
 * Undo arm64_enter_el1_dbg(), restoring the lockdep hardirq state saved
 * at entry (only re-enabled if it was enabled when the exception hit).
 */
162 static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
163 {
164 	bool restore = regs->lockdep_hardirqs;
165 
166 	if (restore) {
167 		trace_hardirqs_on_prepare();
168 		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
169 	}
170 
171 	rcu_nmi_exit();
172 	if (restore)
173 		lockdep_hardirqs_on(CALLER_ADDR0);
174 }
175 
/*
 * EL1 debug exception (breakpoint/watchpoint/single-step/BRK): runs with
 * interrupts masked throughout; FAR_EL1 is read first as it is only
 * meaningful for watchpoints.
 */
176 static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
177 {
178 	unsigned long far = read_sysreg(far_el1);
179 
180 	/*
181 	 * The CPU masked interrupts, and we are leaving them masked during
182 	 * do_debug_exception(). Update PMR as if we had called
183 	 * local_daif_mask().
184 	 */
185 	if (system_uses_irq_prio_masking())
186 		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
187 
188 	arm64_enter_el1_dbg(regs);
189 	do_debug_exception(far, esr, regs);
190 	arm64_exit_el1_dbg(regs);
191 }
192 
/* EL1 pointer authentication (FPAC) fault. */
193 static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
194 {
195 	enter_from_kernel_mode(regs);
196 	local_daif_inherit(regs);
197 	do_ptrauth_fault(regs, esr);
198 	local_daif_mask();
199 	exit_to_kernel_mode(regs);
200 }
201 
/*
 * Entry point from the EL1 synchronous exception vector: dispatch on the
 * exception class (EC) field of ESR_EL1 to the per-class handler.
 */
202 asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
203 {
204 	unsigned long esr = read_sysreg(esr_el1);
205 
206 	switch (ESR_ELx_EC(esr)) {
207 	case ESR_ELx_EC_DABT_CUR:
208 	case ESR_ELx_EC_IABT_CUR:
209 		el1_abort(regs, esr);
210 		break;
211 	/*
212 	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
213 	 * recursive exception when trying to push the initial pt_regs.
214 	 */
215 	case ESR_ELx_EC_PC_ALIGN:
216 		el1_pc(regs, esr);
217 		break;
218 	case ESR_ELx_EC_SYS64:
219 	case ESR_ELx_EC_UNKNOWN:
220 		el1_undef(regs);
221 		break;
222 	case ESR_ELx_EC_BREAKPT_CUR:
223 	case ESR_ELx_EC_SOFTSTP_CUR:
224 	case ESR_ELx_EC_WATCHPT_CUR:
225 	case ESR_ELx_EC_BRK64:
226 		el1_dbg(regs, esr);
227 		break;
228 	case ESR_ELx_EC_FPAC:
229 		el1_fpac(regs, esr);
230 		break;
231 	default:
232 		el1_inv(regs, esr);
233 	}
234 }
235 
/*
 * Called on every exception taken from EL0: record IRQs-off for lockdep,
 * sanity-check the context-tracking state, then leave user context
 * (RCU/context tracking) before tracing is allowed.
 */
236 asmlinkage void noinstr enter_from_user_mode(void)
237 {
238 	lockdep_hardirqs_off(CALLER_ADDR0);
239 	CT_WARN_ON(ct_state() != CONTEXT_USER);
240 	user_exit_irqoff();
241 	trace_hardirqs_off_finish();
242 }
243 
/*
 * Inverse of enter_from_user_mode(): announce the hardirqs-on transition
 * to tracing/lockdep, re-enter user context, and only then tell lockdep
 * IRQs are on.
 */
244 asmlinkage void noinstr exit_to_user_mode(void)
245 {
246 	trace_hardirqs_on_prepare();
247 	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
248 	user_enter_irqoff();
249 	lockdep_hardirqs_on(CALLER_ADDR0);
250 }
251 
/*
 * EL0 data abort: read FAR_EL1 before enabling exceptions (a later
 * exception would clobber it), then handle with IRQs enabled.
 */
252 static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
253 {
254 	unsigned long far = read_sysreg(far_el1);
255 
256 	enter_from_user_mode();
257 	local_daif_restore(DAIF_PROCCTX);
258 	do_mem_abort(far, esr, regs);
259 }
260 
/* EL0 instruction abort; may require branch-predictor hardening first. */
261 static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
262 {
263 	unsigned long far = read_sysreg(far_el1);
264 
265 	/*
266 	 * We've taken an instruction abort from userspace and not yet
267 	 * re-enabled IRQs. If the address is a kernel address, apply
268 	 * BP hardening prior to enabling IRQs and pre-emption.
269 	 */
270 	if (!is_ttbr0_addr(far))
271 		arm64_apply_bp_hardening();
272 
273 	enter_from_user_mode();
274 	local_daif_restore(DAIF_PROCCTX);
275 	do_mem_abort(far, esr, regs);
276 }
277 
/* EL0 FP/ASIMD access trap. */
278 static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
279 {
280 	enter_from_user_mode();
281 	local_daif_restore(DAIF_PROCCTX);
282 	do_fpsimd_acc(esr, regs);
283 }
284 
/* EL0 SVE access trap. */
285 static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
286 {
287 	enter_from_user_mode();
288 	local_daif_restore(DAIF_PROCCTX);
289 	do_sve_acc(esr, regs);
290 }
291 
/* EL0 FP/ASIMD exception. */
292 static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
293 {
294 	enter_from_user_mode();
295 	local_daif_restore(DAIF_PROCCTX);
296 	do_fpsimd_exc(esr, regs);
297 }
298 
/* EL0 trapped system-register access (also used for trapped WFx). */
299 static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
300 {
301 	enter_from_user_mode();
302 	local_daif_restore(DAIF_PROCCTX);
303 	do_sysinstr(esr, regs);
304 }
305 
/*
 * EL0 PC alignment fault; if the faulting PC is a kernel address, apply
 * BP hardening before IRQs/preemption are enabled (cf. el0_ia()).
 */
306 static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
307 {
308 	unsigned long far = read_sysreg(far_el1);
309 
310 	if (!is_ttbr0_addr(instruction_pointer(regs)))
311 		arm64_apply_bp_hardening();
312 
313 	enter_from_user_mode();
314 	local_daif_restore(DAIF_PROCCTX);
315 	do_sp_pc_abort(far, esr, regs);
316 }
317 
/* EL0 SP alignment fault; the faulting address is the saved user SP. */
318 static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
319 {
320 	enter_from_user_mode();
321 	local_daif_restore(DAIF_PROCCTX);
322 	do_sp_pc_abort(regs->sp, esr, regs);
323 }
324 
/* EL0 undefined instruction. */
325 static void noinstr el0_undef(struct pt_regs *regs)
326 {
327 	enter_from_user_mode();
328 	local_daif_restore(DAIF_PROCCTX);
329 	do_undefinstr(regs);
330 }
331 
/* EL0 Branch Target Identification (BTI) exception. */
332 static void noinstr el0_bti(struct pt_regs *regs)
333 {
334 	enter_from_user_mode();
335 	local_daif_restore(DAIF_PROCCTX);
336 	do_bti(regs);
337 }
338 
/* Catch-all for EL0 exception classes with no dedicated handler. */
339 static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
340 {
341 	enter_from_user_mode();
342 	local_daif_restore(DAIF_PROCCTX);
343 	bad_el0_sync(regs, 0, esr);
344 }
345 
/*
 * EL0 debug exception: handled with IRQs still masked; DAIF is restored
 * (NOIRQ) only after the debug handler has run.
 */
346 static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
347 {
348 	/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
349 	unsigned long far = read_sysreg(far_el1);
350 
	/* As in el1_dbg(): update PMR as if local_daif_mask() had run. */
351 	if (system_uses_irq_prio_masking())
352 		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
353 
354 	enter_from_user_mode();
355 	do_debug_exception(far, esr, regs);
356 	local_daif_restore(DAIF_PROCCTX_NOIRQ);
357 }
358 
/* EL0 AArch64 system call (SVC). */
359 static void noinstr el0_svc(struct pt_regs *regs)
360 {
	/* The CPU masked IRQs at entry; bring PMR in line before handling. */
361 	if (system_uses_irq_prio_masking())
362 		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
363 
364 	enter_from_user_mode();
365 	do_el0_svc(regs);
366 }
367 
/* EL0 pointer authentication (FPAC) fault. */
368 static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
369 {
370 	enter_from_user_mode();
371 	local_daif_restore(DAIF_PROCCTX);
372 	do_ptrauth_fault(regs, esr);
373 }
374 
/*
 * Entry point from the EL0 (AArch64) synchronous exception vector:
 * dispatch on the ESR_EL1 exception class to the per-class handler.
 */
375 asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
376 {
377 	unsigned long esr = read_sysreg(esr_el1);
378 
379 	switch (ESR_ELx_EC(esr)) {
380 	case ESR_ELx_EC_SVC64:
381 		el0_svc(regs);
382 		break;
383 	case ESR_ELx_EC_DABT_LOW:
384 		el0_da(regs, esr);
385 		break;
386 	case ESR_ELx_EC_IABT_LOW:
387 		el0_ia(regs, esr);
388 		break;
389 	case ESR_ELx_EC_FP_ASIMD:
390 		el0_fpsimd_acc(regs, esr);
391 		break;
392 	case ESR_ELx_EC_SVE:
393 		el0_sve_acc(regs, esr);
394 		break;
395 	case ESR_ELx_EC_FP_EXC64:
396 		el0_fpsimd_exc(regs, esr);
397 		break;
398 	case ESR_ELx_EC_SYS64:
399 	case ESR_ELx_EC_WFx:
400 		el0_sys(regs, esr);
401 		break;
402 	case ESR_ELx_EC_SP_ALIGN:
403 		el0_sp(regs, esr);
404 		break;
405 	case ESR_ELx_EC_PC_ALIGN:
406 		el0_pc(regs, esr);
407 		break;
408 	case ESR_ELx_EC_UNKNOWN:
409 		el0_undef(regs);
410 		break;
411 	case ESR_ELx_EC_BTI:
412 		el0_bti(regs);
413 		break;
414 	case ESR_ELx_EC_BREAKPT_LOW:
415 	case ESR_ELx_EC_SOFTSTP_LOW:
416 	case ESR_ELx_EC_WATCHPT_LOW:
417 	case ESR_ELx_EC_BRK64:
418 		el0_dbg(regs, esr);
419 		break;
420 	case ESR_ELx_EC_FPAC:
421 		el0_fpac(regs, esr);
422 		break;
423 	default:
424 		el0_inv(regs, esr);
425 	}
426 }
427 
428 #ifdef CONFIG_COMPAT
/* EL0 AArch32 trapped CP15 access (compat tasks only). */
429 static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
430 {
431 	enter_from_user_mode();
432 	local_daif_restore(DAIF_PROCCTX);
433 	do_cp15instr(esr, regs);
434 }
435 
/* EL0 AArch32 system call (SVC); mirrors el0_svc() for compat tasks. */
436 static void noinstr el0_svc_compat(struct pt_regs *regs)
437 {
	/* The CPU masked IRQs at entry; bring PMR in line before handling. */
438 	if (system_uses_irq_prio_masking())
439 		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
440 
441 	enter_from_user_mode();
442 	do_el0_svc_compat(regs);
443 }
444 
/*
 * Entry point from the EL0 AArch32 (compat) synchronous exception vector:
 * dispatch on the ESR_EL1 exception class, sharing most handlers with the
 * native el0_sync_handler().
 */
445 asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
446 {
447 	unsigned long esr = read_sysreg(esr_el1);
448 
449 	switch (ESR_ELx_EC(esr)) {
450 	case ESR_ELx_EC_SVC32:
451 		el0_svc_compat(regs);
452 		break;
453 	case ESR_ELx_EC_DABT_LOW:
454 		el0_da(regs, esr);
455 		break;
456 	case ESR_ELx_EC_IABT_LOW:
457 		el0_ia(regs, esr);
458 		break;
459 	case ESR_ELx_EC_FP_ASIMD:
460 		el0_fpsimd_acc(regs, esr);
461 		break;
462 	case ESR_ELx_EC_FP_EXC32:
463 		el0_fpsimd_exc(regs, esr);
464 		break;
465 	case ESR_ELx_EC_PC_ALIGN:
466 		el0_pc(regs, esr);
467 		break;
	/* CP14 accesses are treated as undefined for compat tasks. */
468 	case ESR_ELx_EC_UNKNOWN:
469 	case ESR_ELx_EC_CP14_MR:
470 	case ESR_ELx_EC_CP14_LS:
471 	case ESR_ELx_EC_CP14_64:
472 		el0_undef(regs);
473 		break;
474 	case ESR_ELx_EC_CP15_32:
475 	case ESR_ELx_EC_CP15_64:
476 		el0_cp15(regs, esr);
477 		break;
478 	case ESR_ELx_EC_BREAKPT_LOW:
479 	case ESR_ELx_EC_SOFTSTP_LOW:
480 	case ESR_ELx_EC_WATCHPT_LOW:
481 	case ESR_ELx_EC_BKPT32:
482 		el0_dbg(regs, esr);
483 		break;
484 	default:
485 		el0_inv(regs, esr);
486 	}
487 }
488 #endif /* CONFIG_COMPAT */
489