/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>

static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
	if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
		/* Can avoid a test-and-clear because NMIs do not call this */
		clear_thread_local_flags(_TLF_NAPPING);
		regs->nip = (unsigned long)power4_idle_nap_return;
	}
#endif
}

struct interrupt_state {
#ifdef CONFIG_PPC_BOOK3E_64
	enum ctx_state ctx_state;
#endif
};

static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
		mtspr(SPRN_DBSR, -1);
		mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
	}
#endif
}

static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC32
	if (!arch_irq_disabled_regs(regs))
		trace_hardirqs_off();

	if (user_mode(regs)) {
		kuep_lock();
		account_cpu_user_entry();
	} else {
		kuap_save_and_lock(regs);
	}
#endif
	/*
	 * Book3E reconciles irq soft mask in asm
	 */
#ifdef CONFIG_PPC_BOOK3S_64
	if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
		trace_hardirqs_off();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (user_mode(regs)) {
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();

		account_cpu_user_entry();
		account_stolen_time();
	} else {
		/*
		 * CT_WARN_ON comes here via program_check_exception,
		 * so avoid recursion.
		 */
		if (TRAP(regs) != 0x700)
			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
	}
#endif

#ifdef CONFIG_PPC_BOOK3E_64
	state->ctx_state = exception_enter();
	if (user_mode(regs))
		account_cpu_user_entry();
#endif

	booke_restore_dbcr0();
}

/*
 * Note that interrupt_exit_prepare and interrupt_async_exit_prepare do not
 * necessarily return immediately to the regs context (e.g., if regs is user
 * mode, we don't necessarily return directly to user mode). Other interrupts
 * may be taken between here and return, a context switch / preemption may
 * occur in the exit path after this, or a signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However, interrupt_nmi_exit_prepare does return directly to the regs
 * context, because NMIs do not do "exit work" or replay soft-masked
 * interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3E_64
	exception_exit(state->ctx_state);
#endif

	if (user_mode(regs))
		kuep_unlock();
	/*
	 * Book3S exits to user via interrupt_exit_user_prepare(), which does
	 * context tracking, which is a cleaner way to handle PREEMPT=y
	 * and to avoid context entry/exit in e.g. preempt_schedule_irq(),
	 * which is likely to be where the core code wants to end up.
	 *
	 * The above comment explains why we can't do the
	 *
	 *     if (user_mode(regs))
	 *         user_exit_irqoff();
	 *
	 * sequence here.
	 */
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_CTRL) &&
	    !test_thread_local_flags(_TLF_RUNLATCH))
		__ppc64_runlatch_on();
#endif

	interrupt_enter_prepare(regs, state);
	irq_enter();
}

static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
	/*
	 * Adjust at exit so the main handler sees the true NIA. This must
	 * come before irq_exit() because irq_exit can enable interrupts, and
	 * if another interrupt is taken before nap_adjust_return has run
	 * here, then that interrupt would return directly to the idle nap
	 * return.
	 */
	nap_adjust_return(regs);

	irq_exit();
	interrupt_exit_prepare(regs, state);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
	u8 irq_soft_mask;
	u8 irq_happened;
	u8 ftrace_enabled;
#endif
};

static inline bool nmi_disables_ftrace(struct pt_regs *regs)
{
	/* Allow DEC and PMI to be traced when they are soft-NMI */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
		if (TRAP(regs) == 0x900)
			return false;
		if (TRAP(regs) == 0xf00)
			return false;
	}
	if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
		if (TRAP(regs) == 0x260)
			return false;
	}

	return true;
}

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
	state->irq_soft_mask = local_paca->irq_soft_mask;
	state->irq_happened = local_paca->irq_happened;

	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
	 * the right thing, and set PACA_IRQ_HARD_DIS. We do not want to
	 * reconcile because that goes through irq tracing, which we don't
	 * want in NMI.
	 */
	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Don't do any per-CPU operations until interrupt state is fixed */

	if (nmi_disables_ftrace(regs)) {
		state->ftrace_enabled = this_cpu_get_ftrace_enabled();
		this_cpu_set_ftrace_enabled(0);
	}
#endif

	/*
	 * Do not use nmi_enter() for a pseries hash guest taking a real-mode
	 * NMI, because not everything it touches is within the RMA limit.
	 */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
			!firmware_has_feature(FW_FEATURE_LPAR) ||
			radix_enabled() || (mfmsr() & MSR_DR))
		nmi_enter();
}

static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
			!firmware_has_feature(FW_FEATURE_LPAR) ||
			radix_enabled() || (mfmsr() & MSR_DR))
		nmi_exit();

	/*
	 * An NMI does not call nap_adjust_return because an NMI should not
	 * create new work to do (it must use irq_work for that).
	 */

#ifdef CONFIG_PPC64
	if (nmi_disables_ftrace(regs))
		this_cpu_set_ftrace_enabled(state->ftrace_enabled);

	/* Check we didn't change the pending interrupt mask. */
	WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
	local_paca->irq_happened = state->irq_happened;
	local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
}

/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is that the noinstr section is
 * placed after the main text section, i.e., very far away from the interrupt
 * entry asm. That creates problems with fitting linker stubs when building
 * large kernels.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address

/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so that it acts as a function definition. Append the
 * body with a pair of curly brackets.
 *
 * Raw interrupt handlers must not enable or disable interrupts, or
 * schedule. Tracing and instrumentation (ftrace, lockdep, etc) are not
 * advisable either; although they may be possible in a pinch, the trace
 * will look odd at the least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
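 *
 * A minimal usage sketch follows; the handler name is hypothetical and is
 * shown only to illustrate the pattern:
 *
 *	DEFINE_INTERRUPT_HANDLER_RAW(do_example_raw)
 *	{
 *		return 0;
 *	}
 *
 * The asm entry code calls do_example_raw() directly, and the return value
 * is passed back to the asm caller with no reconciling, tracing or
 * accounting performed around the body.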
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	ret = ____##func (regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)					\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so that it acts as a function definition. Append the
 * body with a pair of curly brackets.
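 *
 * A minimal usage sketch, with a hypothetical handler name for
 * illustration only:
 *
 *	DEFINE_INTERRUPT_HANDLER(do_example_interrupt)
 *	{
 *	}
 *
 * This expands to a do_example_interrupt() entry point whose body runs
 * between interrupt_enter_prepare() and interrupt_exit_prepare().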
 */
#define DEFINE_INTERRUPT_HANDLER(func)					\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	____##func (regs);						\
									\
	interrupt_exit_prepare(regs, &state);				\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so that it acts as a function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
	long ret;							\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	ret = ____##func (regs);					\
									\
	interrupt_exit_prepare(regs, &state);				\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)				\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so that it acts as a function definition. Append the
 * body with a pair of curly brackets.
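 *
 * A minimal usage sketch, with a hypothetical handler name for
 * illustration only:
 *
 *	DEFINE_INTERRUPT_HANDLER_ASYNC(do_example_async_interrupt)
 *	{
 *	}
 *
 * The body runs between interrupt_async_enter_prepare() (which also calls
 * irq_enter()) and interrupt_async_exit_prepare().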
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_async_enter_prepare(regs, &state);			\
									\
	____##func (regs);						\
									\
	interrupt_async_exit_prepare(regs, &state);			\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so that it acts as a function definition. Append the
 * body with a pair of curly brackets.
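 *
 * A minimal usage sketch, with a hypothetical handler name for
 * illustration only:
 *
 *	DEFINE_INTERRUPT_HANDLER_NMI(do_example_nmi)
 *	{
 *		return 0;
 *	}
 *
 * The body runs between interrupt_nmi_enter_prepare() and
 * interrupt_nmi_exit_prepare(), and the return value is passed back to
 * the asm caller.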
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_nmi_state state;				\
	long ret;							\
									\
	interrupt_nmi_enter_prepare(regs, &state);			\
									\
	ret = ____##func (regs);					\
									\
	interrupt_nmi_exit_prepare(regs, &state);			\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)

/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
#else
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
#endif
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER_RET(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);

static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();
}

#endif /* _ASM_POWERPC_INTERRUPT_H */