/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>

static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
	if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
		/* Can avoid a test-and-clear because NMIs do not call this */
		clear_thread_local_flags(_TLF_NAPPING);
		regs->nip = (unsigned long)power4_idle_nap_return;
	}
#endif
}

struct interrupt_state {
#ifdef CONFIG_PPC_BOOK3E_64
	enum ctx_state ctx_state;
#endif
};

static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
		mtspr(SPRN_DBSR, -1);
		mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
	}
#endif
}

static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC32
	if (!arch_irq_disabled_regs(regs))
		trace_hardirqs_off();

	if (user_mode(regs)) {
		kuep_lock();
		account_cpu_user_entry();
	} else {
		kuap_save_and_lock(regs);
	}
#endif
	/*
	 * Book3E reconciles irq soft mask in asm
	 */
#ifdef CONFIG_PPC_BOOK3S_64
	if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
		trace_hardirqs_off();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (user_mode(regs)) {
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();

		account_cpu_user_entry();
		account_stolen_time();
	} else {
		/*
		 * CT_WARN_ON comes here via program_check_exception,
		 * so avoid recursion.
		 */
		if (TRAP(regs) != 0x700)
			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
	}
#endif

#ifdef CONFIG_PPC_BOOK3E_64
	state->ctx_state = exception_enter();
	if (user_mode(regs))
		account_cpu_user_entry();
#endif

	booke_restore_dbcr0();
}

/*
 * Note that interrupt_exit_prepare and interrupt_async_exit_prepare do not
 * necessarily return immediately to regs context (e.g., if regs is usermode,
 * we don't necessarily return to user mode). Other interrupts might be taken
 * between here and return, a context switch / preemption may occur in the
 * exit path after this, a signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However, interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3E_64
	exception_exit(state->ctx_state);
#endif

	if (user_mode(regs))
		kuep_unlock();
	/*
	 * Book3S exits to user via interrupt_exit_user_prepare(), which does
	 * context tracking. That is a cleaner way to handle PREEMPT=y, because
	 * it avoids context entry/exit in e.g. preempt_schedule_irq(), and is
	 * likely to be where the core code wants to end up.
	 *
	 * This is why we can't do the
	 *
	 *     if (user_mode(regs))
	 *         user_exit_irqoff();
	 *
	 * sequence here.
	 */
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_CTRL) &&
	    !test_thread_local_flags(_TLF_RUNLATCH))
		__ppc64_runlatch_on();
#endif

	interrupt_enter_prepare(regs, state);
	irq_enter();
}

static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
	/*
	 * Adjust at exit so the main handler sees the true NIA. This must
	 * come before irq_exit() because irq_exit can enable interrupts, and
	 * if another interrupt is taken before nap_adjust_return has run
	 * here, then that interrupt would return directly to idle nap return.
	 */
	nap_adjust_return(regs);

	irq_exit();
	interrupt_exit_prepare(regs, state);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S_64
	u8 irq_soft_mask;
	u8 irq_happened;
#endif
	u8 ftrace_enabled;
#endif
};

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S_64
	state->irq_soft_mask = local_paca->irq_soft_mask;
	state->irq_happened = local_paca->irq_happened;

	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does the
	 * right thing, and set PACA_IRQ_HARD_DIS. We do not want to reconcile
	 * here because that goes through irq tracing, which we don't want in
	 * an NMI.
	 */
	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Don't do any per-CPU operations until interrupt state is fixed */
#endif
	/* Allow DEC and PMI to be traced when they are soft-NMI */
	if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) {
		state->ftrace_enabled = this_cpu_get_ftrace_enabled();
		this_cpu_set_ftrace_enabled(0);
	}
#endif

	/*
	 * Do not use nmi_enter() for a pseries hash guest taking a real-mode
	 * NMI, because not everything nmi_enter() touches is within the RMA
	 * limit.
	 */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
			!firmware_has_feature(FW_FEATURE_LPAR) ||
			radix_enabled() || (mfmsr() & MSR_DR))
		nmi_enter();
}

static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
			!firmware_has_feature(FW_FEATURE_LPAR) ||
			radix_enabled() || (mfmsr() & MSR_DR))
		nmi_exit();

	/*
	 * An NMI does not call nap_adjust_return(), because an NMI should not
	 * create new work to do (it must use irq_work for that).
	 */

#ifdef CONFIG_PPC64
	if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260)
		this_cpu_set_ftrace_enabled(state->ftrace_enabled);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Check we didn't change the pending interrupt mask. */
	WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
	local_paca->irq_happened = state->irq_happened;
	local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
#endif
}

/*
 * Unlike x86, don't use noinstr here; instead add NOKPROBE_SYMBOL to each
 * function definition. The reason is that the noinstr section is placed
 * after the main text section, i.e., very far away from the interrupt entry
 * asm, which creates problems with fitting linker stubs when building large
 * kernels.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address

/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 *
 * Raw interrupt handlers must not enable or disable interrupts, or
 * schedule. Tracing and instrumentation (ftrace, lockdep, etc.) are not
 * advisable either; they may be possible in a pinch, but at the least
 * the trace will look odd.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	ret = ____##func (regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)

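/*
 * Illustrative sketch only (not part of the upstream header): a raw handler
 * body is appended to the macro in a .c file, and its return value is passed
 * back to the asm caller. The handler name below is hypothetical.
 *
 *	DEFINE_INTERRUPT_HANDLER_RAW(example_raw_fault)
 *	{
 *		long err = 0;
 *
 *		// No irq enable/disable, scheduling, or tracing in here.
 *		if (!(regs->msr & MSR_RI))
 *			err = -1;
 *
 *		return err;
 *	}
 */
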
/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)					\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func)					\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	____##func (regs);						\
									\
	interrupt_exit_prepare(regs, &state);				\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)

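/*
 * Illustrative sketch only (not part of the upstream header): the usual
 * pattern is to DECLARE the handler in a header and DEFINE it in a .c file,
 * appending the body in curly brackets. The handler name below is
 * hypothetical.
 *
 *	// header:
 *	DECLARE_INTERRUPT_HANDLER(example_exception);
 *
 *	// .c file:
 *	DEFINE_INTERRUPT_HANDLER(example_exception)
 *	{
 *		// Reconciling, accounting and context tracking are done by
 *		// interrupt_enter_prepare()/interrupt_exit_prepare() around
 *		// this body.
 *		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
 *	}
 */
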
/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
	long ret;							\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	ret = ____##func (regs);					\
									\
	interrupt_exit_prepare(regs, &state);				\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)				\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_async_enter_prepare(regs, &state);			\
									\
	____##func (regs);						\
									\
	interrupt_async_exit_prepare(regs, &state);			\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_nmi_state state;				\
	long ret;							\
									\
	interrupt_nmi_enter_prepare(regs, &state);			\
									\
	ret = ____##func (regs);					\
									\
	interrupt_nmi_exit_prepare(regs, &state);			\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)

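/*
 * Illustrative sketch only (not part of the upstream header): an NMI handler
 * is defined the same way and returns a value to the asm caller through the
 * wrapper above. The handler name and return value are hypothetical.
 *
 *	DEFINE_INTERRUPT_HANDLER_NMI(example_nmi_exception)
 *	{
 *		// Runs between interrupt_nmi_enter_prepare() and
 *		// interrupt_nmi_exit_prepare(): no scheduling, and no new
 *		// work created here (use irq_work for that).
 *		return 0;
 *	}
 */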

/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
#else
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
#endif
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER_RET(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);

static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();
}

#endif /* _ASM_POWERPC_INTERRUPT_H */