/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>

struct interrupt_state {
#ifdef CONFIG_PPC_BOOK3E_64
	enum ctx_state ctx_state;
#endif
};

static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
		mtspr(SPRN_DBSR, -1);
		mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
	}
#endif
}

static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
	/*
	 * Book3E reconciles irq soft mask in asm
	 */
#ifdef CONFIG_PPC_BOOK3S_64
	if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
		trace_hardirqs_off();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (user_mode(regs)) {
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();

		account_cpu_user_entry();
		account_stolen_time();
	} else {
		/*
		 * CT_WARN_ON comes here via program_check_exception,
		 * so avoid recursion.
		 */
		if (TRAP(regs) != 0x700)
			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
	}
#endif

#ifdef CONFIG_PPC_BOOK3E_64
	state->ctx_state = exception_enter();
	if (user_mode(regs))
		account_cpu_user_entry();
#endif
}

/*
 * Care should be taken to note that interrupt_exit_prepare and
 * interrupt_async_exit_prepare do not necessarily return immediately to
 * regs context (e.g., if regs is usermode, we don't necessarily return to
 * user mode). Other interrupts might be taken between here and return,
 * context switch / preemption may occur in the exit path after this, or a
 * signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3E_64
	exception_exit(state->ctx_state);
#endif
	/*
	 * Book3S exits to user via interrupt_exit_user_prepare(), which does
	 * context tracking. That is a cleaner way to handle PREEMPT=y and
	 * avoids context entry/exit in e.g., preempt_schedule_irq(), which
	 * is likely to be where the core code wants to end up.
	 *
	 * The above is also why we can't do the
	 *
	 *     if (user_mode(regs))
	 *         user_enter_irqoff();
	 *
	 * sequence here.
	 */
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_CTRL) &&
	    !test_thread_local_flags(_TLF_RUNLATCH))
		__ppc64_runlatch_on();
#endif

	interrupt_enter_prepare(regs, state);
	irq_enter();
}

static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
	irq_exit();
	interrupt_exit_prepare(regs, state);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S_64
	u8 irq_soft_mask;
	u8 irq_happened;
#endif
	u8 ftrace_enabled;
#endif
};

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S_64
	state->irq_soft_mask = local_paca->irq_soft_mask;
	state->irq_happened = local_paca->irq_happened;

	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
	 * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
	 * because that goes through irq tracing which we don't want in NMI.
	 */
	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Don't do any per-CPU operations until interrupt state is fixed */
#endif
	/* Allow DEC and PMI to be traced when they are soft-NMI */
	if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) {
		state->ftrace_enabled = this_cpu_get_ftrace_enabled();
		this_cpu_set_ftrace_enabled(0);
	}
#endif

	/*
	 * Do not use nmi_enter() for pseries hash guest taking a real-mode
	 * NMI because not everything it touches is within the RMA limit.
	 */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
			!firmware_has_feature(FW_FEATURE_LPAR) ||
			radix_enabled() || (mfmsr() & MSR_DR))
		nmi_enter();
}

static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
			!firmware_has_feature(FW_FEATURE_LPAR) ||
			radix_enabled() || (mfmsr() & MSR_DR))
		nmi_exit();

#ifdef CONFIG_PPC64
	if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260)
		this_cpu_set_ftrace_enabled(state->ftrace_enabled);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Check we didn't change the pending interrupt mask. */
	WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
	local_paca->irq_happened = state->irq_happened;
	local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
#endif
}

/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is the noinstr section is placed
 * after the main text section, i.e., very far away from the interrupt entry
 * asm. That creates problems with fitting linker stubs when building large
 * kernels.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address

/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 *
 * Raw interrupt handlers must not enable or disable interrupts, or
 * schedule. Tracing and instrumentation (ftrace, lockdep, etc.) are
 * not advisable either; they may be possible in a pinch, but the
 * trace will look odd at least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	ret = ____##func (regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)
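
/*
 * Usage sketch (illustrative only; "example_raw_handler" is a
 * hypothetical name, not a handler the kernel defines). The macro
 * expands to the wrapper above, so a definition supplies only the
 * body:
 *
 *	DEFINE_INTERRUPT_HANDLER_RAW(example_raw_handler)
 *	{
 *		// must not enable/disable irqs, schedule, or trace
 *		return 0;	// value handed back to the asm caller
 *	}
 */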

/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)					\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func)					\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	____##func (regs);						\
									\
	interrupt_exit_prepare(regs, &state);				\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)
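
/*
 * Usage sketch (illustrative only; "example_handler" is a hypothetical
 * name). Entry/exit reconciliation and accounting are done by the
 * wrapper, so the body is ordinary kernel code:
 *
 *	DEFINE_INTERRUPT_HANDLER(example_handler)
 *	{
 *		// regs describes the interrupted context
 *	}
 */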

/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
	long ret;							\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	ret = ____##func (regs);					\
									\
	interrupt_exit_prepare(regs, &state);				\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)
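
/*
 * Usage sketch (illustrative only; "example_ret_handler" is a
 * hypothetical name). Identical to DEFINE_INTERRUPT_HANDLER except
 * that the body computes a value for the asm caller:
 *
 *	DEFINE_INTERRUPT_HANDLER_RET(example_ret_handler)
 *	{
 *		return 0;	// e.g., a status for the asm caller
 *	}
 */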

/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)				\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_async_enter_prepare(regs, &state);			\
									\
	____##func (regs);						\
									\
	interrupt_async_exit_prepare(regs, &state);			\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)
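
/*
 * Usage sketch (illustrative only; "example_async_handler" is a
 * hypothetical name -- real asynchronous handlers such as
 * timer_interrupt are declared below). The wrapper brackets the body
 * with irq_enter()/irq_exit():
 *
 *	DEFINE_INTERRUPT_HANDLER_ASYNC(example_async_handler)
 *	{
 *		// runs in hardirq context
 *	}
 */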

/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_nmi_state state;				\
	long ret;							\
									\
	interrupt_nmi_enter_prepare(regs, &state);			\
									\
	ret = ____##func (regs);					\
									\
	interrupt_nmi_exit_prepare(regs, &state);			\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)
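
/*
 * Usage sketch (illustrative only; "example_nmi_handler" is a
 * hypothetical name). The wrapper performs the NMI entry/exit
 * bookkeeping, so the body must only do NMI-safe work:
 *
 *	DEFINE_INTERRUPT_HANDLER_NMI(example_nmi_handler)
 *	{
 *		// no sleeping, no irq enable, NMI-safe accesses only
 *		return 0;
 *	}
 */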

/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
#else
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
#endif
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER(unrecoverable_exception);
DECLARE_INTERRUPT_HANDLER(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER_RET(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

void replay_system_reset(void);
void replay_soft_interrupts(void);

static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();
}

#endif /* _ASM_POWERPC_INTERRUPT_H */