irq.c (e93dee186fc95f2058b0c9d2317d8b876b8512db → 7d7b28b302085e1ec2815bc9f5205af28394c5db)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan

--- 53 unchanged lines hidden ---

#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/hw_irq.h>
#include <asm/softirq_stack.h>
#include <asm/ppc_asm.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include <asm/dbell.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

#ifdef CONFIG_PPC32
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
u32 tau_interrupts(unsigned long cpu);
#endif
#endif /* CONFIG_PPC32 */

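/*
 * The PPC64 block below implements lazy interrupt soft-masking: while
 * soft-disabled, an arriving interrupt is recorded in
 * paca->irq_happened and left hard-disabled rather than handled, then
 * replayed when interrupts are soft-enabled again.
 */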
#ifdef CONFIG_PPC64

int distribute_irqs = 1;

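/*
 * r13 holds the per-CPU paca pointer on 64-bit, so the lbz below reads
 * local_paca->irq_happened with a single byte load; notrace keeps
 * tracer hooks out of this very low level helper.
 */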
static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}

void replay_soft_interrupts(void)
{
	struct pt_regs regs;

	/*
	 * Be careful here, calling these interrupt handlers can cause
	 * softirqs to be raised, which they may run when calling irq_exit,
	 * which will cause local_irq_enable() to be run, which can then
	 * recurse into this function. Don't keep any state across
	 * interrupt handler calls which may change underneath us.
	 *
	 * We use local_paca rather than get_paca() to avoid all the
	 * debug_smp_processor_id() business in this low level function.
	 */

	ppc_save_regs(&regs);
	regs.softe = IRQS_ENABLED;
	regs.msr |= MSR_EE;

again:
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

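	/*
	 * Each replay block below follows the same pattern: clear the
	 * pending bit before calling the handler (so the same kind of
	 * interrupt raised during the handler is not lost), then
	 * hard-disable again if the handler hard-enabled and cleared
	 * PACA_IRQ_HARD_DIS.
	 */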
	/*
	 * Check if a hypervisor Maintenance interrupt happened.
	 * This is a higher priority interrupt than the others, so
	 * replay it first.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
		local_paca->irq_happened &= ~PACA_IRQ_HMI;
		regs.trap = INTERRUPT_HMI;
		handle_hmi_exception(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (local_paca->irq_happened & PACA_IRQ_DEC) {
		local_paca->irq_happened &= ~PACA_IRQ_DEC;
		regs.trap = INTERRUPT_DECREMENTER;
		timer_interrupt(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (local_paca->irq_happened & PACA_IRQ_EE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE;
		regs.trap = INTERRUPT_EXTERNAL;
		do_IRQ(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		regs.trap = INTERRUPT_DOORBELL;
		doorbell_exception(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	/* Book3E does not support soft-masking PMI interrupts */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
		local_paca->irq_happened &= ~PACA_IRQ_PMI;
		regs.trap = INTERRUPT_PERFMON;
		performance_monitor_exception(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) {
		/*
		 * We are responding to the next interrupt, so interrupt-off
		 * latencies should be reset here.
		 */
		trace_hardirqs_on();
		trace_hardirqs_off();
		goto again;
	}
}

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
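/*
 * With Book3S-64 KUAP, the AMR register controls user access. Force it
 * to the fully blocked state around the replay so replayed handlers
 * never run with a user access window open, then restore the caller's
 * AMR afterwards.
 */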
static inline void replay_soft_interrupts_irqrestore(void)
{
	unsigned long kuap_state = get_kuap();

	/*
	 * Check if anything calls local_irq_enable/restore() when KUAP is
	 * disabled (user access enabled). We handle that case here by saving
	 * and re-locking AMR but we shouldn't get here in the first place,
	 * hence the warning.
	 */
	kuap_assert_locked();

	if (kuap_state != AMR_KUAP_BLOCKED)
		set_kuap(AMR_KUAP_BLOCKED);

	replay_soft_interrupts();

	if (kuap_state != AMR_KUAP_BLOCKED)
		set_kuap(kuap_state);
}
#else
#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
#endif

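/*
 * Restore the soft-mask state; this is the path behind
 * local_irq_restore() and, with mask == IRQS_ENABLED (0), behind
 * soft-enabling. Enabling may require hard-disabling and replaying any
 * interrupts caught while soft-disabled. notrace, since tracers
 * themselves toggle interrupts through here.
 */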
notrace void arch_local_irq_restore(unsigned long mask)
{
	unsigned char irq_happened;

	/* Write the new soft-enabled value if it is a disable */
	if (mask) {
		irq_soft_mask_set(mask);
		return;
	}

	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(in_nmi() || in_hardirq());

	/*
	 * After the stb, interrupts are unmasked and there are no interrupts
	 * pending replay. The restart sequence makes this atomic with
	 * respect to soft-masked interrupts. If this was just a simple code
	 * sequence, a soft-masked interrupt could become pending right after
	 * the comparison and before the stb.
	 *
	 * This allows interrupts to be unmasked without hard disabling, and
	 * also without new hard interrupts coming in ahead of pending ones.
	 */
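	/*
	 * Decoding the sequence: lbz loads paca->irq_happened into r9; if
	 * it is non-zero there is replay work to do, so branch to the C
	 * label "happened". Otherwise r9 is known to be zero, and stb
	 * writes that zero (IRQS_ENABLED) to paca->irq_soft_mask. The
	 * RESTART_TABLE(1b, 2b, 1b) entry makes a soft-masked interrupt
	 * that hits the 1b..2b window restart it from 1b, keeping the
	 * check-and-unmask atomic against such interrupts.
	 */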
	asm_volatile_goto(
"1:					\n"
"	lbz	9,%0(13)		\n"
"	cmpwi	9,0			\n"
"	bne	%l[happened]		\n"
"	stb	9,%1(13)		\n"
"2:					\n"
	RESTART_TABLE(1b, 2b, 1b)
	: : "i" (offsetof(struct paca_struct, irq_happened)),
	    "i" (offsetof(struct paca_struct, irq_soft_mask))
	: "cr0", "r9"
	: happened);

	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(!(mfmsr() & MSR_EE));

	return;

happened:
	irq_happened = get_irq_happened();
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(!irq_happened);

	if (irq_happened == PACA_IRQ_HARD_DIS) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			WARN_ON_ONCE(mfmsr() & MSR_EE);
		irq_soft_mask_set(IRQS_ENABLED);
		local_paca->irq_happened = 0;
		__hard_irq_enable();
		return;
	}

	/* Have interrupts to replay, need to hard disable first */
	if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
			if (!(mfmsr() & MSR_EE)) {
				/*
				 * An interrupt could have come in and cleared
				 * MSR[EE] and set IRQ_HARD_DIS, so check
				 * IRQ_HARD_DIS again and warn if it is still
				 * clear.
				 */
				irq_happened = get_irq_happened();
				WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
			}
		}
		__hard_irq_disable();
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
	} else {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
			if (WARN_ON_ONCE(mfmsr() & MSR_EE))
				__hard_irq_disable();
		}
	}

	/*
	 * Disable preempt here, so that the below preempt_enable will
	 * perform resched if required (a replayed interrupt may set
	 * need_resched).
	 */
	preempt_disable();
	irq_soft_mask_set(IRQS_ALL_DISABLED);
	trace_hardirqs_off();

	replay_soft_interrupts_irqrestore();
	local_paca->irq_happened = 0;

	trace_hardirqs_on();
	irq_soft_mask_set(IRQS_ENABLED);
	__hard_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);

/*
 * This is a helper to use when about to enter an idle low-power state,
 * where entering that state has the side effect of re-enabling
 * interrupts (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ENABLED);

	/* Tell the caller to enter the low power state */
	return true;
}

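/*
 * A rough usage sketch (illustrative, not part of this file): a
 * platform's ppc_md.power_save hook would be structured as
 *
 *	if (prep_irq_for_idle())
 *		enter_low_power();	// may hard-enable interrupts
 *
 * where enter_low_power() stands in for the platform-specific idle
 * entry (e.g. an H_CEDE hypercall). On a false return, interrupts are
 * left hard disabled with the pending reason recorded for replay.
 */
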
#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not get long
 * off times. Must be paired with fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
	WARN_ON(!irqs_disabled());

	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	return true;
}

/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET	0xff

static const u8 srr1_to_lazyirq[0x10] = {
	0, 0, 0,
	PACA_IRQ_DBELL,
	IRQ_SYSTEM_RESET,
	PACA_IRQ_DBELL,
	PACA_IRQ_DEC,
	0,
	PACA_IRQ_EE,
	PACA_IRQ_EE,
	PACA_IRQ_HMI,
	0, 0, 0, 0, 0 };

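/*
 * Fabricate a minimal register frame and take the system reset now.
 * regs.trap = 0x100 is the system reset vector; paca->in_nmi is set
 * for the duration since system reset is an NMI.
 */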
void replay_system_reset(void)
{
	struct pt_regs regs;

	ppc_save_regs(&regs);
	regs.trap = 0x100;
	get_paca()->in_nmi = 1;
	system_reset_exception(&regs);
	get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);

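/*
 * SRR1[42:45] holds the wakeup reason on POWER8 and later; masking
 * with SRR1_WAKEMASK_P8 and shifting by 18 extracts that 4-bit field,
 * which indexes the 16-entry table above.
 */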
void irq_set_pending_from_srr1(unsigned long srr1)
{
	unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
	u8 reason = srr1_to_lazyirq[idx];

	/*
	 * Take the system reset now, which is immediately after registers
	 * are restored from idle. It's an NMI, so interrupts need not be
	 * re-enabled before it is taken.
	 */
	if (unlikely(reason == IRQ_SYSTEM_RESET)) {
		replay_system_reset();
		return;
	}

	if (reason == PACA_IRQ_DBELL) {
		/*
		 * When doorbell triggers a system reset wakeup, the message
		 * is not cleared, so if the doorbell interrupt is replayed
		 * and the IPI handled, the doorbell interrupt would still
		 * fire when EE is enabled.
		 *
		 * To avoid taking the superfluous doorbell interrupt,
		 * execute a msgclr here before the interrupt is replayed.
		 */
		ppc_msgclr(PPC_DBELL_MSGTYPE);
	}

	/*
	 * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
	 * so this can be called unconditionally with the SRR1 wake
	 * reason as returned by the idle code, which uses 0 to mean no
	 * interrupt.
	 *
	 * If a future CPU was to designate this as an interrupt reason,
	 * then a new index for no interrupt must be assigned.
	 */
	local_paca->irq_happened |= reason;
}
#endif /* CONFIG_PPC_BOOK3S */

/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
	/*
	 * This must only be called with interrupts soft-disabled,
	 * the replay will happen when re-enabling.
	 */
	WARN_ON(!arch_irqs_disabled());

	/*
	 * Interrupts must always be hard disabled before irq_happened is
	 * modified (to prevent lost update in case of interrupt between
	 * load and store).
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Indicate in the PACA that we have an interrupt to replay */
	local_paca->irq_happened |= PACA_IRQ_EE;
}

#endif /* CONFIG_PPC64 */

int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)

--- 285 unchanged lines hidden (view full) ---

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif

#ifdef CONFIG_PPC64
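/*
 * Boot-time switch: "noirqdistrib" on the kernel command line clears
 * distribute_irqs (defined near the top of the PPC64 section),
 * disabling the spreading of interrupts across CPUs.
 */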
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */