1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Derived from arch/i386/kernel/irq.c 4 * Copyright (C) 1992 Linus Torvalds 5 * Adapted from arch/i386 by Gary Thomas 6 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 7 * Updated and modified by Cort Dougan <cort@fsmlabs.com> 8 * Copyright (C) 1996-2001 Cort Dougan 9 * Adapted for Power Macintosh by Paul Mackerras 10 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) 11 * 12 * This file contains the code used by various IRQ handling routines: 13 * asking for different IRQ's should be done through these routines 14 * instead of just grabbing them. Thus setups with different IRQ numbers 15 * shouldn't result in any weird surprises, and installing new handlers 16 * should be easier. 17 * 18 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the 19 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit 20 * mask register (of which only 16 are defined), hence the weird shifting 21 * and complement of the cached_irq_mask. I want to be able to stuff 22 * this right into the SIU SMASK register. 23 * Many of the prep/chrp functions are conditional compiled on CONFIG_PPC_8xx 24 * to reduce code space and undefined function references. 25 */ 26 27 #undef DEBUG 28 29 #include <linux/export.h> 30 #include <linux/threads.h> 31 #include <linux/kernel_stat.h> 32 #include <linux/signal.h> 33 #include <linux/sched.h> 34 #include <linux/ptrace.h> 35 #include <linux/ioport.h> 36 #include <linux/interrupt.h> 37 #include <linux/timex.h> 38 #include <linux/init.h> 39 #include <linux/slab.h> 40 #include <linux/delay.h> 41 #include <linux/irq.h> 42 #include <linux/seq_file.h> 43 #include <linux/cpumask.h> 44 #include <linux/profile.h> 45 #include <linux/bitops.h> 46 #include <linux/list.h> 47 #include <linux/radix-tree.h> 48 #include <linux/mutex.h> 49 #include <linux/pci.h> 50 #include <linux/debugfs.h> 51 #include <linux/of.h> 52 #include <linux/of_irq.h> 53 #include <linux/vmalloc.h> 54 #include <linux/pgtable.h> 55 56 #include <linux/uaccess.h> 57 #include <asm/interrupt.h> 58 #include <asm/io.h> 59 #include <asm/irq.h> 60 #include <asm/cache.h> 61 #include <asm/prom.h> 62 #include <asm/ptrace.h> 63 #include <asm/machdep.h> 64 #include <asm/udbg.h> 65 #include <asm/smp.h> 66 #include <asm/livepatch.h> 67 #include <asm/asm-prototypes.h> 68 #include <asm/hw_irq.h> 69 #include <asm/softirq_stack.h> 70 71 #ifdef CONFIG_PPC64 72 #include <asm/paca.h> 73 #include <asm/firmware.h> 74 #include <asm/lv1call.h> 75 #include <asm/dbell.h> 76 #endif 77 #define CREATE_TRACE_POINTS 78 #include <asm/trace.h> 79 #include <asm/cpu_has_feature.h> 80 81 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); 82 EXPORT_PER_CPU_SYMBOL(irq_stat); 83 84 #ifdef CONFIG_PPC32 85 atomic_t ppc_n_lost_interrupts; 86 87 #ifdef CONFIG_TAU_INT 88 extern int tau_initialized; 89 u32 tau_interrupts(unsigned long cpu); 90 #endif 91 #endif /* CONFIG_PPC32 */ 92 93 #ifdef CONFIG_PPC64 94 95 int distribute_irqs = 1; 96 97 static inline notrace unsigned long get_irq_happened(void) 98 { 99 unsigned long happened; 100 101 __asm__ __volatile__("lbz %0,%1(13)" 102 : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened))); 103 104 return happened; 105 } 106 107 void replay_soft_interrupts(void) 108 { 109 struct pt_regs regs; 110 111 /* 112 * Be careful here, calling these interrupt handlers can cause 113 * softirqs to be raised, which they may run when calling irq_exit, 114 * which will cause local_irq_enable() to be run, which can then 115 * recurse into this function. Don't keep any state across 116 * interrupt handler calls which may change underneath us. 117 * 118 * We use local_paca rather than get_paca() to avoid all the 119 * debug_smp_processor_id() business in this low level function. 120 */ 121 122 ppc_save_regs(®s); 123 regs.softe = IRQS_ENABLED; 124 regs.msr |= MSR_EE; 125 126 again: 127 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) 128 WARN_ON_ONCE(mfmsr() & MSR_EE); 129 130 /* 131 * Force the delivery of pending soft-disabled interrupts on PS3. 132 * Any HV call will have this side effect. 133 */ 134 if (firmware_has_feature(FW_FEATURE_PS3_LV1)) { 135 u64 tmp, tmp2; 136 lv1_get_version_info(&tmp, &tmp2); 137 } 138 139 /* 140 * Check if an hypervisor Maintenance interrupt happened. 141 * This is a higher priority interrupt than the others, so 142 * replay it first. 143 */ 144 if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) { 145 local_paca->irq_happened &= ~PACA_IRQ_HMI; 146 regs.trap = INTERRUPT_HMI; 147 handle_hmi_exception(®s); 148 if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) 149 hard_irq_disable(); 150 } 151 152 if (local_paca->irq_happened & PACA_IRQ_DEC) { 153 local_paca->irq_happened &= ~PACA_IRQ_DEC; 154 regs.trap = INTERRUPT_DECREMENTER; 155 timer_interrupt(®s); 156 if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) 157 hard_irq_disable(); 158 } 159 160 if (local_paca->irq_happened & PACA_IRQ_EE) { 161 local_paca->irq_happened &= ~PACA_IRQ_EE; 162 regs.trap = INTERRUPT_EXTERNAL; 163 do_IRQ(®s); 164 if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) 165 hard_irq_disable(); 166 } 167 168 if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) { 169 local_paca->irq_happened &= ~PACA_IRQ_DBELL; 170 regs.trap = INTERRUPT_DOORBELL; 171 doorbell_exception(®s); 172 if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) 173 hard_irq_disable(); 174 } 175 176 /* Book3E does not support soft-masking PMI interrupts */ 177 if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) { 178 local_paca->irq_happened &= ~PACA_IRQ_PMI; 179 regs.trap = INTERRUPT_PERFMON; 180 performance_monitor_exception(®s); 181 if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) 182 hard_irq_disable(); 183 } 184 185 if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) { 186 /* 187 * We are responding to the next interrupt, so interrupt-off 188 * latencies should be reset here. 189 */ 190 trace_hardirqs_on(); 191 trace_hardirqs_off(); 192 goto again; 193 } 194 } 195 196 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP) 197 static inline void replay_soft_interrupts_irqrestore(void) 198 { 199 unsigned long kuap_state = get_kuap(); 200 201 /* 202 * Check if anything calls local_irq_enable/restore() when KUAP is 203 * disabled (user access enabled). We handle that case here by saving 204 * and re-locking AMR but we shouldn't get here in the first place, 205 * hence the warning. 206 */ 207 kuap_assert_locked(); 208 209 if (kuap_state != AMR_KUAP_BLOCKED) 210 set_kuap(AMR_KUAP_BLOCKED); 211 212 replay_soft_interrupts(); 213 214 if (kuap_state != AMR_KUAP_BLOCKED) 215 set_kuap(kuap_state); 216 } 217 #else 218 #define replay_soft_interrupts_irqrestore() replay_soft_interrupts() 219 #endif 220 221 #ifdef CONFIG_CC_HAS_ASM_GOTO 222 notrace void arch_local_irq_restore(unsigned long mask) 223 { 224 unsigned char irq_happened; 225 226 /* Write the new soft-enabled value if it is a disable */ 227 if (mask) { 228 irq_soft_mask_set(mask); 229 return; 230 } 231 232 /* 233 * After the stb, interrupts are unmasked and there are no interrupts 234 * pending replay. The restart sequence makes this atomic with 235 * respect to soft-masked interrupts. If this was just a simple code 236 * sequence, a soft-masked interrupt could become pending right after 237 * the comparison and before the stb. 238 * 239 * This allows interrupts to be unmasked without hard disabling, and 240 * also without new hard interrupts coming in ahead of pending ones. 241 */ 242 asm_volatile_goto( 243 "1: \n" 244 " lbz 9,%0(13) \n" 245 " cmpwi 9,0 \n" 246 " bne %l[happened] \n" 247 " stb 9,%1(13) \n" 248 "2: \n" 249 RESTART_TABLE(1b, 2b, 1b) 250 : : "i" (offsetof(struct paca_struct, irq_happened)), 251 "i" (offsetof(struct paca_struct, irq_soft_mask)) 252 : "cr0", "r9" 253 : happened); 254 255 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) 256 WARN_ON_ONCE(!(mfmsr() & MSR_EE)); 257 258 return; 259 260 happened: 261 irq_happened = get_irq_happened(); 262 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) 263 WARN_ON_ONCE(!irq_happened); 264 265 if (irq_happened == PACA_IRQ_HARD_DIS) { 266 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) 267 WARN_ON_ONCE(mfmsr() & MSR_EE); 268 irq_soft_mask_set(IRQS_ENABLED); 269 local_paca->irq_happened = 0; 270 __hard_irq_enable(); 271 return; 272 } 273 274 /* Have interrupts to replay, need to hard disable first */ 275 if (!(irq_happened & PACA_IRQ_HARD_DIS)) { 276 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) { 277 if (!(mfmsr() & MSR_EE)) { 278 /* 279 * An interrupt could have come in and cleared 280 * MSR[EE] and set IRQ_HARD_DIS, so check 281 * IRQ_HARD_DIS again and warn if it is still 282 * clear. 283 */ 284 irq_happened = get_irq_happened(); 285 WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS)); 286 } 287 } 288 __hard_irq_disable(); 289 local_paca->irq_happened |= PACA_IRQ_HARD_DIS; 290 } else { 291 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) { 292 if (WARN_ON_ONCE(mfmsr() & MSR_EE)) 293 __hard_irq_disable(); 294 } 295 } 296 297 /* 298 * Disable preempt here, so that the below preempt_enable will 299 * perform resched if required (a replayed interrupt may set 300 * need_resched). 301 */ 302 preempt_disable(); 303 irq_soft_mask_set(IRQS_ALL_DISABLED); 304 trace_hardirqs_off(); 305 306 replay_soft_interrupts_irqrestore(); 307 local_paca->irq_happened = 0; 308 309 trace_hardirqs_on(); 310 irq_soft_mask_set(IRQS_ENABLED); 311 __hard_irq_enable(); 312 preempt_enable(); 313 } 314 #else 315 notrace void arch_local_irq_restore(unsigned long mask) 316 { 317 unsigned char irq_happened; 318 319 /* Write the new soft-enabled value */ 320 irq_soft_mask_set(mask); 321 if (mask) 322 return; 323 324 /* 325 * From this point onward, we can take interrupts, preempt, 326 * etc... unless we got hard-disabled. We check if an event 327 * happened. If none happened, we know we can just return. 328 * 329 * We may have preempted before the check below, in which case 330 * we are checking the "new" CPU instead of the old one. This 331 * is only a problem if an event happened on the "old" CPU. 332 * 333 * External interrupt events will have caused interrupts to 334 * be hard-disabled, so there is no problem, we 335 * cannot have preempted. 336 */ 337 irq_happened = get_irq_happened(); 338 if (!irq_happened) { 339 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) 340 WARN_ON_ONCE(!(mfmsr() & MSR_EE)); 341 return; 342 } 343 344 /* We need to hard disable to replay. */ 345 if (!(irq_happened & PACA_IRQ_HARD_DIS)) { 346 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) 347 WARN_ON_ONCE(!(mfmsr() & MSR_EE)); 348 __hard_irq_disable(); 349 local_paca->irq_happened |= PACA_IRQ_HARD_DIS; 350 } else { 351 /* 352 * We should already be hard disabled here. We had bugs 353 * where that wasn't the case so let's dbl check it and 354 * warn if we are wrong. Only do that when IRQ tracing 355 * is enabled as mfmsr() can be costly. 356 */ 357 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) { 358 if (WARN_ON_ONCE(mfmsr() & MSR_EE)) 359 __hard_irq_disable(); 360 } 361 362 if (irq_happened == PACA_IRQ_HARD_DIS) { 363 local_paca->irq_happened = 0; 364 __hard_irq_enable(); 365 return; 366 } 367 } 368 369 /* 370 * Disable preempt here, so that the below preempt_enable will 371 * perform resched if required (a replayed interrupt may set 372 * need_resched). 373 */ 374 preempt_disable(); 375 irq_soft_mask_set(IRQS_ALL_DISABLED); 376 trace_hardirqs_off(); 377 378 replay_soft_interrupts_irqrestore(); 379 local_paca->irq_happened = 0; 380 381 trace_hardirqs_on(); 382 irq_soft_mask_set(IRQS_ENABLED); 383 __hard_irq_enable(); 384 preempt_enable(); 385 } 386 #endif 387 EXPORT_SYMBOL(arch_local_irq_restore); 388 389 /* 390 * This is a helper to use when about to go into idle low-power 391 * when the latter has the side effect of re-enabling interrupts 392 * (such as calling H_CEDE under pHyp). 393 * 394 * You call this function with interrupts soft-disabled (this is 395 * already the case when ppc_md.power_save is called). The function 396 * will return whether to enter power save or just return. 397 * 398 * In the former case, it will have notified lockdep of interrupts 399 * being re-enabled and generally sanitized the lazy irq state, 400 * and in the latter case it will leave with interrupts hard 401 * disabled and marked as such, so the local_irq_enable() call 402 * in arch_cpu_idle() will properly re-enable everything. 403 */ 404 bool prep_irq_for_idle(void) 405 { 406 /* 407 * First we need to hard disable to ensure no interrupt 408 * occurs before we effectively enter the low power state 409 */ 410 __hard_irq_disable(); 411 local_paca->irq_happened |= PACA_IRQ_HARD_DIS; 412 413 /* 414 * If anything happened while we were soft-disabled, 415 * we return now and do not enter the low power state. 416 */ 417 if (lazy_irq_pending()) 418 return false; 419 420 /* Tell lockdep we are about to re-enable */ 421 trace_hardirqs_on(); 422 423 /* 424 * Mark interrupts as soft-enabled and clear the 425 * PACA_IRQ_HARD_DIS from the pending mask since we 426 * are about to hard enable as well as a side effect 427 * of entering the low power state. 428 */ 429 local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; 430 irq_soft_mask_set(IRQS_ENABLED); 431 432 /* Tell the caller to enter the low power state */ 433 return true; 434 } 435 436 #ifdef CONFIG_PPC_BOOK3S 437 /* 438 * This is for idle sequences that return with IRQs off, but the 439 * idle state itself wakes on interrupt. Tell the irq tracer that 440 * IRQs are enabled for the duration of idle so it does not get long 441 * off times. Must be paired with fini_irq_for_idle_irqsoff. 442 */ 443 bool prep_irq_for_idle_irqsoff(void) 444 { 445 WARN_ON(!irqs_disabled()); 446 447 /* 448 * First we need to hard disable to ensure no interrupt 449 * occurs before we effectively enter the low power state 450 */ 451 __hard_irq_disable(); 452 local_paca->irq_happened |= PACA_IRQ_HARD_DIS; 453 454 /* 455 * If anything happened while we were soft-disabled, 456 * we return now and do not enter the low power state. 457 */ 458 if (lazy_irq_pending()) 459 return false; 460 461 /* Tell lockdep we are about to re-enable */ 462 trace_hardirqs_on(); 463 464 return true; 465 } 466 467 /* 468 * Take the SRR1 wakeup reason, index into this table to find the 469 * appropriate irq_happened bit. 470 * 471 * Sytem reset exceptions taken in idle state also come through here, 472 * but they are NMI interrupts so do not need to wait for IRQs to be 473 * restored, and should be taken as early as practical. These are marked 474 * with 0xff in the table. The Power ISA specifies 0100b as the system 475 * reset interrupt reason. 476 */ 477 #define IRQ_SYSTEM_RESET 0xff 478 479 static const u8 srr1_to_lazyirq[0x10] = { 480 0, 0, 0, 481 PACA_IRQ_DBELL, 482 IRQ_SYSTEM_RESET, 483 PACA_IRQ_DBELL, 484 PACA_IRQ_DEC, 485 0, 486 PACA_IRQ_EE, 487 PACA_IRQ_EE, 488 PACA_IRQ_HMI, 489 0, 0, 0, 0, 0 }; 490 491 void replay_system_reset(void) 492 { 493 struct pt_regs regs; 494 495 ppc_save_regs(®s); 496 regs.trap = 0x100; 497 get_paca()->in_nmi = 1; 498 system_reset_exception(®s); 499 get_paca()->in_nmi = 0; 500 } 501 EXPORT_SYMBOL_GPL(replay_system_reset); 502 503 void irq_set_pending_from_srr1(unsigned long srr1) 504 { 505 unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18; 506 u8 reason = srr1_to_lazyirq[idx]; 507 508 /* 509 * Take the system reset now, which is immediately after registers 510 * are restored from idle. It's an NMI, so interrupts need not be 511 * re-enabled before it is taken. 512 */ 513 if (unlikely(reason == IRQ_SYSTEM_RESET)) { 514 replay_system_reset(); 515 return; 516 } 517 518 if (reason == PACA_IRQ_DBELL) { 519 /* 520 * When doorbell triggers a system reset wakeup, the message 521 * is not cleared, so if the doorbell interrupt is replayed 522 * and the IPI handled, the doorbell interrupt would still 523 * fire when EE is enabled. 524 * 525 * To avoid taking the superfluous doorbell interrupt, 526 * execute a msgclr here before the interrupt is replayed. 527 */ 528 ppc_msgclr(PPC_DBELL_MSGTYPE); 529 } 530 531 /* 532 * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0, 533 * so this can be called unconditionally with the SRR1 wake 534 * reason as returned by the idle code, which uses 0 to mean no 535 * interrupt. 536 * 537 * If a future CPU was to designate this as an interrupt reason, 538 * then a new index for no interrupt must be assigned. 539 */ 540 local_paca->irq_happened |= reason; 541 } 542 #endif /* CONFIG_PPC_BOOK3S */ 543 544 /* 545 * Force a replay of the external interrupt handler on this CPU. 546 */ 547 void force_external_irq_replay(void) 548 { 549 /* 550 * This must only be called with interrupts soft-disabled, 551 * the replay will happen when re-enabling. 552 */ 553 WARN_ON(!arch_irqs_disabled()); 554 555 /* 556 * Interrupts must always be hard disabled before irq_happened is 557 * modified (to prevent lost update in case of interrupt between 558 * load and store). 559 */ 560 __hard_irq_disable(); 561 local_paca->irq_happened |= PACA_IRQ_HARD_DIS; 562 563 /* Indicate in the PACA that we have an interrupt to replay */ 564 local_paca->irq_happened |= PACA_IRQ_EE; 565 } 566 567 #endif /* CONFIG_PPC64 */ 568 569 int arch_show_interrupts(struct seq_file *p, int prec) 570 { 571 int j; 572 573 #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT) 574 if (tau_initialized) { 575 seq_printf(p, "%*s: ", prec, "TAU"); 576 for_each_online_cpu(j) 577 seq_printf(p, "%10u ", tau_interrupts(j)); 578 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); 579 } 580 #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */ 581 582 seq_printf(p, "%*s: ", prec, "LOC"); 583 for_each_online_cpu(j) 584 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event); 585 seq_printf(p, " Local timer interrupts for timer event device\n"); 586 587 seq_printf(p, "%*s: ", prec, "BCT"); 588 for_each_online_cpu(j) 589 seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event); 590 seq_printf(p, " Broadcast timer interrupts for timer event device\n"); 591 592 seq_printf(p, "%*s: ", prec, "LOC"); 593 for_each_online_cpu(j) 594 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others); 595 seq_printf(p, " Local timer interrupts for others\n"); 596 597 seq_printf(p, "%*s: ", prec, "SPU"); 598 for_each_online_cpu(j) 599 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); 600 seq_printf(p, " Spurious interrupts\n"); 601 602 seq_printf(p, "%*s: ", prec, "PMI"); 603 for_each_online_cpu(j) 604 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); 605 seq_printf(p, " Performance monitoring interrupts\n"); 606 607 seq_printf(p, "%*s: ", prec, "MCE"); 608 for_each_online_cpu(j) 609 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); 610 seq_printf(p, " Machine check exceptions\n"); 611 612 #ifdef CONFIG_PPC_BOOK3S_64 613 if (cpu_has_feature(CPU_FTR_HVMODE)) { 614 seq_printf(p, "%*s: ", prec, "HMI"); 615 for_each_online_cpu(j) 616 seq_printf(p, "%10u ", paca_ptrs[j]->hmi_irqs); 617 seq_printf(p, " Hypervisor Maintenance Interrupts\n"); 618 } 619 #endif 620 621 seq_printf(p, "%*s: ", prec, "NMI"); 622 for_each_online_cpu(j) 623 seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs); 624 seq_printf(p, " System Reset interrupts\n"); 625 626 #ifdef CONFIG_PPC_WATCHDOG 627 seq_printf(p, "%*s: ", prec, "WDG"); 628 for_each_online_cpu(j) 629 seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs); 630 seq_printf(p, " Watchdog soft-NMI interrupts\n"); 631 #endif 632 633 #ifdef CONFIG_PPC_DOORBELL 634 if (cpu_has_feature(CPU_FTR_DBELL)) { 635 seq_printf(p, "%*s: ", prec, "DBL"); 636 for_each_online_cpu(j) 637 seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs); 638 seq_printf(p, " Doorbell interrupts\n"); 639 } 640 #endif 641 642 return 0; 643 } 644 645 /* 646 * /proc/stat helpers 647 */ 648 u64 arch_irq_stat_cpu(unsigned int cpu) 649 { 650 u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event; 651 652 sum += per_cpu(irq_stat, cpu).broadcast_irqs_event; 653 sum += per_cpu(irq_stat, cpu).pmu_irqs; 654 sum += per_cpu(irq_stat, cpu).mce_exceptions; 655 sum += per_cpu(irq_stat, cpu).spurious_irqs; 656 sum += per_cpu(irq_stat, cpu).timer_irqs_others; 657 #ifdef CONFIG_PPC_BOOK3S_64 658 sum += paca_ptrs[cpu]->hmi_irqs; 659 #endif 660 sum += per_cpu(irq_stat, cpu).sreset_irqs; 661 #ifdef CONFIG_PPC_WATCHDOG 662 sum += per_cpu(irq_stat, cpu).soft_nmi_irqs; 663 #endif 664 #ifdef CONFIG_PPC_DOORBELL 665 sum += per_cpu(irq_stat, cpu).doorbell_irqs; 666 #endif 667 668 return sum; 669 } 670 671 static inline void check_stack_overflow(void) 672 { 673 long sp; 674 675 if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW)) 676 return; 677 678 sp = current_stack_pointer & (THREAD_SIZE - 1); 679 680 /* check for stack overflow: is there less than 2KB free? */ 681 if (unlikely(sp < 2048)) { 682 pr_err("do_IRQ: stack overflow: %ld\n", sp); 683 dump_stack(); 684 } 685 } 686 687 static __always_inline void call_do_softirq(const void *sp) 688 { 689 /* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */ 690 asm volatile ( 691 PPC_STLU " %%r1, %[offset](%[sp]) ;" 692 "mr %%r1, %[sp] ;" 693 "bl %[callee] ;" 694 PPC_LL " %%r1, 0(%%r1) ;" 695 : // Outputs 696 : // Inputs 697 [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD), 698 [callee] "i" (__do_softirq) 699 : // Clobbers 700 "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6", 701 "cr7", "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", 702 "r11", "r12" 703 ); 704 } 705 706 static __always_inline void call_do_irq(struct pt_regs *regs, void *sp) 707 { 708 register unsigned long r3 asm("r3") = (unsigned long)regs; 709 710 /* Temporarily switch r1 to sp, call __do_irq() then restore r1. */ 711 asm volatile ( 712 PPC_STLU " %%r1, %[offset](%[sp]) ;" 713 "mr %%r1, %[sp] ;" 714 "bl %[callee] ;" 715 PPC_LL " %%r1, 0(%%r1) ;" 716 : // Outputs 717 "+r" (r3) 718 : // Inputs 719 [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD), 720 [callee] "i" (__do_irq) 721 : // Clobbers 722 "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6", 723 "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", 724 "r11", "r12" 725 ); 726 } 727 728 void __do_irq(struct pt_regs *regs) 729 { 730 unsigned int irq; 731 732 trace_irq_entry(regs); 733 734 /* 735 * Query the platform PIC for the interrupt & ack it. 736 * 737 * This will typically lower the interrupt line to the CPU 738 */ 739 irq = ppc_md.get_irq(); 740 741 /* We can hard enable interrupts now to allow perf interrupts */ 742 may_hard_irq_enable(); 743 744 /* And finally process it */ 745 if (unlikely(!irq)) 746 __this_cpu_inc(irq_stat.spurious_irqs); 747 else 748 generic_handle_irq(irq); 749 750 trace_irq_exit(regs); 751 } 752 753 DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ) 754 { 755 struct pt_regs *old_regs = set_irq_regs(regs); 756 void *cursp, *irqsp, *sirqsp; 757 758 /* Switch to the irq stack to handle this */ 759 cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1)); 760 irqsp = hardirq_ctx[raw_smp_processor_id()]; 761 sirqsp = softirq_ctx[raw_smp_processor_id()]; 762 763 check_stack_overflow(); 764 765 /* Already there ? */ 766 if (unlikely(cursp == irqsp || cursp == sirqsp)) { 767 __do_irq(regs); 768 set_irq_regs(old_regs); 769 return; 770 } 771 /* Switch stack and call */ 772 call_do_irq(regs, irqsp); 773 774 set_irq_regs(old_regs); 775 } 776 777 static void *__init alloc_vm_stack(void) 778 { 779 return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP, 780 NUMA_NO_NODE, (void *)_RET_IP_); 781 } 782 783 static void __init vmap_irqstack_init(void) 784 { 785 int i; 786 787 for_each_possible_cpu(i) { 788 softirq_ctx[i] = alloc_vm_stack(); 789 hardirq_ctx[i] = alloc_vm_stack(); 790 } 791 } 792 793 794 void __init init_IRQ(void) 795 { 796 if (IS_ENABLED(CONFIG_VMAP_STACK)) 797 vmap_irqstack_init(); 798 799 if (ppc_md.init_IRQ) 800 ppc_md.init_IRQ(); 801 } 802 803 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 804 void *critirq_ctx[NR_CPUS] __read_mostly; 805 void *dbgirq_ctx[NR_CPUS] __read_mostly; 806 void *mcheckirq_ctx[NR_CPUS] __read_mostly; 807 #endif 808 809 void *softirq_ctx[NR_CPUS] __read_mostly; 810 void *hardirq_ctx[NR_CPUS] __read_mostly; 811 812 void do_softirq_own_stack(void) 813 { 814 call_do_softirq(softirq_ctx[smp_processor_id()]); 815 } 816 817 irq_hw_number_t virq_to_hw(unsigned int virq) 818 { 819 struct irq_data *irq_data = irq_get_irq_data(virq); 820 return WARN_ON(!irq_data) ? 0 : irq_data->hwirq; 821 } 822 EXPORT_SYMBOL_GPL(virq_to_hw); 823 824 #ifdef CONFIG_SMP 825 int irq_choose_cpu(const struct cpumask *mask) 826 { 827 int cpuid; 828 829 if (cpumask_equal(mask, cpu_online_mask)) { 830 static int irq_rover; 831 static DEFINE_RAW_SPINLOCK(irq_rover_lock); 832 unsigned long flags; 833 834 /* Round-robin distribution... */ 835 do_round_robin: 836 raw_spin_lock_irqsave(&irq_rover_lock, flags); 837 838 irq_rover = cpumask_next(irq_rover, cpu_online_mask); 839 if (irq_rover >= nr_cpu_ids) 840 irq_rover = cpumask_first(cpu_online_mask); 841 842 cpuid = irq_rover; 843 844 raw_spin_unlock_irqrestore(&irq_rover_lock, flags); 845 } else { 846 cpuid = cpumask_first_and(mask, cpu_online_mask); 847 if (cpuid >= nr_cpu_ids) 848 goto do_round_robin; 849 } 850 851 return get_hard_smp_processor_id(cpuid); 852 } 853 #else 854 int irq_choose_cpu(const struct cpumask *mask) 855 { 856 return hard_smp_processor_id(); 857 } 858 #endif 859 860 #ifdef CONFIG_PPC64 861 static int __init setup_noirqdistrib(char *str) 862 { 863 distribute_irqs = 0; 864 return 1; 865 } 866 867 __setup("noirqdistrib", setup_noirqdistrib); 868 #endif /* CONFIG_PPC64 */ 869