// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on
 * CONFIG_PPC_8xx to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>
#include <asm/hw_irq.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include <asm/dbell.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

#ifdef CONFIG_PPC32
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
u32 tau_interrupts(unsigned long cpu);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}

#ifdef CONFIG_PPC_BOOK3E

/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or 500/900/280 if
 * there's an EE, DEC or DBELL to generate.
 *
 * This is called in two contexts: From arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
	/*
	 * We use local_paca rather than get_paca() to avoid all
	 * the debug_smp_processor_id() business in this low level
	 * function
	 */
	unsigned char happened = local_paca->irq_happened;

	/*
	 * We are responding to the next interrupt, so interrupt-off
	 * latencies should be reset here.
	 */
	trace_hardirqs_on();
	trace_hardirqs_off();

	if (happened & PACA_IRQ_DEC) {
		local_paca->irq_happened &= ~PACA_IRQ_DEC;
		return 0x900;
	}

	if (happened & PACA_IRQ_EE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE;
		return 0x500;
	}

	if (happened & PACA_IRQ_DBELL) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		return 0x280;
	}

	if (happened & PACA_IRQ_HARD_DIS)
		local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

	/* There should be nothing left! */
	BUG_ON(local_paca->irq_happened != 0);

	return 0;
}

/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
	if (irqs_disabled()) {
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
		local_irq_enable();
	} else
		__hard_irq_enable();
}

#endif /* CONFIG_PPC_BOOK3E */

void replay_soft_interrupts(void)
{
	struct pt_regs regs;

	/*
	 * Be careful here, calling these interrupt handlers can cause
	 * softirqs to be raised, which may be run when calling irq_exit(),
	 * which will cause local_irq_enable() to be run, which can then
	 * recurse into this function. Don't keep any state across
	 * interrupt handler calls which may change underneath us.
	 *
	 * We use local_paca rather than get_paca() to avoid all the
	 * debug_smp_processor_id() business in this low level function.
	 */

	ppc_save_regs(&regs);
	regs.softe = IRQS_ENABLED;

again:
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * Check if a hypervisor Maintenance interrupt happened.
	 * This is a higher priority interrupt than the others, so
	 * replay it first.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
		local_paca->irq_happened &= ~PACA_IRQ_HMI;
		regs.trap = 0xe60;
		handle_hmi_exception(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (local_paca->irq_happened & PACA_IRQ_DEC) {
		local_paca->irq_happened &= ~PACA_IRQ_DEC;
		regs.trap = 0x900;
		timer_interrupt(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (local_paca->irq_happened & PACA_IRQ_EE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE;
		regs.trap = 0x500;
		do_IRQ(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		if (IS_ENABLED(CONFIG_PPC_BOOK3E))
			regs.trap = 0x280;
		else
			regs.trap = 0xa00;
		doorbell_exception(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	/* Book3E does not support soft-masking PMI interrupts */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
		local_paca->irq_happened &= ~PACA_IRQ_PMI;
		regs.trap = 0xf00;
		performance_monitor_exception(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) {
		/*
		 * We are responding to the next interrupt, so interrupt-off
		 * latencies should be reset here.
		 */
		trace_hardirqs_on();
		trace_hardirqs_off();
		goto again;
	}
}

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
static inline void replay_soft_interrupts_irqrestore(void)
{
	unsigned long kuap_state = get_kuap();

	/*
	 * Check if anything calls local_irq_enable/restore() when KUAP is
	 * disabled (user access enabled). We handle that case here by saving
	 * and re-locking AMR but we shouldn't get here in the first place,
	 * hence the warning.
	 */
	kuap_check_amr();

	if (kuap_state != AMR_KUAP_BLOCKED)
		set_kuap(AMR_KUAP_BLOCKED);

	replay_soft_interrupts();

	if (kuap_state != AMR_KUAP_BLOCKED)
		set_kuap(kuap_state);
}
#else
#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
#endif

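/*
 * Lazy interrupt soft-masking on 64-bit: arch_local_irq_disable() only sets
 * a soft-mask in the PACA and leaves MSR[EE] alone. If a masked interrupt
 * then arrives, the low level exception code records it in
 * paca->irq_happened, hard-disables and returns without running the
 * handler. arch_local_irq_restore() below is where such pending interrupts
 * are finally replayed, via replay_soft_interrupts() above.
 *
 * Typical sequence (sketch):
 *
 *	local_irq_disable();	// soft-mask only, MSR[EE] still set
 *	...			// interrupt fires: PACA_IRQ_EE recorded,
 *				// MSR[EE] cleared, handler not run yet
 *	local_irq_enable();	// arch_local_irq_restore() replays it
 */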
notrace void arch_local_irq_restore(unsigned long mask)
{
	unsigned char irq_happened;

	/* Write the new soft-enabled value */
	irq_soft_mask_set(mask);
	if (mask)
		return;

	/*
	 * From this point onward, we can take interrupts, preempt,
	 * etc... unless we got hard-disabled. We check if an event
	 * happened. If none happened, we know we can just return.
	 *
	 * We may have preempted before the check below, in which case
	 * we are checking the "new" CPU instead of the old one. This
	 * is only a problem if an event happened on the "old" CPU.
	 *
	 * External interrupt events will have caused interrupts to
	 * be hard-disabled, so there is no problem, we
	 * cannot have preempted.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			WARN_ON_ONCE(!(mfmsr() & MSR_EE));
		return;
	}

	/* We need to hard disable to replay. */
	if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			WARN_ON_ONCE(!(mfmsr() & MSR_EE));
		__hard_irq_disable();
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
	} else {
		/*
		 * We should already be hard disabled here. We had bugs
		 * where that wasn't the case so let's double check it and
		 * warn if we are wrong. Only do that when IRQ tracing
		 * is enabled as mfmsr() can be costly.
		 */
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
			if (WARN_ON_ONCE(mfmsr() & MSR_EE))
				__hard_irq_disable();
		}

		if (irq_happened == PACA_IRQ_HARD_DIS) {
			local_paca->irq_happened = 0;
			__hard_irq_enable();
			return;
		}
	}

	/*
	 * Disable preempt here, so that the below preempt_enable will
	 * perform resched if required (a replayed interrupt may set
	 * need_resched).
	 */
	preempt_disable();
	irq_soft_mask_set(IRQS_ALL_DISABLED);
	trace_hardirqs_off();

	replay_soft_interrupts_irqrestore();
	local_paca->irq_happened = 0;

	trace_hardirqs_on();
	irq_soft_mask_set(IRQS_ENABLED);
	__hard_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);

/*
 * This is a helper to use when about to go into idle low-power
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ENABLED);

	/* Tell the caller to enter the low power state */
	return true;
}

#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not get long
 * off times. Must be paired with fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
	WARN_ON(!irqs_disabled());

	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	return true;
}

/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET	0xff

static const u8 srr1_to_lazyirq[0x10] = {
	0, 0, 0,
	PACA_IRQ_DBELL,
	IRQ_SYSTEM_RESET,
	PACA_IRQ_DBELL,
	PACA_IRQ_DEC,
	0,
	PACA_IRQ_EE,
	PACA_IRQ_EE,
	PACA_IRQ_HMI,
	0, 0, 0, 0, 0 };

void replay_system_reset(void)
{
	struct pt_regs regs;

	ppc_save_regs(&regs);
	regs.trap = 0x100;
	get_paca()->in_nmi = 1;
	system_reset_exception(&regs);
	get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);

void irq_set_pending_from_srr1(unsigned long srr1)
{
	unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
	u8 reason = srr1_to_lazyirq[idx];

	/*
	 * Take the system reset now, which is immediately after registers
	 * are restored from idle. It's an NMI, so interrupts need not be
	 * re-enabled before it is taken.
	 */
	if (unlikely(reason == IRQ_SYSTEM_RESET)) {
		replay_system_reset();
		return;
	}

	if (reason == PACA_IRQ_DBELL) {
		/*
		 * When doorbell triggers a system reset wakeup, the message
		 * is not cleared, so if the doorbell interrupt is replayed
		 * and the IPI handled, the doorbell interrupt would still
		 * fire when EE is enabled.
		 *
		 * To avoid taking the superfluous doorbell interrupt,
		 * execute a msgclr here before the interrupt is replayed.
		 */
		ppc_msgclr(PPC_DBELL_MSGTYPE);
	}

	/*
	 * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
	 * so this can be called unconditionally with the SRR1 wake
	 * reason as returned by the idle code, which uses 0 to mean no
	 * interrupt.
	 *
	 * If a future CPU was to designate this as an interrupt reason,
	 * then a new index for no interrupt must be assigned.
	 */
	local_paca->irq_happened |= reason;
}
#endif /* CONFIG_PPC_BOOK3S */

/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
	/*
	 * This must only be called with interrupts soft-disabled,
	 * the replay will happen when re-enabling.
	 */
	WARN_ON(!arch_irqs_disabled());

	/*
	 * Interrupts must always be hard disabled before irq_happened is
	 * modified (to prevent lost update in case of interrupt between
	 * load and store).
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Indicate in the PACA that we have an interrupt to replay */
	local_paca->irq_happened |= PACA_IRQ_EE;
}

#endif /* CONFIG_PPC64 */

int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
	seq_printf(p, " Local timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "BCT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
	seq_printf(p, " Broadcast timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
	seq_printf(p, " Local timer interrupts for others\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, " Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, " Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, " Machine check exceptions\n");

#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		seq_printf(p, "%*s: ", prec, "HMI");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", paca_ptrs[j]->hmi_irqs);
		seq_printf(p, " Hypervisor Maintenance Interrupts\n");
	}
#endif

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
	seq_printf(p, " System Reset interrupts\n");

#ifdef CONFIG_PPC_WATCHDOG
	seq_printf(p, "%*s: ", prec, "WDG");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
	seq_printf(p, " Watchdog soft-NMI interrupts\n");
#endif

#ifdef CONFIG_PPC_DOORBELL
	if (cpu_has_feature(CPU_FTR_DBELL)) {
		seq_printf(p, "%*s: ", prec, "DBL");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
		seq_printf(p, " Doorbell interrupts\n");
	}
#endif

	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

	sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;
	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
#ifdef CONFIG_PPC_BOOK3S_64
	sum += paca_ptrs[cpu]->hmi_irqs;
#endif
	sum += per_cpu(irq_stat, cpu).sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
	sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
#endif
#ifdef CONFIG_PPC_DOORBELL
	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

	return sum;
}

static inline void check_stack_overflow(void)
{
	long sp;

	if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
		return;

	sp = current_stack_pointer & (THREAD_SIZE - 1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < 2048)) {
		pr_err("do_IRQ: stack overflow: %ld\n", sp);
		dump_stack();
	}
}

void __do_irq(struct pt_regs *regs)
{
	unsigned int irq;

	trace_irq_entry(regs);

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
	 * This will typically lower the interrupt line to the CPU
	 */
	irq = ppc_md.get_irq();

	/* We can hard enable interrupts now to allow perf interrupts */
	may_hard_irq_enable();

	/* And finally process it */
	if (unlikely(!irq))
		__this_cpu_inc(irq_stat.spurious_irqs);
	else
		generic_handle_irq(irq);

	trace_irq_exit(regs);
}

DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	void *cursp, *irqsp, *sirqsp;

	/* Switch to the irq stack to handle this */
	cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
	irqsp = hardirq_ctx[raw_smp_processor_id()];
	sirqsp = softirq_ctx[raw_smp_processor_id()];

	check_stack_overflow();

	/* Already there ? */
	if (unlikely(cursp == irqsp || cursp == sirqsp)) {
		__do_irq(regs);
		set_irq_regs(old_regs);
		return;
	}
	/* Switch stack and call */
	call_do_irq(regs, irqsp);

	set_irq_regs(old_regs);
}

static void *__init alloc_vm_stack(void)
{
	return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
			      NUMA_NO_NODE, (void *)_RET_IP_);
}

static void __init vmap_irqstack_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_vm_stack();
		hardirq_ctx[i] = alloc_vm_stack();
	}
}


void __init init_IRQ(void)
{
	if (IS_ENABLED(CONFIG_VMAP_STACK))
		vmap_irqstack_init();

	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
void *critirq_ctx[NR_CPUS] __read_mostly;
void *dbgirq_ctx[NR_CPUS] __read_mostly;
void *mcheckirq_ctx[NR_CPUS] __read_mostly;
#endif

void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;

void do_softirq_own_stack(void)
{
	call_do_softirq(softirq_ctx[smp_processor_id()]);
}

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

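/*
 * irq_choose_cpu() maps an interrupt affinity mask to a hard CPU id for
 * the platform interrupt controller: if the mask covers all online CPUs,
 * interrupts are spread round-robin, otherwise the first online CPU in
 * the mask is used. On UP builds this trivially resolves to the only CPU.
 */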
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_online_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */