/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

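/*
 * Accessors for the lazy interrupt state kept in the PACA.  On 64-bit,
 * r13 holds the pointer to the per-cpu paca_struct, so irq_happened and
 * soft_enabled can be read/written with a single load/store, avoiding
 * the debug_smp_processor_id() overhead of get_paca() in these hot,
 * notrace paths.
 */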
static inline notrace unsigned long get_irq_happened(void)
{
        unsigned long happened;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

        return happened;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
        __asm__ __volatile__("stb %0,%1(13)"
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

static inline notrace int decrementer_check_overflow(void)
{
        u64 now = get_tb_or_rtc();
        u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

        return now >= *next_tb;
}

/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or 500/900/280/a00/e60/e80 if
 * there's an EE, DEC, DBELL or HMI to generate.
 *
 * This is called in two contexts: From arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
        /*
         * We use local_paca rather than get_paca() to avoid all
         * the debug_smp_processor_id() business in this low-level
         * function.
         */
        unsigned char happened = local_paca->irq_happened;

        /* Clear bit 0 which we wouldn't clear otherwise */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp, tmp2;
                lv1_get_version_info(&tmp, &tmp2);
        }

        /*
         * Check if a Hypervisor Maintenance interrupt happened.
         * This is a higher priority interrupt than the others, so
         * replay it first.
         */
        local_paca->irq_happened &= ~PACA_IRQ_HMI;
        if (happened & PACA_IRQ_HMI)
                return 0xe60;

        /*
         * We may have missed a decrementer interrupt. We check the
         * decrementer itself rather than the paca irq_happened field
         * in case we also had a rollover while hard disabled.
         */
        local_paca->irq_happened &= ~PACA_IRQ_DEC;
        if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
                return 0x900;

        /* Finally check if an external interrupt happened */
        local_paca->irq_happened &= ~PACA_IRQ_EE;
        if (happened & PACA_IRQ_EE)
                return 0x500;

#ifdef CONFIG_PPC_BOOK3E
        /* Check if an EPR external interrupt happened; this bit is
         * typically set if we need to handle another "edge" interrupt
         * from within the MPIC "EPR" handler.
         */
        local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
        if (happened & PACA_IRQ_EE_EDGE)
                return 0x500;

        local_paca->irq_happened &= ~PACA_IRQ_DBELL;
        if (happened & PACA_IRQ_DBELL)
                return 0x280;
#else
        local_paca->irq_happened &= ~PACA_IRQ_DBELL;
        if (happened & PACA_IRQ_DBELL) {
                if (cpu_has_feature(CPU_FTR_HVMODE))
                        return 0xe80;
                return 0xa00;
        }
#endif /* CONFIG_PPC_BOOK3E */

        /* There should be nothing left! */
        BUG_ON(local_paca->irq_happened != 0);

        return 0;
}

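/*
 * Lazy interrupt restore: this is the arch hook behind local_irq_restore().
 * It records the new soft-enabled state and, if any interrupt was latched
 * in paca->irq_happened while we were soft-disabled, replays it via
 * __check_irq_replay()/__replay_interrupt() before returning with
 * interrupts hard-enabled.
 */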
notrace void arch_local_irq_restore(unsigned long en)
{
        unsigned char irq_happened;
        unsigned int replay;

        /* Write the new soft-enabled value */
        set_soft_enabled(en);
        if (!en)
                return;
        /*
         * From this point onward, we can take interrupts, preempt,
         * etc... unless we got hard-disabled. We check if an event
         * happened. If none happened, we know we can just return.
         *
         * We may have been preempted before the check below, in which
         * case we are checking the "new" CPU instead of the old one.
         * This is only a problem if an event happened on the "old" CPU.
         *
         * External interrupt events will have caused interrupts to
         * be hard-disabled, so there is no problem: we cannot have
         * been preempted.
         */
        irq_happened = get_irq_happened();
        if (!irq_happened)
                return;

        /*
         * We need to hard disable to get a trusted value from
         * __check_irq_replay(). We also need to soft-disable
         * again to avoid warnings in there due to the use of
         * per-cpu variables.
         *
         * We know that if the value in irq_happened is exactly 0x01
         * then we are already hard disabled (there are other less
         * common cases that we'll ignore for now), so we skip the
         * (expensive) mtmsrd.
         */
        if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
                __hard_irq_disable();
#ifdef CONFIG_TRACE_IRQFLAGS
        else {
                /*
                 * We should already be hard disabled here. We had bugs
                 * where that wasn't the case so let's double-check it
                 * and warn if we are wrong. Only do that when IRQ tracing
                 * is enabled as mfmsr() can be costly.
                 */
                if (WARN_ON(mfmsr() & MSR_EE))
                        __hard_irq_disable();
        }
#endif /* CONFIG_TRACE_IRQFLAGS */

        set_soft_enabled(0);

        /*
         * Check if anything needs to be re-emitted. We haven't
         * soft-enabled yet to avoid warnings in decrementer_check_overflow
         * accessing per-cpu variables.
         */
        replay = __check_irq_replay();

        /* We can soft-enable now */
        set_soft_enabled(1);

        /*
         * And replay if we have to. This will return with interrupts
         * hard-enabled.
         */
        if (replay) {
                __replay_interrupt(replay);
                return;
        }

        /* Finally, let's ensure we are hard enabled */
        __hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);

/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
        if (irqs_disabled()) {
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
                local_irq_enable();
        } else
                __hard_irq_enable();
}

/*
 * This is a helper to use when about to go into idle low-power
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * returns true if the caller should enter the low power state and
 * false if it should just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
        /*
         * First we need to hard disable to ensure no interrupt
         * occurs before we effectively enter the low power state.
         */
        hard_irq_disable();

        /*
         * If anything happened while we were soft-disabled,
         * we return now and do not enter the low power state.
         */
        if (lazy_irq_pending())
                return false;

        /* Tell lockdep we are about to re-enable */
        trace_hardirqs_on();

        /*
         * Mark interrupts as soft-enabled and clear the
         * PACA_IRQ_HARD_DIS from the pending mask since we
         * are about to hard enable as well, as a side effect
         * of entering the low power state.
         */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
        local_paca->soft_enabled = 1;

        /* Tell the caller to enter the low power state */
        return true;
}

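/*
 * Illustrative sketch (not an actual caller) of how a platform
 * ppc_md.power_save hook is expected to use the helper above:
 *
 *	static void example_power_save(void)	// hypothetical hook
 *	{
 *		if (!prep_irq_for_idle())
 *			return;			// an interrupt is already pending
 *		enter_low_power();		// e.g. H_CEDE; may hard-enable
 *	}
 */
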
/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
        /*
         * This must only be called with interrupts soft-disabled,
         * the replay will happen when re-enabling.
         */
        WARN_ON(!arch_irqs_disabled());

        /* Indicate in the PACA that we have an interrupt to replay */
        local_paca->irq_happened |= PACA_IRQ_EE;
}

#endif /* CONFIG_PPC64 */

int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
        if (tau_initialized) {
                seq_printf(p, "%*s: ", prec, "TAU");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", tau_interrupts(j));
                seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
        }
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
        seq_printf(p, " Local timer interrupts for timer event device\n");

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
        seq_printf(p, " Local timer interrupts for others\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
        seq_printf(p, " Spurious interrupts\n");

        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
        seq_printf(p, " Performance monitoring interrupts\n");

        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
        seq_printf(p, " Machine check exceptions\n");

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                seq_printf(p, "%*s: ", prec, "HMI");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                        per_cpu(irq_stat, j).hmi_exceptions);
                seq_printf(p, " Hypervisor Maintenance Interrupts\n");
        }

#ifdef CONFIG_PPC_DOORBELL
        if (cpu_has_feature(CPU_FTR_DBELL)) {
                seq_printf(p, "%*s: ", prec, "DBL");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
                seq_printf(p, " Doorbell interrupts\n");
        }
#endif

        return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

        sum += per_cpu(irq_stat, cpu).pmu_irqs;
        sum += per_cpu(irq_stat, cpu).mce_exceptions;
        sum += per_cpu(irq_stat, cpu).spurious_irqs;
        sum += per_cpu(irq_stat, cpu).timer_irqs_others;
        sum += per_cpu(irq_stat, cpu).hmi_exceptions;
#ifdef CONFIG_PPC_DOORBELL
        sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

        return sum;
}

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        long sp;

        sp = current_stack_pointer() & (THREAD_SIZE-1);

        /* check for stack overflow: is there less than 2KB free? */
        if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
                pr_err("do_IRQ: stack overflow: %ld\n",
                        sp - sizeof(struct thread_info));
                dump_stack();
        }
#endif
}

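/*
 * Handle one external interrupt: query the platform PIC for the pending
 * interrupt (acking it), then dispatch it through the generic IRQ layer.
 * Called from do_IRQ() below, normally after switching to the per-cpu
 * hardirq stack.
 */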
void __do_irq(struct pt_regs *regs)
{
        unsigned int irq;

        irq_enter();

        trace_irq_entry(regs);

        check_stack_overflow();

        /*
         * Query the platform PIC for the interrupt & ack it.
         *
         * This will typically lower the interrupt line to the CPU
         */
        irq = ppc_md.get_irq();

        /* We can hard enable interrupts now to allow perf interrupts */
        may_hard_irq_enable();

        /* And finally process it */
        if (unlikely(!irq))
                __this_cpu_inc(irq_stat.spurious_irqs);
        else
                generic_handle_irq(irq);

        trace_irq_exit(regs);

        irq_exit();
}

void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        struct thread_info *curtp, *irqtp, *sirqtp;

        /* Switch to the irq stack to handle this */
        curtp = current_thread_info();
        irqtp = hardirq_ctx[raw_smp_processor_id()];
        sirqtp = softirq_ctx[raw_smp_processor_id()];

        /* Already there ? */
        if (unlikely(curtp == irqtp || curtp == sirqtp)) {
                __do_irq(regs);
                set_irq_regs(old_regs);
                return;
        }

        /* Prepare the thread_info in the irq stack */
        irqtp->task = curtp->task;
        irqtp->flags = 0;

        /* Copy the preempt_count so that the [soft]irq checks work. */
        irqtp->preempt_count = curtp->preempt_count;

        /* Switch stack and call */
        call_do_irq(regs, irqtp);

        /* Restore stack limit */
        irqtp->task = NULL;

        /* Copy back updates to the thread_info */
        if (irqtp->flags)
                set_bits(irqtp->flags, &curtp->flags);

        set_irq_regs(old_regs);
}

void __init init_IRQ(void)
{
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();

        exc_lvl_ctx_init();

        irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
        struct thread_info *tp;
        int i, cpu_nr;

        for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
                cpu_nr = i;
#else
#ifdef CONFIG_SMP
                cpu_nr = get_hard_smp_processor_id(i);
#else
                cpu_nr = 0;
#endif
#endif

                memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = critirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
                memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = dbgirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = 0;

                memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = mcheckirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = HARDIRQ_OFFSET;
#endif
        }
}
#endif

struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
                klp_init_thread_info(tp);

                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
                klp_init_thread_info(tp);
        }
}

void do_softirq_own_stack(void)
{
        struct thread_info *curtp, *irqtp;

        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        irqtp->flags = 0;
        call_do_softirq(irqtp);
        irqtp->task = NULL;

        /* Set any flag that may have been set on the
         * alternate stack
         */
        if (irqtp->flags)
                set_bits(irqtp->flags, &curtp->flags);
}

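/*
 * Translate a Linux virtual irq number back to the hardware irq number
 * it is mapped to, or return 0 (with a warning) if no irq_data exists
 * for it.
 */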
irq_hw_number_t virq_to_hw(unsigned int virq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);
        return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
        int cpuid;

        if (cpumask_equal(mask, cpu_online_mask)) {
                static int irq_rover;
                static DEFINE_RAW_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution... */
do_round_robin:
                raw_spin_lock_irqsave(&irq_rover_lock, flags);

                irq_rover = cpumask_next(irq_rover, cpu_online_mask);
                if (irq_rover >= nr_cpu_ids)
                        irq_rover = cpumask_first(cpu_online_mask);

                cpuid = irq_rover;

                raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
                cpuid = cpumask_first_and(mask, cpu_online_mask);
                if (cpuid >= nr_cpu_ids)
                        goto do_round_robin;
        }

        return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
        return hard_smp_processor_id();
}
#endif

int arch_early_irq_init(void)
{
        return 0;
}

#ifdef CONFIG_PPC64
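/*
 * Booting with "noirqdistrib" clears distribute_irqs, the flag platform
 * interrupt controller setup consults when deciding whether to spread
 * device interrupts across all online CPUs.
 */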
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */