/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int status;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (desc->status & IRQ_INPROGRESS)
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		status = desc->status;
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (status & IRQ_INPROGRESS);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
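
/*
 * Example (illustrative sketch, not part of this file): a typical driver
 * teardown path built on synchronize_irq(). The "my_dev" structure, its
 * fields and free_my_buffers() are hypothetical. Per the warning above,
 * no lock which the IRQ handler might take may be held across the call.
 *
 *	static void my_dev_stop(struct my_dev *dev)
 *	{
 *		dev->running = false;		// handler now bails out early
 *		synchronize_irq(dev->irq);	// wait for in-flight handlers
 *		free_my_buffers(dev);		// safe: no handler can race us
 *	}
 */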

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
	    !desc->chip->set_affinity)
		return 0;

	return 1;
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq: Interrupt to set affinity
 * @cpumask: cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc->chip->set_affinity)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (desc->status & IRQ_MOVE_PCNTXT) {
		if (!desc->chip->set_affinity(irq, cpumask)) {
			cpumask_copy(desc->affinity, cpumask);
			irq_set_thread_affinity(desc);
		}
	}
	else {
		desc->status |= IRQ_MOVE_PENDING;
		cpumask_copy(desc->pending_mask, cpumask);
	}
#else
	if (!desc->chip->set_affinity(irq, cpumask)) {
		cpumask_copy(desc->affinity, cpumask);
		irq_set_thread_affinity(desc);
	}
#endif
	desc->status |= IRQ_AFFINITY_SET;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
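
/*
 * Example (illustrative sketch, not part of this file): pinning an
 * interrupt to one CPU from a driver or arch setup path. "irq" and "cpu"
 * are hypothetical values; the check avoids -EINVAL on per-CPU interrupts
 * or on chips without a set_affinity() method.
 *
 *	if (irq_can_set_affinity(irq))
 *		irq_set_affinity(irq, cpumask_of(cpu));
 */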

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
		if (cpumask_any_and(desc->affinity, cpu_online_mask)
		    < nr_cpu_ids)
			goto set_affinity;
		else
			desc->status &= ~IRQ_AFFINITY_SET;
	}

	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
	desc->chip->set_affinity(irq, desc->affinity);

	return 0;
}
#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc);
	if (!ret)
		irq_set_thread_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return ret;
}

#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_TIMER))
			return;
		desc->status |= IRQ_SUSPENDED;
	}

	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->chip->disable(irq);
	}
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	chip_bus_lock(irq, desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	__disable_irq(desc, irq, false);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume)
		desc->status &= ~IRQ_SUSPENDED;

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;

		if (desc->status & IRQ_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		desc->status = status | IRQ_NOPROBE;
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	chip_bus_lock(irq, desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	__enable_irq(desc, irq, false);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(enable_irq);
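
/*
 * Example (illustrative sketch, not part of this file): the usual
 * disable/enable pattern around a section where the handler must not run.
 * Calls nest, so an outer disable_irq() keeps the line off until the
 * matching enable_irq(). "dev" and my_dev_reprogram() are hypothetical.
 *
 *	disable_irq(dev->irq);		// also waits for running handlers
 *	my_dev_reprogram(dev);		// handler cannot observe this state
 *	enable_irq(dev->irq);
 *
 * Because disable_irq() waits like synchronize_irq(), it must not be
 * called while holding a lock the handler itself takes.
 */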

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (desc->chip->set_wake)
		ret = desc->chip->set_wake(irq, on);

	return ret;
}

/**
 * set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int set_irq_wake(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret = 0;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				desc->status |= IRQ_WAKEUP;
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				desc->status &= ~IRQ_WAKEUP;
		}
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_irq_wake);

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;

	if (!desc)
		return 0;

	if (desc->status & IRQ_NOREQUEST)
		return 0;

	action = desc->action;
	if (action)
		if (irqflags & action->flags & IRQF_SHARED)
			action = NULL;

	return !action;
}

void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
	/*
	 * If the architecture still has not overridden
	 * the flow handler then zap the default. This
	 * should catch incorrect flow-type setting.
	 */
	if (desc->handle_irq == &handle_bad_irq)
		desc->handle_irq = NULL;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		unsigned long flags)
{
	int ret;
	struct irq_chip *chip = desc->chip;

	if (!chip || !chip->set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
				chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->set_type(irq, flags);

	if (ret)
		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
				(int)flags, irq, chip->set_type);
	else {
		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
			flags |= IRQ_LEVEL;
		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
		desc->status |= flags;
	}

	return ret;
}

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
	return -1;
}
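
/*
 * Example (illustrative sketch, not part of this file): relying on the
 * default primary handler above by passing handler == NULL. Combined with
 * IRQF_ONESHOT the line stays masked until the thread function returns,
 * which is what slow-bus (e.g. I2C) devices on level triggered lines need.
 * "client", "sensor" and my_sensor_thread_fn() are hypothetical.
 *
 *	ret = request_threaded_irq(client->irq, NULL, my_sensor_thread_fn,
 *				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
 *				   "my-sensor", sensor);
 *
 * Note that __setup_irq() below rejects IRQF_ONESHOT combined with
 * IRQF_SHARED.
 */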

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
{
	chip_bus_lock(irq, desc);
	raw_spin_lock_irq(&desc->lock);
	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
		desc->status &= ~IRQ_MASKED;
		desc->chip->unmask(irq);
	}
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(irq, desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	cpumask_copy(mask, desc->affinity);
	raw_spin_unlock_irq(&desc->lock);

	set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	int wake, oneshot = desc->status & IRQ_ONESHOT;

	sched_setscheduler(current, SCHED_FIFO, &param);
	current->irqaction = action;

	while (!irq_wait_for_interrupt(action)) {

		irq_thread_check_affinity(desc, action);

		atomic_inc(&desc->threads_active);

		raw_spin_lock_irq(&desc->lock);
		if (unlikely(desc->status & IRQ_DISABLED)) {
			/*
			 * CHECKME: We might need a dedicated
			 * IRQ_THREAD_PENDING flag here, which
			 * retriggers the thread in check_irq_resend()
			 * but AFAICT IRQ_PENDING should be fine as it
			 * retriggers the interrupt itself --- tglx
			 */
			desc->status |= IRQ_PENDING;
			raw_spin_unlock_irq(&desc->lock);
		} else {
			raw_spin_unlock_irq(&desc->lock);

			action->thread_fn(action->irq, action->dev_id);

			if (oneshot)
				irq_finalize_oneshot(action->irq, desc);
		}

		wake = atomic_dec_and_test(&desc->threads_active);

		if (wake && waitqueue_active(&desc->wait_for_threads))
			wake_up(&desc->wait_for_threads);
	}

	/*
	 * Clear irqaction. Otherwise exit_irq_thread() would make
	 * fuzz about an active irq thread going into nirvana.
	 */
	current->irqaction = NULL;
	return 0;
}

/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
	struct task_struct *tsk = current;

	if (!tsk->irqaction)
		return;

	printk(KERN_ERR
	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

	/*
	 * Set the THREAD DIED flag to prevent further wakeups of the
	 * soon to be gone threaded handler.
	 */
	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	const char *old_name = NULL;
	unsigned long flags;
	int nested, shared = 0;
	int ret;

	if (!desc)
		return -EINVAL;

	if (desc->chip == &no_irq_chip)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/* Oneshot interrupts are not allowed with shared */
	if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
		return -EINVAL;

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = desc->status & IRQ_NESTED_THREAD;
	if (nested) {
		if (!new->thread_fn)
			return -EINVAL;
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t))
			return PTR_ERR(t);
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
			old_name = old->name;
			goto mismatch;
		}

#if defined(CONFIG_IRQ_PER_CPU)
		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;
#endif

		/* add new interrupt at end of irq queue */
		do {
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	if (!shared) {
		irq_chip_set_defaults(desc->chip);

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_thread;
		} else
			compat_irq_chip_set_default_handler(desc);
#if defined(CONFIG_IRQ_PER_CPU)
		if (new->flags & IRQF_PERCPU)
			desc->status |= IRQ_PER_CPU;
#endif

		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);

		if (new->flags & IRQF_ONESHOT)
			desc->status |= IRQ_ONESHOT;

		if (!(desc->status & IRQ_NOAUTOEN)) {
			desc->depth = 0;
			desc->status &= ~IRQ_DISABLED;
			desc->chip->startup(irq);
		} else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING)
			desc->status |= IRQ_NO_BALANCING;

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc);

	} else if ((new->flags & IRQF_TRIGGER_MASK)
			&& (new->flags & IRQF_TRIGGER_MASK)
				!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
		/* hope the handler works with the actual trigger mode... */
		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
				irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
				(int)(new->flags & IRQF_TRIGGER_MASK));
	}

	new->irq = irq;
	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
		desc->status &= ~IRQ_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);

	return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
		dump_stack();
	}
#endif
	ret = -EBUSY;

out_thread:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
			kthread_stop(t);
		put_task_struct(t);
	}
	return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __setup_irq(irq, desc, act);
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	if (desc->chip->release)
		desc->chip->release(irq, dev_id);
#endif

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		desc->status |= IRQ_DISABLED;
		if (desc->chip->shutdown)
			desc->chip->shutdown(irq);
		else
			desc->chip->disable(irq);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		if (!test_bit(IRQTF_DIED, &action->thread_flags))
			kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

	chip_bus_lock(irq, desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(free_irq);

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	Primary handler for threaded interrupts.
 *	If NULL and thread_fn != NULL the default
 *	primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *	If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_DISABLED		Disable local interrupts while processing
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
	 * the _first_ irqaction (sigh).  That can cause oopsing, but
	 * the behavior is classified as "will not fix" so we need to
	 * start nudging drivers away from using that idiom.
	 */
	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
					(IRQF_SHARED|IRQF_DISABLED)) {
		pr_warning(
		  "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
			irq, devname);
	}

#ifdef CONFIG_LOCKDEP
	/*
	 * Lockdep wants atomic interrupt handlers:
	 */
	irqflags |= IRQF_DISABLED;
#endif
	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (desc->status & IRQ_NOREQUEST)
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(irq, desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(irq, desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
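
/*
 * Example (illustrative sketch, not part of this file): the split
 * primary/threaded handler design described above, for a shared level
 * triggered interrupt. All "my_*" names are hypothetical; the device
 * pointer doubles as the dev_id cookie so free_irq() can find the action.
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		if (!my_dev_irq_pending(dev))
 *			return IRQ_NONE;	// not ours (shared line)
 *		my_dev_mask_irq(dev);		// quiesce the device
 *		return IRQ_WAKE_THREAD;		// defer the real work
 *	}
 *
 *	static irqreturn_t my_slow_work(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		my_dev_process(dev);		// may sleep, runs in a kthread
 *		my_dev_unmask_irq(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(dev->irq, my_quick_check, my_slow_work,
 *				   IRQF_SHARED, "my-dev", dev);
 *	...
 *	free_irq(dev->irq, dev);
 */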