/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int status;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (desc->status & IRQ_INPROGRESS)
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		spin_lock_irqsave(&desc->lock, flags);
		status = desc->status;
		spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (status & IRQ_INPROGRESS);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
	    !desc->chip->set_affinity)
		return 0;

	return 1;
}

void
irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_cpus_allowed_ptr(action->thread, cpumask);
		action = action->next;
	}
}

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq: Interrupt to set affinity
 * @cpumask: affinity mask to set
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc->chip->set_affinity)
		return -EINVAL;

	spin_lock_irqsave(&desc->lock, flags);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (desc->status & IRQ_MOVE_PCNTXT) {
		if (!desc->chip->set_affinity(irq, cpumask)) {
			cpumask_copy(desc->affinity, cpumask);
			irq_set_thread_affinity(desc, cpumask);
		}
	} else {
		desc->status |= IRQ_MOVE_PENDING;
		cpumask_copy(desc->pending_mask, cpumask);
	}
#else
	if (!desc->chip->set_affinity(irq, cpumask)) {
		cpumask_copy(desc->affinity, cpumask);
		irq_set_thread_affinity(desc, cpumask);
	}
#endif
	desc->status |= IRQ_AFFINITY_SET;
	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
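
/*
 * Example (illustrative sketch, not part of this file): a driver running
 * in process context that wants to pin its interrupt to one CPU would
 * first check whether affinity can be set at all and then pass a cpumask.
 * "my_irq" and "pin_irq_to_cpu" are hypothetical names.
 */
#if 0
static void pin_irq_to_cpu(unsigned int my_irq, int cpu)
{
	cpumask_var_t mask;

	if (!irq_can_set_affinity(my_irq))
		return;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);
	/* 0 means the request was accepted (or queued as pending). */
	if (irq_set_affinity(my_irq, mask))
		pr_warning("could not set affinity of IRQ %u\n", my_irq);
	free_cpumask_var(mask);
}
#endif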
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
		if (cpumask_any_and(desc->affinity, cpu_online_mask)
		    < nr_cpu_ids)
			goto set_affinity;
		else
			desc->status &= ~IRQ_AFFINITY_SET;
	}

	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
	desc->chip->set_affinity(irq, desc->affinity);

	return 0;
}
#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc);
	if (!ret)
		irq_set_thread_affinity(desc, desc->affinity);
	spin_unlock_irqrestore(&desc->lock, flags);

	return ret;
}

#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_TIMER))
			return;
		desc->status |= IRQ_SUSPENDED;
	}

	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->chip->disable(irq);
	}
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	spin_lock_irqsave(&desc->lock, flags);
	__disable_irq(desc, irq, false);
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);
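
/*
 * Example (illustrative sketch): because disable_irq_nosync() does not
 * wait for running handlers, it is safe to call even from the handler
 * of the same interrupt line. The hypothetical handler below masks its
 * own line and defers the real work; "my_work" is assumed to have been
 * set up with INIT_WORK() at probe time, and the worker is assumed to
 * call enable_irq() when done.
 */
#if 0
static struct work_struct my_work;	/* INIT_WORK() done at probe time */

static irqreturn_t my_handler(int irq, void *dev_id)
{
	disable_irq_nosync(irq);	/* disable_irq() here would deadlock */
	schedule_work(&my_work);	/* worker re-enables with enable_irq() */
	return IRQ_HANDLED;
}
#endif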
/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume)
		desc->status &= ~IRQ_SUSPENDED;

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;

		if (desc->status & IRQ_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		desc->status = status | IRQ_NOPROBE;
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	spin_lock_irqsave(&desc->lock, flags);
	__enable_irq(desc, irq, false);
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (desc->chip->set_wake)
		ret = desc->chip->set_wake(irq, on);

	return ret;
}

/**
 * set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int set_irq_wake(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret = 0;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	spin_lock_irqsave(&desc->lock, flags);
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				desc->status |= IRQ_WAKEUP;
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				desc->status &= ~IRQ_WAKEUP;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_irq_wake);
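
/*
 * Example (illustrative sketch): a driver's suspend/resume pair keeping
 * the wake enable/disable calls balanced as required above. The function
 * names and the "may_wakeup" parameter are assumptions; a real driver
 * would typically derive the latter from device_may_wakeup().
 */
#if 0
static int my_suspend(unsigned int wake_irq, bool may_wakeup)
{
	if (may_wakeup)
		return set_irq_wake(wake_irq, 1);	/* arm for wakeup */
	return 0;
}

static int my_resume(unsigned int wake_irq, bool may_wakeup)
{
	if (may_wakeup)
		return set_irq_wake(wake_irq, 0);	/* balance the enable */
	return 0;
}
#endif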
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;

	if (!desc)
		return 0;

	if (desc->status & IRQ_NOREQUEST)
		return 0;

	action = desc->action;
	if (action)
		if (irqflags & action->flags & IRQF_SHARED)
			action = NULL;

	return !action;
}

void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
	/*
	 * If the architecture still has not overridden
	 * the flow handler then zap the default. This
	 * should catch incorrect flow-type setting.
	 */
	if (desc->handle_irq == &handle_bad_irq)
		desc->handle_irq = NULL;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		unsigned long flags)
{
	int ret;
	struct irq_chip *chip = desc->chip;

	if (!chip || !chip->set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->set_type(irq, flags);

	if (ret)
		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
		       (int)flags, irq, chip->set_type);
	else {
		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
			flags |= IRQ_LEVEL;
		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
		desc->status |= flags;
	}

	return ret;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
	return -1;
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	int wake;

	sched_setscheduler(current, SCHED_FIFO, &param);
	current->irqaction = action;

	while (!irq_wait_for_interrupt(action)) {

		atomic_inc(&desc->threads_active);

		spin_lock_irq(&desc->lock);
		if (unlikely(desc->status & IRQ_DISABLED)) {
			/*
			 * CHECKME: We might need a dedicated
			 * IRQ_THREAD_PENDING flag here, which
			 * retriggers the thread in check_irq_resend()
			 * but AFAICT IRQ_PENDING should be fine as it
			 * retriggers the interrupt itself --- tglx
			 */
			desc->status |= IRQ_PENDING;
			spin_unlock_irq(&desc->lock);
		} else {
			spin_unlock_irq(&desc->lock);

			action->thread_fn(action->irq, action->dev_id);
		}

		wake = atomic_dec_and_test(&desc->threads_active);

		if (wake && waitqueue_active(&desc->wait_for_threads))
			wake_up(&desc->wait_for_threads);
	}

	/*
	 * Clear irqaction. Otherwise exit_irq_thread() would make
	 * a fuss about an active irq thread going into nirvana.
	 */
	current->irqaction = NULL;
	return 0;
}
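
/*
 * Example (illustrative sketch): the split handler design that the
 * thread above services. The hypothetical primary handler runs in hard
 * interrupt context, checks whether the device raised the interrupt,
 * quiets it, and returns IRQ_WAKE_THREAD so the threaded handler runs.
 * "struct my_hw" and all "my_hw_*" helpers are assumptions.
 */
#if 0
static irqreturn_t my_quick_check(int irq, void *dev_id)
{
	struct my_hw *hw = dev_id;	/* hypothetical device structure */

	if (!my_hw_irq_is_mine(hw))	/* assumed helper */
		return IRQ_NONE;	/* shared line: not our device */

	my_hw_mask_irq(hw);		/* assumed helper: quiet the device */
	return IRQ_WAKE_THREAD;		/* have irq_thread() run thread_fn */
}

static irqreturn_t my_slow_work(int irq, void *dev_id)
{
	struct my_hw *hw = dev_id;

	my_hw_process(hw);		/* may sleep: we run in a kthread */
	my_hw_unmask_irq(hw);
	return IRQ_HANDLED;
}
#endif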
/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
	struct task_struct *tsk = current;

	if (!tsk->irqaction)
		return;

	printk(KERN_ERR
	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

	/*
	 * Set the THREAD DIED flag to prevent further wakeups of the
	 * soon to be gone threaded handler.
	 */
	set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
}
/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	const char *old_name = NULL;
	unsigned long flags;
	int shared = 0;
	int ret;

	if (!desc)
		return -EINVAL;

	if (desc->chip == &no_irq_chip)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is loaded without actually installing a new
		 * handler, but is this really a problem? Only the
		 * sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * Threaded handler ?
	 */
	if (new->thread_fn) {
		struct task_struct *t;

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t))
			return PTR_ERR(t);
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
		wake_up_process(t);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
			old_name = old->name;
			goto mismatch;
		}

#if defined(CONFIG_IRQ_PER_CPU)
		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;
#endif

		/* add new interrupt at end of irq queue */
		do {
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	if (!shared) {
		irq_chip_set_defaults(desc->chip);

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge, polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_thread;
		} else
			compat_irq_chip_set_default_handler(desc);
#if defined(CONFIG_IRQ_PER_CPU)
		if (new->flags & IRQF_PERCPU)
			desc->status |= IRQ_PER_CPU;
#endif

		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);

		if (!(desc->status & IRQ_NOAUTOEN)) {
			desc->depth = 0;
			desc->status &= ~IRQ_DISABLED;
			desc->chip->startup(irq);
		} else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING)
			desc->status |= IRQ_NO_BALANCING;

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc);

	} else if ((new->flags & IRQF_TRIGGER_MASK)
			&& (new->flags & IRQF_TRIGGER_MASK)
				!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
		/* hope the handler works with the actual trigger mode... */
		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
			   irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
			   (int)(new->flags & IRQF_TRIGGER_MASK));
	}

	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
		desc->status &= ~IRQ_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	spin_unlock_irqrestore(&desc->lock, flags);

	new->irq = irq;
	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);

	return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
		dump_stack();
	}
#endif
	ret = -EBUSY;

out_thread:
	spin_unlock_irqrestore(&desc->lock, flags);
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
			kthread_stop(t);
		put_task_struct(t);
	}
	return ret;
}
/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically set up interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __setup_irq(irq, desc, act);
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	struct task_struct *irqthread;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	if (desc->chip->release)
		desc->chip->release(irq, dev_id);
#endif

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		desc->status |= IRQ_DISABLED;
		if (desc->chip->shutdown)
			desc->chip->shutdown(irq);
		else
			desc->chip->disable(irq);
	}

	irqthread = action->thread;
	action->thread = NULL;

	spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

	if (irqthread) {
		if (!test_bit(IRQTF_DIED, &action->thread_flags))
			kthread_stop(irqthread);
		put_task_struct(irqthread);
	}

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now that it's being freed, so let's make
	 * sure that is so by doing an extra call to the handler ....
	 *
	 * (We do this after actually deregistering it, to make sure that
	 * a 'real' IRQ doesn't run in parallel with our fake.)
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif
	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically set up by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);
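
/*
 * Example (illustrative sketch): architecture code typically uses
 * setup_irq() with a statically allocated irqaction before the
 * allocators are fully up, e.g. for a timer interrupt. The names below
 * are assumptions loosely modeled on arch code, not taken from any
 * particular architecture.
 */
#if 0
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	/* ... tick handling ... */
	return IRQ_HANDLED;
}

static struct irqaction timer_action = {
	.handler	= timer_interrupt,
	.flags		= IRQF_DISABLED | IRQF_TIMER,
	.name		= "timer",
};

void __init my_time_init(void)
{
	setup_irq(0, &timer_action);	/* hypothetical: IRQ 0 is the timer */
}
#endif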
/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	kfree(__free_irq(irq, dev_id));
}
EXPORT_SYMBOL(free_irq);

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts
 * @thread_fn: Function called from the irq handler thread
 *	       If NULL, no irq thread is created
 * @irqflags: Interrupt type flags
 * @devname: An ASCII name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_DISABLED		Disable local interrupts while processing
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
	 * the _first_ irqaction (sigh). That can cause oopsing, but
	 * the behavior is classified as "will not fix" so we need to
	 * start nudging drivers away from using that idiom.
	 */
	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
			(IRQF_SHARED|IRQF_DISABLED)) {
		pr_warning(
		  "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
			irq, devname);
	}

#ifdef CONFIG_LOCKDEP
	/*
	 * Lockdep wants atomic interrupt handlers:
	 */
	irqflags |= IRQF_DISABLED;
#endif
	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (desc->status & IRQ_NOREQUEST)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ
	if (irqflags & IRQF_SHARED) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
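
/*
 * Example (illustrative sketch): probe-time allocation and teardown,
 * reusing the hypothetical my_quick_check()/my_slow_work() pair sketched
 * after irq_thread() above. Passing a NULL @thread_fn degrades to the
 * classic request_irq() behaviour; here both halves are installed.
 * "struct my_hw" and the function names are assumptions.
 */
#if 0
static int my_probe(struct my_hw *hw)
{
	int ret;

	ret = request_threaded_irq(hw->irq, my_quick_check, my_slow_work,
				   IRQF_SHARED, "my_hw", hw);
	if (ret)
		return ret;
	/* From this point on the handlers may be invoked. */
	return 0;
}

static void my_remove(struct my_hw *hw)
{
	/* Waits for running handlers; must not be called from IRQ context. */
	free_irq(hw->irq, hw);
}
#endif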