/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need you
 * will deadlock. It does not take associated threaded handlers
 * into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc)
		__synchronize_hardirq(desc);
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
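
/*
 * Usage sketch, for illustration only: teardown ordering around
 * synchronize_irq().  The foo_* names, registers and helpers are
 * hypothetical; the point is that the caller must not hold anything the
 * handler needs, and that threaded handlers are waited for as well.
 */
#if 0
static void foo_shutdown(struct foo_dev *foo)
{
	/* Stop the device from raising new interrupts first. */
	writel(0, foo->regs + FOO_IRQ_ENABLE);
	/* Wait for both the hardirq and the threaded handler to finish. */
	synchronize_irq(foo->irq);
	/* Only now is it safe to tear down state the handlers use. */
	kfree(foo->rx_buf);
}
#endif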

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(data->affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
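
/*
 * Usage sketch, for illustration only: a multi-queue driver publishing a
 * per-queue affinity hint so that userspace (e.g. irqbalance) can spread
 * the vectors.  The foo_* names and the per-queue layout are hypothetical.
 */
#if 0
static void foo_set_queue_hints(struct foo_dev *foo)
{
	unsigned int cpu = cpumask_first(cpu_online_mask);
	int i;

	for (i = 0; i < foo->nr_queues; i++) {
		/* The hint mask must stay valid until cleared with NULL. */
		cpumask_clear(&foo->queue[i].hint_mask);
		cpumask_set_cpu(cpu, &foo->queue[i].hint_mask);
		irq_set_affinity_hint(foo->queue[i].irq,
				      &foo->queue[i].hint_mask);

		cpu = cpumask_next(cpu, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(cpu_online_mask);
	}
}
#endif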

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq:	Interrupt for which to enable/disable notification
 * @notify:	Context for notification, or %NULL to disable
 *		notification.  Function pointers must be initialised;
 *		the other fields will be initialised by this function.
 *
 * Must be called in process context.  Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
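
/*
 * Usage sketch, for illustration only: it follows the contract above - the
 * caller fills in the notify() and release() callbacks, this code
 * initialises the rest.  The foo_* names and foo_rebalance_queues() are
 * hypothetical.
 */
#if 0
static void foo_affinity_changed(struct irq_affinity_notify *notify,
				 const cpumask_t *mask)
{
	struct foo_dev *foo = container_of(notify, struct foo_dev,
					   affinity_notify);

	/* Re-steer queue/DMA resources towards the new CPU set. */
	foo_rebalance_queues(foo, mask);
}

static void foo_affinity_release(struct kref *ref)
{
	/* Nothing dynamically allocated in this sketch. */
}

static int foo_watch_affinity(struct foo_dev *foo)
{
	foo->affinity_notify.notify = foo_affinity_changed;
	foo->affinity_notify.release = foo_affinity_release;
	return irq_set_affinity_notifier(foo->irq, &foo->affinity_notify);
}
#endif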

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = desc->irq_data.node;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
			return;
		desc->istate |= IRQS_SUSPENDED;
	}

	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc, irq, false);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Disables and enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Enables and disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume) {
		if (!(desc->istate & IRQS_SUSPENDED)) {
			if (!desc->action)
				return;
			if (!(desc->action->flags & IRQF_FORCE_RESUME))
				return;
			/* Pretend that it got disabled ! */
			desc->depth++;
		}
		desc->istate &= ~IRQS_SUSPENDED;
	}

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq().  If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc, irq, false);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
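
/*
 * Usage sketch, for illustration only: disable_irq()/enable_irq() nest, so
 * every disable needs a matching enable, and disable_irq() additionally
 * waits for running handlers.  The foo_* names are hypothetical.
 */
#if 0
static void foo_reset_hw(struct foo_dev *foo)
{
	/* Waits until a running (hard or threaded) handler has finished. */
	disable_irq(foo->irq);

	foo_do_reset(foo);	/* no handler can run concurrently here */

	/* Must balance the disable above, otherwise the line stays off. */
	enable_irq(foo->irq);
}
#endif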

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on:  enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default.  Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/*
	 * Wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
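
/*
 * Usage sketch, for illustration only: a driver usually flips wake mode in
 * its suspend/resume callbacks so the enable/disable calls stay balanced.
 * The foo_* names are hypothetical; device_may_wakeup() is the usual gate.
 */
#if 0
static int foo_suspend(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(foo->irq, 1);	/* arm wakeup */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(foo->irq, 0);	/* balance the enable */
	return 0;
}
#endif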

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		      unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
#endif

/*
 * Default primary interrupt handler for threaded interrupts.  Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL.  Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts.  Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished.  Unmask it if the interrupt has not been
 * disabled and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT))
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect ourselves
	 * against the following scenario:
	 *
	 * The thread finishes faster than the hard interrupt handler
	 * on the other CPU.  If we unmask the irq line then the
	 * interrupt can come in again and mask the line, leave due
	 * to IRQS_INPROGRESS, and the irq line stays masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread().  See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run.  Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (desc->irq_data.affinity)
		cpumask_copy(mask, desc->irq_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_HANDLED)
			atomic_inc(&desc->threads_handled);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out current's thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq:	Interrupt line
 * @dev_id:	Device identity for which the thread should be woken
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for (action = desc->action; action; action = action->next) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);

static void irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return;

	new->flags |= IRQF_ONESHOT;

	if (!new->thread_fn) {
		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
		new->thread_fn = new->handler;
		new->handler = irq_default_primary_handler;
	}
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc))
			irq_setup_forced_threading(new);
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;
		static const struct sched_param param = {
			.sched_priority = MAX_USER_RT_PRIO/2,
		};

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t)) {
			ret = PTR_ERR(t);
			goto out_mput;
		}

		sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
		/*
		 * Tell the thread to set its affinity. This is
		 * important for shared interrupt handlers as we do
		 * not invoke setup_affinity() for the secondary
		 * handlers as everything is already set up. Even for
		 * interrupts marked with IRQF_NOBALANCING this is
		 * correct as we want the thread to move to the cpu(s)
		 * on which the requesting code placed the interrupt.
		 */
		set_bit(IRQTF_AFFINITY, &new->thread_flags);
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp. 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_mask;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->thread_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1 << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_mask;
	}

	if (!shared) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_mask;
		}

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED |
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc, true);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irq_settings_get_trigger_mask(desc);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warning("irq %d uses trigger mode %u; requested %u\n",
				   irq, omsk, nmsk);
	}

	new->irq = irq;
	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	free_cpumask_var(mask);

	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq:	Interrupt line to setup
 * @act:	irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_shutdown(desc);
		irq_release_resources(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * (We do this after actually deregistering it, to make sure that a
	 * 'real' IRQ doesn't run in parallel with our fake.)
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	module_put(desc->owner);
	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq:	Interrupt line to free
 * @act:	irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq:	Interrupt line to free
 * @dev_id:	Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq:	Interrupt line to allocate
 * @handler:	Function to be called when the IRQ occurs.
 *		Primary handler for threaded interrupts.
 *		If NULL and thread_fn != NULL the default
 *		primary handler is installed.
 * @thread_fn:	Function called from the irq handler thread.
 *		If NULL, no irq thread is created.
 * @irqflags:	Interrupt type flags
 * @devname:	An ascii name for the claiming device
 * @dev_id:	A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
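
/*
 * Usage sketch, for illustration only: the split primary/threaded handler
 * design described above, for a shared, level-triggered device.  The foo_*
 * names, registers and helpers are hypothetical.
 */
#if 0
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	/* Check whether this device raised the interrupt (shared line). */
	if (!(readl(foo->regs + FOO_IRQ_STATUS) & FOO_IRQ_PENDING))
		return IRQ_NONE;

	/* Quiesce the device, then defer the slow work to the thread. */
	writel(0, foo->regs + FOO_IRQ_ENABLE);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_threaded(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	foo_process_events(foo);	/* may sleep, talk to slow buses, ... */
	writel(FOO_IRQ_PENDING, foo->regs + FOO_IRQ_ENABLE);
	return IRQ_HANDLED;
}

static int foo_request_irq(struct foo_dev *foo)
{
	/* dev_id doubles as the cookie passed back to both handlers. */
	return request_threaded_irq(foo->irq, foo_hardirq, foo_threaded,
				    IRQF_SHARED, "foo", foo);
}
#endif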

/**
 * request_any_context_irq - allocate an interrupt line
 * @irq:	Interrupt line to allocate
 * @handler:	Function to be called when the IRQ occurs.
 *		Threaded handler for threaded interrupts.
 * @flags:	Interrupt type flags
 * @name:	An ascii name for the claiming device
 * @dev_id:	A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, irq, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq:	Interrupt line to free
 * @act:	irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq:	Interrupt line to free
 * @dev_id:	Device identity to free
 *
 * Remove a percpu interrupt handler. The handler is removed, but
 * the interrupt line is not disabled. This must be done on each
 * CPU before calling this function. The function does not return
 * until any executing interrupts for this IRQ have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}

/**
 * setup_percpu_irq - setup a per-cpu interrupt
 * @irq:	Interrupt line to setup
 * @act:	irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}

/**
 * request_percpu_irq - allocate a percpu interrupt line
 * @irq:	Interrupt line to allocate
 * @handler:	Function to be called when the IRQ occurs.
 * @devname:	An ascii name for the claiming device
 * @dev_id:	A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources, but doesn't
 * automatically enable the interrupt. It has to be done on each
 * CPU using enable_percpu_irq().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 */
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
		       const char *devname, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

	return retval;
}
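
/*
 * Usage sketch, for illustration only: a per-cpu IRQ (e.g. a local timer)
 * is requested once with a percpu cookie and then enabled on each CPU
 * separately.  The foo_* names are hypothetical.
 */
#if 0
static DEFINE_PER_CPU(struct foo_percpu_state, foo_state);

static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
{
	/* dev_id is this CPU's instance of the percpu variable. */
	struct foo_percpu_state *state = dev_id;

	state->events++;
	return IRQ_HANDLED;
}

static int foo_percpu_init(unsigned int irq)
{
	int ret;

	ret = request_percpu_irq(irq, foo_percpu_handler, "foo_percpu",
				 &foo_state);
	if (ret)
		return ret;

	/* Typically done from a CPU hotplug/starting callback on each CPU. */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}
#endif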