11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * linux/kernel/irq/manage.c 31da177e4SLinus Torvalds * 4a34db9b2SIngo Molnar * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar 5a34db9b2SIngo Molnar * Copyright (C) 2005-2006 Thomas Gleixner 61da177e4SLinus Torvalds * 71da177e4SLinus Torvalds * This file contains driver APIs to the irq subsystem. 81da177e4SLinus Torvalds */ 91da177e4SLinus Torvalds 101da177e4SLinus Torvalds #include <linux/irq.h> 111da177e4SLinus Torvalds #include <linux/module.h> 121da177e4SLinus Torvalds #include <linux/random.h> 131da177e4SLinus Torvalds #include <linux/interrupt.h> 141aeb272cSRobert P. J. Day #include <linux/slab.h> 151da177e4SLinus Torvalds 161da177e4SLinus Torvalds #include "internals.h" 171da177e4SLinus Torvalds 181da177e4SLinus Torvalds #ifdef CONFIG_SMP 19d036e67bSRusty Russell cpumask_var_t irq_default_affinity; 201da177e4SLinus Torvalds 21d036e67bSRusty Russell static int init_irq_default_affinity(void) 22d036e67bSRusty Russell { 23d036e67bSRusty Russell alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL); 24d036e67bSRusty Russell cpumask_setall(irq_default_affinity); 25d036e67bSRusty Russell return 0; 26d036e67bSRusty Russell } 27d036e67bSRusty Russell core_initcall(init_irq_default_affinity); 2818404756SMax Krasnyansky 291da177e4SLinus Torvalds /** 301da177e4SLinus Torvalds * synchronize_irq - wait for pending IRQ handlers (on other CPUs) 311e5d5331SRandy Dunlap * @irq: interrupt number to wait for 321da177e4SLinus Torvalds * 331da177e4SLinus Torvalds * This function waits for any pending IRQ handlers for this interrupt 341da177e4SLinus Torvalds * to complete before returning. If you use this function while 351da177e4SLinus Torvalds * holding a resource the IRQ handler may need you will deadlock. 361da177e4SLinus Torvalds * 371da177e4SLinus Torvalds * This function may be called - with care - from IRQ context. 
381da177e4SLinus Torvalds */ 391da177e4SLinus Torvalds void synchronize_irq(unsigned int irq) 401da177e4SLinus Torvalds { 41cb5bc832SYinghai Lu struct irq_desc *desc = irq_to_desc(irq); 42a98ce5c6SHerbert Xu unsigned int status; 431da177e4SLinus Torvalds 447d94f7caSYinghai Lu if (!desc) 45c2b5a251SMatthew Wilcox return; 46c2b5a251SMatthew Wilcox 47a98ce5c6SHerbert Xu do { 48a98ce5c6SHerbert Xu unsigned long flags; 49a98ce5c6SHerbert Xu 50a98ce5c6SHerbert Xu /* 51a98ce5c6SHerbert Xu * Wait until we're out of the critical section. This might 52a98ce5c6SHerbert Xu * give the wrong answer due to the lack of memory barriers. 53a98ce5c6SHerbert Xu */ 541da177e4SLinus Torvalds while (desc->status & IRQ_INPROGRESS) 551da177e4SLinus Torvalds cpu_relax(); 56a98ce5c6SHerbert Xu 57a98ce5c6SHerbert Xu /* Ok, that indicated we're done: double-check carefully. */ 58a98ce5c6SHerbert Xu spin_lock_irqsave(&desc->lock, flags); 59a98ce5c6SHerbert Xu status = desc->status; 60a98ce5c6SHerbert Xu spin_unlock_irqrestore(&desc->lock, flags); 61a98ce5c6SHerbert Xu 62a98ce5c6SHerbert Xu /* Oops, that failed? 
*/ 63a98ce5c6SHerbert Xu } while (status & IRQ_INPROGRESS); 641da177e4SLinus Torvalds } 651da177e4SLinus Torvalds EXPORT_SYMBOL(synchronize_irq); 661da177e4SLinus Torvalds 67771ee3b0SThomas Gleixner /** 68771ee3b0SThomas Gleixner * irq_can_set_affinity - Check if the affinity of a given irq can be set 69771ee3b0SThomas Gleixner * @irq: Interrupt to check 70771ee3b0SThomas Gleixner * 71771ee3b0SThomas Gleixner */ 72771ee3b0SThomas Gleixner int irq_can_set_affinity(unsigned int irq) 73771ee3b0SThomas Gleixner { 7408678b08SYinghai Lu struct irq_desc *desc = irq_to_desc(irq); 75771ee3b0SThomas Gleixner 76771ee3b0SThomas Gleixner if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip || 77771ee3b0SThomas Gleixner !desc->chip->set_affinity) 78771ee3b0SThomas Gleixner return 0; 79771ee3b0SThomas Gleixner 80771ee3b0SThomas Gleixner return 1; 81771ee3b0SThomas Gleixner } 82771ee3b0SThomas Gleixner 83771ee3b0SThomas Gleixner /** 84771ee3b0SThomas Gleixner * irq_set_affinity - Set the irq affinity of a given irq 85771ee3b0SThomas Gleixner * @irq: Interrupt to set affinity 86771ee3b0SThomas Gleixner * @cpumask: cpumask 87771ee3b0SThomas Gleixner * 88771ee3b0SThomas Gleixner */ 890de26520SRusty Russell int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) 90771ee3b0SThomas Gleixner { 9108678b08SYinghai Lu struct irq_desc *desc = irq_to_desc(irq); 92f6d87f4bSThomas Gleixner unsigned long flags; 93771ee3b0SThomas Gleixner 94771ee3b0SThomas Gleixner if (!desc->chip->set_affinity) 95771ee3b0SThomas Gleixner return -EINVAL; 96771ee3b0SThomas Gleixner 97f6d87f4bSThomas Gleixner spin_lock_irqsave(&desc->lock, flags); 98f6d87f4bSThomas Gleixner 99771ee3b0SThomas Gleixner #ifdef CONFIG_GENERIC_PENDING_IRQ 100932775a4Svenkatesh.pallipadi@intel.com if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { 1010de26520SRusty Russell cpumask_copy(&desc->affinity, cpumask); 10272b1e22dSSuresh Siddha desc->chip->set_affinity(irq, cpumask); 103f6d87f4bSThomas Gleixner } 
else { 104f6d87f4bSThomas Gleixner desc->status |= IRQ_MOVE_PENDING; 1050de26520SRusty Russell cpumask_copy(&desc->pending_mask, cpumask); 106f6d87f4bSThomas Gleixner } 107771ee3b0SThomas Gleixner #else 1080de26520SRusty Russell cpumask_copy(&desc->affinity, cpumask); 109771ee3b0SThomas Gleixner desc->chip->set_affinity(irq, cpumask); 110771ee3b0SThomas Gleixner #endif 111f6d87f4bSThomas Gleixner desc->status |= IRQ_AFFINITY_SET; 112f6d87f4bSThomas Gleixner spin_unlock_irqrestore(&desc->lock, flags); 113771ee3b0SThomas Gleixner return 0; 114771ee3b0SThomas Gleixner } 115771ee3b0SThomas Gleixner 11618404756SMax Krasnyansky #ifndef CONFIG_AUTO_IRQ_AFFINITY 11718404756SMax Krasnyansky /* 11818404756SMax Krasnyansky * Generic version of the affinity autoselector. 11918404756SMax Krasnyansky */ 120f6d87f4bSThomas Gleixner int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) 12118404756SMax Krasnyansky { 12218404756SMax Krasnyansky if (!irq_can_set_affinity(irq)) 12318404756SMax Krasnyansky return 0; 12418404756SMax Krasnyansky 125f6d87f4bSThomas Gleixner /* 126f6d87f4bSThomas Gleixner * Preserve an userspace affinity setup, but make sure that 127f6d87f4bSThomas Gleixner * one of the targets is online. 
128f6d87f4bSThomas Gleixner */ 129612e3684SThomas Gleixner if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { 1300de26520SRusty Russell if (cpumask_any_and(&desc->affinity, cpu_online_mask) 1310de26520SRusty Russell < nr_cpu_ids) 1320de26520SRusty Russell goto set_affinity; 133f6d87f4bSThomas Gleixner else 134f6d87f4bSThomas Gleixner desc->status &= ~IRQ_AFFINITY_SET; 135f6d87f4bSThomas Gleixner } 136f6d87f4bSThomas Gleixner 137d036e67bSRusty Russell cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity); 1380de26520SRusty Russell set_affinity: 1390de26520SRusty Russell desc->chip->set_affinity(irq, &desc->affinity); 14018404756SMax Krasnyansky 14118404756SMax Krasnyansky return 0; 14218404756SMax Krasnyansky } 143f6d87f4bSThomas Gleixner #else 144f6d87f4bSThomas Gleixner static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d) 145f6d87f4bSThomas Gleixner { 146f6d87f4bSThomas Gleixner return irq_select_affinity(irq); 147f6d87f4bSThomas Gleixner } 14818404756SMax Krasnyansky #endif 14918404756SMax Krasnyansky 150f6d87f4bSThomas Gleixner /* 151f6d87f4bSThomas Gleixner * Called when affinity is set via /proc/irq 152f6d87f4bSThomas Gleixner */ 153f6d87f4bSThomas Gleixner int irq_select_affinity_usr(unsigned int irq) 154f6d87f4bSThomas Gleixner { 155f6d87f4bSThomas Gleixner struct irq_desc *desc = irq_to_desc(irq); 156f6d87f4bSThomas Gleixner unsigned long flags; 157f6d87f4bSThomas Gleixner int ret; 158f6d87f4bSThomas Gleixner 159f6d87f4bSThomas Gleixner spin_lock_irqsave(&desc->lock, flags); 160f6d87f4bSThomas Gleixner ret = do_irq_select_affinity(irq, desc); 161f6d87f4bSThomas Gleixner spin_unlock_irqrestore(&desc->lock, flags); 162f6d87f4bSThomas Gleixner 163f6d87f4bSThomas Gleixner return ret; 164f6d87f4bSThomas Gleixner } 165f6d87f4bSThomas Gleixner 166f6d87f4bSThomas Gleixner #else 167f131e243SIngo Molnar static inline int do_irq_select_affinity(int irq, struct irq_desc *desc) 168f6d87f4bSThomas Gleixner { 
169f6d87f4bSThomas Gleixner return 0; 170f6d87f4bSThomas Gleixner } 1711da177e4SLinus Torvalds #endif 1721da177e4SLinus Torvalds 1731da177e4SLinus Torvalds /** 1741da177e4SLinus Torvalds * disable_irq_nosync - disable an irq without waiting 1751da177e4SLinus Torvalds * @irq: Interrupt to disable 1761da177e4SLinus Torvalds * 1771da177e4SLinus Torvalds * Disable the selected interrupt line. Disables and Enables are 1781da177e4SLinus Torvalds * nested. 1791da177e4SLinus Torvalds * Unlike disable_irq(), this function does not ensure existing 1801da177e4SLinus Torvalds * instances of the IRQ handler have completed before returning. 1811da177e4SLinus Torvalds * 1821da177e4SLinus Torvalds * This function may be called from IRQ context. 1831da177e4SLinus Torvalds */ 1841da177e4SLinus Torvalds void disable_irq_nosync(unsigned int irq) 1851da177e4SLinus Torvalds { 186d3c60047SThomas Gleixner struct irq_desc *desc = irq_to_desc(irq); 1871da177e4SLinus Torvalds unsigned long flags; 1881da177e4SLinus Torvalds 1897d94f7caSYinghai Lu if (!desc) 190c2b5a251SMatthew Wilcox return; 191c2b5a251SMatthew Wilcox 1921da177e4SLinus Torvalds spin_lock_irqsave(&desc->lock, flags); 1931da177e4SLinus Torvalds if (!desc->depth++) { 1941da177e4SLinus Torvalds desc->status |= IRQ_DISABLED; 195d1bef4edSIngo Molnar desc->chip->disable(irq); 1961da177e4SLinus Torvalds } 1971da177e4SLinus Torvalds spin_unlock_irqrestore(&desc->lock, flags); 1981da177e4SLinus Torvalds } 1991da177e4SLinus Torvalds EXPORT_SYMBOL(disable_irq_nosync); 2001da177e4SLinus Torvalds 2011da177e4SLinus Torvalds /** 2021da177e4SLinus Torvalds * disable_irq - disable an irq and wait for completion 2031da177e4SLinus Torvalds * @irq: Interrupt to disable 2041da177e4SLinus Torvalds * 2051da177e4SLinus Torvalds * Disable the selected interrupt line. Enables and Disables are 2061da177e4SLinus Torvalds * nested. 
2071da177e4SLinus Torvalds * This function waits for any pending IRQ handlers for this interrupt 2081da177e4SLinus Torvalds * to complete before returning. If you use this function while 2091da177e4SLinus Torvalds * holding a resource the IRQ handler may need you will deadlock. 2101da177e4SLinus Torvalds * 2111da177e4SLinus Torvalds * This function may be called - with care - from IRQ context. 2121da177e4SLinus Torvalds */ 2131da177e4SLinus Torvalds void disable_irq(unsigned int irq) 2141da177e4SLinus Torvalds { 215d3c60047SThomas Gleixner struct irq_desc *desc = irq_to_desc(irq); 2161da177e4SLinus Torvalds 2177d94f7caSYinghai Lu if (!desc) 218c2b5a251SMatthew Wilcox return; 219c2b5a251SMatthew Wilcox 2201da177e4SLinus Torvalds disable_irq_nosync(irq); 2211da177e4SLinus Torvalds if (desc->action) 2221da177e4SLinus Torvalds synchronize_irq(irq); 2231da177e4SLinus Torvalds } 2241da177e4SLinus Torvalds EXPORT_SYMBOL(disable_irq); 2251da177e4SLinus Torvalds 2261adb0850SThomas Gleixner static void __enable_irq(struct irq_desc *desc, unsigned int irq) 2271adb0850SThomas Gleixner { 2281adb0850SThomas Gleixner switch (desc->depth) { 2291adb0850SThomas Gleixner case 0: 230b8c512f6SArjan van de Ven WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); 2311adb0850SThomas Gleixner break; 2321adb0850SThomas Gleixner case 1: { 2331adb0850SThomas Gleixner unsigned int status = desc->status & ~IRQ_DISABLED; 2341adb0850SThomas Gleixner 2351adb0850SThomas Gleixner /* Prevent probing on this irq: */ 2361adb0850SThomas Gleixner desc->status = status | IRQ_NOPROBE; 2371adb0850SThomas Gleixner check_irq_resend(desc, irq); 2381adb0850SThomas Gleixner /* fall-through */ 2391adb0850SThomas Gleixner } 2401adb0850SThomas Gleixner default: 2411adb0850SThomas Gleixner desc->depth--; 2421adb0850SThomas Gleixner } 2431adb0850SThomas Gleixner } 2441adb0850SThomas Gleixner 2451da177e4SLinus Torvalds /** 2461da177e4SLinus Torvalds * enable_irq - enable handling of an irq 2471da177e4SLinus 
Torvalds * @irq: Interrupt to enable 2481da177e4SLinus Torvalds * 2491da177e4SLinus Torvalds * Undoes the effect of one call to disable_irq(). If this 2501da177e4SLinus Torvalds * matches the last disable, processing of interrupts on this 2511da177e4SLinus Torvalds * IRQ line is re-enabled. 2521da177e4SLinus Torvalds * 2531da177e4SLinus Torvalds * This function may be called from IRQ context. 2541da177e4SLinus Torvalds */ 2551da177e4SLinus Torvalds void enable_irq(unsigned int irq) 2561da177e4SLinus Torvalds { 257d3c60047SThomas Gleixner struct irq_desc *desc = irq_to_desc(irq); 2581da177e4SLinus Torvalds unsigned long flags; 2591da177e4SLinus Torvalds 2607d94f7caSYinghai Lu if (!desc) 261c2b5a251SMatthew Wilcox return; 262c2b5a251SMatthew Wilcox 2631da177e4SLinus Torvalds spin_lock_irqsave(&desc->lock, flags); 2641adb0850SThomas Gleixner __enable_irq(desc, irq); 2651da177e4SLinus Torvalds spin_unlock_irqrestore(&desc->lock, flags); 2661da177e4SLinus Torvalds } 2671da177e4SLinus Torvalds EXPORT_SYMBOL(enable_irq); 2681da177e4SLinus Torvalds 2690c5d1eb7SDavid Brownell static int set_irq_wake_real(unsigned int irq, unsigned int on) 2702db87321SUwe Kleine-König { 27108678b08SYinghai Lu struct irq_desc *desc = irq_to_desc(irq); 2722db87321SUwe Kleine-König int ret = -ENXIO; 2732db87321SUwe Kleine-König 2742db87321SUwe Kleine-König if (desc->chip->set_wake) 2752db87321SUwe Kleine-König ret = desc->chip->set_wake(irq, on); 2762db87321SUwe Kleine-König 2772db87321SUwe Kleine-König return ret; 2782db87321SUwe Kleine-König } 2792db87321SUwe Kleine-König 280ba9a2331SThomas Gleixner /** 281ba9a2331SThomas Gleixner * set_irq_wake - control irq power management wakeup 282ba9a2331SThomas Gleixner * @irq: interrupt to control 283ba9a2331SThomas Gleixner * @on: enable/disable power management wakeup 284ba9a2331SThomas Gleixner * 28515a647ebSDavid Brownell * Enable/disable power management wakeup mode, which is 28615a647ebSDavid Brownell * disabled by default. 
Enables and disables must match, 28715a647ebSDavid Brownell * just as they match for non-wakeup mode support. 28815a647ebSDavid Brownell * 28915a647ebSDavid Brownell * Wakeup mode lets this IRQ wake the system from sleep 29015a647ebSDavid Brownell * states like "suspend to RAM". 291ba9a2331SThomas Gleixner */ 292ba9a2331SThomas Gleixner int set_irq_wake(unsigned int irq, unsigned int on) 293ba9a2331SThomas Gleixner { 29408678b08SYinghai Lu struct irq_desc *desc = irq_to_desc(irq); 295ba9a2331SThomas Gleixner unsigned long flags; 2962db87321SUwe Kleine-König int ret = 0; 297ba9a2331SThomas Gleixner 29815a647ebSDavid Brownell /* wakeup-capable irqs can be shared between drivers that 29915a647ebSDavid Brownell * don't need to have the same sleep mode behaviors. 30015a647ebSDavid Brownell */ 301ba9a2331SThomas Gleixner spin_lock_irqsave(&desc->lock, flags); 30215a647ebSDavid Brownell if (on) { 3032db87321SUwe Kleine-König if (desc->wake_depth++ == 0) { 3042db87321SUwe Kleine-König ret = set_irq_wake_real(irq, on); 3052db87321SUwe Kleine-König if (ret) 3062db87321SUwe Kleine-König desc->wake_depth = 0; 30715a647ebSDavid Brownell else 3082db87321SUwe Kleine-König desc->status |= IRQ_WAKEUP; 3092db87321SUwe Kleine-König } 31015a647ebSDavid Brownell } else { 31115a647ebSDavid Brownell if (desc->wake_depth == 0) { 3127a2c4770SArjan van de Ven WARN(1, "Unbalanced IRQ %d wake disable\n", irq); 3132db87321SUwe Kleine-König } else if (--desc->wake_depth == 0) { 3142db87321SUwe Kleine-König ret = set_irq_wake_real(irq, on); 3152db87321SUwe Kleine-König if (ret) 3162db87321SUwe Kleine-König desc->wake_depth = 1; 31715a647ebSDavid Brownell else 3182db87321SUwe Kleine-König desc->status &= ~IRQ_WAKEUP; 31915a647ebSDavid Brownell } 3202db87321SUwe Kleine-König } 3212db87321SUwe Kleine-König 322ba9a2331SThomas Gleixner spin_unlock_irqrestore(&desc->lock, flags); 323ba9a2331SThomas Gleixner return ret; 324ba9a2331SThomas Gleixner } 325ba9a2331SThomas Gleixner 
EXPORT_SYMBOL(set_irq_wake); 326ba9a2331SThomas Gleixner 3271da177e4SLinus Torvalds /* 3281da177e4SLinus Torvalds * Internal function that tells the architecture code whether a 3291da177e4SLinus Torvalds * particular irq has been exclusively allocated or is available 3301da177e4SLinus Torvalds * for driver use. 3311da177e4SLinus Torvalds */ 3321da177e4SLinus Torvalds int can_request_irq(unsigned int irq, unsigned long irqflags) 3331da177e4SLinus Torvalds { 334d3c60047SThomas Gleixner struct irq_desc *desc = irq_to_desc(irq); 3351da177e4SLinus Torvalds struct irqaction *action; 3361da177e4SLinus Torvalds 3377d94f7caSYinghai Lu if (!desc) 3387d94f7caSYinghai Lu return 0; 3397d94f7caSYinghai Lu 3407d94f7caSYinghai Lu if (desc->status & IRQ_NOREQUEST) 3411da177e4SLinus Torvalds return 0; 3421da177e4SLinus Torvalds 34308678b08SYinghai Lu action = desc->action; 3441da177e4SLinus Torvalds if (action) 3453cca53b0SThomas Gleixner if (irqflags & action->flags & IRQF_SHARED) 3461da177e4SLinus Torvalds action = NULL; 3471da177e4SLinus Torvalds 3481da177e4SLinus Torvalds return !action; 3491da177e4SLinus Torvalds } 3501da177e4SLinus Torvalds 3516a6de9efSThomas Gleixner void compat_irq_chip_set_default_handler(struct irq_desc *desc) 3526a6de9efSThomas Gleixner { 3536a6de9efSThomas Gleixner /* 3546a6de9efSThomas Gleixner * If the architecture still has not overriden 3556a6de9efSThomas Gleixner * the flow handler then zap the default. This 3566a6de9efSThomas Gleixner * should catch incorrect flow-type setting. 
3576a6de9efSThomas Gleixner */ 3586a6de9efSThomas Gleixner if (desc->handle_irq == &handle_bad_irq) 3596a6de9efSThomas Gleixner desc->handle_irq = NULL; 3606a6de9efSThomas Gleixner } 3616a6de9efSThomas Gleixner 3620c5d1eb7SDavid Brownell int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, 36382736f4dSUwe Kleine-König unsigned long flags) 36482736f4dSUwe Kleine-König { 36582736f4dSUwe Kleine-König int ret; 3660c5d1eb7SDavid Brownell struct irq_chip *chip = desc->chip; 36782736f4dSUwe Kleine-König 36882736f4dSUwe Kleine-König if (!chip || !chip->set_type) { 36982736f4dSUwe Kleine-König /* 37082736f4dSUwe Kleine-König * IRQF_TRIGGER_* but the PIC does not support multiple 37182736f4dSUwe Kleine-König * flow-types? 37282736f4dSUwe Kleine-König */ 3733ff68a6aSMark Nelson pr_debug("No set_type function for IRQ %d (%s)\n", irq, 37482736f4dSUwe Kleine-König chip ? (chip->name ? : "unknown") : "unknown"); 37582736f4dSUwe Kleine-König return 0; 37682736f4dSUwe Kleine-König } 37782736f4dSUwe Kleine-König 378f2b662daSDavid Brownell /* caller masked out all except trigger mode flags */ 379f2b662daSDavid Brownell ret = chip->set_type(irq, flags); 38082736f4dSUwe Kleine-König 38182736f4dSUwe Kleine-König if (ret) 382c69ad71bSDavid Brownell pr_err("setting trigger mode %d for irq %u failed (%pF)\n", 383f2b662daSDavid Brownell (int)flags, irq, chip->set_type); 3840c5d1eb7SDavid Brownell else { 385f2b662daSDavid Brownell if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 386f2b662daSDavid Brownell flags |= IRQ_LEVEL; 3870c5d1eb7SDavid Brownell /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */ 388f2b662daSDavid Brownell desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); 389f2b662daSDavid Brownell desc->status |= flags; 3900c5d1eb7SDavid Brownell } 39182736f4dSUwe Kleine-König 39282736f4dSUwe Kleine-König return ret; 39382736f4dSUwe Kleine-König } 39482736f4dSUwe Kleine-König 3951da177e4SLinus Torvalds /* 3961da177e4SLinus Torvalds * Internal function to 
register an irqaction - typically used to 3971da177e4SLinus Torvalds * allocate special interrupts that are part of the architecture. 3981da177e4SLinus Torvalds */ 399d3c60047SThomas Gleixner static int 400d3c60047SThomas Gleixner __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) 4011da177e4SLinus Torvalds { 402*f17c7545SIngo Molnar struct irqaction *old, **old_ptr; 4038b126b77SAndrew Morton const char *old_name = NULL; 4041da177e4SLinus Torvalds unsigned long flags; 4051da177e4SLinus Torvalds int shared = 0; 40682736f4dSUwe Kleine-König int ret; 4071da177e4SLinus Torvalds 4087d94f7caSYinghai Lu if (!desc) 409c2b5a251SMatthew Wilcox return -EINVAL; 410c2b5a251SMatthew Wilcox 411f1c2662cSIngo Molnar if (desc->chip == &no_irq_chip) 4121da177e4SLinus Torvalds return -ENOSYS; 4131da177e4SLinus Torvalds /* 4141da177e4SLinus Torvalds * Some drivers like serial.c use request_irq() heavily, 4151da177e4SLinus Torvalds * so we have to be careful not to interfere with a 4161da177e4SLinus Torvalds * running system. 4171da177e4SLinus Torvalds */ 4183cca53b0SThomas Gleixner if (new->flags & IRQF_SAMPLE_RANDOM) { 4191da177e4SLinus Torvalds /* 4201da177e4SLinus Torvalds * This function might sleep, we want to call it first, 4211da177e4SLinus Torvalds * outside of the atomic block. 4221da177e4SLinus Torvalds * Yes, this might clear the entropy pool if the wrong 4231da177e4SLinus Torvalds * driver is attempted to be loaded, without actually 4241da177e4SLinus Torvalds * installing a new handler, but is this really a problem, 4251da177e4SLinus Torvalds * only the sysadmin is able to do this. 
4261da177e4SLinus Torvalds */ 4271da177e4SLinus Torvalds rand_initialize_irq(irq); 4281da177e4SLinus Torvalds } 4291da177e4SLinus Torvalds 4301da177e4SLinus Torvalds /* 4311da177e4SLinus Torvalds * The following block of code has to be executed atomically 4321da177e4SLinus Torvalds */ 4331da177e4SLinus Torvalds spin_lock_irqsave(&desc->lock, flags); 434*f17c7545SIngo Molnar old_ptr = &desc->action; 435*f17c7545SIngo Molnar old = *old_ptr; 43606fcb0c6SIngo Molnar if (old) { 437e76de9f8SThomas Gleixner /* 438e76de9f8SThomas Gleixner * Can't share interrupts unless both agree to and are 439e76de9f8SThomas Gleixner * the same type (level, edge, polarity). So both flag 4403cca53b0SThomas Gleixner * fields must have IRQF_SHARED set and the bits which 441e76de9f8SThomas Gleixner * set the trigger type must match. 442e76de9f8SThomas Gleixner */ 4433cca53b0SThomas Gleixner if (!((old->flags & new->flags) & IRQF_SHARED) || 4448b126b77SAndrew Morton ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) { 4458b126b77SAndrew Morton old_name = old->name; 446f5163427SDimitri Sivanich goto mismatch; 4478b126b77SAndrew Morton } 448f5163427SDimitri Sivanich 449284c6680SThomas Gleixner #if defined(CONFIG_IRQ_PER_CPU) 450f5163427SDimitri Sivanich /* All handlers must agree on per-cpuness */ 4513cca53b0SThomas Gleixner if ((old->flags & IRQF_PERCPU) != 4523cca53b0SThomas Gleixner (new->flags & IRQF_PERCPU)) 453f5163427SDimitri Sivanich goto mismatch; 454f5163427SDimitri Sivanich #endif 4551da177e4SLinus Torvalds 4561da177e4SLinus Torvalds /* add new interrupt at end of irq queue */ 4571da177e4SLinus Torvalds do { 458*f17c7545SIngo Molnar old_ptr = &old->next; 459*f17c7545SIngo Molnar old = *old_ptr; 4601da177e4SLinus Torvalds } while (old); 4611da177e4SLinus Torvalds shared = 1; 4621da177e4SLinus Torvalds } 4631da177e4SLinus Torvalds 4641da177e4SLinus Torvalds if (!shared) { 4656a6de9efSThomas Gleixner irq_chip_set_defaults(desc->chip); 466e76de9f8SThomas Gleixner 46782736f4dSUwe 
Kleine-König /* Setup the type (level, edge polarity) if configured: */ 46882736f4dSUwe Kleine-König if (new->flags & IRQF_TRIGGER_MASK) { 469f2b662daSDavid Brownell ret = __irq_set_trigger(desc, irq, 470f2b662daSDavid Brownell new->flags & IRQF_TRIGGER_MASK); 47182736f4dSUwe Kleine-König 47282736f4dSUwe Kleine-König if (ret) { 47382736f4dSUwe Kleine-König spin_unlock_irqrestore(&desc->lock, flags); 47482736f4dSUwe Kleine-König return ret; 47582736f4dSUwe Kleine-König } 47682736f4dSUwe Kleine-König } else 47782736f4dSUwe Kleine-König compat_irq_chip_set_default_handler(desc); 478f75d222bSAhmed S. Darwish #if defined(CONFIG_IRQ_PER_CPU) 479f75d222bSAhmed S. Darwish if (new->flags & IRQF_PERCPU) 480f75d222bSAhmed S. Darwish desc->status |= IRQ_PER_CPU; 481f75d222bSAhmed S. Darwish #endif 482f75d222bSAhmed S. Darwish 48394d39e1fSThomas Gleixner desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | 4841adb0850SThomas Gleixner IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED); 48594d39e1fSThomas Gleixner 48694d39e1fSThomas Gleixner if (!(desc->status & IRQ_NOAUTOEN)) { 4871da177e4SLinus Torvalds desc->depth = 0; 48894d39e1fSThomas Gleixner desc->status &= ~IRQ_DISABLED; 489d1bef4edSIngo Molnar desc->chip->startup(irq); 490e76de9f8SThomas Gleixner } else 491e76de9f8SThomas Gleixner /* Undo nested disables: */ 492e76de9f8SThomas Gleixner desc->depth = 1; 49318404756SMax Krasnyansky 494612e3684SThomas Gleixner /* Exclude IRQ from balancing if requested */ 495612e3684SThomas Gleixner if (new->flags & IRQF_NOBALANCING) 496612e3684SThomas Gleixner desc->status |= IRQ_NO_BALANCING; 497612e3684SThomas Gleixner 49818404756SMax Krasnyansky /* Set default affinity mask once everything is setup */ 499f6d87f4bSThomas Gleixner do_irq_select_affinity(irq, desc); 5000c5d1eb7SDavid Brownell 5010c5d1eb7SDavid Brownell } else if ((new->flags & IRQF_TRIGGER_MASK) 5020c5d1eb7SDavid Brownell && (new->flags & IRQF_TRIGGER_MASK) 5030c5d1eb7SDavid Brownell != (desc->status & IRQ_TYPE_SENSE_MASK)) { 
5040c5d1eb7SDavid Brownell /* hope the handler works with the actual trigger mode... */ 5050c5d1eb7SDavid Brownell pr_warning("IRQ %d uses trigger mode %d; requested %d\n", 5060c5d1eb7SDavid Brownell irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK), 5070c5d1eb7SDavid Brownell (int)(new->flags & IRQF_TRIGGER_MASK)); 50894d39e1fSThomas Gleixner } 50982736f4dSUwe Kleine-König 510*f17c7545SIngo Molnar *old_ptr = new; 51182736f4dSUwe Kleine-König 5128528b0f1SLinus Torvalds /* Reset broken irq detection when installing new handler */ 5138528b0f1SLinus Torvalds desc->irq_count = 0; 5148528b0f1SLinus Torvalds desc->irqs_unhandled = 0; 5151adb0850SThomas Gleixner 5161adb0850SThomas Gleixner /* 5171adb0850SThomas Gleixner * Check whether we disabled the irq via the spurious handler 5181adb0850SThomas Gleixner * before. Reenable it and give it another chance. 5191adb0850SThomas Gleixner */ 5201adb0850SThomas Gleixner if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) { 5211adb0850SThomas Gleixner desc->status &= ~IRQ_SPURIOUS_DISABLED; 5221adb0850SThomas Gleixner __enable_irq(desc, irq); 5231adb0850SThomas Gleixner } 5241adb0850SThomas Gleixner 5251da177e4SLinus Torvalds spin_unlock_irqrestore(&desc->lock, flags); 5261da177e4SLinus Torvalds 5271da177e4SLinus Torvalds new->irq = irq; 5282c6927a3SYinghai Lu register_irq_proc(irq, desc); 5291da177e4SLinus Torvalds new->dir = NULL; 5301da177e4SLinus Torvalds register_handler_proc(irq, new); 5311da177e4SLinus Torvalds 5321da177e4SLinus Torvalds return 0; 533f5163427SDimitri Sivanich 534f5163427SDimitri Sivanich mismatch: 5353f050447SAlan Cox #ifdef CONFIG_DEBUG_SHIRQ 5363cca53b0SThomas Gleixner if (!(new->flags & IRQF_PROBE_SHARED)) { 537e8c4b9d0SBjorn Helgaas printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq); 5388b126b77SAndrew Morton if (old_name) 5398b126b77SAndrew Morton printk(KERN_ERR "current handler: %s\n", old_name); 540f5163427SDimitri Sivanich dump_stack(); 54113e87ec6SAndrew Morton } 5423f050447SAlan Cox 
#endif
	spin_unlock_irqrestore(&desc->lock, flags);
	/* Sharing not allowed by the existing action(s): refuse the new one. */
	return -EBUSY;
}

/**
 *	setup_irq - setup an interrupt
 *	@irq:	Interrupt line to setup
 *	@act:	irqaction for the interrupt
 *
 *	Used to statically setup interrupts in the early boot process.
 *
 *	Returns 0 on success or a negative errno from __setup_irq()
 *	(e.g. -EBUSY on a sharing mismatch).
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __setup_irq(irq, desc, act);
}

/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	/* synchronize_irq() below can sleep; freeing from IRQ context is a bug. */
	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return;

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			spin_unlock_irqrestore(&desc->lock, flags);

			return;
		}

		if (action->dev_id == dev_id)
			break;
		/* Keep a pointer-to-pointer so unlinking needs no prev node. */
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	if (desc->chip->release)
		desc->chip->release(irq, dev_id);
#endif

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		desc->status |= IRQ_DISABLED;
		if (desc->chip->shutdown)
			desc->chip->shutdown(irq);
		else
			desc->chip->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);

	/* Must be outside desc->lock: takes proc/sleeping locks. */
	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif
	/* action was unlinked above; safe to free after synchronize_irq(). */
	kfree(action);
}
EXPORT_SYMBOL(free_irq);

/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
6721da177e4SLinus Torvalds * 6731da177e4SLinus Torvalds * Flags: 6741da177e4SLinus Torvalds * 6753cca53b0SThomas Gleixner * IRQF_SHARED Interrupt is shared 6763cca53b0SThomas Gleixner * IRQF_DISABLED Disable local interrupts while processing 6773cca53b0SThomas Gleixner * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy 6780c5d1eb7SDavid Brownell * IRQF_TRIGGER_* Specify active edge(s) or level 6791da177e4SLinus Torvalds * 6801da177e4SLinus Torvalds */ 681da482792SDavid Howells int request_irq(unsigned int irq, irq_handler_t handler, 6821da177e4SLinus Torvalds unsigned long irqflags, const char *devname, void *dev_id) 6831da177e4SLinus Torvalds { 6841da177e4SLinus Torvalds struct irqaction *action; 68508678b08SYinghai Lu struct irq_desc *desc; 686d3c60047SThomas Gleixner int retval; 6871da177e4SLinus Torvalds 688470c6623SDavid Brownell /* 689470c6623SDavid Brownell * handle_IRQ_event() always ignores IRQF_DISABLED except for 690470c6623SDavid Brownell * the _first_ irqaction (sigh). That can cause oopsing, but 691470c6623SDavid Brownell * the behavior is classified as "will not fix" so we need to 692470c6623SDavid Brownell * start nudging drivers away from using that idiom. 
693470c6623SDavid Brownell */ 694327ec569SIngo Molnar if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) == 695327ec569SIngo Molnar (IRQF_SHARED|IRQF_DISABLED)) { 696327ec569SIngo Molnar pr_warning( 697327ec569SIngo Molnar "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n", 698470c6623SDavid Brownell irq, devname); 699327ec569SIngo Molnar } 700470c6623SDavid Brownell 701fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP 702fbb9ce95SIngo Molnar /* 703fbb9ce95SIngo Molnar * Lockdep wants atomic interrupt handlers: 704fbb9ce95SIngo Molnar */ 70538515e90SThomas Gleixner irqflags |= IRQF_DISABLED; 706fbb9ce95SIngo Molnar #endif 7071da177e4SLinus Torvalds /* 7081da177e4SLinus Torvalds * Sanity-check: shared interrupts must pass in a real dev-ID, 7091da177e4SLinus Torvalds * otherwise we'll have trouble later trying to figure out 7101da177e4SLinus Torvalds * which interrupt is which (messes up the interrupt freeing 7111da177e4SLinus Torvalds * logic etc). 7121da177e4SLinus Torvalds */ 7133cca53b0SThomas Gleixner if ((irqflags & IRQF_SHARED) && !dev_id) 7141da177e4SLinus Torvalds return -EINVAL; 7157d94f7caSYinghai Lu 716cb5bc832SYinghai Lu desc = irq_to_desc(irq); 7177d94f7caSYinghai Lu if (!desc) 7181da177e4SLinus Torvalds return -EINVAL; 7197d94f7caSYinghai Lu 72008678b08SYinghai Lu if (desc->status & IRQ_NOREQUEST) 7216550c775SThomas Gleixner return -EINVAL; 7221da177e4SLinus Torvalds if (!handler) 7231da177e4SLinus Torvalds return -EINVAL; 7241da177e4SLinus Torvalds 7250e43785cSJohannes Weiner action = kmalloc(sizeof(struct irqaction), GFP_KERNEL); 7261da177e4SLinus Torvalds if (!action) 7271da177e4SLinus Torvalds return -ENOMEM; 7281da177e4SLinus Torvalds 7291da177e4SLinus Torvalds action->handler = handler; 7301da177e4SLinus Torvalds action->flags = irqflags; 7311da177e4SLinus Torvalds cpus_clear(action->mask); 7321da177e4SLinus Torvalds action->name = devname; 7331da177e4SLinus Torvalds action->next = NULL; 7341da177e4SLinus Torvalds action->dev_id = dev_id; 
7351da177e4SLinus Torvalds 736d3c60047SThomas Gleixner retval = __setup_irq(irq, desc, action); 737377bf1e4SAnton Vorontsov if (retval) 738377bf1e4SAnton Vorontsov kfree(action); 739377bf1e4SAnton Vorontsov 740a304e1b8SDavid Woodhouse #ifdef CONFIG_DEBUG_SHIRQ 741a304e1b8SDavid Woodhouse if (irqflags & IRQF_SHARED) { 742a304e1b8SDavid Woodhouse /* 743a304e1b8SDavid Woodhouse * It's a shared IRQ -- the driver ought to be prepared for it 744a304e1b8SDavid Woodhouse * to happen immediately, so let's make sure.... 745377bf1e4SAnton Vorontsov * We disable the irq to make sure that a 'real' IRQ doesn't 746377bf1e4SAnton Vorontsov * run in parallel with our fake. 747a304e1b8SDavid Woodhouse */ 748a304e1b8SDavid Woodhouse unsigned long flags; 749a304e1b8SDavid Woodhouse 750377bf1e4SAnton Vorontsov disable_irq(irq); 751a304e1b8SDavid Woodhouse local_irq_save(flags); 752377bf1e4SAnton Vorontsov 753a304e1b8SDavid Woodhouse handler(irq, dev_id); 754377bf1e4SAnton Vorontsov 755a304e1b8SDavid Woodhouse local_irq_restore(flags); 756377bf1e4SAnton Vorontsov enable_irq(irq); 757a304e1b8SDavid Woodhouse } 758a304e1b8SDavid Woodhouse #endif 7591da177e4SLinus Torvalds return retval; 7601da177e4SLinus Torvalds } 7611da177e4SLinus Torvalds EXPORT_SYMBOL(request_irq); 762