/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

#include "internals.h"

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
cpumask_var_t irq_default_affinity;

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int status;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (desc->status & IRQ_INPROGRESS)
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		spin_lock_irqsave(&desc->lock, flags);
		status = desc->status;
		spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (status & IRQ_INPROGRESS);
}
EXPORT_SYMBOL(synchronize_irq);
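
/*
 * Usage sketch: a driver tearing down data its handler touches typically
 * masks the interrupt at the device first and then waits for handlers
 * already running on other CPUs.  The my_card structure, register offset
 * and helpers below are hypothetical, for illustration only.
 */
#if 0
static void my_card_stop(struct my_card *card)
{
	/* tell the hardware to stop raising the interrupt */
	writel(0, card->mmio + MY_CARD_IRQ_ENABLE);

	/* wait for any handler instance still running elsewhere */
	synchronize_irq(card->irq);

	/* nothing else touches the ring now, so it is safe to free */
	kfree(card->rx_ring);
}
#endif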

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
	    !desc->chip->set_affinity)
		return 0;

	return 1;
}

/**
 *	irq_set_affinity - Set the irq affinity of a given irq
 *	@irq:		Interrupt to set affinity
 *	@cpumask:	cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc->chip->set_affinity)
		return -EINVAL;

	spin_lock_irqsave(&desc->lock, flags);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
		cpumask_copy(&desc->affinity, cpumask);
		desc->chip->set_affinity(irq, cpumask);
	} else {
		desc->status |= IRQ_MOVE_PENDING;
		cpumask_copy(&desc->pending_mask, cpumask);
	}
#else
	cpumask_copy(&desc->affinity, cpumask);
	desc->chip->set_affinity(irq, cpumask);
#endif
	desc->status |= IRQ_AFFINITY_SET;
	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
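
/*
 * Usage sketch: affinity is normally steered from userspace via
 * /proc/irq/<n>/smp_affinity, but code that wants to pin a line to one
 * CPU can use the two interfaces above directly.  The irq number and
 * target CPU here are made up; cpumask_of() supplies the single-CPU mask.
 */
#if 0
static int pin_irq_to_cpu(unsigned int irq, int cpu)
{
	if (!irq_can_set_affinity(irq))
		return -ENOSYS;

	return irq_set_affinity(irq, cpumask_of(cpu));
}
#endif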

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
{
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
		if (cpumask_any_and(&desc->affinity, cpu_online_mask)
		    < nr_cpu_ids)
			goto set_affinity;
		else
			desc->status &= ~IRQ_AFFINITY_SET;
	}

	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
	desc->chip->set_affinity(irq, &desc->affinity);

	return 0;
}
#else
static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&desc->lock, flags);
	ret = do_irq_select_affinity(irq, desc);
	spin_unlock_irqrestore(&desc->lock, flags);

	return ret;
}

#else
static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
{
	return 0;
}
#endif

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->chip->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

static void __enable_irq(struct irq_desc *desc, unsigned int irq)
{
	switch (desc->depth) {
	case 0:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;

		/* Prevent probing on this irq: */
		desc->status = status | IRQ_NOPROBE;
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	spin_lock_irqsave(&desc->lock, flags);
	__enable_irq(desc, irq);
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(enable_irq);
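
/*
 * Usage sketch: disable_irq()/enable_irq() nest, so a code path that must
 * not race with the interrupt handler can simply bracket itself with the
 * pair.  Because disable_irq() waits for running handlers, it must not be
 * called while holding a lock the handler also takes.  The my_dev
 * structure and helper below are hypothetical.
 */
#if 0
static void my_dev_reload_config(struct my_dev *dev)
{
	disable_irq(dev->irq);		/* waits for a running handler */
	my_dev_write_config(dev);	/* the handler cannot run here */
	enable_irq(dev->irq);		/* undoes exactly one disable */
}
#endif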

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (desc->chip->set_wake)
		ret = desc->chip->set_wake(irq, on);

	return ret;
}

/**
 *	set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int set_irq_wake(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret = 0;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	spin_lock_irqsave(&desc->lock, flags);
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				desc->status |= IRQ_WAKEUP;
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				desc->status &= ~IRQ_WAKEUP;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_irq_wake);
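
/*
 * Usage sketch: a driver that wants its interrupt to wake the system marks
 * the line as wakeup-capable on suspend and balances the call on resume;
 * many drivers go through the enable_irq_wake()/disable_irq_wake()
 * wrappers instead of calling set_irq_wake() directly.  The platform
 * driver callbacks and my_dev structure below are hypothetical.
 */
#if 0
static int my_dev_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct my_dev *dev = platform_get_drvdata(pdev);

	if (device_may_wakeup(&pdev->dev))
		set_irq_wake(dev->irq, 1);
	return 0;
}

static int my_dev_resume(struct platform_device *pdev)
{
	struct my_dev *dev = platform_get_drvdata(pdev);

	if (device_may_wakeup(&pdev->dev))
		set_irq_wake(dev->irq, 0);
	return 0;
}
#endif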

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;

	if (!desc)
		return 0;

	if (desc->status & IRQ_NOREQUEST)
		return 0;

	action = desc->action;
	if (action)
		if (irqflags & action->flags & IRQF_SHARED)
			action = NULL;

	return !action;
}

void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
	/*
	 * If the architecture still has not overridden
	 * the flow handler then zap the default. This
	 * should catch incorrect flow-type setting.
	 */
	if (desc->handle_irq == &handle_bad_irq)
		desc->handle_irq = NULL;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		unsigned long flags)
{
	int ret;
	struct irq_chip *chip = desc->chip;

	if (!chip || !chip->set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
				chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->set_type(irq, flags);

	if (ret)
		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
				(int)flags, irq, chip->set_type);
	else {
		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
			flags |= IRQ_LEVEL;
		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
		desc->status |= flags;
	}

	return ret;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	const char *old_name = NULL;
	unsigned long flags;
	int shared = 0;
	int ret;

	if (!desc)
		return -EINVAL;

	if (desc->chip == &no_irq_chip)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
			old_name = old->name;
			goto mismatch;
		}

#if defined(CONFIG_IRQ_PER_CPU)
		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;
#endif

		/* add new interrupt at end of irq queue */
		do {
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	if (!shared) {
		irq_chip_set_defaults(desc->chip);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret) {
				spin_unlock_irqrestore(&desc->lock, flags);
				return ret;
			}
		} else
			compat_irq_chip_set_default_handler(desc);
#if defined(CONFIG_IRQ_PER_CPU)
		if (new->flags & IRQF_PERCPU)
			desc->status |= IRQ_PER_CPU;
#endif

		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);

		if (!(desc->status & IRQ_NOAUTOEN)) {
			desc->depth = 0;
			desc->status &= ~IRQ_DISABLED;
			desc->chip->startup(irq);
		} else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING)
			desc->status |= IRQ_NO_BALANCING;

		/* Set default affinity mask once everything is setup */
		do_irq_select_affinity(irq, desc);

	} else if ((new->flags & IRQF_TRIGGER_MASK)
			&& (new->flags & IRQF_TRIGGER_MASK)
				!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
		/* hope the handler works with the actual trigger mode... */
		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
				irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
				(int)(new->flags & IRQF_TRIGGER_MASK));
	}

	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
		desc->status &= ~IRQ_SPURIOUS_DISABLED;
		__enable_irq(desc, irq);
	}

	spin_unlock_irqrestore(&desc->lock, flags);

	new->irq = irq;
	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);

	return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
		dump_stack();
	}
#endif
	spin_unlock_irqrestore(&desc->lock, flags);
	return -EBUSY;
}

/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 *	Used to statically set up interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __setup_irq(irq, desc, act);
}
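
/*
 * Usage sketch: setup_irq() exists for callers that cannot use
 * request_irq() because nothing can be allocated yet, typically an
 * architecture timer tick registered with a static irqaction early in
 * boot.  The handler, irq number and names below are hypothetical.
 */
#if 0
static irqreturn_t my_timer_interrupt(int irq, void *dev_id)
{
	/* ... acknowledge the tick and run the periodic work ... */
	return IRQ_HANDLED;
}

static struct irqaction my_timer_irqaction = {
	.handler	= my_timer_interrupt,
	.flags		= IRQF_DISABLED | IRQF_TIMER,
	.name		= "my-timer",
};

void __init my_timer_init(void)
{
	setup_irq(MY_TIMER_IRQ, &my_timer_irqaction);
}
#endif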

/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Used to remove interrupts statically set up by the early boot process.
 */
struct irqaction *remove_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	if (desc->chip->release)
		desc->chip->release(irq, dev_id);
#endif

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		desc->status |= IRQ_DISABLED;
		if (desc->chip->shutdown)
			desc->chip->shutdown(irq);
		else
			desc->chip->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif
	return action;
}

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	kfree(remove_irq(irq, dev_id));
}
EXPORT_SYMBOL(free_irq);

/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irqflags: Interrupt type flags
 *	@devname: An ASCII name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non-NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_DISABLED		Disable local interrupts while processing
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_irq(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
	 * the _first_ irqaction (sigh).  That can cause oopsing, but
	 * the behavior is classified as "will not fix" so we need to
	 * start nudging drivers away from using that idiom.
	 */
	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
					(IRQF_SHARED|IRQF_DISABLED)) {
		pr_warning(
		  "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
			irq, devname);
	}

#ifdef CONFIG_LOCKDEP
	/*
	 * Lockdep wants atomic interrupt handlers:
	 */
	irqflags |= IRQF_DISABLED;
#endif
	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (desc->status & IRQ_NOREQUEST)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ
	if (irqflags & IRQF_SHARED) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_irq);
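
/*
 * Usage sketch: the usual request_irq()/free_irq() life cycle in a driver.
 * The hardware is quiesced before the line is requested, the dev_id cookie
 * is the per-device structure so one handler can serve several cards, and
 * the same cookie is passed back to free_irq().  my_card, its registers
 * and the helper functions are hypothetical.
 */
#if 0
static irqreturn_t my_card_interrupt(int irq, void *dev_id)
{
	struct my_card *card = dev_id;

	if (!(readl(card->mmio + MY_CARD_IRQ_STATUS) & MY_CARD_IRQ_PENDING))
		return IRQ_NONE;	/* not ours (shared line) */

	my_card_handle_events(card);
	return IRQ_HANDLED;
}

static int my_card_probe(struct my_card *card)
{
	int err;

	my_card_hw_init(card);		/* quiet the device first */

	err = request_irq(card->irq, my_card_interrupt,
			  IRQF_SHARED, "my_card", card);
	if (err)
		return err;

	my_card_enable_irqs(card);
	return 0;
}

static void my_card_remove(struct my_card *card)
{
	my_card_disable_irqs(card);	/* shared irq: mask on the card first */
	free_irq(card->irq, card);	/* waits for running handlers */
}
#endif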