/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

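/*
 * Editor's note (illustration, not part of the original file):
 * force_irqthreads above is a boot-time switch. On a kernel built with
 * CONFIG_IRQ_FORCED_THREADING, booting with
 *
 *	threadirqs
 *
 * on the kernel command line runs hardirq handlers in threads, except for
 * handlers that are explicitly excluded (e.g. IRQF_NO_THREAD).
 */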
static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this
 *	function while holding a resource the IRQ handler may need you
 *	will deadlock. It does not take associated threaded handlers
 *	into account.
 *
 *	Do not use this for shutdown scenarios where you must be sure
 *	that all parts (hardirq and threaded handler) have completed.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc)
		__synchronize_hardirq(desc);
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
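/*
 * Usage sketch (added for illustration; the device structure, its irq field
 * and the stop routine are hypothetical, not part of this file): a typical
 * teardown path quiesces the hardware first, then waits for handlers:
 *
 *	my_device_stop(dev);		// stop the device raising interrupts
 *	synchronize_irq(dev->irq);	// wait for running handlers to finish
 *	free_irq(dev->irq, dev);	// now it is safe to remove the handler
 */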

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(data->affinity, mask);
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
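/*
 * Usage sketch (illustration only; "vec", "nvec" and the per-queue layout
 * are hypothetical): multi-queue drivers usually hint one CPU per vector so
 * userspace balancers can spread the load:
 *
 *	for (i = 0; i < nvec; i++)
 *		irq_set_affinity_hint(vec[i].irq,
 *				      cpumask_of(i % num_online_cpus()));
 *
 * Drivers normally clear the hint with irq_set_affinity_hint(irq, NULL)
 * before freeing the interrupt.
 */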

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
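/*
 * Usage sketch (illustration only; my_notify/my_release and the embedding
 * structure are hypothetical): a driver embeds struct irq_affinity_notify,
 * fills in the two callbacks and registers it; passing NULL unregisters:
 *
 *	dev->affinity_notify.notify  = my_notify;   // runs from a workqueue
 *	dev->affinity_notify.release = my_release;  // kref release, process ctx
 *	irq_set_affinity_notifier(dev->irq, &dev->affinity_notify);
 *	...
 *	irq_set_affinity_notifier(dev->irq, NULL);  // before free_irq()
 */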

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = desc->irq_data.node;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc, irq);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void __enable_irq(struct irq_desc *desc, unsigned int irq)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc, irq);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
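/*
 * Usage sketch (illustration only; the device structure and helper are
 * hypothetical): because disables nest, paired calls can bracket a section
 * that must not race with the handler, e.g. while reprogramming hardware:
 *
 *	disable_irq(dev->irq);		// waits for a running handler to finish
 *	reconfigure_hardware(dev);	// hypothetical helper
 *	enable_irq(dev->irq);		// re-enabled once the last disable is undone
 */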

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
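/*
 * Usage sketch (illustration only; my_suspend/my_resume/my_irq are
 * hypothetical): a wakeup-capable driver typically toggles wake mode from
 * its suspend/resume callbacks, keeping the enable/disable calls balanced:
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		if (device_may_wakeup(d))
 *			irq_set_irq_wake(my_irq, 1);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *d)
 *	{
 *		if (device_may_wakeup(d))
 *			irq_set_irq_wake(my_irq, 0);
 *		return 0;
 *	}
 */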

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		      unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT))
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (desc->irq_data.affinity)
		cpumask_copy(mask, desc->irq_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
		struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);


	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
			struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					&action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_HANDLED)
			atomic_inc(&desc->threads_handled);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out current's thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 *	irq_wake_thread - wake the irq thread for the action identified by dev_id
 *	@irq:		Interrupt line
 *	@dev_id:	Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for (action = desc->action; action; action = action->next) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
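/*
 * Usage sketch (illustration only; "dev" is hypothetical): a driver can kick
 * the threaded handler registered for a given dev_id from a context other
 * than the primary handler's IRQ_WAKE_THREAD return, e.g. a polling or
 * watchdog path:
 *
 *	irq_wake_thread(dev->irq, dev);
 *
 * This only wakes an action that actually has a thread and must not be used
 * on per-CPU interrupts (see the WARN_ON above).
 */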

static void irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return;

	new->flags |= IRQF_ONESHOT;

	if (!new->thread_fn) {
		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
		new->thread_fn = new->handler;
		new->handler = irq_default_primary_handler;
	}
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc))
			irq_setup_forced_threading(new);
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;
		static const struct sched_param param = {
			.sched_priority = MAX_USER_RT_PRIO/2,
		};

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t)) {
			ret = PTR_ERR(t);
			goto out_mput;
		}

		sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
		/*
		 * Tell the thread to set its affinity. This is
		 * important for shared interrupt handlers as we do
		 * not invoke setup_affinity() for the secondary
		 * handlers as everything is already set up. Even for
		 * interrupts marked with IRQF_NOBALANCING this is
		 * correct as we want the thread to move to the cpu(s)
		 * on which the requesting code placed the interrupt.
		 */
		set_bit(IRQTF_AFFINITY, &new->thread_flags);
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_mask;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->threads_oneshot to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1 << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
11351c6c6952SThomas Gleixner * 11361c6c6952SThomas Gleixner * While this works for edge type interrupts, we play 11371c6c6952SThomas Gleixner * it safe and reject unconditionally because we can't 11381c6c6952SThomas Gleixner * say for sure which type this interrupt really 11391c6c6952SThomas Gleixner * has. The type flags are unreliable as the 11401c6c6952SThomas Gleixner * underlying chip implementation can override them. 11411c6c6952SThomas Gleixner */ 114297fd75b7SAndrew Morton pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n", 11431c6c6952SThomas Gleixner irq); 11441c6c6952SThomas Gleixner ret = -EINVAL; 11451c6c6952SThomas Gleixner goto out_mask; 114652abb700SThomas Gleixner } 1147b5faba21SThomas Gleixner 11481da177e4SLinus Torvalds if (!shared) { 1149c1bacbaeSThomas Gleixner ret = irq_request_resources(desc); 1150c1bacbaeSThomas Gleixner if (ret) { 1151c1bacbaeSThomas Gleixner pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", 1152c1bacbaeSThomas Gleixner new->name, irq, desc->irq_data.chip->name); 1153c1bacbaeSThomas Gleixner goto out_mask; 1154c1bacbaeSThomas Gleixner } 1155c1bacbaeSThomas Gleixner 11563aa551c9SThomas Gleixner init_waitqueue_head(&desc->wait_for_threads); 11573aa551c9SThomas Gleixner 115882736f4dSUwe Kleine-König /* Setup the type (level, edge polarity) if configured: */ 115982736f4dSUwe Kleine-König if (new->flags & IRQF_TRIGGER_MASK) { 1160f2b662daSDavid Brownell ret = __irq_set_trigger(desc, irq, 1161f2b662daSDavid Brownell new->flags & IRQF_TRIGGER_MASK); 116282736f4dSUwe Kleine-König 11633aa551c9SThomas Gleixner if (ret) 11643b8249e7SThomas Gleixner goto out_mask; 1165091738a2SThomas Gleixner } 1166f75d222bSAhmed S. Darwish 1167009b4c3bSThomas Gleixner desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ 116832f4125eSThomas Gleixner IRQS_ONESHOT | IRQS_WAITING); 116932f4125eSThomas Gleixner irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); 117094d39e1fSThomas Gleixner 1171a005677bSThomas Gleixner if (new->flags & IRQF_PERCPU) { 1172a005677bSThomas Gleixner irqd_set(&desc->irq_data, IRQD_PER_CPU); 1173a005677bSThomas Gleixner irq_settings_set_per_cpu(desc); 1174a005677bSThomas Gleixner } 11756a58fb3bSThomas Gleixner 1176b25c340cSThomas Gleixner if (new->flags & IRQF_ONESHOT) 11773d67baecSThomas Gleixner desc->istate |= IRQS_ONESHOT; 1178b25c340cSThomas Gleixner 11791ccb4e61SThomas Gleixner if (irq_settings_can_autoenable(desc)) 1180b4bc724eSThomas Gleixner irq_startup(desc, true); 118146999238SThomas Gleixner else 1182e76de9f8SThomas Gleixner /* Undo nested disables: */ 1183e76de9f8SThomas Gleixner desc->depth = 1; 118418404756SMax Krasnyansky 1185612e3684SThomas Gleixner /* Exclude IRQ from balancing if requested */ 1186a005677bSThomas Gleixner if (new->flags & IRQF_NOBALANCING) { 1187a005677bSThomas Gleixner irq_settings_set_no_balancing(desc); 1188a005677bSThomas Gleixner irqd_set(&desc->irq_data, IRQD_NO_BALANCING); 1189a005677bSThomas Gleixner } 1190612e3684SThomas Gleixner 119118404756SMax Krasnyansky /* Set default affinity mask once everything is setup */ 11923b8249e7SThomas Gleixner setup_affinity(irq, desc, mask); 11930c5d1eb7SDavid Brownell 1194876dbd4cSThomas Gleixner } else if (new->flags & IRQF_TRIGGER_MASK) { 1195876dbd4cSThomas Gleixner unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; 1196876dbd4cSThomas Gleixner unsigned int omsk = irq_settings_get_trigger_mask(desc); 1197876dbd4cSThomas Gleixner 1198876dbd4cSThomas Gleixner if (nmsk != omsk) 1199876dbd4cSThomas Gleixner /* hope the 
handler works with current trigger mode */ 120097fd75b7SAndrew Morton pr_warning("irq %d uses trigger mode %u; requested %u\n", 1201876dbd4cSThomas Gleixner irq, omsk, nmsk); 120294d39e1fSThomas Gleixner } 120382736f4dSUwe Kleine-König 120469ab8494SThomas Gleixner new->irq = irq; 1205f17c7545SIngo Molnar *old_ptr = new; 120682736f4dSUwe Kleine-König 1207cab303beSThomas Gleixner irq_pm_install_action(desc, new); 1208cab303beSThomas Gleixner 12098528b0f1SLinus Torvalds /* Reset broken irq detection when installing new handler */ 12108528b0f1SLinus Torvalds desc->irq_count = 0; 12118528b0f1SLinus Torvalds desc->irqs_unhandled = 0; 12121adb0850SThomas Gleixner 12131adb0850SThomas Gleixner /* 12141adb0850SThomas Gleixner * Check whether we disabled the irq via the spurious handler 12151adb0850SThomas Gleixner * before. Reenable it and give it another chance. 12161adb0850SThomas Gleixner */ 12177acdd53eSThomas Gleixner if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { 12187acdd53eSThomas Gleixner desc->istate &= ~IRQS_SPURIOUS_DISABLED; 12198df2e02cSThomas Gleixner __enable_irq(desc, irq); 12201adb0850SThomas Gleixner } 12211adb0850SThomas Gleixner 1222239007b8SThomas Gleixner raw_spin_unlock_irqrestore(&desc->lock, flags); 12231da177e4SLinus Torvalds 122469ab8494SThomas Gleixner /* 122569ab8494SThomas Gleixner * Strictly no need to wake it up, but hung_task complains 122669ab8494SThomas Gleixner * when no hard interrupt wakes the thread up. 122769ab8494SThomas Gleixner */ 122869ab8494SThomas Gleixner if (new->thread) 122969ab8494SThomas Gleixner wake_up_process(new->thread); 123069ab8494SThomas Gleixner 12312c6927a3SYinghai Lu register_irq_proc(irq, desc); 12321da177e4SLinus Torvalds new->dir = NULL; 12331da177e4SLinus Torvalds register_handler_proc(irq, new); 12344f5058c3SXiaotian Feng free_cpumask_var(mask); 12351da177e4SLinus Torvalds 12361da177e4SLinus Torvalds return 0; 1237f5163427SDimitri Sivanich 1238f5163427SDimitri Sivanich mismatch: 12393cca53b0SThomas Gleixner if (!(new->flags & IRQF_PROBE_SHARED)) { 124097fd75b7SAndrew Morton pr_err("Flags mismatch irq %d. %08x (%s) vs.
%08x (%s)\n", 1241f5d89470SThomas Gleixner irq, new->flags, new->name, old->flags, old->name); 1242f5d89470SThomas Gleixner #ifdef CONFIG_DEBUG_SHIRQ 1243f5163427SDimitri Sivanich dump_stack(); 12443f050447SAlan Cox #endif 1245f5d89470SThomas Gleixner } 12463aa551c9SThomas Gleixner ret = -EBUSY; 12473aa551c9SThomas Gleixner 12483b8249e7SThomas Gleixner out_mask: 12491c389795SDan Carpenter raw_spin_unlock_irqrestore(&desc->lock, flags); 12503b8249e7SThomas Gleixner free_cpumask_var(mask); 12513b8249e7SThomas Gleixner 12523aa551c9SThomas Gleixner out_thread: 12533aa551c9SThomas Gleixner if (new->thread) { 12543aa551c9SThomas Gleixner struct task_struct *t = new->thread; 12553aa551c9SThomas Gleixner 12563aa551c9SThomas Gleixner new->thread = NULL; 12573aa551c9SThomas Gleixner kthread_stop(t); 12583aa551c9SThomas Gleixner put_task_struct(t); 12593aa551c9SThomas Gleixner } 1260b6873807SSebastian Andrzej Siewior out_mput: 1261b6873807SSebastian Andrzej Siewior module_put(desc->owner); 12623aa551c9SThomas Gleixner return ret; 12631da177e4SLinus Torvalds } 12641da177e4SLinus Torvalds 12651da177e4SLinus Torvalds /** 1266d3c60047SThomas Gleixner * setup_irq - setup an interrupt 1267d3c60047SThomas Gleixner * @irq: Interrupt line to setup 1268d3c60047SThomas Gleixner * @act: irqaction for the interrupt 1269d3c60047SThomas Gleixner * 1270d3c60047SThomas Gleixner * Used to statically setup interrupts in the early boot process. 1271d3c60047SThomas Gleixner */ 1272d3c60047SThomas Gleixner int setup_irq(unsigned int irq, struct irqaction *act) 1273d3c60047SThomas Gleixner { 1274986c011dSDavid Daney int retval; 1275d3c60047SThomas Gleixner struct irq_desc *desc = irq_to_desc(irq); 1276d3c60047SThomas Gleixner 127731d9d9b6SMarc Zyngier if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) 127831d9d9b6SMarc Zyngier return -EINVAL; 1279986c011dSDavid Daney chip_bus_lock(desc); 1280986c011dSDavid Daney retval = __setup_irq(irq, desc, act); 1281986c011dSDavid Daney chip_bus_sync_unlock(desc); 1282986c011dSDavid Daney 1283986c011dSDavid Daney return retval; 1284d3c60047SThomas Gleixner } 1285eb53b4e8SMagnus Damm EXPORT_SYMBOL_GPL(setup_irq); 1286d3c60047SThomas Gleixner 1287cbf94f06SMagnus Damm /* 1288cbf94f06SMagnus Damm * Internal function to unregister an irqaction - used to free 1289cbf94f06SMagnus Damm * regular and special interrupts that are part of the architecture. 
12901da177e4SLinus Torvalds */ 1291cbf94f06SMagnus Damm static struct irqaction *__free_irq(unsigned int irq, void *dev_id) 12921da177e4SLinus Torvalds { 1293d3c60047SThomas Gleixner struct irq_desc *desc = irq_to_desc(irq); 1294f17c7545SIngo Molnar struct irqaction *action, **action_ptr; 12951da177e4SLinus Torvalds unsigned long flags; 12961da177e4SLinus Torvalds 1297ae88a23bSIngo Molnar WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); 12987d94f7caSYinghai Lu 12997d94f7caSYinghai Lu if (!desc) 1300f21cfb25SMagnus Damm return NULL; 13011da177e4SLinus Torvalds 1302239007b8SThomas Gleixner raw_spin_lock_irqsave(&desc->lock, flags); 1303ae88a23bSIngo Molnar 1304ae88a23bSIngo Molnar /* 1305ae88a23bSIngo Molnar * There can be multiple actions per IRQ descriptor, find the right 1306ae88a23bSIngo Molnar * one based on the dev_id: 1307ae88a23bSIngo Molnar */ 1308f17c7545SIngo Molnar action_ptr = &desc->action; 13091da177e4SLinus Torvalds for (;;) { 1310f17c7545SIngo Molnar action = *action_ptr; 13111da177e4SLinus Torvalds 1312ae88a23bSIngo Molnar if (!action) { 1313ae88a23bSIngo Molnar WARN(1, "Trying to free already-free IRQ %d\n", irq); 1314239007b8SThomas Gleixner raw_spin_unlock_irqrestore(&desc->lock, flags); 1315ae88a23bSIngo Molnar 1316f21cfb25SMagnus Damm return NULL; 1317ae88a23bSIngo Molnar } 13181da177e4SLinus Torvalds 13198316e381SIngo Molnar if (action->dev_id == dev_id) 1320ae88a23bSIngo Molnar break; 1321f17c7545SIngo Molnar action_ptr = &action->next; 1322ae88a23bSIngo Molnar } 1323ae88a23bSIngo Molnar 1324ae88a23bSIngo Molnar /* Found it - now remove it from the list of entries: */ 1325f17c7545SIngo Molnar *action_ptr = action->next; 1326dbce706eSPaolo 'Blaisorblade' Giarrusso 1327cab303beSThomas Gleixner irq_pm_remove_action(desc, action); 1328cab303beSThomas Gleixner 1329ae88a23bSIngo Molnar /* If this was the last handler, shut down the IRQ line: */ 1330c1bacbaeSThomas Gleixner if (!desc->action) { 133146999238SThomas Gleixner irq_shutdown(desc); 1332c1bacbaeSThomas Gleixner irq_release_resources(desc); 1333c1bacbaeSThomas Gleixner } 13343aa551c9SThomas Gleixner 1335e7a297b0SPeter P Waskiewicz Jr #ifdef CONFIG_SMP 1336e7a297b0SPeter P Waskiewicz Jr /* make sure affinity_hint is cleaned up */ 1337e7a297b0SPeter P Waskiewicz Jr if (WARN_ON_ONCE(desc->affinity_hint)) 1338e7a297b0SPeter P Waskiewicz Jr desc->affinity_hint = NULL; 1339e7a297b0SPeter P Waskiewicz Jr #endif 1340e7a297b0SPeter P Waskiewicz Jr 1341239007b8SThomas Gleixner raw_spin_unlock_irqrestore(&desc->lock, flags); 1342ae88a23bSIngo Molnar 13431da177e4SLinus Torvalds unregister_handler_proc(irq, action); 13441da177e4SLinus Torvalds 1345ae88a23bSIngo Molnar /* Make sure it's not being used on another CPU: */ 13461da177e4SLinus Torvalds synchronize_irq(irq); 1347ae88a23bSIngo Molnar 13481d99493bSDavid Woodhouse #ifdef CONFIG_DEBUG_SHIRQ 13491d99493bSDavid Woodhouse /* 1350ae88a23bSIngo Molnar * It's a shared IRQ -- the driver ought to be prepared for an IRQ 1351ae88a23bSIngo Molnar * event to happen even now it's being freed, so let's make sure that 1352ae88a23bSIngo Molnar * is so by doing an extra call to the handler .... 1353ae88a23bSIngo Molnar * 1354ae88a23bSIngo Molnar * ( We do this after actually deregistering it, to make sure that a 1355ae88a23bSIngo Molnar * 'real' IRQ doesn't run in * parallel with our fake. 
) 13561d99493bSDavid Woodhouse */ 13571d99493bSDavid Woodhouse if (action->flags & IRQF_SHARED) { 13581d99493bSDavid Woodhouse local_irq_save(flags); 13591d99493bSDavid Woodhouse action->handler(irq, dev_id); 13601d99493bSDavid Woodhouse local_irq_restore(flags); 13611d99493bSDavid Woodhouse } 13621d99493bSDavid Woodhouse #endif 13632d860ad7SLinus Torvalds 13642d860ad7SLinus Torvalds if (action->thread) { 13652d860ad7SLinus Torvalds kthread_stop(action->thread); 13662d860ad7SLinus Torvalds put_task_struct(action->thread); 13672d860ad7SLinus Torvalds } 13682d860ad7SLinus Torvalds 1369b6873807SSebastian Andrzej Siewior module_put(desc->owner); 1370f21cfb25SMagnus Damm return action; 1371f21cfb25SMagnus Damm } 13721da177e4SLinus Torvalds 13731da177e4SLinus Torvalds /** 1374cbf94f06SMagnus Damm * remove_irq - free an interrupt 1375cbf94f06SMagnus Damm * @irq: Interrupt line to free 1376cbf94f06SMagnus Damm * @act: irqaction for the interrupt 1377cbf94f06SMagnus Damm * 1378cbf94f06SMagnus Damm * Used to remove interrupts statically setup by the early boot process. 1379cbf94f06SMagnus Damm */ 1380cbf94f06SMagnus Damm void remove_irq(unsigned int irq, struct irqaction *act) 1381cbf94f06SMagnus Damm { 138231d9d9b6SMarc Zyngier struct irq_desc *desc = irq_to_desc(irq); 138331d9d9b6SMarc Zyngier 138431d9d9b6SMarc Zyngier if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) 1385cbf94f06SMagnus Damm __free_irq(irq, act->dev_id); 1386cbf94f06SMagnus Damm } 1387eb53b4e8SMagnus Damm EXPORT_SYMBOL_GPL(remove_irq); 1388cbf94f06SMagnus Damm 1389cbf94f06SMagnus Damm /** 1390f21cfb25SMagnus Damm * free_irq - free an interrupt allocated with request_irq 13911da177e4SLinus Torvalds * @irq: Interrupt line to free 13921da177e4SLinus Torvalds * @dev_id: Device identity to free 13931da177e4SLinus Torvalds * 13941da177e4SLinus Torvalds * Remove an interrupt handler. The handler is removed and if the 13951da177e4SLinus Torvalds * interrupt line is no longer in use by any driver it is disabled. 13961da177e4SLinus Torvalds * On a shared IRQ the caller must ensure the interrupt is disabled 13971da177e4SLinus Torvalds * on the card it drives before calling this function. The function 13981da177e4SLinus Torvalds * does not return until any executing interrupts for this IRQ 13991da177e4SLinus Torvalds * have completed. 14001da177e4SLinus Torvalds * 14011da177e4SLinus Torvalds * This function must not be called from interrupt context. 
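 *
 * Minimal illustrative pairing (hypothetical driver names, not part
 * of this file); the dev_id handed to free_irq() must be the same
 * cookie that was passed to request_irq():
 *
 *	ret = request_irq(irq, my_handler, IRQF_SHARED, "my_dev", my_dev);
 *	if (ret)
 *		return ret;
 *	...
 *	free_irq(irq, my_dev);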
14021da177e4SLinus Torvalds */ 14031da177e4SLinus Torvalds void free_irq(unsigned int irq, void *dev_id) 14041da177e4SLinus Torvalds { 140570aedd24SThomas Gleixner struct irq_desc *desc = irq_to_desc(irq); 140670aedd24SThomas Gleixner 140731d9d9b6SMarc Zyngier if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) 140870aedd24SThomas Gleixner return; 140970aedd24SThomas Gleixner 1410cd7eab44SBen Hutchings #ifdef CONFIG_SMP 1411cd7eab44SBen Hutchings if (WARN_ON(desc->affinity_notify)) 1412cd7eab44SBen Hutchings desc->affinity_notify = NULL; 1413cd7eab44SBen Hutchings #endif 1414cd7eab44SBen Hutchings 14153876ec9eSThomas Gleixner chip_bus_lock(desc); 1416cbf94f06SMagnus Damm kfree(__free_irq(irq, dev_id)); 14173876ec9eSThomas Gleixner chip_bus_sync_unlock(desc); 14181da177e4SLinus Torvalds } 14191da177e4SLinus Torvalds EXPORT_SYMBOL(free_irq); 14201da177e4SLinus Torvalds 14211da177e4SLinus Torvalds /** 14223aa551c9SThomas Gleixner * request_threaded_irq - allocate an interrupt line 14231da177e4SLinus Torvalds * @irq: Interrupt line to allocate 14243aa551c9SThomas Gleixner * @handler: Function to be called when the IRQ occurs. 14253aa551c9SThomas Gleixner * Primary handler for threaded interrupts 1426b25c340cSThomas Gleixner * If NULL and thread_fn != NULL the default 1427b25c340cSThomas Gleixner * primary handler is installed 14283aa551c9SThomas Gleixner * @thread_fn: Function called from the irq handler thread 14293aa551c9SThomas Gleixner * If NULL, no irq thread is created 14301da177e4SLinus Torvalds * @irqflags: Interrupt type flags 14311da177e4SLinus Torvalds * @devname: An ascii name for the claiming device 14321da177e4SLinus Torvalds * @dev_id: A cookie passed back to the handler function 14331da177e4SLinus Torvalds * 14341da177e4SLinus Torvalds * This call allocates interrupt resources and enables the 14351da177e4SLinus Torvalds * interrupt line and IRQ handling. From the point this 14361da177e4SLinus Torvalds * call is made your handler function may be invoked. Since 14371da177e4SLinus Torvalds * your handler function must clear any interrupt the board 14381da177e4SLinus Torvalds * raises, you must take care both to initialise your hardware 14391da177e4SLinus Torvalds * and to set up the interrupt handler in the right order. 14401da177e4SLinus Torvalds * 14413aa551c9SThomas Gleixner * If you want to set up a threaded irq handler for your device 14426d21af4fSJavi Merino * then you need to supply @handler and @thread_fn. @handler is 14433aa551c9SThomas Gleixner * still called in hard interrupt context and has to check 14443aa551c9SThomas Gleixner * whether the interrupt originates from the device. If yes it 14453aa551c9SThomas Gleixner * needs to disable the interrupt on the device and return 144639a2eddbSSteven Rostedt * IRQ_WAKE_THREAD which will wake up the handler thread and run 14473aa551c9SThomas Gleixner * @thread_fn. This split handler design is necessary to support 14483aa551c9SThomas Gleixner * shared interrupts. 14493aa551c9SThomas Gleixner * 14501da177e4SLinus Torvalds * Dev_id must be globally unique. Normally the address of the 14511da177e4SLinus Torvalds * device data structure is used as the cookie. Since the handler 14521da177e4SLinus Torvalds * receives this value it makes sense to use it. 14531da177e4SLinus Torvalds * 14541da177e4SLinus Torvalds * If your interrupt is shared you must pass a non NULL dev_id 14551da177e4SLinus Torvalds * as this is required when freeing the interrupt. 
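 *
 * A minimal illustrative sketch (hypothetical device helpers, not
 * part of this file): the primary handler only checks and quiesces
 * the device, the heavy lifting happens in the thread:
 *
 *	static irqreturn_t my_quick_check(int irq, void *cookie)
 *	{
 *		struct my_dev *dev = cookie;
 *
 *		if (!my_dev_irq_pending(dev))
 *			return IRQ_NONE;
 *		my_dev_mask_irq(dev);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t my_thread_fn(int irq, void *cookie)
 *	{
 *		struct my_dev *dev = cookie;
 *
 *		my_dev_process_events(dev);
 *		my_dev_unmask_irq(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(irq, my_quick_check, my_thread_fn,
 *				   IRQF_SHARED, "my_dev", dev);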
14561da177e4SLinus Torvalds * 14571da177e4SLinus Torvalds * Flags: 14581da177e4SLinus Torvalds * 14593cca53b0SThomas Gleixner * IRQF_SHARED Interrupt is shared 14600c5d1eb7SDavid Brownell * IRQF_TRIGGER_* Specify active edge(s) or level 14611da177e4SLinus Torvalds * 14621da177e4SLinus Torvalds */ 14633aa551c9SThomas Gleixner int request_threaded_irq(unsigned int irq, irq_handler_t handler, 14643aa551c9SThomas Gleixner irq_handler_t thread_fn, unsigned long irqflags, 14653aa551c9SThomas Gleixner const char *devname, void *dev_id) 14661da177e4SLinus Torvalds { 14671da177e4SLinus Torvalds struct irqaction *action; 146808678b08SYinghai Lu struct irq_desc *desc; 1469d3c60047SThomas Gleixner int retval; 14701da177e4SLinus Torvalds 1471470c6623SDavid Brownell /* 14721da177e4SLinus Torvalds * Sanity-check: shared interrupts must pass in a real dev-ID, 14731da177e4SLinus Torvalds * otherwise we'll have trouble later trying to figure out 14741da177e4SLinus Torvalds * which interrupt is which (messes up the interrupt freeing 14751da177e4SLinus Torvalds * logic etc). 14761da177e4SLinus Torvalds */ 14773cca53b0SThomas Gleixner if ((irqflags & IRQF_SHARED) && !dev_id) 14781da177e4SLinus Torvalds return -EINVAL; 14797d94f7caSYinghai Lu 1480cb5bc832SYinghai Lu desc = irq_to_desc(irq); 14817d94f7caSYinghai Lu if (!desc) 14821da177e4SLinus Torvalds return -EINVAL; 14837d94f7caSYinghai Lu 148431d9d9b6SMarc Zyngier if (!irq_settings_can_request(desc) || 148531d9d9b6SMarc Zyngier WARN_ON(irq_settings_is_per_cpu_devid(desc))) 14866550c775SThomas Gleixner return -EINVAL; 1487b25c340cSThomas Gleixner 1488b25c340cSThomas Gleixner if (!handler) { 1489b25c340cSThomas Gleixner if (!thread_fn) 14901da177e4SLinus Torvalds return -EINVAL; 1491b25c340cSThomas Gleixner handler = irq_default_primary_handler; 1492b25c340cSThomas Gleixner } 14931da177e4SLinus Torvalds 149445535732SThomas Gleixner action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 14951da177e4SLinus Torvalds if (!action) 14961da177e4SLinus Torvalds return -ENOMEM; 14971da177e4SLinus Torvalds 14981da177e4SLinus Torvalds action->handler = handler; 14993aa551c9SThomas Gleixner action->thread_fn = thread_fn; 15001da177e4SLinus Torvalds action->flags = irqflags; 15011da177e4SLinus Torvalds action->name = devname; 15021da177e4SLinus Torvalds action->dev_id = dev_id; 15031da177e4SLinus Torvalds 15043876ec9eSThomas Gleixner chip_bus_lock(desc); 1505d3c60047SThomas Gleixner retval = __setup_irq(irq, desc, action); 15063876ec9eSThomas Gleixner chip_bus_sync_unlock(desc); 150770aedd24SThomas Gleixner 1508377bf1e4SAnton Vorontsov if (retval) 1509377bf1e4SAnton Vorontsov kfree(action); 1510377bf1e4SAnton Vorontsov 15116d83f94dSThomas Gleixner #ifdef CONFIG_DEBUG_SHIRQ_FIXME 15126ce51c43SLuis Henriques if (!retval && (irqflags & IRQF_SHARED)) { 1513a304e1b8SDavid Woodhouse /* 1514a304e1b8SDavid Woodhouse * It's a shared IRQ -- the driver ought to be prepared for it 1515a304e1b8SDavid Woodhouse * to happen immediately, so let's make sure.... 1516377bf1e4SAnton Vorontsov * We disable the irq to make sure that a 'real' IRQ doesn't 1517377bf1e4SAnton Vorontsov * run in parallel with our fake. 
1518a304e1b8SDavid Woodhouse */ 1519a304e1b8SDavid Woodhouse unsigned long flags; 1520a304e1b8SDavid Woodhouse 1521377bf1e4SAnton Vorontsov disable_irq(irq); 1522a304e1b8SDavid Woodhouse local_irq_save(flags); 1523377bf1e4SAnton Vorontsov 1524a304e1b8SDavid Woodhouse handler(irq, dev_id); 1525377bf1e4SAnton Vorontsov 1526a304e1b8SDavid Woodhouse local_irq_restore(flags); 1527377bf1e4SAnton Vorontsov enable_irq(irq); 1528a304e1b8SDavid Woodhouse } 1529a304e1b8SDavid Woodhouse #endif 15301da177e4SLinus Torvalds return retval; 15311da177e4SLinus Torvalds } 15323aa551c9SThomas Gleixner EXPORT_SYMBOL(request_threaded_irq); 1533ae731f8dSMarc Zyngier 1534ae731f8dSMarc Zyngier /** 1535ae731f8dSMarc Zyngier * request_any_context_irq - allocate an interrupt line 1536ae731f8dSMarc Zyngier * @irq: Interrupt line to allocate 1537ae731f8dSMarc Zyngier * @handler: Function to be called when the IRQ occurs. 1538ae731f8dSMarc Zyngier * Threaded handler for threaded interrupts. 1539ae731f8dSMarc Zyngier * @flags: Interrupt type flags 1540ae731f8dSMarc Zyngier * @name: An ascii name for the claiming device 1541ae731f8dSMarc Zyngier * @dev_id: A cookie passed back to the handler function 1542ae731f8dSMarc Zyngier * 1543ae731f8dSMarc Zyngier * This call allocates interrupt resources and enables the 1544ae731f8dSMarc Zyngier * interrupt line and IRQ handling. It selects either a 1545ae731f8dSMarc Zyngier * hardirq or threaded handling method depending on the 1546ae731f8dSMarc Zyngier * context. 1547ae731f8dSMarc Zyngier * 1548ae731f8dSMarc Zyngier * On failure, it returns a negative value. On success, 1549ae731f8dSMarc Zyngier * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. 1550ae731f8dSMarc Zyngier */ 1551ae731f8dSMarc Zyngier int request_any_context_irq(unsigned int irq, irq_handler_t handler, 1552ae731f8dSMarc Zyngier unsigned long flags, const char *name, void *dev_id) 1553ae731f8dSMarc Zyngier { 1554ae731f8dSMarc Zyngier struct irq_desc *desc = irq_to_desc(irq); 1555ae731f8dSMarc Zyngier int ret; 1556ae731f8dSMarc Zyngier 1557ae731f8dSMarc Zyngier if (!desc) 1558ae731f8dSMarc Zyngier return -EINVAL; 1559ae731f8dSMarc Zyngier 15601ccb4e61SThomas Gleixner if (irq_settings_is_nested_thread(desc)) { 1561ae731f8dSMarc Zyngier ret = request_threaded_irq(irq, NULL, handler, 1562ae731f8dSMarc Zyngier flags, name, dev_id); 1563ae731f8dSMarc Zyngier return !ret ? IRQC_IS_NESTED : ret; 1564ae731f8dSMarc Zyngier } 1565ae731f8dSMarc Zyngier 1566ae731f8dSMarc Zyngier ret = request_irq(irq, handler, flags, name, dev_id); 1567ae731f8dSMarc Zyngier return !ret ? 
IRQC_IS_HARDIRQ : ret; 1568ae731f8dSMarc Zyngier } 1569ae731f8dSMarc Zyngier EXPORT_SYMBOL_GPL(request_any_context_irq); 157031d9d9b6SMarc Zyngier 15711e7c5fd2SMarc Zyngier void enable_percpu_irq(unsigned int irq, unsigned int type) 157231d9d9b6SMarc Zyngier { 157331d9d9b6SMarc Zyngier unsigned int cpu = smp_processor_id(); 157431d9d9b6SMarc Zyngier unsigned long flags; 157531d9d9b6SMarc Zyngier struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); 157631d9d9b6SMarc Zyngier 157731d9d9b6SMarc Zyngier if (!desc) 157831d9d9b6SMarc Zyngier return; 157931d9d9b6SMarc Zyngier 15801e7c5fd2SMarc Zyngier type &= IRQ_TYPE_SENSE_MASK; 15811e7c5fd2SMarc Zyngier if (type != IRQ_TYPE_NONE) { 15821e7c5fd2SMarc Zyngier int ret; 15831e7c5fd2SMarc Zyngier 15841e7c5fd2SMarc Zyngier ret = __irq_set_trigger(desc, irq, type); 15851e7c5fd2SMarc Zyngier 15861e7c5fd2SMarc Zyngier if (ret) { 158732cffddeSThomas Gleixner WARN(1, "failed to set type for IRQ%d\n", irq); 15881e7c5fd2SMarc Zyngier goto out; 15891e7c5fd2SMarc Zyngier } 15901e7c5fd2SMarc Zyngier } 15911e7c5fd2SMarc Zyngier 159231d9d9b6SMarc Zyngier irq_percpu_enable(desc, cpu); 15931e7c5fd2SMarc Zyngier out: 159431d9d9b6SMarc Zyngier irq_put_desc_unlock(desc, flags); 159531d9d9b6SMarc Zyngier } 159636a5df85SChris Metcalf EXPORT_SYMBOL_GPL(enable_percpu_irq); 159731d9d9b6SMarc Zyngier 159831d9d9b6SMarc Zyngier void disable_percpu_irq(unsigned int irq) 159931d9d9b6SMarc Zyngier { 160031d9d9b6SMarc Zyngier unsigned int cpu = smp_processor_id(); 160131d9d9b6SMarc Zyngier unsigned long flags; 160231d9d9b6SMarc Zyngier struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); 160331d9d9b6SMarc Zyngier 160431d9d9b6SMarc Zyngier if (!desc) 160531d9d9b6SMarc Zyngier return; 160631d9d9b6SMarc Zyngier 160731d9d9b6SMarc Zyngier irq_percpu_disable(desc, cpu); 160831d9d9b6SMarc Zyngier irq_put_desc_unlock(desc, flags); 160931d9d9b6SMarc Zyngier } 161036a5df85SChris Metcalf EXPORT_SYMBOL_GPL(disable_percpu_irq); 161131d9d9b6SMarc Zyngier 161231d9d9b6SMarc Zyngier /* 161331d9d9b6SMarc Zyngier * Internal function to unregister a percpu irqaction. 
161431d9d9b6SMarc Zyngier */ 161531d9d9b6SMarc Zyngier static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id) 161631d9d9b6SMarc Zyngier { 161731d9d9b6SMarc Zyngier struct irq_desc *desc = irq_to_desc(irq); 161831d9d9b6SMarc Zyngier struct irqaction *action; 161931d9d9b6SMarc Zyngier unsigned long flags; 162031d9d9b6SMarc Zyngier 162131d9d9b6SMarc Zyngier WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); 162231d9d9b6SMarc Zyngier 162331d9d9b6SMarc Zyngier if (!desc) 162431d9d9b6SMarc Zyngier return NULL; 162531d9d9b6SMarc Zyngier 162631d9d9b6SMarc Zyngier raw_spin_lock_irqsave(&desc->lock, flags); 162731d9d9b6SMarc Zyngier 162831d9d9b6SMarc Zyngier action = desc->action; 162931d9d9b6SMarc Zyngier if (!action || action->percpu_dev_id != dev_id) { 163031d9d9b6SMarc Zyngier WARN(1, "Trying to free already-free IRQ %d\n", irq); 163131d9d9b6SMarc Zyngier goto bad; 163231d9d9b6SMarc Zyngier } 163331d9d9b6SMarc Zyngier 163431d9d9b6SMarc Zyngier if (!cpumask_empty(desc->percpu_enabled)) { 163531d9d9b6SMarc Zyngier WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", 163631d9d9b6SMarc Zyngier irq, cpumask_first(desc->percpu_enabled)); 163731d9d9b6SMarc Zyngier goto bad; 163831d9d9b6SMarc Zyngier } 163931d9d9b6SMarc Zyngier 164031d9d9b6SMarc Zyngier /* Found it - now remove it from the list of entries: */ 164131d9d9b6SMarc Zyngier desc->action = NULL; 164231d9d9b6SMarc Zyngier 164331d9d9b6SMarc Zyngier raw_spin_unlock_irqrestore(&desc->lock, flags); 164431d9d9b6SMarc Zyngier 164531d9d9b6SMarc Zyngier unregister_handler_proc(irq, action); 164631d9d9b6SMarc Zyngier 164731d9d9b6SMarc Zyngier module_put(desc->owner); 164831d9d9b6SMarc Zyngier return action; 164931d9d9b6SMarc Zyngier 165031d9d9b6SMarc Zyngier bad: 165131d9d9b6SMarc Zyngier raw_spin_unlock_irqrestore(&desc->lock, flags); 165231d9d9b6SMarc Zyngier return NULL; 165331d9d9b6SMarc Zyngier } 165431d9d9b6SMarc Zyngier 165531d9d9b6SMarc Zyngier /** 165631d9d9b6SMarc Zyngier * remove_percpu_irq - free a per-cpu interrupt 165731d9d9b6SMarc Zyngier * @irq: Interrupt line to free 165831d9d9b6SMarc Zyngier * @act: irqaction for the interrupt 165931d9d9b6SMarc Zyngier * 166031d9d9b6SMarc Zyngier * Used to remove interrupts statically setup by the early boot process. 166131d9d9b6SMarc Zyngier */ 166231d9d9b6SMarc Zyngier void remove_percpu_irq(unsigned int irq, struct irqaction *act) 166331d9d9b6SMarc Zyngier { 166431d9d9b6SMarc Zyngier struct irq_desc *desc = irq_to_desc(irq); 166531d9d9b6SMarc Zyngier 166631d9d9b6SMarc Zyngier if (desc && irq_settings_is_per_cpu_devid(desc)) 166731d9d9b6SMarc Zyngier __free_percpu_irq(irq, act->percpu_dev_id); 166831d9d9b6SMarc Zyngier } 166931d9d9b6SMarc Zyngier 167031d9d9b6SMarc Zyngier /** 167131d9d9b6SMarc Zyngier * free_percpu_irq - free an interrupt allocated with request_percpu_irq 167231d9d9b6SMarc Zyngier * @irq: Interrupt line to free 167331d9d9b6SMarc Zyngier * @dev_id: Device identity to free 167431d9d9b6SMarc Zyngier * 167531d9d9b6SMarc Zyngier * Remove a percpu interrupt handler. The handler is removed, but 167631d9d9b6SMarc Zyngier * the interrupt line is not disabled. This must be done on each 167731d9d9b6SMarc Zyngier * CPU before calling this function. The function does not return 167831d9d9b6SMarc Zyngier * until any executing interrupts for this IRQ have completed. 167931d9d9b6SMarc Zyngier * 168031d9d9b6SMarc Zyngier * This function must not be called from interrupt context. 
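 *
 * Illustrative teardown order (hypothetical names, not part of this
 * file): each CPU disables its copy of the interrupt first, e.g.
 * from an on_each_cpu() callback that calls disable_percpu_irq(),
 * and only then is the handler freed:
 *
 *	on_each_cpu(my_disable_cb, &my_irq, 1);
 *	free_percpu_irq(my_irq, my_percpu_dev);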
168131d9d9b6SMarc Zyngier */ 168231d9d9b6SMarc Zyngier void free_percpu_irq(unsigned int irq, void __percpu *dev_id) 168331d9d9b6SMarc Zyngier { 168431d9d9b6SMarc Zyngier struct irq_desc *desc = irq_to_desc(irq); 168531d9d9b6SMarc Zyngier 168631d9d9b6SMarc Zyngier if (!desc || !irq_settings_is_per_cpu_devid(desc)) 168731d9d9b6SMarc Zyngier return; 168831d9d9b6SMarc Zyngier 168931d9d9b6SMarc Zyngier chip_bus_lock(desc); 169031d9d9b6SMarc Zyngier kfree(__free_percpu_irq(irq, dev_id)); 169131d9d9b6SMarc Zyngier chip_bus_sync_unlock(desc); 169231d9d9b6SMarc Zyngier } 169331d9d9b6SMarc Zyngier 169431d9d9b6SMarc Zyngier /** 169531d9d9b6SMarc Zyngier * setup_percpu_irq - setup a per-cpu interrupt 169631d9d9b6SMarc Zyngier * @irq: Interrupt line to setup 169731d9d9b6SMarc Zyngier * @act: irqaction for the interrupt 169831d9d9b6SMarc Zyngier * 169931d9d9b6SMarc Zyngier * Used to statically setup per-cpu interrupts in the early boot process. 170031d9d9b6SMarc Zyngier */ 170131d9d9b6SMarc Zyngier int setup_percpu_irq(unsigned int irq, struct irqaction *act) 170231d9d9b6SMarc Zyngier { 170331d9d9b6SMarc Zyngier struct irq_desc *desc = irq_to_desc(irq); 170431d9d9b6SMarc Zyngier int retval; 170531d9d9b6SMarc Zyngier 170631d9d9b6SMarc Zyngier if (!desc || !irq_settings_is_per_cpu_devid(desc)) 170731d9d9b6SMarc Zyngier return -EINVAL; 170831d9d9b6SMarc Zyngier chip_bus_lock(desc); 170931d9d9b6SMarc Zyngier retval = __setup_irq(irq, desc, act); 171031d9d9b6SMarc Zyngier chip_bus_sync_unlock(desc); 171131d9d9b6SMarc Zyngier 171231d9d9b6SMarc Zyngier return retval; 171331d9d9b6SMarc Zyngier } 171431d9d9b6SMarc Zyngier 171531d9d9b6SMarc Zyngier /** 171631d9d9b6SMarc Zyngier * request_percpu_irq - allocate a percpu interrupt line 171731d9d9b6SMarc Zyngier * @irq: Interrupt line to allocate 171831d9d9b6SMarc Zyngier * @handler: Function to be called when the IRQ occurs. 171931d9d9b6SMarc Zyngier * @devname: An ascii name for the claiming device 172031d9d9b6SMarc Zyngier * @dev_id: A percpu cookie passed back to the handler function 172131d9d9b6SMarc Zyngier * 172231d9d9b6SMarc Zyngier * This call allocates interrupt resources, but doesn't 172331d9d9b6SMarc Zyngier * automatically enable the interrupt. It has to be done on each 172431d9d9b6SMarc Zyngier * CPU using enable_percpu_irq(). 172531d9d9b6SMarc Zyngier * 172631d9d9b6SMarc Zyngier * Dev_id must be globally unique. It is a per-cpu variable, and 172731d9d9b6SMarc Zyngier * the handler gets called with the interrupted CPU's instance of 172831d9d9b6SMarc Zyngier * that variable. 
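 *
 * Minimal illustrative call sequence (hypothetical names, not part
 * of this file):
 *
 *	struct my_pcpu_data __percpu *my_pcpu_dev;
 *
 *	my_pcpu_dev = alloc_percpu(struct my_pcpu_data);
 *	ret = request_percpu_irq(irq, my_percpu_handler, "my_timer",
 *				 my_pcpu_dev);
 *
 * followed by enable_percpu_irq() on each CPU that should actually
 * receive the interrupt.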
172931d9d9b6SMarc Zyngier */ 173031d9d9b6SMarc Zyngier int request_percpu_irq(unsigned int irq, irq_handler_t handler, 173131d9d9b6SMarc Zyngier const char *devname, void __percpu *dev_id) 173231d9d9b6SMarc Zyngier { 173331d9d9b6SMarc Zyngier struct irqaction *action; 173431d9d9b6SMarc Zyngier struct irq_desc *desc; 173531d9d9b6SMarc Zyngier int retval; 173631d9d9b6SMarc Zyngier 173731d9d9b6SMarc Zyngier if (!dev_id) 173831d9d9b6SMarc Zyngier return -EINVAL; 173931d9d9b6SMarc Zyngier 174031d9d9b6SMarc Zyngier desc = irq_to_desc(irq); 174131d9d9b6SMarc Zyngier if (!desc || !irq_settings_can_request(desc) || 174231d9d9b6SMarc Zyngier !irq_settings_is_per_cpu_devid(desc)) 174331d9d9b6SMarc Zyngier return -EINVAL; 174431d9d9b6SMarc Zyngier 174531d9d9b6SMarc Zyngier action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 174631d9d9b6SMarc Zyngier if (!action) 174731d9d9b6SMarc Zyngier return -ENOMEM; 174831d9d9b6SMarc Zyngier 174931d9d9b6SMarc Zyngier action->handler = handler; 17502ed0e645SMarc Zyngier action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND; 175131d9d9b6SMarc Zyngier action->name = devname; 175231d9d9b6SMarc Zyngier action->percpu_dev_id = dev_id; 175331d9d9b6SMarc Zyngier 175431d9d9b6SMarc Zyngier chip_bus_lock(desc); 175531d9d9b6SMarc Zyngier retval = __setup_irq(irq, desc, action); 175631d9d9b6SMarc Zyngier chip_bus_sync_unlock(desc); 175731d9d9b6SMarc Zyngier 175831d9d9b6SMarc Zyngier if (retval) 175931d9d9b6SMarc Zyngier kfree(action); 176031d9d9b6SMarc Zyngier 176131d9d9b6SMarc Zyngier return retval; 176231d9d9b6SMarc Zyngier } 1763
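/*
 * Illustrative driver-side sketch (hypothetical names, not part of
 * manage.c): the handler of a per-CPU interrupt receives the calling
 * CPU's own instance of the __percpu cookie that was passed to
 * request_percpu_irq(), so it can update CPU-local state without
 * locking; each CPU then enables its copy of the interrupt locally:
 *
 *	static irqreturn_t my_percpu_handler(int irq, void *dev_id)
 *	{
 *		struct my_pcpu_data *d = dev_id;
 *
 *		d->count++;
 *		return IRQ_HANDLED;
 *	}
 *
 *	static void my_enable_cb(void *info)
 *	{
 *		enable_percpu_irq(*(unsigned int *)info, IRQ_TYPE_NONE);
 *	}
 *
 *	on_each_cpu(my_enable_cb, &my_irq, 1);
 */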