// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
{
	struct irq_data *irqd = irq_desc_get_irq_data(desc);
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);

		/*
		 * If requested and supported, check at the chip whether it
		 * is in flight at the hardware level, i.e. already pending
		 * in a CPU and waiting for service and acknowledge.
		 */
		if (!inprogress && sync_chip) {
			/*
			 * Ignore the return code. inprogress is only updated
			 * when the chip supports it.
			 */
			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
						&inprogress);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need, you
 * will deadlock. It does not take associated threaded handlers
 * into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 *
 * It does not check whether there is an interrupt in flight at the
 * hardware level, but not serviced yet, as this might deadlock when
 * called with interrupts disabled and the target CPU of the interrupt
 * is the current CPU.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, false);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * Can only be called from preemptible code as it might sleep when
 * an interrupt thread is associated with @irq.
 *
 * It optionally makes sure (when the irq chip supports that method)
 * that the interrupt is not pending in any CPU and waiting for
 * service.
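 *
 * A minimal teardown sketch (illustrative only; dev, dev->irq and the
 * going_away flag are placeholder names, not taken from this file):
 *
 *	dev->going_away = true;
 *	synchronize_irq(dev->irq);
 *	teardown_queues(dev);
 *
 * Once synchronize_irq() returns, no handler (hard or threaded) can
 * still be running past the going_away check, so the shared state
 * can be torn down safely.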
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, true);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
 * @irq: Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We cannot call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

static void irq_validate_effective_affinity(struct irq_data *data)
{
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
#endif
}

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	/*
	 * If this is a managed interrupt and housekeeping is enabled on
	 * it, check whether the requested affinity mask intersects with
	 * a housekeeping CPU. If so, then remove the isolated CPUs from
	 * the mask and just keep the housekeeping CPU(s). This prevents
	 * the affinity setter from routing the interrupt to an isolated
	 * CPU, so that I/O submitted from a housekeeping CPU cannot cause
	 * interrupts on an isolated one.
	 *
	 * If the masks do not intersect or include online CPU(s) then
	 * keep the requested mask. The isolated target CPUs only
	 * receive interrupts when the I/O operation was submitted
	 * directly from them.
	 *
	 * If all housekeeping CPUs in the affinity mask are offline, the
	 * interrupt will be migrated by the CPU hotplug code once a
	 * housekeeping CPU which belongs to the affinity mask comes
	 * online.
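	 *
	 * As an illustration (the boot parameter is an assumption based
	 * on the managed_irq housekeeping flag used below): booting with
	 * "isolcpus=managed_irq,2-7" isolates CPUs 2-7, so a managed
	 * interrupt whose mask spans both isolated and housekeeping
	 * CPUs is programmed to the housekeeping CPUs only.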
	 */
	if (irqd_affinity_is_managed(data) &&
	    housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
		const struct cpumask *hk_mask, *prog_mask;

		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
		static struct cpumask tmp_mask;

		hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);

		raw_spin_lock(&tmp_mask_lock);
		cpumask_and(&tmp_mask, mask, hk_mask);
		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
			prog_mask = mask;
		else
			prog_mask = &tmp_mask;
		ret = chip->irq_set_affinity(data, prog_mask, force);
		raw_spin_unlock(&tmp_mask_lock);
	} else {
		ret = chip->irq_set_affinity(data, mask, force);
	}
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism, utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 * notification. Function pointers must be initialised;
 * the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc || desc->istate & IRQS_NMI)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify) {
		cancel_work_sync(&old_notify->work);
		kref_put(&old_notify->kref, old_notify->release);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity
 * autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpu_online_mask);

	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif

/*
 * Called when a bogus affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_setup_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
#endif

/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the
 * interrupt
 * @irq: interrupt number to set affinity
 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 *	       specific data for percpu_devid interrupts
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
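 *
 * Illustrative pairing (placeholder names, not from this file):
 *
 *	disable_irq_nosync(dev->irq);
 *	...short critical section, safe in atomic context...
 *	enable_irq(dev->irq);
 *
 * Use disable_irq() instead when it must be guaranteed that no
 * handler is still running after the call returns.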
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need, you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);

/**
 * disable_nmi_nosync - disable an nmi without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * The interrupt to disable must have been requested through request_nmi.
 * Unlike disable_nmi(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 */
void disable_nmi_nosync(unsigned int irq)
{
	disable_irq_nosync(irq);
}

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock
 * are NULL!
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

/**
 * enable_nmi - enable handling of an nmi
 * @irq: Interrupt to enable
 *
 * The interrupt to enable must have been requested through request_nmi.
 * Undoes the effect of one call to disable_nmi().
 * If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 */
void enable_nmi(unsigned int irq)
{
	enable_irq(irq);
}

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 *
 * Note: irq enable/disable state is completely orthogonal
 * to the enable/disable state of irq wake. An irq can be
 * disabled with disable_irq() and still wake the system as
 * long as the irq has wake enabled. If this does not hold,
 * then the underlying irq chip and the related driver need
 * to be investigated.
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Don't use NMIs as wake up interrupts please */
	if (desc->istate & IRQS_NMI) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
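	 *
	 * Typical suspend/resume sketch (enable_irq_wake() and
	 * disable_irq_wake() are the <linux/interrupt.h> wrappers around
	 * this function; dev and irq are placeholder names):
	 *
	 *	suspend: if (device_may_wakeup(dev)) enable_irq_wake(irq);
	 *	resume:  if (device_may_wakeup(dev)) disable_irq_wake(irq);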
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ?
				 : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
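 *
 * E.g. a sketch (my_thread_fn, "mydev" and dev are placeholders):
 *
 *	ret = request_threaded_irq(irq, NULL, my_thread_fn,
 *				   IRQF_ONESHOT, "mydev", dev);
 *
 * installs this function as the hard irq handler, so the hard irq
 * context does nothing but wake the my_thread_fn() thread.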
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			/* may need to run one last time */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			__set_current_state(TASK_RUNNING);
			return -1;
		}

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask it if the interrupt has not been
 * disabled and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect ourselves
	 * against the following scenario:
	 *
	 * The thread finishes faster than the hard interrupt handler
	 * on the other CPU.
	 * If we unmask the irq line then the
	 * interrupt can come in again and mask the line, then leave due
	 * to IRQS_INPROGRESS, and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time.
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity)) {
		const struct cpumask *m;

		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
		cpumask_copy(mask, m);
	} else {
		valid = false;
	}
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
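 *
 * E.g. a thread_fn which reads a sensor over I2C or SPI may block in
 * the bus driver; that is fine here, unlike in hard interrupt context.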
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
10974d1d61a6SOleg Nesterov */ 10984d1d61a6SOleg Nesterov if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) 10994d1d61a6SOleg Nesterov wake_threads_waitq(desc); 11004d1d61a6SOleg Nesterov 11014d1d61a6SOleg Nesterov /* Prevent a stale desc->threads_oneshot */ 11024d1d61a6SOleg Nesterov irq_finalize_oneshot(desc, action); 11034d1d61a6SOleg Nesterov } 11044d1d61a6SOleg Nesterov 11052a1d3ab8SThomas Gleixner static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action) 11062a1d3ab8SThomas Gleixner { 11072a1d3ab8SThomas Gleixner struct irqaction *secondary = action->secondary; 11082a1d3ab8SThomas Gleixner 11092a1d3ab8SThomas Gleixner if (WARN_ON_ONCE(!secondary)) 11102a1d3ab8SThomas Gleixner return; 11112a1d3ab8SThomas Gleixner 11122a1d3ab8SThomas Gleixner raw_spin_lock_irq(&desc->lock); 11132a1d3ab8SThomas Gleixner __irq_wake_thread(desc, secondary); 11142a1d3ab8SThomas Gleixner raw_spin_unlock_irq(&desc->lock); 11152a1d3ab8SThomas Gleixner } 11162a1d3ab8SThomas Gleixner 11178d32a307SThomas Gleixner /* 11183aa551c9SThomas Gleixner * Interrupt handler thread 11193aa551c9SThomas Gleixner */ 11203aa551c9SThomas Gleixner static int irq_thread(void *data) 11213aa551c9SThomas Gleixner { 112267d12145SAl Viro struct callback_head on_exit_work; 11233aa551c9SThomas Gleixner struct irqaction *action = data; 11243aa551c9SThomas Gleixner struct irq_desc *desc = irq_to_desc(action->irq); 11253a43e05fSSebastian Andrzej Siewior irqreturn_t (*handler_fn)(struct irq_desc *desc, 11263a43e05fSSebastian Andrzej Siewior struct irqaction *action); 11273aa551c9SThomas Gleixner 1128540b60e2SAlexander Gordeev if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, 11298d32a307SThomas Gleixner &action->thread_flags)) 11308d32a307SThomas Gleixner handler_fn = irq_forced_thread_fn; 11318d32a307SThomas Gleixner else 11328d32a307SThomas Gleixner handler_fn = irq_thread_fn; 11338d32a307SThomas Gleixner 113441f9d29fSAl Viro init_task_work(&on_exit_work, irq_thread_dtor); 11354d1d61a6SOleg Nesterov task_work_add(current, &on_exit_work, false); 11363aa551c9SThomas Gleixner 1137f3de44edSSankara Muthukrishnan irq_thread_check_affinity(desc, action); 1138f3de44edSSankara Muthukrishnan 11393aa551c9SThomas Gleixner while (!irq_wait_for_interrupt(action)) { 11407140ea19SIdo Yariv irqreturn_t action_ret; 11413aa551c9SThomas Gleixner 1142591d2fb0SThomas Gleixner irq_thread_check_affinity(desc, action); 1143591d2fb0SThomas Gleixner 11443a43e05fSSebastian Andrzej Siewior action_ret = handler_fn(desc, action); 11452a1d3ab8SThomas Gleixner if (action_ret == IRQ_WAKE_THREAD) 11462a1d3ab8SThomas Gleixner irq_wake_secondary(desc, action); 11477140ea19SIdo Yariv 11487140ea19SIdo Yariv wake_threads_waitq(desc); 11493aa551c9SThomas Gleixner } 11503aa551c9SThomas Gleixner 11517140ea19SIdo Yariv /* 11527140ea19SIdo Yariv * This is the regular exit path. __free_irq() is stopping the 11537140ea19SIdo Yariv * thread via kthread_stop() after calling 1154519cc865SLukas Wunner * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the 1155836557bdSLukas Wunner * oneshot mask bit can be set. 
11563aa551c9SThomas Gleixner */ 11574d1d61a6SOleg Nesterov task_work_cancel(current, irq_thread_dtor); 11583aa551c9SThomas Gleixner return 0; 11593aa551c9SThomas Gleixner } 11603aa551c9SThomas Gleixner 1161a92444c6SThomas Gleixner /** 1162a92444c6SThomas Gleixner * irq_wake_thread - wake the irq thread for the action identified by dev_id 1163a92444c6SThomas Gleixner * @irq: Interrupt line 1164a92444c6SThomas Gleixner * @dev_id: Device identity for which the thread should be woken 1165a92444c6SThomas Gleixner * 1166a92444c6SThomas Gleixner */ 1167a92444c6SThomas Gleixner void irq_wake_thread(unsigned int irq, void *dev_id) 1168a92444c6SThomas Gleixner { 1169a92444c6SThomas Gleixner struct irq_desc *desc = irq_to_desc(irq); 1170a92444c6SThomas Gleixner struct irqaction *action; 1171a92444c6SThomas Gleixner unsigned long flags; 1172a92444c6SThomas Gleixner 1173a92444c6SThomas Gleixner if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) 1174a92444c6SThomas Gleixner return; 1175a92444c6SThomas Gleixner 1176a92444c6SThomas Gleixner raw_spin_lock_irqsave(&desc->lock, flags); 1177f944b5a7SDaniel Lezcano for_each_action_of_desc(desc, action) { 1178a92444c6SThomas Gleixner if (action->dev_id == dev_id) { 1179a92444c6SThomas Gleixner if (action->thread) 1180a92444c6SThomas Gleixner __irq_wake_thread(desc, action); 1181a92444c6SThomas Gleixner break; 1182a92444c6SThomas Gleixner } 1183a92444c6SThomas Gleixner } 1184a92444c6SThomas Gleixner raw_spin_unlock_irqrestore(&desc->lock, flags); 1185a92444c6SThomas Gleixner } 1186a92444c6SThomas Gleixner EXPORT_SYMBOL_GPL(irq_wake_thread); 1187a92444c6SThomas Gleixner 11882a1d3ab8SThomas Gleixner static int irq_setup_forced_threading(struct irqaction *new) 11898d32a307SThomas Gleixner { 11908d32a307SThomas Gleixner if (!force_irqthreads) 11912a1d3ab8SThomas Gleixner return 0; 11928d32a307SThomas Gleixner if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) 11932a1d3ab8SThomas Gleixner return 0; 11948d32a307SThomas Gleixner 1195d1f0301bSThomas Gleixner /* 1196d1f0301bSThomas Gleixner * No further action required for interrupts which are requested as 1197d1f0301bSThomas Gleixner * threaded interrupts already 1198d1f0301bSThomas Gleixner */ 1199d1f0301bSThomas Gleixner if (new->handler == irq_default_primary_handler) 1200d1f0301bSThomas Gleixner return 0; 1201d1f0301bSThomas Gleixner 12028d32a307SThomas Gleixner new->flags |= IRQF_ONESHOT; 12038d32a307SThomas Gleixner 12042a1d3ab8SThomas Gleixner /* 12052a1d3ab8SThomas Gleixner * Handle the case where we have a real primary handler and a 12062a1d3ab8SThomas Gleixner * thread handler. We force thread them as well by creating a 12072a1d3ab8SThomas Gleixner * secondary action. 
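 * After this transformation the original primary handler runs as the
 * thread_fn of the primary (forced) thread, and the original thread_fn
 * runs from the secondary thread once the primary returns
 * IRQ_WAKE_THREAD; see the assignments below.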
12082a1d3ab8SThomas Gleixner */ 1209d1f0301bSThomas Gleixner if (new->handler && new->thread_fn) { 12102a1d3ab8SThomas Gleixner /* Allocate the secondary action */ 12112a1d3ab8SThomas Gleixner new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 12122a1d3ab8SThomas Gleixner if (!new->secondary) 12132a1d3ab8SThomas Gleixner return -ENOMEM; 12142a1d3ab8SThomas Gleixner new->secondary->handler = irq_forced_secondary_handler; 12152a1d3ab8SThomas Gleixner new->secondary->thread_fn = new->thread_fn; 12162a1d3ab8SThomas Gleixner new->secondary->dev_id = new->dev_id; 12172a1d3ab8SThomas Gleixner new->secondary->irq = new->irq; 12182a1d3ab8SThomas Gleixner new->secondary->name = new->name; 12192a1d3ab8SThomas Gleixner } 12202a1d3ab8SThomas Gleixner /* Deal with the primary handler */ 12218d32a307SThomas Gleixner set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); 12228d32a307SThomas Gleixner new->thread_fn = new->handler; 12238d32a307SThomas Gleixner new->handler = irq_default_primary_handler; 12242a1d3ab8SThomas Gleixner return 0; 12258d32a307SThomas Gleixner } 12268d32a307SThomas Gleixner 1227c1bacbaeSThomas Gleixner static int irq_request_resources(struct irq_desc *desc) 1228c1bacbaeSThomas Gleixner { 1229c1bacbaeSThomas Gleixner struct irq_data *d = &desc->irq_data; 1230c1bacbaeSThomas Gleixner struct irq_chip *c = d->chip; 1231c1bacbaeSThomas Gleixner 1232c1bacbaeSThomas Gleixner return c->irq_request_resources ? c->irq_request_resources(d) : 0; 1233c1bacbaeSThomas Gleixner } 1234c1bacbaeSThomas Gleixner 1235c1bacbaeSThomas Gleixner static void irq_release_resources(struct irq_desc *desc) 1236c1bacbaeSThomas Gleixner { 1237c1bacbaeSThomas Gleixner struct irq_data *d = &desc->irq_data; 1238c1bacbaeSThomas Gleixner struct irq_chip *c = d->chip; 1239c1bacbaeSThomas Gleixner 1240c1bacbaeSThomas Gleixner if (c->irq_release_resources) 1241c1bacbaeSThomas Gleixner c->irq_release_resources(d); 1242c1bacbaeSThomas Gleixner } 1243c1bacbaeSThomas Gleixner 1244b525903cSJulien Thierry static bool irq_supports_nmi(struct irq_desc *desc) 1245b525903cSJulien Thierry { 1246b525903cSJulien Thierry struct irq_data *d = irq_desc_get_irq_data(desc); 1247b525903cSJulien Thierry 1248b525903cSJulien Thierry #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 1249b525903cSJulien Thierry /* Only IRQs directly managed by the root irqchip can be set as NMI */ 1250b525903cSJulien Thierry if (d->parent_data) 1251b525903cSJulien Thierry return false; 1252b525903cSJulien Thierry #endif 1253b525903cSJulien Thierry /* Don't support NMIs for chips behind a slow bus */ 1254b525903cSJulien Thierry if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock) 1255b525903cSJulien Thierry return false; 1256b525903cSJulien Thierry 1257b525903cSJulien Thierry return d->chip->flags & IRQCHIP_SUPPORTS_NMI; 1258b525903cSJulien Thierry } 1259b525903cSJulien Thierry 1260b525903cSJulien Thierry static int irq_nmi_setup(struct irq_desc *desc) 1261b525903cSJulien Thierry { 1262b525903cSJulien Thierry struct irq_data *d = irq_desc_get_irq_data(desc); 1263b525903cSJulien Thierry struct irq_chip *c = d->chip; 1264b525903cSJulien Thierry 1265b525903cSJulien Thierry return c->irq_nmi_setup ? 
c->irq_nmi_setup(d) : -EINVAL;
1266b525903cSJulien Thierry }
1267b525903cSJulien Thierry
1268b525903cSJulien Thierry static void irq_nmi_teardown(struct irq_desc *desc)
1269b525903cSJulien Thierry {
1270b525903cSJulien Thierry struct irq_data *d = irq_desc_get_irq_data(desc);
1271b525903cSJulien Thierry struct irq_chip *c = d->chip;
1272b525903cSJulien Thierry
1273b525903cSJulien Thierry if (c->irq_nmi_teardown)
1274b525903cSJulien Thierry c->irq_nmi_teardown(d);
1275b525903cSJulien Thierry }
1276b525903cSJulien Thierry
12772a1d3ab8SThomas Gleixner static int
12782a1d3ab8SThomas Gleixner setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
12792a1d3ab8SThomas Gleixner {
12802a1d3ab8SThomas Gleixner struct task_struct *t;
12812a1d3ab8SThomas Gleixner struct sched_param param = {
12822a1d3ab8SThomas Gleixner .sched_priority = MAX_USER_RT_PRIO/2,
12832a1d3ab8SThomas Gleixner };
12842a1d3ab8SThomas Gleixner
12852a1d3ab8SThomas Gleixner if (!secondary) {
12862a1d3ab8SThomas Gleixner t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
12872a1d3ab8SThomas Gleixner new->name);
12882a1d3ab8SThomas Gleixner } else {
12892a1d3ab8SThomas Gleixner t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
12902a1d3ab8SThomas Gleixner new->name);
12912a1d3ab8SThomas Gleixner param.sched_priority -= 1;
12922a1d3ab8SThomas Gleixner }
12932a1d3ab8SThomas Gleixner
12942a1d3ab8SThomas Gleixner if (IS_ERR(t))
12952a1d3ab8SThomas Gleixner return PTR_ERR(t);
12962a1d3ab8SThomas Gleixner
12972a1d3ab8SThomas Gleixner sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
12982a1d3ab8SThomas Gleixner
12992a1d3ab8SThomas Gleixner /*
13002a1d3ab8SThomas Gleixner * We keep the reference to the task struct even if
13012a1d3ab8SThomas Gleixner * the thread dies to avoid that the interrupt code
13022a1d3ab8SThomas Gleixner * references an already freed task_struct.
13032a1d3ab8SThomas Gleixner */
13047b3c92b8SMatthew Wilcox (Oracle) new->thread = get_task_struct(t);
13052a1d3ab8SThomas Gleixner /*
13062a1d3ab8SThomas Gleixner * Tell the thread to set its affinity. This is
13072a1d3ab8SThomas Gleixner * important for shared interrupt handlers as we do
13082a1d3ab8SThomas Gleixner * not invoke setup_affinity() for the secondary
13092a1d3ab8SThomas Gleixner * handlers as everything is already set up. Even for
13102a1d3ab8SThomas Gleixner * interrupts marked with IRQF_NOBALANCING this is
13112a1d3ab8SThomas Gleixner * correct as we want the thread to move to the cpu(s)
13122a1d3ab8SThomas Gleixner * on which the requesting code placed the interrupt.
13132a1d3ab8SThomas Gleixner */
13142a1d3ab8SThomas Gleixner set_bit(IRQTF_AFFINITY, &new->thread_flags);
13152a1d3ab8SThomas Gleixner return 0;
13162a1d3ab8SThomas Gleixner }
13172a1d3ab8SThomas Gleixner
13181da177e4SLinus Torvalds /*
13191da177e4SLinus Torvalds * Internal function to register an irqaction - typically used to
13201da177e4SLinus Torvalds * allocate special interrupts that are part of the architecture.
132119d39a38SThomas Gleixner * 132219d39a38SThomas Gleixner * Locking rules: 132319d39a38SThomas Gleixner * 132419d39a38SThomas Gleixner * desc->request_mutex Provides serialization against a concurrent free_irq() 132519d39a38SThomas Gleixner * chip_bus_lock Provides serialization for slow bus operations 132619d39a38SThomas Gleixner * desc->lock Provides serialization against hard interrupts 132719d39a38SThomas Gleixner * 132819d39a38SThomas Gleixner * chip_bus_lock and desc->lock are sufficient for all other management and 132919d39a38SThomas Gleixner * interrupt related functions. desc->request_mutex solely serializes 133019d39a38SThomas Gleixner * request/free_irq(). 13311da177e4SLinus Torvalds */ 1332d3c60047SThomas Gleixner static int 1333d3c60047SThomas Gleixner __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) 13341da177e4SLinus Torvalds { 1335f17c7545SIngo Molnar struct irqaction *old, **old_ptr; 1336b5faba21SThomas Gleixner unsigned long flags, thread_mask = 0; 13373b8249e7SThomas Gleixner int ret, nested, shared = 0; 13381da177e4SLinus Torvalds 13397d94f7caSYinghai Lu if (!desc) 1340c2b5a251SMatthew Wilcox return -EINVAL; 1341c2b5a251SMatthew Wilcox 13426b8ff312SThomas Gleixner if (desc->irq_data.chip == &no_irq_chip) 13431da177e4SLinus Torvalds return -ENOSYS; 1344b6873807SSebastian Andrzej Siewior if (!try_module_get(desc->owner)) 1345b6873807SSebastian Andrzej Siewior return -ENODEV; 13461da177e4SLinus Torvalds 13472a1d3ab8SThomas Gleixner new->irq = irq; 13482a1d3ab8SThomas Gleixner 13491da177e4SLinus Torvalds /* 13504b357daeSJon Hunter * If the trigger type is not specified by the caller, 13514b357daeSJon Hunter * then use the default for this interrupt. 13524b357daeSJon Hunter */ 13534b357daeSJon Hunter if (!(new->flags & IRQF_TRIGGER_MASK)) 13544b357daeSJon Hunter new->flags |= irqd_get_trigger_type(&desc->irq_data); 13554b357daeSJon Hunter 13564b357daeSJon Hunter /* 1357399b5da2SThomas Gleixner * Check whether the interrupt nests into another interrupt 1358399b5da2SThomas Gleixner * thread. 13593aa551c9SThomas Gleixner */ 13601ccb4e61SThomas Gleixner nested = irq_settings_is_nested_thread(desc); 1361399b5da2SThomas Gleixner if (nested) { 1362b6873807SSebastian Andrzej Siewior if (!new->thread_fn) { 1363b6873807SSebastian Andrzej Siewior ret = -EINVAL; 1364b6873807SSebastian Andrzej Siewior goto out_mput; 1365b6873807SSebastian Andrzej Siewior } 1366399b5da2SThomas Gleixner /* 1367399b5da2SThomas Gleixner * Replace the primary handler which was provided from 1368399b5da2SThomas Gleixner * the driver for non nested interrupt handling by the 1369399b5da2SThomas Gleixner * dummy function which warns when called. 1370399b5da2SThomas Gleixner */ 1371399b5da2SThomas Gleixner new->handler = irq_nested_primary_handler; 13728d32a307SThomas Gleixner } else { 13732a1d3ab8SThomas Gleixner if (irq_settings_can_thread(desc)) { 13742a1d3ab8SThomas Gleixner ret = irq_setup_forced_threading(new); 13752a1d3ab8SThomas Gleixner if (ret) 13762a1d3ab8SThomas Gleixner goto out_mput; 13772a1d3ab8SThomas Gleixner } 1378399b5da2SThomas Gleixner } 1379399b5da2SThomas Gleixner 1380399b5da2SThomas Gleixner /* 1381399b5da2SThomas Gleixner * Create a handler thread when a thread function is supplied 1382399b5da2SThomas Gleixner * and the interrupt does not nest into another interrupt 1383399b5da2SThomas Gleixner * thread. 
1384399b5da2SThomas Gleixner */ 1385399b5da2SThomas Gleixner if (new->thread_fn && !nested) { 13862a1d3ab8SThomas Gleixner ret = setup_irq_thread(new, irq, false); 13872a1d3ab8SThomas Gleixner if (ret) 1388b6873807SSebastian Andrzej Siewior goto out_mput; 13892a1d3ab8SThomas Gleixner if (new->secondary) { 13902a1d3ab8SThomas Gleixner ret = setup_irq_thread(new->secondary, irq, true); 13912a1d3ab8SThomas Gleixner if (ret) 13922a1d3ab8SThomas Gleixner goto out_thread; 1393b6873807SSebastian Andrzej Siewior } 13943aa551c9SThomas Gleixner } 13953aa551c9SThomas Gleixner 13963aa551c9SThomas Gleixner /* 1397dc9b229aSThomas Gleixner * Drivers are often written to work w/o knowledge about the 1398dc9b229aSThomas Gleixner * underlying irq chip implementation, so a request for a 1399dc9b229aSThomas Gleixner * threaded irq without a primary hard irq context handler 1400dc9b229aSThomas Gleixner * requires the ONESHOT flag to be set. Some irq chips like 1401dc9b229aSThomas Gleixner * MSI based interrupts are per se one shot safe. Check the 1402dc9b229aSThomas Gleixner * chip flags, so we can avoid the unmask dance at the end of 1403dc9b229aSThomas Gleixner * the threaded handler for those. 1404dc9b229aSThomas Gleixner */ 1405dc9b229aSThomas Gleixner if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) 1406dc9b229aSThomas Gleixner new->flags &= ~IRQF_ONESHOT; 1407dc9b229aSThomas Gleixner 140819d39a38SThomas Gleixner /* 140919d39a38SThomas Gleixner * Protects against a concurrent __free_irq() call which might wait 1410519cc865SLukas Wunner * for synchronize_hardirq() to complete without holding the optional 1411836557bdSLukas Wunner * chip bus lock and desc->lock. Also protects against handing out 1412836557bdSLukas Wunner * a recycled oneshot thread_mask bit while it's still in use by 1413836557bdSLukas Wunner * its previous owner. 141419d39a38SThomas Gleixner */ 14159114014cSThomas Gleixner mutex_lock(&desc->request_mutex); 141619d39a38SThomas Gleixner 141719d39a38SThomas Gleixner /* 141819d39a38SThomas Gleixner * Acquire bus lock as the irq_request_resources() callback below 141919d39a38SThomas Gleixner * might rely on the serialization or the magic power management 142019d39a38SThomas Gleixner * functions which are abusing the irq_bus_lock() callback, 142119d39a38SThomas Gleixner */ 142219d39a38SThomas Gleixner chip_bus_lock(desc); 142319d39a38SThomas Gleixner 142419d39a38SThomas Gleixner /* First installed action requests resources. */ 142546e48e25SThomas Gleixner if (!desc->action) { 142646e48e25SThomas Gleixner ret = irq_request_resources(desc); 142746e48e25SThomas Gleixner if (ret) { 142846e48e25SThomas Gleixner pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", 142946e48e25SThomas Gleixner new->name, irq, desc->irq_data.chip->name); 143019d39a38SThomas Gleixner goto out_bus_unlock; 143146e48e25SThomas Gleixner } 143246e48e25SThomas Gleixner } 14339114014cSThomas Gleixner 1434dc9b229aSThomas Gleixner /* 14351da177e4SLinus Torvalds * The following block of code has to be executed atomically 143619d39a38SThomas Gleixner * protected against a concurrent interrupt and any of the other 143719d39a38SThomas Gleixner * management calls which are not serialized via 143819d39a38SThomas Gleixner * desc->request_mutex or the optional bus lock. 
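 * At this point the locks are nested in the order given by the locking
 * rules above: desc->request_mutex, then the chip bus lock, then
 * desc->lock.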
14391da177e4SLinus Torvalds */ 1440239007b8SThomas Gleixner raw_spin_lock_irqsave(&desc->lock, flags); 1441f17c7545SIngo Molnar old_ptr = &desc->action; 1442f17c7545SIngo Molnar old = *old_ptr; 144306fcb0c6SIngo Molnar if (old) { 1444e76de9f8SThomas Gleixner /* 1445e76de9f8SThomas Gleixner * Can't share interrupts unless both agree to and are 1446e76de9f8SThomas Gleixner * the same type (level, edge, polarity). So both flag 14473cca53b0SThomas Gleixner * fields must have IRQF_SHARED set and the bits which 14489d591eddSThomas Gleixner * set the trigger type must match. Also all must 14499d591eddSThomas Gleixner * agree on ONESHOT. 1450b525903cSJulien Thierry * Interrupt lines used for NMIs cannot be shared. 1451e76de9f8SThomas Gleixner */ 14524f8413a3SMarc Zyngier unsigned int oldtype; 14534f8413a3SMarc Zyngier 1454b525903cSJulien Thierry if (desc->istate & IRQS_NMI) { 1455b525903cSJulien Thierry pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n", 1456b525903cSJulien Thierry new->name, irq, desc->irq_data.chip->name); 1457b525903cSJulien Thierry ret = -EINVAL; 1458b525903cSJulien Thierry goto out_unlock; 1459b525903cSJulien Thierry } 1460b525903cSJulien Thierry 14614f8413a3SMarc Zyngier /* 14624f8413a3SMarc Zyngier * If nobody did set the configuration before, inherit 14634f8413a3SMarc Zyngier * the one provided by the requester. 14644f8413a3SMarc Zyngier */ 14654f8413a3SMarc Zyngier if (irqd_trigger_type_was_set(&desc->irq_data)) { 14664f8413a3SMarc Zyngier oldtype = irqd_get_trigger_type(&desc->irq_data); 14674f8413a3SMarc Zyngier } else { 14684f8413a3SMarc Zyngier oldtype = new->flags & IRQF_TRIGGER_MASK; 14694f8413a3SMarc Zyngier irqd_set_trigger_type(&desc->irq_data, oldtype); 14704f8413a3SMarc Zyngier } 1471382bd4deSHans de Goede 14723cca53b0SThomas Gleixner if (!((old->flags & new->flags) & IRQF_SHARED) || 1473382bd4deSHans de Goede (oldtype != (new->flags & IRQF_TRIGGER_MASK)) || 1474f5d89470SThomas Gleixner ((old->flags ^ new->flags) & IRQF_ONESHOT)) 1475f5163427SDimitri Sivanich goto mismatch; 1476f5163427SDimitri Sivanich 1477f5163427SDimitri Sivanich /* All handlers must agree on per-cpuness */ 14783cca53b0SThomas Gleixner if ((old->flags & IRQF_PERCPU) != 14793cca53b0SThomas Gleixner (new->flags & IRQF_PERCPU)) 1480f5163427SDimitri Sivanich goto mismatch; 14811da177e4SLinus Torvalds 14821da177e4SLinus Torvalds /* add new interrupt at end of irq queue */ 14831da177e4SLinus Torvalds do { 148452abb700SThomas Gleixner /* 148552abb700SThomas Gleixner * Or all existing action->thread_mask bits, 148652abb700SThomas Gleixner * so we can find the next zero bit for this 148752abb700SThomas Gleixner * new action. 148852abb700SThomas Gleixner */ 1489b5faba21SThomas Gleixner thread_mask |= old->thread_mask; 1490f17c7545SIngo Molnar old_ptr = &old->next; 1491f17c7545SIngo Molnar old = *old_ptr; 14921da177e4SLinus Torvalds } while (old); 14931da177e4SLinus Torvalds shared = 1; 14941da177e4SLinus Torvalds } 14951da177e4SLinus Torvalds 1496b5faba21SThomas Gleixner /* 149752abb700SThomas Gleixner * Setup the thread mask for this irqaction for ONESHOT. For 149852abb700SThomas Gleixner * !ONESHOT irqs the thread mask is 0 so we can avoid a 149952abb700SThomas Gleixner * conditional in irq_wake_thread(). 1500b5faba21SThomas Gleixner */ 150152abb700SThomas Gleixner if (new->flags & IRQF_ONESHOT) { 150252abb700SThomas Gleixner /* 150352abb700SThomas Gleixner * Unlikely to have 32 resp 64 irqs sharing one line, 150452abb700SThomas Gleixner * but who knows. 
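 * (Worked example: with two ONESHOT actions already
 * installed, the accumulated thread_mask is 0x3, so a
 * third action gets 1UL << ffz(0x3), i.e. bit 2.)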
150552abb700SThomas Gleixner */
150652abb700SThomas Gleixner if (thread_mask == ~0UL) {
1507b5faba21SThomas Gleixner ret = -EBUSY;
1508cba4235eSThomas Gleixner goto out_unlock;
1509b5faba21SThomas Gleixner }
151052abb700SThomas Gleixner /*
151152abb700SThomas Gleixner * The thread_mask for the action is or'ed to
151252abb700SThomas Gleixner * desc->threads_active to indicate that the
151352abb700SThomas Gleixner * IRQF_ONESHOT thread handler has been woken, but not
151452abb700SThomas Gleixner * yet finished. The bit is cleared when a thread
151552abb700SThomas Gleixner * completes. When all threads of a shared interrupt
151652abb700SThomas Gleixner * line have completed desc->threads_active becomes
151752abb700SThomas Gleixner * zero and the interrupt line is unmasked. See
151852abb700SThomas Gleixner * handle.c:irq_wake_thread() for further information.
151952abb700SThomas Gleixner *
152052abb700SThomas Gleixner * If no thread is woken by primary (hard irq context)
152152abb700SThomas Gleixner * interrupt handlers, then desc->threads_active is
152252abb700SThomas Gleixner * also checked for zero to unmask the irq line in the
152352abb700SThomas Gleixner * affected hard irq flow handlers
152452abb700SThomas Gleixner * (handle_[fasteoi|level]_irq).
152552abb700SThomas Gleixner *
152652abb700SThomas Gleixner * The new action gets the first zero bit of
152752abb700SThomas Gleixner * thread_mask assigned. See the loop above which or's
152852abb700SThomas Gleixner * all existing action->thread_mask bits.
152952abb700SThomas Gleixner */
1530ffc661c9SRasmus Villemoes new->thread_mask = 1UL << ffz(thread_mask);
15311c6c6952SThomas Gleixner
1532dc9b229aSThomas Gleixner } else if (new->handler == irq_default_primary_handler &&
1533dc9b229aSThomas Gleixner !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
15341c6c6952SThomas Gleixner /*
15351c6c6952SThomas Gleixner * The interrupt was requested with handler = NULL, so
15361c6c6952SThomas Gleixner * we use the default primary handler for it. But it
15371c6c6952SThomas Gleixner * does not have the oneshot flag set. In combination
15381c6c6952SThomas Gleixner * with level interrupts this is deadly, because the
15391c6c6952SThomas Gleixner * default primary handler just wakes the thread, then
15401c6c6952SThomas Gleixner * the irq line is reenabled, but the device still
15411c6c6952SThomas Gleixner * has the level irq asserted. Rinse and repeat....
15421c6c6952SThomas Gleixner *
15431c6c6952SThomas Gleixner * While this works for edge type interrupts, we play
15441c6c6952SThomas Gleixner * it safe and reject unconditionally because we can't
15451c6c6952SThomas Gleixner * say for sure which type this interrupt really
15461c6c6952SThomas Gleixner * has. The type flags are unreliable as the
15471c6c6952SThomas Gleixner * underlying chip implementation can override them.
15481c6c6952SThomas Gleixner */
1549025af39bSLuca Ceresoli pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1550025af39bSLuca Ceresoli new->name, irq);
15511c6c6952SThomas Gleixner ret = -EINVAL;
1552cba4235eSThomas Gleixner goto out_unlock;
155352abb700SThomas Gleixner }
1554b5faba21SThomas Gleixner
15551da177e4SLinus Torvalds if (!shared) {
15563aa551c9SThomas Gleixner init_waitqueue_head(&desc->wait_for_threads);
15573aa551c9SThomas Gleixner
155882736f4dSUwe Kleine-König /* Setup the type (level, edge polarity) if configured: */
155982736f4dSUwe Kleine-König if (new->flags & IRQF_TRIGGER_MASK) {
1560a1ff541aSJiang Liu ret = __irq_set_trigger(desc,
1561f2b662daSDavid Brownell new->flags & IRQF_TRIGGER_MASK);
156282736f4dSUwe Kleine-König
156319d39a38SThomas Gleixner if (ret)
1564cba4235eSThomas Gleixner goto out_unlock;
1565091738a2SThomas Gleixner }
1566f75d222bSAhmed S. Darwish
1567c942cee4SThomas Gleixner /*
1568c942cee4SThomas Gleixner * Activate the interrupt. That activation must happen
1569c942cee4SThomas Gleixner * independently of IRQ_NOAUTOEN. request_irq() can fail
1570c942cee4SThomas Gleixner * and the callers are supposed to handle
1571c942cee4SThomas Gleixner * that. enable_irq() of an interrupt requested with
1572c942cee4SThomas Gleixner * IRQ_NOAUTOEN is not supposed to fail. The activation
1573c942cee4SThomas Gleixner * keeps it in shutdown mode, it merely associates
1574c942cee4SThomas Gleixner * resources if necessary and if that's not possible it
1575c942cee4SThomas Gleixner * fails. Interrupts which are in managed shutdown mode
1576c942cee4SThomas Gleixner * will simply ignore that activation request.
1577c942cee4SThomas Gleixner */
1578c942cee4SThomas Gleixner ret = irq_activate(desc);
1579c942cee4SThomas Gleixner if (ret)
1580c942cee4SThomas Gleixner goto out_unlock;
1581c942cee4SThomas Gleixner
1582009b4c3bSThomas Gleixner desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
158332f4125eSThomas Gleixner IRQS_ONESHOT | IRQS_WAITING);
158432f4125eSThomas Gleixner irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
158594d39e1fSThomas Gleixner
1586a005677bSThomas Gleixner if (new->flags & IRQF_PERCPU) {
1587a005677bSThomas Gleixner irqd_set(&desc->irq_data, IRQD_PER_CPU);
1588a005677bSThomas Gleixner irq_settings_set_per_cpu(desc);
1589a005677bSThomas Gleixner }
15906a58fb3bSThomas Gleixner
1591b25c340cSThomas Gleixner if (new->flags & IRQF_ONESHOT)
15923d67baecSThomas Gleixner desc->istate |= IRQS_ONESHOT;
1593b25c340cSThomas Gleixner
15942e051552SThomas Gleixner /* Exclude IRQ from balancing if requested */
15952e051552SThomas Gleixner if (new->flags & IRQF_NOBALANCING) {
15962e051552SThomas Gleixner irq_settings_set_no_balancing(desc);
15972e051552SThomas Gleixner irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
15982e051552SThomas Gleixner }
15992e051552SThomas Gleixner
160004c848d3SThomas Gleixner if (irq_settings_can_autoenable(desc)) {
16014cde9c6bSThomas Gleixner irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
160204c848d3SThomas Gleixner } else {
160304c848d3SThomas Gleixner /*
160404c848d3SThomas Gleixner * Shared interrupts do not go well with disabling
160504c848d3SThomas Gleixner * auto enable. The sharing interrupt might request
160604c848d3SThomas Gleixner * it while it's still disabled and then wait for
160704c848d3SThomas Gleixner * interrupts forever.
160804c848d3SThomas Gleixner */ 160904c848d3SThomas Gleixner WARN_ON_ONCE(new->flags & IRQF_SHARED); 1610e76de9f8SThomas Gleixner /* Undo nested disables: */ 1611e76de9f8SThomas Gleixner desc->depth = 1; 161204c848d3SThomas Gleixner } 161318404756SMax Krasnyansky 1614876dbd4cSThomas Gleixner } else if (new->flags & IRQF_TRIGGER_MASK) { 1615876dbd4cSThomas Gleixner unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; 16167ee7e87dSThomas Gleixner unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); 1617876dbd4cSThomas Gleixner 1618876dbd4cSThomas Gleixner if (nmsk != omsk) 1619876dbd4cSThomas Gleixner /* hope the handler works with current trigger mode */ 1620a395d6a7SJoe Perches pr_warn("irq %d uses trigger mode %u; requested %u\n", 16217ee7e87dSThomas Gleixner irq, omsk, nmsk); 162294d39e1fSThomas Gleixner } 162382736f4dSUwe Kleine-König 1624f17c7545SIngo Molnar *old_ptr = new; 162582736f4dSUwe Kleine-König 1626cab303beSThomas Gleixner irq_pm_install_action(desc, new); 1627cab303beSThomas Gleixner 16288528b0f1SLinus Torvalds /* Reset broken irq detection when installing new handler */ 16298528b0f1SLinus Torvalds desc->irq_count = 0; 16308528b0f1SLinus Torvalds desc->irqs_unhandled = 0; 16311adb0850SThomas Gleixner 16321adb0850SThomas Gleixner /* 16331adb0850SThomas Gleixner * Check whether we disabled the irq via the spurious handler 16341adb0850SThomas Gleixner * before. Reenable it and give it another chance. 16351adb0850SThomas Gleixner */ 16367acdd53eSThomas Gleixner if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { 16377acdd53eSThomas Gleixner desc->istate &= ~IRQS_SPURIOUS_DISABLED; 163879ff1cdaSJiang Liu __enable_irq(desc); 16391adb0850SThomas Gleixner } 16401adb0850SThomas Gleixner 1641239007b8SThomas Gleixner raw_spin_unlock_irqrestore(&desc->lock, flags); 16423a90795eSThomas Gleixner chip_bus_sync_unlock(desc); 16439114014cSThomas Gleixner mutex_unlock(&desc->request_mutex); 16441da177e4SLinus Torvalds 1645b2d3d61aSDaniel Lezcano irq_setup_timings(desc, new); 1646b2d3d61aSDaniel Lezcano 164769ab8494SThomas Gleixner /* 164869ab8494SThomas Gleixner * Strictly no need to wake it up, but hung_task complains 164969ab8494SThomas Gleixner * when no hard interrupt wakes the thread up. 165069ab8494SThomas Gleixner */ 165169ab8494SThomas Gleixner if (new->thread) 165269ab8494SThomas Gleixner wake_up_process(new->thread); 16532a1d3ab8SThomas Gleixner if (new->secondary) 16542a1d3ab8SThomas Gleixner wake_up_process(new->secondary->thread); 165569ab8494SThomas Gleixner 16562c6927a3SYinghai Lu register_irq_proc(irq, desc); 16571da177e4SLinus Torvalds new->dir = NULL; 16581da177e4SLinus Torvalds register_handler_proc(irq, new); 16591da177e4SLinus Torvalds return 0; 1660f5163427SDimitri Sivanich 1661f5163427SDimitri Sivanich mismatch: 16623cca53b0SThomas Gleixner if (!(new->flags & IRQF_PROBE_SHARED)) { 166397fd75b7SAndrew Morton pr_err("Flags mismatch irq %d. %08x (%s) vs. 
%08x (%s)\n", 1664f5d89470SThomas Gleixner irq, new->flags, new->name, old->flags, old->name); 1665f5d89470SThomas Gleixner #ifdef CONFIG_DEBUG_SHIRQ 1666f5163427SDimitri Sivanich dump_stack(); 16673f050447SAlan Cox #endif 1668f5d89470SThomas Gleixner } 16693aa551c9SThomas Gleixner ret = -EBUSY; 16703aa551c9SThomas Gleixner 1671cba4235eSThomas Gleixner out_unlock: 16721c389795SDan Carpenter raw_spin_unlock_irqrestore(&desc->lock, flags); 16733b8249e7SThomas Gleixner 167446e48e25SThomas Gleixner if (!desc->action) 167546e48e25SThomas Gleixner irq_release_resources(desc); 167619d39a38SThomas Gleixner out_bus_unlock: 167719d39a38SThomas Gleixner chip_bus_sync_unlock(desc); 16789114014cSThomas Gleixner mutex_unlock(&desc->request_mutex); 16799114014cSThomas Gleixner 16803aa551c9SThomas Gleixner out_thread: 16813aa551c9SThomas Gleixner if (new->thread) { 16823aa551c9SThomas Gleixner struct task_struct *t = new->thread; 16833aa551c9SThomas Gleixner 16843aa551c9SThomas Gleixner new->thread = NULL; 16853aa551c9SThomas Gleixner kthread_stop(t); 16863aa551c9SThomas Gleixner put_task_struct(t); 16873aa551c9SThomas Gleixner } 16882a1d3ab8SThomas Gleixner if (new->secondary && new->secondary->thread) { 16892a1d3ab8SThomas Gleixner struct task_struct *t = new->secondary->thread; 16902a1d3ab8SThomas Gleixner 16912a1d3ab8SThomas Gleixner new->secondary->thread = NULL; 16922a1d3ab8SThomas Gleixner kthread_stop(t); 16932a1d3ab8SThomas Gleixner put_task_struct(t); 16942a1d3ab8SThomas Gleixner } 1695b6873807SSebastian Andrzej Siewior out_mput: 1696b6873807SSebastian Andrzej Siewior module_put(desc->owner); 16973aa551c9SThomas Gleixner return ret; 16981da177e4SLinus Torvalds } 16991da177e4SLinus Torvalds 17001da177e4SLinus Torvalds /** 1701d3c60047SThomas Gleixner * setup_irq - setup an interrupt 1702d3c60047SThomas Gleixner * @irq: Interrupt line to setup 1703d3c60047SThomas Gleixner * @act: irqaction for the interrupt 1704d3c60047SThomas Gleixner * 1705d3c60047SThomas Gleixner * Used to statically setup interrupts in the early boot process. 1706d3c60047SThomas Gleixner */ 1707d3c60047SThomas Gleixner int setup_irq(unsigned int irq, struct irqaction *act) 1708d3c60047SThomas Gleixner { 1709986c011dSDavid Daney int retval; 1710d3c60047SThomas Gleixner struct irq_desc *desc = irq_to_desc(irq); 1711d3c60047SThomas Gleixner 17129b5d585dSJon Hunter if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) 171331d9d9b6SMarc Zyngier return -EINVAL; 1714be45beb2SJon Hunter 1715be45beb2SJon Hunter retval = irq_chip_pm_get(&desc->irq_data); 1716be45beb2SJon Hunter if (retval < 0) 1717be45beb2SJon Hunter return retval; 1718be45beb2SJon Hunter 1719986c011dSDavid Daney retval = __setup_irq(irq, desc, act); 1720986c011dSDavid Daney 1721be45beb2SJon Hunter if (retval) 1722be45beb2SJon Hunter irq_chip_pm_put(&desc->irq_data); 1723be45beb2SJon Hunter 1724986c011dSDavid Daney return retval; 1725d3c60047SThomas Gleixner } 1726eb53b4e8SMagnus Damm EXPORT_SYMBOL_GPL(setup_irq); 1727d3c60047SThomas Gleixner 1728cbf94f06SMagnus Damm /* 1729cbf94f06SMagnus Damm * Internal function to unregister an irqaction - used to free 1730cbf94f06SMagnus Damm * regular and special interrupts that are part of the architecture. 
17311da177e4SLinus Torvalds */ 173283ac4ca9SUwe Kleine König static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) 17331da177e4SLinus Torvalds { 173483ac4ca9SUwe Kleine König unsigned irq = desc->irq_data.irq; 1735f17c7545SIngo Molnar struct irqaction *action, **action_ptr; 17361da177e4SLinus Torvalds unsigned long flags; 17371da177e4SLinus Torvalds 1738ae88a23bSIngo Molnar WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); 17397d94f7caSYinghai Lu 17409114014cSThomas Gleixner mutex_lock(&desc->request_mutex); 1741abc7e40cSThomas Gleixner chip_bus_lock(desc); 1742239007b8SThomas Gleixner raw_spin_lock_irqsave(&desc->lock, flags); 1743ae88a23bSIngo Molnar 1744ae88a23bSIngo Molnar /* 1745ae88a23bSIngo Molnar * There can be multiple actions per IRQ descriptor, find the right 1746ae88a23bSIngo Molnar * one based on the dev_id: 1747ae88a23bSIngo Molnar */ 1748f17c7545SIngo Molnar action_ptr = &desc->action; 17491da177e4SLinus Torvalds for (;;) { 1750f17c7545SIngo Molnar action = *action_ptr; 17511da177e4SLinus Torvalds 1752ae88a23bSIngo Molnar if (!action) { 1753ae88a23bSIngo Molnar WARN(1, "Trying to free already-free IRQ %d\n", irq); 1754239007b8SThomas Gleixner raw_spin_unlock_irqrestore(&desc->lock, flags); 1755abc7e40cSThomas Gleixner chip_bus_sync_unlock(desc); 175619d39a38SThomas Gleixner mutex_unlock(&desc->request_mutex); 1757f21cfb25SMagnus Damm return NULL; 1758ae88a23bSIngo Molnar } 17591da177e4SLinus Torvalds 17608316e381SIngo Molnar if (action->dev_id == dev_id) 1761ae88a23bSIngo Molnar break; 1762f17c7545SIngo Molnar action_ptr = &action->next; 1763ae88a23bSIngo Molnar } 1764ae88a23bSIngo Molnar 1765ae88a23bSIngo Molnar /* Found it - now remove it from the list of entries: */ 1766f17c7545SIngo Molnar *action_ptr = action->next; 1767dbce706eSPaolo 'Blaisorblade' Giarrusso 1768cab303beSThomas Gleixner irq_pm_remove_action(desc, action); 1769cab303beSThomas Gleixner 1770ae88a23bSIngo Molnar /* If this was the last handler, shut down the IRQ line: */ 1771c1bacbaeSThomas Gleixner if (!desc->action) { 1772e9849777SThomas Gleixner irq_settings_clr_disable_unlazy(desc); 17734001d8e8SThomas Gleixner /* Only shutdown. Deactivate after synchronize_hardirq() */ 177446999238SThomas Gleixner irq_shutdown(desc); 1775c1bacbaeSThomas Gleixner } 17763aa551c9SThomas Gleixner 1777e7a297b0SPeter P Waskiewicz Jr #ifdef CONFIG_SMP 1778e7a297b0SPeter P Waskiewicz Jr /* make sure affinity_hint is cleaned up */ 1779e7a297b0SPeter P Waskiewicz Jr if (WARN_ON_ONCE(desc->affinity_hint)) 1780e7a297b0SPeter P Waskiewicz Jr desc->affinity_hint = NULL; 1781e7a297b0SPeter P Waskiewicz Jr #endif 1782e7a297b0SPeter P Waskiewicz Jr 1783239007b8SThomas Gleixner raw_spin_unlock_irqrestore(&desc->lock, flags); 178419d39a38SThomas Gleixner /* 178519d39a38SThomas Gleixner * Drop bus_lock here so the changes which were done in the chip 178619d39a38SThomas Gleixner * callbacks above are synced out to the irq chips which hang 1787519cc865SLukas Wunner * behind a slow bus (I2C, SPI) before calling synchronize_hardirq(). 178819d39a38SThomas Gleixner * 178919d39a38SThomas Gleixner * Aside of that the bus_lock can also be taken from the threaded 179019d39a38SThomas Gleixner * handler in irq_finalize_oneshot() which results in a deadlock 1791519cc865SLukas Wunner * because kthread_stop() would wait forever for the thread to 179219d39a38SThomas Gleixner * complete, which is blocked on the bus lock. 
179319d39a38SThomas Gleixner *
179419d39a38SThomas Gleixner * The still held desc->request_mutex protects against a
179519d39a38SThomas Gleixner * concurrent request_irq() of this irq so the release of resources
179619d39a38SThomas Gleixner * and timing data is properly serialized.
179719d39a38SThomas Gleixner */
1798abc7e40cSThomas Gleixner chip_bus_sync_unlock(desc);
1799ae88a23bSIngo Molnar
18001da177e4SLinus Torvalds unregister_handler_proc(irq, action);
18011da177e4SLinus Torvalds
180262e04686SThomas Gleixner /*
180362e04686SThomas Gleixner * Make sure it's not being used on another CPU and if the chip
180462e04686SThomas Gleixner * supports it also make sure that there is no (not yet serviced)
180562e04686SThomas Gleixner * interrupt in flight at the hardware level.
180662e04686SThomas Gleixner */
180762e04686SThomas Gleixner __synchronize_hardirq(desc, true);
1808ae88a23bSIngo Molnar
18091d99493bSDavid Woodhouse #ifdef CONFIG_DEBUG_SHIRQ
18101d99493bSDavid Woodhouse /*
1811ae88a23bSIngo Molnar * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1812ae88a23bSIngo Molnar * event to happen even while it's being freed, so let's make sure that
1813ae88a23bSIngo Molnar * is so by doing an extra call to the handler ....
1814ae88a23bSIngo Molnar *
1815ae88a23bSIngo Molnar * ( We do this after actually deregistering it, to make sure that a
18160a13ec0bSJonathan Neuschäfer * 'real' IRQ doesn't run in parallel with our fake. )
18171d99493bSDavid Woodhouse */
18181d99493bSDavid Woodhouse if (action->flags & IRQF_SHARED) {
18191d99493bSDavid Woodhouse local_irq_save(flags);
18201d99493bSDavid Woodhouse action->handler(irq, dev_id);
18211d99493bSDavid Woodhouse local_irq_restore(flags);
18221d99493bSDavid Woodhouse }
18231d99493bSDavid Woodhouse #endif
18242d860ad7SLinus Torvalds
1825519cc865SLukas Wunner /*
1826519cc865SLukas Wunner * The action has already been removed above, but the thread writes
1827519cc865SLukas Wunner * its oneshot mask bit when it completes. request_mutex is, however,
1828519cc865SLukas Wunner * held across this, which prevents __setup_irq() from handing out
1829519cc865SLukas Wunner * the same bit to a newly requested action.
1830519cc865SLukas Wunner */
18312d860ad7SLinus Torvalds if (action->thread) {
18322d860ad7SLinus Torvalds kthread_stop(action->thread);
18332d860ad7SLinus Torvalds put_task_struct(action->thread);
18342a1d3ab8SThomas Gleixner if (action->secondary && action->secondary->thread) {
18352a1d3ab8SThomas Gleixner kthread_stop(action->secondary->thread);
18362a1d3ab8SThomas Gleixner put_task_struct(action->secondary->thread);
18372a1d3ab8SThomas Gleixner }
18382d860ad7SLinus Torvalds }
18392d860ad7SLinus Torvalds
184019d39a38SThomas Gleixner /* Last action releases resources */
18412343877fSThomas Gleixner if (!desc->action) {
184219d39a38SThomas Gleixner /*
184319d39a38SThomas Gleixner * Reacquire the bus lock as irq_release_resources() might
184419d39a38SThomas Gleixner * require it to deallocate resources over the slow bus.
184519d39a38SThomas Gleixner */
184619d39a38SThomas Gleixner chip_bus_lock(desc);
18474001d8e8SThomas Gleixner /*
18484001d8e8SThomas Gleixner * There is no interrupt on the fly anymore. Deactivate it
18494001d8e8SThomas Gleixner * completely.
18504001d8e8SThomas Gleixner */ 18514001d8e8SThomas Gleixner raw_spin_lock_irqsave(&desc->lock, flags); 18524001d8e8SThomas Gleixner irq_domain_deactivate_irq(&desc->irq_data); 18534001d8e8SThomas Gleixner raw_spin_unlock_irqrestore(&desc->lock, flags); 18544001d8e8SThomas Gleixner 185546e48e25SThomas Gleixner irq_release_resources(desc); 185619d39a38SThomas Gleixner chip_bus_sync_unlock(desc); 18572343877fSThomas Gleixner irq_remove_timings(desc); 18582343877fSThomas Gleixner } 185946e48e25SThomas Gleixner 18609114014cSThomas Gleixner mutex_unlock(&desc->request_mutex); 18619114014cSThomas Gleixner 1862be45beb2SJon Hunter irq_chip_pm_put(&desc->irq_data); 1863b6873807SSebastian Andrzej Siewior module_put(desc->owner); 18642a1d3ab8SThomas Gleixner kfree(action->secondary); 1865f21cfb25SMagnus Damm return action; 1866f21cfb25SMagnus Damm } 18671da177e4SLinus Torvalds 18681da177e4SLinus Torvalds /** 1869cbf94f06SMagnus Damm * remove_irq - free an interrupt 1870cbf94f06SMagnus Damm * @irq: Interrupt line to free 1871cbf94f06SMagnus Damm * @act: irqaction for the interrupt 1872cbf94f06SMagnus Damm * 1873cbf94f06SMagnus Damm * Used to remove interrupts statically setup by the early boot process. 1874cbf94f06SMagnus Damm */ 1875cbf94f06SMagnus Damm void remove_irq(unsigned int irq, struct irqaction *act) 1876cbf94f06SMagnus Damm { 187731d9d9b6SMarc Zyngier struct irq_desc *desc = irq_to_desc(irq); 187831d9d9b6SMarc Zyngier 187931d9d9b6SMarc Zyngier if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) 188083ac4ca9SUwe Kleine König __free_irq(desc, act->dev_id); 1881cbf94f06SMagnus Damm } 1882eb53b4e8SMagnus Damm EXPORT_SYMBOL_GPL(remove_irq); 1883cbf94f06SMagnus Damm 1884cbf94f06SMagnus Damm /** 1885f21cfb25SMagnus Damm * free_irq - free an interrupt allocated with request_irq 18861da177e4SLinus Torvalds * @irq: Interrupt line to free 18871da177e4SLinus Torvalds * @dev_id: Device identity to free 18881da177e4SLinus Torvalds * 18891da177e4SLinus Torvalds * Remove an interrupt handler. The handler is removed and if the 18901da177e4SLinus Torvalds * interrupt line is no longer in use by any driver it is disabled. 18911da177e4SLinus Torvalds * On a shared IRQ the caller must ensure the interrupt is disabled 18921da177e4SLinus Torvalds * on the card it drives before calling this function. The function 18931da177e4SLinus Torvalds * does not return until any executing interrupts for this IRQ 18941da177e4SLinus Torvalds * have completed. 18951da177e4SLinus Torvalds * 18961da177e4SLinus Torvalds * This function must not be called from interrupt context. 189725ce4be7SChristoph Hellwig * 189825ce4be7SChristoph Hellwig * Returns the devname argument passed to request_irq. 
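 *
 * A minimal usage sketch (editorial example; "dev" stands for a
 * hypothetical driver structure whose address was used as the
 * dev_id cookie when the interrupt was requested):
 *
 *	const char *name = free_irq(dev->irq, dev);
 *	pr_debug("released IRQ handler \"%s\"\n", name);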
18991da177e4SLinus Torvalds */ 190025ce4be7SChristoph Hellwig const void *free_irq(unsigned int irq, void *dev_id) 19011da177e4SLinus Torvalds { 190270aedd24SThomas Gleixner struct irq_desc *desc = irq_to_desc(irq); 190325ce4be7SChristoph Hellwig struct irqaction *action; 190425ce4be7SChristoph Hellwig const char *devname; 190570aedd24SThomas Gleixner 190631d9d9b6SMarc Zyngier if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) 190725ce4be7SChristoph Hellwig return NULL; 190870aedd24SThomas Gleixner 1909cd7eab44SBen Hutchings #ifdef CONFIG_SMP 1910cd7eab44SBen Hutchings if (WARN_ON(desc->affinity_notify)) 1911cd7eab44SBen Hutchings desc->affinity_notify = NULL; 1912cd7eab44SBen Hutchings #endif 1913cd7eab44SBen Hutchings 191483ac4ca9SUwe Kleine König action = __free_irq(desc, dev_id); 19152827a418SAlexandru Moise 19162827a418SAlexandru Moise if (!action) 19172827a418SAlexandru Moise return NULL; 19182827a418SAlexandru Moise 191925ce4be7SChristoph Hellwig devname = action->name; 192025ce4be7SChristoph Hellwig kfree(action); 192125ce4be7SChristoph Hellwig return devname; 19221da177e4SLinus Torvalds } 19231da177e4SLinus Torvalds EXPORT_SYMBOL(free_irq); 19241da177e4SLinus Torvalds 1925b525903cSJulien Thierry /* This function must be called with desc->lock held */ 1926b525903cSJulien Thierry static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc) 1927b525903cSJulien Thierry { 1928b525903cSJulien Thierry const char *devname = NULL; 1929b525903cSJulien Thierry 1930b525903cSJulien Thierry desc->istate &= ~IRQS_NMI; 1931b525903cSJulien Thierry 1932b525903cSJulien Thierry if (!WARN_ON(desc->action == NULL)) { 1933b525903cSJulien Thierry irq_pm_remove_action(desc, desc->action); 1934b525903cSJulien Thierry devname = desc->action->name; 1935b525903cSJulien Thierry unregister_handler_proc(irq, desc->action); 1936b525903cSJulien Thierry 1937b525903cSJulien Thierry kfree(desc->action); 1938b525903cSJulien Thierry desc->action = NULL; 1939b525903cSJulien Thierry } 1940b525903cSJulien Thierry 1941b525903cSJulien Thierry irq_settings_clr_disable_unlazy(desc); 19424001d8e8SThomas Gleixner irq_shutdown_and_deactivate(desc); 1943b525903cSJulien Thierry 1944b525903cSJulien Thierry irq_release_resources(desc); 1945b525903cSJulien Thierry 1946b525903cSJulien Thierry irq_chip_pm_put(&desc->irq_data); 1947b525903cSJulien Thierry module_put(desc->owner); 1948b525903cSJulien Thierry 1949b525903cSJulien Thierry return devname; 1950b525903cSJulien Thierry } 1951b525903cSJulien Thierry 1952b525903cSJulien Thierry const void *free_nmi(unsigned int irq, void *dev_id) 1953b525903cSJulien Thierry { 1954b525903cSJulien Thierry struct irq_desc *desc = irq_to_desc(irq); 1955b525903cSJulien Thierry unsigned long flags; 1956b525903cSJulien Thierry const void *devname; 1957b525903cSJulien Thierry 1958b525903cSJulien Thierry if (!desc || WARN_ON(!(desc->istate & IRQS_NMI))) 1959b525903cSJulien Thierry return NULL; 1960b525903cSJulien Thierry 1961b525903cSJulien Thierry if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) 1962b525903cSJulien Thierry return NULL; 1963b525903cSJulien Thierry 1964b525903cSJulien Thierry /* NMI still enabled */ 1965b525903cSJulien Thierry if (WARN_ON(desc->depth == 0)) 1966b525903cSJulien Thierry disable_nmi_nosync(irq); 1967b525903cSJulien Thierry 1968b525903cSJulien Thierry raw_spin_lock_irqsave(&desc->lock, flags); 1969b525903cSJulien Thierry 1970b525903cSJulien Thierry irq_nmi_teardown(desc); 1971b525903cSJulien Thierry devname = __cleanup_nmi(irq, desc); 1972b525903cSJulien 
Thierry 1973b525903cSJulien Thierry raw_spin_unlock_irqrestore(&desc->lock, flags); 1974b525903cSJulien Thierry 1975b525903cSJulien Thierry return devname; 1976b525903cSJulien Thierry } 1977b525903cSJulien Thierry 19781da177e4SLinus Torvalds /** 19793aa551c9SThomas Gleixner * request_threaded_irq - allocate an interrupt line 19801da177e4SLinus Torvalds * @irq: Interrupt line to allocate 19813aa551c9SThomas Gleixner * @handler: Function to be called when the IRQ occurs. 19823aa551c9SThomas Gleixner * Primary handler for threaded interrupts 1983b25c340cSThomas Gleixner * If NULL and thread_fn != NULL the default 1984b25c340cSThomas Gleixner * primary handler is installed 19853aa551c9SThomas Gleixner * @thread_fn: Function called from the irq handler thread 19863aa551c9SThomas Gleixner * If NULL, no irq thread is created 19871da177e4SLinus Torvalds * @irqflags: Interrupt type flags 19881da177e4SLinus Torvalds * @devname: An ascii name for the claiming device 19891da177e4SLinus Torvalds * @dev_id: A cookie passed back to the handler function 19901da177e4SLinus Torvalds * 19911da177e4SLinus Torvalds * This call allocates interrupt resources and enables the 19921da177e4SLinus Torvalds * interrupt line and IRQ handling. From the point this 19931da177e4SLinus Torvalds * call is made your handler function may be invoked. Since 19941da177e4SLinus Torvalds * your handler function must clear any interrupt the board 19951da177e4SLinus Torvalds * raises, you must take care both to initialise your hardware 19961da177e4SLinus Torvalds * and to set up the interrupt handler in the right order. 19971da177e4SLinus Torvalds * 19983aa551c9SThomas Gleixner * If you want to set up a threaded irq handler for your device 19996d21af4fSJavi Merino * then you need to supply @handler and @thread_fn. @handler is 20003aa551c9SThomas Gleixner * still called in hard interrupt context and has to check 20013aa551c9SThomas Gleixner * whether the interrupt originates from the device. If yes it 20023aa551c9SThomas Gleixner * needs to disable the interrupt on the device and return 200339a2eddbSSteven Rostedt * IRQ_WAKE_THREAD which will wake up the handler thread and run 20043aa551c9SThomas Gleixner * @thread_fn. This split handler design is necessary to support 20053aa551c9SThomas Gleixner * shared interrupts. 20063aa551c9SThomas Gleixner * 20071da177e4SLinus Torvalds * Dev_id must be globally unique. Normally the address of the 20081da177e4SLinus Torvalds * device data structure is used as the cookie. Since the handler 20091da177e4SLinus Torvalds * receives this value it makes sense to use it. 20101da177e4SLinus Torvalds * 20111da177e4SLinus Torvalds * If your interrupt is shared you must pass a non NULL dev_id 20121da177e4SLinus Torvalds * as this is required when freeing the interrupt. 
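 *
 * A minimal sketch of a shared, threaded request (editorial example;
 * my_quick_check(), my_thread_fn() and "dev" are hypothetical driver
 * code):
 *
 *	ret = request_threaded_irq(dev->irq, my_quick_check, my_thread_fn,
 *				   IRQF_SHARED, "mydev", dev);
 *
 * Here my_quick_check() runs in hard interrupt context, returns
 * IRQ_NONE if the device did not raise the interrupt, and returns
 * IRQ_WAKE_THREAD to have my_thread_fn() run in thread context.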
20131da177e4SLinus Torvalds * 20141da177e4SLinus Torvalds * Flags: 20151da177e4SLinus Torvalds * 20163cca53b0SThomas Gleixner * IRQF_SHARED Interrupt is shared 20170c5d1eb7SDavid Brownell * IRQF_TRIGGER_* Specify active edge(s) or level 20181da177e4SLinus Torvalds * 20191da177e4SLinus Torvalds */ 20203aa551c9SThomas Gleixner int request_threaded_irq(unsigned int irq, irq_handler_t handler, 20213aa551c9SThomas Gleixner irq_handler_t thread_fn, unsigned long irqflags, 20223aa551c9SThomas Gleixner const char *devname, void *dev_id) 20231da177e4SLinus Torvalds { 20241da177e4SLinus Torvalds struct irqaction *action; 202508678b08SYinghai Lu struct irq_desc *desc; 2026d3c60047SThomas Gleixner int retval; 20271da177e4SLinus Torvalds 2028e237a551SChen Fan if (irq == IRQ_NOTCONNECTED) 2029e237a551SChen Fan return -ENOTCONN; 2030e237a551SChen Fan 2031470c6623SDavid Brownell /* 20321da177e4SLinus Torvalds * Sanity-check: shared interrupts must pass in a real dev-ID, 20331da177e4SLinus Torvalds * otherwise we'll have trouble later trying to figure out 20341da177e4SLinus Torvalds * which interrupt is which (messes up the interrupt freeing 20351da177e4SLinus Torvalds * logic etc). 203617f48034SRafael J. Wysocki * 203717f48034SRafael J. Wysocki * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and 203817f48034SRafael J. Wysocki * it cannot be set along with IRQF_NO_SUSPEND. 20391da177e4SLinus Torvalds */ 204017f48034SRafael J. Wysocki if (((irqflags & IRQF_SHARED) && !dev_id) || 204117f48034SRafael J. Wysocki (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) || 204217f48034SRafael J. Wysocki ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND))) 20431da177e4SLinus Torvalds return -EINVAL; 20447d94f7caSYinghai Lu 2045cb5bc832SYinghai Lu desc = irq_to_desc(irq); 20467d94f7caSYinghai Lu if (!desc) 20471da177e4SLinus Torvalds return -EINVAL; 20487d94f7caSYinghai Lu 204931d9d9b6SMarc Zyngier if (!irq_settings_can_request(desc) || 205031d9d9b6SMarc Zyngier WARN_ON(irq_settings_is_per_cpu_devid(desc))) 20516550c775SThomas Gleixner return -EINVAL; 2052b25c340cSThomas Gleixner 2053b25c340cSThomas Gleixner if (!handler) { 2054b25c340cSThomas Gleixner if (!thread_fn) 20551da177e4SLinus Torvalds return -EINVAL; 2056b25c340cSThomas Gleixner handler = irq_default_primary_handler; 2057b25c340cSThomas Gleixner } 20581da177e4SLinus Torvalds 205945535732SThomas Gleixner action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 20601da177e4SLinus Torvalds if (!action) 20611da177e4SLinus Torvalds return -ENOMEM; 20621da177e4SLinus Torvalds 20631da177e4SLinus Torvalds action->handler = handler; 20643aa551c9SThomas Gleixner action->thread_fn = thread_fn; 20651da177e4SLinus Torvalds action->flags = irqflags; 20661da177e4SLinus Torvalds action->name = devname; 20671da177e4SLinus Torvalds action->dev_id = dev_id; 20681da177e4SLinus Torvalds 2069be45beb2SJon Hunter retval = irq_chip_pm_get(&desc->irq_data); 20704396f46cSShawn Lin if (retval < 0) { 20714396f46cSShawn Lin kfree(action); 2072be45beb2SJon Hunter return retval; 20734396f46cSShawn Lin } 2074be45beb2SJon Hunter 2075d3c60047SThomas Gleixner retval = __setup_irq(irq, desc, action); 207670aedd24SThomas Gleixner 20772a1d3ab8SThomas Gleixner if (retval) { 2078be45beb2SJon Hunter irq_chip_pm_put(&desc->irq_data); 20792a1d3ab8SThomas Gleixner kfree(action->secondary); 2080377bf1e4SAnton Vorontsov kfree(action); 20812a1d3ab8SThomas Gleixner } 2082377bf1e4SAnton Vorontsov 20836d83f94dSThomas Gleixner #ifdef CONFIG_DEBUG_SHIRQ_FIXME 
20846ce51c43SLuis Henriques if (!retval && (irqflags & IRQF_SHARED)) { 2085a304e1b8SDavid Woodhouse /* 2086a304e1b8SDavid Woodhouse * It's a shared IRQ -- the driver ought to be prepared for it 2087a304e1b8SDavid Woodhouse * to happen immediately, so let's make sure.... 2088377bf1e4SAnton Vorontsov * We disable the irq to make sure that a 'real' IRQ doesn't 2089377bf1e4SAnton Vorontsov * run in parallel with our fake. 2090a304e1b8SDavid Woodhouse */ 2091a304e1b8SDavid Woodhouse unsigned long flags; 2092a304e1b8SDavid Woodhouse 2093377bf1e4SAnton Vorontsov disable_irq(irq); 2094a304e1b8SDavid Woodhouse local_irq_save(flags); 2095377bf1e4SAnton Vorontsov 2096a304e1b8SDavid Woodhouse handler(irq, dev_id); 2097377bf1e4SAnton Vorontsov 2098a304e1b8SDavid Woodhouse local_irq_restore(flags); 2099377bf1e4SAnton Vorontsov enable_irq(irq); 2100a304e1b8SDavid Woodhouse } 2101a304e1b8SDavid Woodhouse #endif 21021da177e4SLinus Torvalds return retval; 21031da177e4SLinus Torvalds } 21043aa551c9SThomas Gleixner EXPORT_SYMBOL(request_threaded_irq); 2105ae731f8dSMarc Zyngier 2106ae731f8dSMarc Zyngier /** 2107ae731f8dSMarc Zyngier * request_any_context_irq - allocate an interrupt line 2108ae731f8dSMarc Zyngier * @irq: Interrupt line to allocate 2109ae731f8dSMarc Zyngier * @handler: Function to be called when the IRQ occurs. 2110ae731f8dSMarc Zyngier * Threaded handler for threaded interrupts. 2111ae731f8dSMarc Zyngier * @flags: Interrupt type flags 2112ae731f8dSMarc Zyngier * @name: An ascii name for the claiming device 2113ae731f8dSMarc Zyngier * @dev_id: A cookie passed back to the handler function 2114ae731f8dSMarc Zyngier * 2115ae731f8dSMarc Zyngier * This call allocates interrupt resources and enables the 2116ae731f8dSMarc Zyngier * interrupt line and IRQ handling. It selects either a 2117ae731f8dSMarc Zyngier * hardirq or threaded handling method depending on the 2118ae731f8dSMarc Zyngier * context. 2119ae731f8dSMarc Zyngier * 2120ae731f8dSMarc Zyngier * On failure, it returns a negative value. On success, 2121ae731f8dSMarc Zyngier * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. 2122ae731f8dSMarc Zyngier */ 2123ae731f8dSMarc Zyngier int request_any_context_irq(unsigned int irq, irq_handler_t handler, 2124ae731f8dSMarc Zyngier unsigned long flags, const char *name, void *dev_id) 2125ae731f8dSMarc Zyngier { 2126e237a551SChen Fan struct irq_desc *desc; 2127ae731f8dSMarc Zyngier int ret; 2128ae731f8dSMarc Zyngier 2129e237a551SChen Fan if (irq == IRQ_NOTCONNECTED) 2130e237a551SChen Fan return -ENOTCONN; 2131e237a551SChen Fan 2132e237a551SChen Fan desc = irq_to_desc(irq); 2133ae731f8dSMarc Zyngier if (!desc) 2134ae731f8dSMarc Zyngier return -EINVAL; 2135ae731f8dSMarc Zyngier 21361ccb4e61SThomas Gleixner if (irq_settings_is_nested_thread(desc)) { 2137ae731f8dSMarc Zyngier ret = request_threaded_irq(irq, NULL, handler, 2138ae731f8dSMarc Zyngier flags, name, dev_id); 2139ae731f8dSMarc Zyngier return !ret ? IRQC_IS_NESTED : ret; 2140ae731f8dSMarc Zyngier } 2141ae731f8dSMarc Zyngier 2142ae731f8dSMarc Zyngier ret = request_irq(irq, handler, flags, name, dev_id); 2143ae731f8dSMarc Zyngier return !ret ? IRQC_IS_HARDIRQ : ret; 2144ae731f8dSMarc Zyngier } 2145ae731f8dSMarc Zyngier EXPORT_SYMBOL_GPL(request_any_context_irq); 214631d9d9b6SMarc Zyngier 2147b525903cSJulien Thierry /** 2148b525903cSJulien Thierry * request_nmi - allocate an interrupt line for NMI delivery 2149b525903cSJulien Thierry * @irq: Interrupt line to allocate 2150b525903cSJulien Thierry * @handler: Function to be called when the IRQ occurs. 
2151b525903cSJulien Thierry  * Threaded handler for threaded interrupts.
2152b525903cSJulien Thierry  * @irqflags: Interrupt type flags
2153b525903cSJulien Thierry  * @name: An ascii name for the claiming device
2154b525903cSJulien Thierry  * @dev_id: A cookie passed back to the handler function
2155b525903cSJulien Thierry  *
2156b525903cSJulien Thierry  * This call allocates interrupt resources and enables the
2157b525903cSJulien Thierry  * interrupt line and IRQ handling. It sets up the IRQ line
2158b525903cSJulien Thierry  * to be handled as an NMI.
2159b525903cSJulien Thierry  *
2160b525903cSJulien Thierry  * An interrupt line delivering NMIs cannot be shared and IRQ handling
2161b525903cSJulien Thierry  * cannot be threaded.
2162b525903cSJulien Thierry  *
2163b525903cSJulien Thierry  * Interrupt lines requested for NMI delivery must produce per-CPU
2164b525903cSJulien Thierry  * interrupts and have the auto-enable setting disabled.
2165b525903cSJulien Thierry  *
2166b525903cSJulien Thierry  * Dev_id must be globally unique. Normally the address of the
2167b525903cSJulien Thierry  * device data structure is used as the cookie. Since the handler
2168b525903cSJulien Thierry  * receives this value it makes sense to use it.
2169b525903cSJulien Thierry  *
2170b525903cSJulien Thierry  * If the interrupt line cannot be used to deliver NMIs, the function
2171b525903cSJulien Thierry  * will fail and return a negative value.
2172b525903cSJulien Thierry  */
2173b525903cSJulien Thierry int request_nmi(unsigned int irq, irq_handler_t handler,
2174b525903cSJulien Thierry 		unsigned long irqflags, const char *name, void *dev_id)
2175b525903cSJulien Thierry {
2176b525903cSJulien Thierry 	struct irqaction *action;
2177b525903cSJulien Thierry 	struct irq_desc *desc;
2178b525903cSJulien Thierry 	unsigned long flags;
2179b525903cSJulien Thierry 	int retval;
2180b525903cSJulien Thierry 
2181b525903cSJulien Thierry 	if (irq == IRQ_NOTCONNECTED)
2182b525903cSJulien Thierry 		return -ENOTCONN;
2183b525903cSJulien Thierry 
2184b525903cSJulien Thierry 	/* NMI cannot be shared or used for polling */
2185b525903cSJulien Thierry 	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2186b525903cSJulien Thierry 		return -EINVAL;
2187b525903cSJulien Thierry 
2188b525903cSJulien Thierry 	if (!(irqflags & IRQF_PERCPU))
2189b525903cSJulien Thierry 		return -EINVAL;
2190b525903cSJulien Thierry 
2191b525903cSJulien Thierry 	if (!handler)
2192b525903cSJulien Thierry 		return -EINVAL;
2193b525903cSJulien Thierry 
2194b525903cSJulien Thierry 	desc = irq_to_desc(irq);
2195b525903cSJulien Thierry 
2196b525903cSJulien Thierry 	if (!desc || irq_settings_can_autoenable(desc) ||
2197b525903cSJulien Thierry 	    !irq_settings_can_request(desc) ||
2198b525903cSJulien Thierry 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2199b525903cSJulien Thierry 	    !irq_supports_nmi(desc))
2200b525903cSJulien Thierry 		return -EINVAL;
2201b525903cSJulien Thierry 
2202b525903cSJulien Thierry 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2203b525903cSJulien Thierry 	if (!action)
2204b525903cSJulien Thierry 		return -ENOMEM;
2205b525903cSJulien Thierry 
2206b525903cSJulien Thierry 	action->handler = handler;
2207b525903cSJulien Thierry 	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2208b525903cSJulien Thierry 	action->name = name;
2209b525903cSJulien Thierry 	action->dev_id = dev_id;
2210b525903cSJulien Thierry 
2211b525903cSJulien Thierry 	retval = irq_chip_pm_get(&desc->irq_data);
2212b525903cSJulien Thierry 	if (retval < 0)
2213b525903cSJulien Thierry 		goto err_out;
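	/*
	 * From here on the NMI is installed like any other interrupt:
	 * __setup_irq() hooks in the action, and the descriptor is only
	 * marked with IRQS_NMI afterwards, under desc->lock.
	 */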
2214b525903cSJulien Thierry 
2215b525903cSJulien Thierry 	retval = __setup_irq(irq, desc, action);
2216b525903cSJulien Thierry 	if (retval)
2217b525903cSJulien Thierry 		goto err_irq_setup;
2218b525903cSJulien Thierry 
2219b525903cSJulien Thierry 	raw_spin_lock_irqsave(&desc->lock, flags);
2220b525903cSJulien Thierry 
2221b525903cSJulien Thierry 	/* Set up NMI state */
2222b525903cSJulien Thierry 	desc->istate |= IRQS_NMI;
2223b525903cSJulien Thierry 	retval = irq_nmi_setup(desc);
2224b525903cSJulien Thierry 	if (retval) {
2225b525903cSJulien Thierry 		__cleanup_nmi(irq, desc);
2226b525903cSJulien Thierry 		raw_spin_unlock_irqrestore(&desc->lock, flags);
2227b525903cSJulien Thierry 		return -EINVAL;
2228b525903cSJulien Thierry 	}
2229b525903cSJulien Thierry 
2230b525903cSJulien Thierry 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2231b525903cSJulien Thierry 
2232b525903cSJulien Thierry 	return 0;
2233b525903cSJulien Thierry 
2234b525903cSJulien Thierry err_irq_setup:
2235b525903cSJulien Thierry 	irq_chip_pm_put(&desc->irq_data);
2236b525903cSJulien Thierry err_out:
2237b525903cSJulien Thierry 	kfree(action);
2238b525903cSJulien Thierry 
2239b525903cSJulien Thierry 	return retval;
2240b525903cSJulien Thierry }
2241b525903cSJulien Thierry 
22421e7c5fd2SMarc Zyngier void enable_percpu_irq(unsigned int irq, unsigned int type)
224331d9d9b6SMarc Zyngier {
224431d9d9b6SMarc Zyngier 	unsigned int cpu = smp_processor_id();
224531d9d9b6SMarc Zyngier 	unsigned long flags;
224631d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
224731d9d9b6SMarc Zyngier 
224831d9d9b6SMarc Zyngier 	if (!desc)
224931d9d9b6SMarc Zyngier 		return;
225031d9d9b6SMarc Zyngier 
2251f35ad083SMarc Zyngier 	/*
2252f35ad083SMarc Zyngier 	 * If the trigger type is not specified by the caller, then
2253f35ad083SMarc Zyngier 	 * use the default for this interrupt.
2254f35ad083SMarc Zyngier 	 */
22551e7c5fd2SMarc Zyngier 	type &= IRQ_TYPE_SENSE_MASK;
2256f35ad083SMarc Zyngier 	if (type == IRQ_TYPE_NONE)
2257f35ad083SMarc Zyngier 		type = irqd_get_trigger_type(&desc->irq_data);
2258f35ad083SMarc Zyngier 
22591e7c5fd2SMarc Zyngier 	if (type != IRQ_TYPE_NONE) {
22601e7c5fd2SMarc Zyngier 		int ret;
22611e7c5fd2SMarc Zyngier 
2262a1ff541aSJiang Liu 		ret = __irq_set_trigger(desc, type);
22631e7c5fd2SMarc Zyngier 
22641e7c5fd2SMarc Zyngier 		if (ret) {
226532cffddeSThomas Gleixner 			WARN(1, "failed to set type for IRQ%d\n", irq);
22661e7c5fd2SMarc Zyngier 			goto out;
22671e7c5fd2SMarc Zyngier 		}
22681e7c5fd2SMarc Zyngier 	}
22691e7c5fd2SMarc Zyngier 
227031d9d9b6SMarc Zyngier 	irq_percpu_enable(desc, cpu);
22711e7c5fd2SMarc Zyngier out:
227231d9d9b6SMarc Zyngier 	irq_put_desc_unlock(desc, flags);
227331d9d9b6SMarc Zyngier }
227436a5df85SChris Metcalf EXPORT_SYMBOL_GPL(enable_percpu_irq);
227531d9d9b6SMarc Zyngier 
22764b078c3fSJulien Thierry void enable_percpu_nmi(unsigned int irq, unsigned int type)
22774b078c3fSJulien Thierry {
22784b078c3fSJulien Thierry 	enable_percpu_irq(irq, type);
22794b078c3fSJulien Thierry }
22804b078c3fSJulien Thierry 
2281f0cb3220SThomas Petazzoni /**
2282f0cb3220SThomas Petazzoni  * irq_percpu_is_enabled - Check whether the per-CPU irq is enabled
2283f0cb3220SThomas Petazzoni  * @irq: Linux irq number to check for
2284f0cb3220SThomas Petazzoni  *
2285f0cb3220SThomas Petazzoni  * Must be called from a non-migratable context. Returns the enable
2286f0cb3220SThomas Petazzoni  * state of a per-CPU interrupt on the current CPU.
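 *
 * A minimal usage sketch (hypothetical; assumes my_irq was requested
 * with request_percpu_irq() elsewhere, and uses get_cpu()/put_cpu()
 * to satisfy the non-migratable requirement):
 *
 *	get_cpu();
 *	if (!irq_percpu_is_enabled(my_irq))
 *		enable_percpu_irq(my_irq, IRQ_TYPE_NONE);
 *	put_cpu();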
2287f0cb3220SThomas Petazzoni */ 2288f0cb3220SThomas Petazzoni bool irq_percpu_is_enabled(unsigned int irq) 2289f0cb3220SThomas Petazzoni { 2290f0cb3220SThomas Petazzoni unsigned int cpu = smp_processor_id(); 2291f0cb3220SThomas Petazzoni struct irq_desc *desc; 2292f0cb3220SThomas Petazzoni unsigned long flags; 2293f0cb3220SThomas Petazzoni bool is_enabled; 2294f0cb3220SThomas Petazzoni 2295f0cb3220SThomas Petazzoni desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); 2296f0cb3220SThomas Petazzoni if (!desc) 2297f0cb3220SThomas Petazzoni return false; 2298f0cb3220SThomas Petazzoni 2299f0cb3220SThomas Petazzoni is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); 2300f0cb3220SThomas Petazzoni irq_put_desc_unlock(desc, flags); 2301f0cb3220SThomas Petazzoni 2302f0cb3220SThomas Petazzoni return is_enabled; 2303f0cb3220SThomas Petazzoni } 2304f0cb3220SThomas Petazzoni EXPORT_SYMBOL_GPL(irq_percpu_is_enabled); 2305f0cb3220SThomas Petazzoni 230631d9d9b6SMarc Zyngier void disable_percpu_irq(unsigned int irq) 230731d9d9b6SMarc Zyngier { 230831d9d9b6SMarc Zyngier unsigned int cpu = smp_processor_id(); 230931d9d9b6SMarc Zyngier unsigned long flags; 231031d9d9b6SMarc Zyngier struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); 231131d9d9b6SMarc Zyngier 231231d9d9b6SMarc Zyngier if (!desc) 231331d9d9b6SMarc Zyngier return; 231431d9d9b6SMarc Zyngier 231531d9d9b6SMarc Zyngier irq_percpu_disable(desc, cpu); 231631d9d9b6SMarc Zyngier irq_put_desc_unlock(desc, flags); 231731d9d9b6SMarc Zyngier } 231836a5df85SChris Metcalf EXPORT_SYMBOL_GPL(disable_percpu_irq); 231931d9d9b6SMarc Zyngier 23204b078c3fSJulien Thierry void disable_percpu_nmi(unsigned int irq) 23214b078c3fSJulien Thierry { 23224b078c3fSJulien Thierry disable_percpu_irq(irq); 23234b078c3fSJulien Thierry } 23244b078c3fSJulien Thierry 232531d9d9b6SMarc Zyngier /* 232631d9d9b6SMarc Zyngier * Internal function to unregister a percpu irqaction. 
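 * Returns the detached irqaction, which the caller must kfree(), or
 * NULL when no matching action is installed for this irq/dev_id pair.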
232731d9d9b6SMarc Zyngier  */
232831d9d9b6SMarc Zyngier static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
232931d9d9b6SMarc Zyngier {
233031d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
233131d9d9b6SMarc Zyngier 	struct irqaction *action;
233231d9d9b6SMarc Zyngier 	unsigned long flags;
233331d9d9b6SMarc Zyngier 
233431d9d9b6SMarc Zyngier 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
233531d9d9b6SMarc Zyngier 
233631d9d9b6SMarc Zyngier 	if (!desc)
233731d9d9b6SMarc Zyngier 		return NULL;
233831d9d9b6SMarc Zyngier 
233931d9d9b6SMarc Zyngier 	raw_spin_lock_irqsave(&desc->lock, flags);
234031d9d9b6SMarc Zyngier 
234131d9d9b6SMarc Zyngier 	action = desc->action;
234231d9d9b6SMarc Zyngier 	if (!action || action->percpu_dev_id != dev_id) {
234331d9d9b6SMarc Zyngier 		WARN(1, "Trying to free already-free IRQ %d\n", irq);
234431d9d9b6SMarc Zyngier 		goto bad;
234531d9d9b6SMarc Zyngier 	}
234631d9d9b6SMarc Zyngier 
234731d9d9b6SMarc Zyngier 	if (!cpumask_empty(desc->percpu_enabled)) {
234831d9d9b6SMarc Zyngier 		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
234931d9d9b6SMarc Zyngier 		     irq, cpumask_first(desc->percpu_enabled));
235031d9d9b6SMarc Zyngier 		goto bad;
235131d9d9b6SMarc Zyngier 	}
235231d9d9b6SMarc Zyngier 
235331d9d9b6SMarc Zyngier 	/* Found it - now remove it from the list of entries: */
235431d9d9b6SMarc Zyngier 	desc->action = NULL;
235531d9d9b6SMarc Zyngier 
23564b078c3fSJulien Thierry 	desc->istate &= ~IRQS_NMI;
23574b078c3fSJulien Thierry 
235831d9d9b6SMarc Zyngier 	raw_spin_unlock_irqrestore(&desc->lock, flags);
235931d9d9b6SMarc Zyngier 
236031d9d9b6SMarc Zyngier 	unregister_handler_proc(irq, action);
236131d9d9b6SMarc Zyngier 
2362be45beb2SJon Hunter 	irq_chip_pm_put(&desc->irq_data);
236331d9d9b6SMarc Zyngier 	module_put(desc->owner);
236431d9d9b6SMarc Zyngier 	return action;
236531d9d9b6SMarc Zyngier 
236631d9d9b6SMarc Zyngier bad:
236731d9d9b6SMarc Zyngier 	raw_spin_unlock_irqrestore(&desc->lock, flags);
236831d9d9b6SMarc Zyngier 	return NULL;
236931d9d9b6SMarc Zyngier }
237031d9d9b6SMarc Zyngier 
237131d9d9b6SMarc Zyngier /**
237231d9d9b6SMarc Zyngier  * remove_percpu_irq - free a per-cpu interrupt
237331d9d9b6SMarc Zyngier  * @irq: Interrupt line to free
237431d9d9b6SMarc Zyngier  * @act: irqaction for the interrupt
237531d9d9b6SMarc Zyngier  *
237631d9d9b6SMarc Zyngier  * Used to remove interrupts statically set up by the early boot process.
237731d9d9b6SMarc Zyngier  */
237831d9d9b6SMarc Zyngier void remove_percpu_irq(unsigned int irq, struct irqaction *act)
237931d9d9b6SMarc Zyngier {
238031d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
238131d9d9b6SMarc Zyngier 
238231d9d9b6SMarc Zyngier 	if (desc && irq_settings_is_per_cpu_devid(desc))
238331d9d9b6SMarc Zyngier 		__free_percpu_irq(irq, act->percpu_dev_id);
238431d9d9b6SMarc Zyngier }
238531d9d9b6SMarc Zyngier 
238631d9d9b6SMarc Zyngier /**
238731d9d9b6SMarc Zyngier  * free_percpu_irq - free an interrupt allocated with request_percpu_irq
238831d9d9b6SMarc Zyngier  * @irq: Interrupt line to free
238931d9d9b6SMarc Zyngier  * @dev_id: Device identity to free
239031d9d9b6SMarc Zyngier  *
239131d9d9b6SMarc Zyngier  * Remove a percpu interrupt handler. The handler is removed, but
239231d9d9b6SMarc Zyngier  * the interrupt line is not disabled. This must be done on each
239331d9d9b6SMarc Zyngier  * CPU before calling this function. The function does not return
239431d9d9b6SMarc Zyngier  * until any executing interrupts for this IRQ have completed.
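 *
 * A minimal teardown sketch (hypothetical names; the disable step must
 * run on every CPU that enabled the interrupt, as noted above):
 *
 *	static void my_disable(void *info)
 *	{
 *		disable_percpu_irq(my_irq);
 *	}
 *
 *	on_each_cpu(my_disable, NULL, 1);
 *	free_percpu_irq(my_irq, &my_percpu_dev);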
239531d9d9b6SMarc Zyngier  *
239631d9d9b6SMarc Zyngier  * This function must not be called from interrupt context.
239731d9d9b6SMarc Zyngier  */
239831d9d9b6SMarc Zyngier void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
239931d9d9b6SMarc Zyngier {
240031d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
240131d9d9b6SMarc Zyngier 
240231d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
240331d9d9b6SMarc Zyngier 		return;
240431d9d9b6SMarc Zyngier 
240531d9d9b6SMarc Zyngier 	chip_bus_lock(desc);
240631d9d9b6SMarc Zyngier 	kfree(__free_percpu_irq(irq, dev_id));
240731d9d9b6SMarc Zyngier 	chip_bus_sync_unlock(desc);
240831d9d9b6SMarc Zyngier }
2409aec2e2adSMaxime Ripard EXPORT_SYMBOL_GPL(free_percpu_irq);
241031d9d9b6SMarc Zyngier 
24114b078c3fSJulien Thierry void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
24124b078c3fSJulien Thierry {
24134b078c3fSJulien Thierry 	struct irq_desc *desc = irq_to_desc(irq);
24144b078c3fSJulien Thierry 
24154b078c3fSJulien Thierry 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
24164b078c3fSJulien Thierry 		return;
24174b078c3fSJulien Thierry 
24184b078c3fSJulien Thierry 	if (WARN_ON(!(desc->istate & IRQS_NMI)))
24194b078c3fSJulien Thierry 		return;
24204b078c3fSJulien Thierry 
24214b078c3fSJulien Thierry 	kfree(__free_percpu_irq(irq, dev_id));
24224b078c3fSJulien Thierry }
24234b078c3fSJulien Thierry 
242431d9d9b6SMarc Zyngier /**
242531d9d9b6SMarc Zyngier  * setup_percpu_irq - set up a per-cpu interrupt
242631d9d9b6SMarc Zyngier  * @irq: Interrupt line to set up
242731d9d9b6SMarc Zyngier  * @act: irqaction for the interrupt
242831d9d9b6SMarc Zyngier  *
242931d9d9b6SMarc Zyngier  * Used to statically set up per-cpu interrupts in the early boot process.
243031d9d9b6SMarc Zyngier  */
243131d9d9b6SMarc Zyngier int setup_percpu_irq(unsigned int irq, struct irqaction *act)
243231d9d9b6SMarc Zyngier {
243331d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
243431d9d9b6SMarc Zyngier 	int retval;
243531d9d9b6SMarc Zyngier 
243631d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
243731d9d9b6SMarc Zyngier 		return -EINVAL;
2438be45beb2SJon Hunter 
2439be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
2440be45beb2SJon Hunter 	if (retval < 0)
2441be45beb2SJon Hunter 		return retval;
2442be45beb2SJon Hunter 
244331d9d9b6SMarc Zyngier 	retval = __setup_irq(irq, desc, act);
244431d9d9b6SMarc Zyngier 
2445be45beb2SJon Hunter 	if (retval)
2446be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
2447be45beb2SJon Hunter 
244831d9d9b6SMarc Zyngier 	return retval;
244931d9d9b6SMarc Zyngier }
245031d9d9b6SMarc Zyngier 
245131d9d9b6SMarc Zyngier /**
2452c80081b9SDaniel Lezcano  * __request_percpu_irq - allocate a percpu interrupt line
245331d9d9b6SMarc Zyngier  * @irq: Interrupt line to allocate
245431d9d9b6SMarc Zyngier  * @handler: Function to be called when the IRQ occurs.
2455c80081b9SDaniel Lezcano  * @flags: Interrupt type flags (IRQF_TIMER only)
245631d9d9b6SMarc Zyngier  * @devname: An ascii name for the claiming device
245731d9d9b6SMarc Zyngier  * @dev_id: A percpu cookie passed back to the handler function
245831d9d9b6SMarc Zyngier  *
2459a1b7febdSMaxime Ripard  * This call allocates interrupt resources and enables the
2460a1b7febdSMaxime Ripard  * interrupt on the local CPU. If the interrupt is supposed to be
2461a1b7febdSMaxime Ripard  * enabled on other CPUs, it has to be done on each CPU using
2462a1b7febdSMaxime Ripard  * enable_percpu_irq().
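 *
 * A minimal usage sketch (hypothetical names; most drivers use the
 * request_percpu_irq() wrapper, which passes a zero flags argument):
 *
 *	static DEFINE_PER_CPU(struct my_dev_data, my_pcpu_data);
 *
 *	err = __request_percpu_irq(irq, my_handler, 0, "my-dev",
 *				   &my_pcpu_data);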
246331d9d9b6SMarc Zyngier  *
246431d9d9b6SMarc Zyngier  * Dev_id must be globally unique. It is a per-cpu variable, and
246531d9d9b6SMarc Zyngier  * the handler gets called with the interrupted CPU's instance of
246631d9d9b6SMarc Zyngier  * that variable.
246731d9d9b6SMarc Zyngier  */
2468c80081b9SDaniel Lezcano int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2469c80081b9SDaniel Lezcano 			 unsigned long flags, const char *devname,
2470c80081b9SDaniel Lezcano 			 void __percpu *dev_id)
247131d9d9b6SMarc Zyngier {
247231d9d9b6SMarc Zyngier 	struct irqaction *action;
247331d9d9b6SMarc Zyngier 	struct irq_desc *desc;
247431d9d9b6SMarc Zyngier 	int retval;
247531d9d9b6SMarc Zyngier 
247631d9d9b6SMarc Zyngier 	if (!dev_id)
247731d9d9b6SMarc Zyngier 		return -EINVAL;
247831d9d9b6SMarc Zyngier 
247931d9d9b6SMarc Zyngier 	desc = irq_to_desc(irq);
248031d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_can_request(desc) ||
248131d9d9b6SMarc Zyngier 	    !irq_settings_is_per_cpu_devid(desc))
248231d9d9b6SMarc Zyngier 		return -EINVAL;
248331d9d9b6SMarc Zyngier 
2484c80081b9SDaniel Lezcano 	if (flags && flags != IRQF_TIMER)
2485c80081b9SDaniel Lezcano 		return -EINVAL;
2486c80081b9SDaniel Lezcano 
248731d9d9b6SMarc Zyngier 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
248831d9d9b6SMarc Zyngier 	if (!action)
248931d9d9b6SMarc Zyngier 		return -ENOMEM;
249031d9d9b6SMarc Zyngier 
249131d9d9b6SMarc Zyngier 	action->handler = handler;
2492c80081b9SDaniel Lezcano 	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
249331d9d9b6SMarc Zyngier 	action->name = devname;
249431d9d9b6SMarc Zyngier 	action->percpu_dev_id = dev_id;
249531d9d9b6SMarc Zyngier 
2496be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
24974396f46cSShawn Lin 	if (retval < 0) {
24984396f46cSShawn Lin 		kfree(action);
2499be45beb2SJon Hunter 		return retval;
25004396f46cSShawn Lin 	}
2501be45beb2SJon Hunter 
250231d9d9b6SMarc Zyngier 	retval = __setup_irq(irq, desc, action);
250331d9d9b6SMarc Zyngier 
2504be45beb2SJon Hunter 	if (retval) {
2505be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
250631d9d9b6SMarc Zyngier 		kfree(action);
2507be45beb2SJon Hunter 	}
250831d9d9b6SMarc Zyngier 
250931d9d9b6SMarc Zyngier 	return retval;
251031d9d9b6SMarc Zyngier }
2511c80081b9SDaniel Lezcano EXPORT_SYMBOL_GPL(__request_percpu_irq);
25121b7047edSMarc Zyngier 
25131b7047edSMarc Zyngier /**
25144b078c3fSJulien Thierry  * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
25154b078c3fSJulien Thierry  * @irq: Interrupt line to allocate
25164b078c3fSJulien Thierry  * @handler: Function to be called when the IRQ occurs.
25174b078c3fSJulien Thierry  * @name: An ascii name for the claiming device
25184b078c3fSJulien Thierry  * @dev_id: A percpu cookie passed back to the handler function
25194b078c3fSJulien Thierry  *
25204b078c3fSJulien Thierry  * This call allocates interrupt resources for a per-CPU NMI. Per-CPU NMIs
2521a5186694SJulien Thierry  * have to be set up on each CPU by calling prepare_percpu_nmi() before
2522a5186694SJulien Thierry  * being enabled on the same CPU by using enable_percpu_nmi().
25234b078c3fSJulien Thierry  *
25244b078c3fSJulien Thierry  * Dev_id must be globally unique. It is a per-cpu variable, and
25254b078c3fSJulien Thierry  * the handler gets called with the interrupted CPU's instance of
25264b078c3fSJulien Thierry  * that variable.
25274b078c3fSJulien Thierry  *
25284b078c3fSJulien Thierry  * Interrupt lines requested for NMI delivery should have the auto-enable
25294b078c3fSJulien Thierry  * setting disabled.
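 *
 * A minimal request sketch (hypothetical names; the handler runs in
 * NMI context and therefore must be NMI safe):
 *
 *	err = request_percpu_nmi(irq, my_nmi_handler, "my-nmi",
 *				 &my_pcpu_data);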
25304b078c3fSJulien Thierry  *
25314b078c3fSJulien Thierry  * If the interrupt line cannot be used to deliver NMIs, the function
25324b078c3fSJulien Thierry  * will fail, returning a negative value.
25334b078c3fSJulien Thierry  */
25344b078c3fSJulien Thierry int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
25354b078c3fSJulien Thierry 		       const char *name, void __percpu *dev_id)
25364b078c3fSJulien Thierry {
25374b078c3fSJulien Thierry 	struct irqaction *action;
25384b078c3fSJulien Thierry 	struct irq_desc *desc;
25394b078c3fSJulien Thierry 	unsigned long flags;
25404b078c3fSJulien Thierry 	int retval;
25414b078c3fSJulien Thierry 
25424b078c3fSJulien Thierry 	if (!handler)
25434b078c3fSJulien Thierry 		return -EINVAL;
25444b078c3fSJulien Thierry 
25454b078c3fSJulien Thierry 	desc = irq_to_desc(irq);
25464b078c3fSJulien Thierry 
25474b078c3fSJulien Thierry 	if (!desc || !irq_settings_can_request(desc) ||
25484b078c3fSJulien Thierry 	    !irq_settings_is_per_cpu_devid(desc) ||
25494b078c3fSJulien Thierry 	    irq_settings_can_autoenable(desc) ||
25504b078c3fSJulien Thierry 	    !irq_supports_nmi(desc))
25514b078c3fSJulien Thierry 		return -EINVAL;
25524b078c3fSJulien Thierry 
25534b078c3fSJulien Thierry 	/* The line cannot already be NMI */
25544b078c3fSJulien Thierry 	if (desc->istate & IRQS_NMI)
25554b078c3fSJulien Thierry 		return -EINVAL;
25564b078c3fSJulien Thierry 
25574b078c3fSJulien Thierry 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
25584b078c3fSJulien Thierry 	if (!action)
25594b078c3fSJulien Thierry 		return -ENOMEM;
25604b078c3fSJulien Thierry 
25614b078c3fSJulien Thierry 	action->handler = handler;
25624b078c3fSJulien Thierry 	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
25634b078c3fSJulien Thierry 		| IRQF_NOBALANCING;
25644b078c3fSJulien Thierry 	action->name = name;
25654b078c3fSJulien Thierry 	action->percpu_dev_id = dev_id;
25664b078c3fSJulien Thierry 
25674b078c3fSJulien Thierry 	retval = irq_chip_pm_get(&desc->irq_data);
25684b078c3fSJulien Thierry 	if (retval < 0)
25694b078c3fSJulien Thierry 		goto err_out;
25704b078c3fSJulien Thierry 
25714b078c3fSJulien Thierry 	retval = __setup_irq(irq, desc, action);
25724b078c3fSJulien Thierry 	if (retval)
25734b078c3fSJulien Thierry 		goto err_irq_setup;
25744b078c3fSJulien Thierry 
25754b078c3fSJulien Thierry 	raw_spin_lock_irqsave(&desc->lock, flags);
25764b078c3fSJulien Thierry 	desc->istate |= IRQS_NMI;
25774b078c3fSJulien Thierry 	raw_spin_unlock_irqrestore(&desc->lock, flags);
25784b078c3fSJulien Thierry 
25794b078c3fSJulien Thierry 	return 0;
25804b078c3fSJulien Thierry 
25814b078c3fSJulien Thierry err_irq_setup:
25824b078c3fSJulien Thierry 	irq_chip_pm_put(&desc->irq_data);
25834b078c3fSJulien Thierry err_out:
25844b078c3fSJulien Thierry 	kfree(action);
25854b078c3fSJulien Thierry 
25864b078c3fSJulien Thierry 	return retval;
25874b078c3fSJulien Thierry }
25884b078c3fSJulien Thierry 
25894b078c3fSJulien Thierry /**
25904b078c3fSJulien Thierry  * prepare_percpu_nmi - performs CPU-local setup for NMI delivery
25914b078c3fSJulien Thierry  * @irq: Interrupt line to prepare for NMI delivery
25924b078c3fSJulien Thierry  *
25934b078c3fSJulien Thierry  * This call prepares an interrupt line to deliver NMIs on the current CPU,
25944b078c3fSJulien Thierry  * before that interrupt line gets enabled with enable_percpu_nmi().
25954b078c3fSJulien Thierry  *
25964b078c3fSJulien Thierry  * As a CPU-local operation, this should be called from non-preemptible
25974b078c3fSJulien Thierry  * context.
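 *
 * A minimal sketch (hypothetical; e.g. from a CPU hotplug starting
 * callback, which runs on the plugged CPU with preemption disabled):
 *
 *	ret = prepare_percpu_nmi(irq);
 *	if (!ret)
 *		enable_percpu_nmi(irq, IRQ_TYPE_NONE);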
25984b078c3fSJulien Thierry  *
25994b078c3fSJulien Thierry  * If the interrupt line cannot be used to deliver NMIs, the function
26004b078c3fSJulien Thierry  * will fail, returning a negative value.
26014b078c3fSJulien Thierry  */
26024b078c3fSJulien Thierry int prepare_percpu_nmi(unsigned int irq)
26034b078c3fSJulien Thierry {
26044b078c3fSJulien Thierry 	unsigned long flags;
26054b078c3fSJulien Thierry 	struct irq_desc *desc;
26064b078c3fSJulien Thierry 	int ret = 0;
26074b078c3fSJulien Thierry 
26084b078c3fSJulien Thierry 	WARN_ON(preemptible());
26094b078c3fSJulien Thierry 
26104b078c3fSJulien Thierry 	desc = irq_get_desc_lock(irq, &flags,
26114b078c3fSJulien Thierry 				 IRQ_GET_DESC_CHECK_PERCPU);
26124b078c3fSJulien Thierry 	if (!desc)
26134b078c3fSJulien Thierry 		return -EINVAL;
26144b078c3fSJulien Thierry 
26154b078c3fSJulien Thierry 	if (WARN(!(desc->istate & IRQS_NMI),
26164b078c3fSJulien Thierry 		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
26174b078c3fSJulien Thierry 		 irq)) {
26184b078c3fSJulien Thierry 		ret = -EINVAL;
26194b078c3fSJulien Thierry 		goto out;
26204b078c3fSJulien Thierry 	}
26214b078c3fSJulien Thierry 
26224b078c3fSJulien Thierry 	ret = irq_nmi_setup(desc);
26234b078c3fSJulien Thierry 	if (ret) {
26244b078c3fSJulien Thierry 		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
26254b078c3fSJulien Thierry 		goto out;
26264b078c3fSJulien Thierry 	}
26274b078c3fSJulien Thierry 
26284b078c3fSJulien Thierry out:
26294b078c3fSJulien Thierry 	irq_put_desc_unlock(desc, flags);
26304b078c3fSJulien Thierry 	return ret;
26314b078c3fSJulien Thierry }
26324b078c3fSJulien Thierry 
26334b078c3fSJulien Thierry /**
26344b078c3fSJulien Thierry  * teardown_percpu_nmi - undoes the NMI setup of an IRQ line
26354b078c3fSJulien Thierry  * @irq: Interrupt line from which CPU-local NMI configuration should be
26364b078c3fSJulien Thierry  *	 removed
26374b078c3fSJulien Thierry  *
26384b078c3fSJulien Thierry  * This call undoes the setup done by prepare_percpu_nmi().
26394b078c3fSJulien Thierry  *
26404b078c3fSJulien Thierry  * The IRQ line should not be enabled for the current CPU.
26414b078c3fSJulien Thierry  *
26424b078c3fSJulien Thierry  * As a CPU-local operation, this should be called from non-preemptible
26434b078c3fSJulien Thierry  * context.
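 *
 * A minimal sketch (hypothetical; the reverse of the prepare/enable
 * sequence, run on the CPU that is being torn down):
 *
 *	disable_percpu_nmi(irq);
 *	teardown_percpu_nmi(irq);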
26444b078c3fSJulien Thierry  */
26454b078c3fSJulien Thierry void teardown_percpu_nmi(unsigned int irq)
26464b078c3fSJulien Thierry {
26474b078c3fSJulien Thierry 	unsigned long flags;
26484b078c3fSJulien Thierry 	struct irq_desc *desc;
26494b078c3fSJulien Thierry 
26504b078c3fSJulien Thierry 	WARN_ON(preemptible());
26514b078c3fSJulien Thierry 
26524b078c3fSJulien Thierry 	desc = irq_get_desc_lock(irq, &flags,
26534b078c3fSJulien Thierry 				 IRQ_GET_DESC_CHECK_PERCPU);
26544b078c3fSJulien Thierry 	if (!desc)
26554b078c3fSJulien Thierry 		return;
26564b078c3fSJulien Thierry 
26574b078c3fSJulien Thierry 	if (WARN_ON(!(desc->istate & IRQS_NMI)))
26584b078c3fSJulien Thierry 		goto out;
26594b078c3fSJulien Thierry 
26604b078c3fSJulien Thierry 	irq_nmi_teardown(desc);
26614b078c3fSJulien Thierry out:
26624b078c3fSJulien Thierry 	irq_put_desc_unlock(desc, flags);
26634b078c3fSJulien Thierry }
26644b078c3fSJulien Thierry 
266562e04686SThomas Gleixner int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
266662e04686SThomas Gleixner 			    bool *state)
266762e04686SThomas Gleixner {
266862e04686SThomas Gleixner 	struct irq_chip *chip;
266962e04686SThomas Gleixner 	int err = -EINVAL;
267062e04686SThomas Gleixner 
267162e04686SThomas Gleixner 	do {
267262e04686SThomas Gleixner 		chip = irq_data_get_irq_chip(data);
267362e04686SThomas Gleixner 		if (chip->irq_get_irqchip_state)
267462e04686SThomas Gleixner 			break;
267562e04686SThomas Gleixner #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
267662e04686SThomas Gleixner 		data = data->parent_data;
267762e04686SThomas Gleixner #else
267862e04686SThomas Gleixner 		data = NULL;
267962e04686SThomas Gleixner #endif
268062e04686SThomas Gleixner 	} while (data);
268162e04686SThomas Gleixner 
268262e04686SThomas Gleixner 	if (data)
268362e04686SThomas Gleixner 		err = chip->irq_get_irqchip_state(data, which, state);
268462e04686SThomas Gleixner 	return err;
268562e04686SThomas Gleixner }
268662e04686SThomas Gleixner 
26874b078c3fSJulien Thierry /**
26881b7047edSMarc Zyngier  * irq_get_irqchip_state - returns the irqchip state of an interrupt.
26891b7047edSMarc Zyngier  * @irq: Interrupt line that is forwarded to a VM
26901b7047edSMarc Zyngier  * @which: One of IRQCHIP_STATE_* the caller wants to know about
26911b7047edSMarc Zyngier  * @state: a pointer to a boolean where the state is to be stored
26921b7047edSMarc Zyngier  *
26931b7047edSMarc Zyngier  * This call snapshots the internal irqchip state of an
26941b7047edSMarc Zyngier  * interrupt, returning into @state the bit corresponding to
26951b7047edSMarc Zyngier  * state @which.
26961b7047edSMarc Zyngier  *
26971b7047edSMarc Zyngier  * This function should be called with preemption disabled if the
26981b7047edSMarc Zyngier  * interrupt controller has per-cpu registers.
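 *
 * A minimal sketch (hypothetical; reads back whether the line is
 * currently pending at the irqchip level):
 *
 *	bool pending;
 *	int err;
 *
 *	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);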
26991b7047edSMarc Zyngier */ 27001b7047edSMarc Zyngier int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, 27011b7047edSMarc Zyngier bool *state) 27021b7047edSMarc Zyngier { 27031b7047edSMarc Zyngier struct irq_desc *desc; 27041b7047edSMarc Zyngier struct irq_data *data; 27051b7047edSMarc Zyngier unsigned long flags; 27061b7047edSMarc Zyngier int err = -EINVAL; 27071b7047edSMarc Zyngier 27081b7047edSMarc Zyngier desc = irq_get_desc_buslock(irq, &flags, 0); 27091b7047edSMarc Zyngier if (!desc) 27101b7047edSMarc Zyngier return err; 27111b7047edSMarc Zyngier 27121b7047edSMarc Zyngier data = irq_desc_get_irq_data(desc); 27131b7047edSMarc Zyngier 271462e04686SThomas Gleixner err = __irq_get_irqchip_state(data, which, state); 27151b7047edSMarc Zyngier 27161b7047edSMarc Zyngier irq_put_desc_busunlock(desc, flags); 27171b7047edSMarc Zyngier return err; 27181b7047edSMarc Zyngier } 27191ee4fb3eSBjorn Andersson EXPORT_SYMBOL_GPL(irq_get_irqchip_state); 27201b7047edSMarc Zyngier 27211b7047edSMarc Zyngier /** 27221b7047edSMarc Zyngier * irq_set_irqchip_state - set the state of a forwarded interrupt. 27231b7047edSMarc Zyngier * @irq: Interrupt line that is forwarded to a VM 27241b7047edSMarc Zyngier * @which: State to be restored (one of IRQCHIP_STATE_*) 27251b7047edSMarc Zyngier * @val: Value corresponding to @which 27261b7047edSMarc Zyngier * 27271b7047edSMarc Zyngier * This call sets the internal irqchip state of an interrupt, 27281b7047edSMarc Zyngier * depending on the value of @which. 27291b7047edSMarc Zyngier * 27301b7047edSMarc Zyngier * This function should be called with preemption disabled if the 27311b7047edSMarc Zyngier * interrupt controller has per-cpu registers. 27321b7047edSMarc Zyngier */ 27331b7047edSMarc Zyngier int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, 27341b7047edSMarc Zyngier bool val) 27351b7047edSMarc Zyngier { 27361b7047edSMarc Zyngier struct irq_desc *desc; 27371b7047edSMarc Zyngier struct irq_data *data; 27381b7047edSMarc Zyngier struct irq_chip *chip; 27391b7047edSMarc Zyngier unsigned long flags; 27401b7047edSMarc Zyngier int err = -EINVAL; 27411b7047edSMarc Zyngier 27421b7047edSMarc Zyngier desc = irq_get_desc_buslock(irq, &flags, 0); 27431b7047edSMarc Zyngier if (!desc) 27441b7047edSMarc Zyngier return err; 27451b7047edSMarc Zyngier 27461b7047edSMarc Zyngier data = irq_desc_get_irq_data(desc); 27471b7047edSMarc Zyngier 27481b7047edSMarc Zyngier do { 27491b7047edSMarc Zyngier chip = irq_data_get_irq_chip(data); 27501b7047edSMarc Zyngier if (chip->irq_set_irqchip_state) 27511b7047edSMarc Zyngier break; 27521b7047edSMarc Zyngier #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 27531b7047edSMarc Zyngier data = data->parent_data; 27541b7047edSMarc Zyngier #else 27551b7047edSMarc Zyngier data = NULL; 27561b7047edSMarc Zyngier #endif 27571b7047edSMarc Zyngier } while (data); 27581b7047edSMarc Zyngier 27591b7047edSMarc Zyngier if (data) 27601b7047edSMarc Zyngier err = chip->irq_set_irqchip_state(data, which, val); 27611b7047edSMarc Zyngier 27621b7047edSMarc Zyngier irq_put_desc_busunlock(desc, flags); 27631b7047edSMarc Zyngier return err; 27641b7047edSMarc Zyngier } 27651ee4fb3eSBjorn Andersson EXPORT_SYMBOL_GPL(irq_set_irqchip_state); 2766
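/*
 * A minimal sketch for irq_set_irqchip_state() (hypothetical; e.g.
 * replaying a pending state into the hardware for an interrupt line
 * that is forwarded to a VM, as described in the kernel-doc above):
 *
 *	err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);
 */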