152a65ff5SThomas Gleixner // SPDX-License-Identifier: GPL-2.0 21da177e4SLinus Torvalds /* 3a34db9b2SIngo Molnar * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar 4a34db9b2SIngo Molnar * Copyright (C) 2005-2006 Thomas Gleixner 51da177e4SLinus Torvalds * 61da177e4SLinus Torvalds * This file contains driver APIs to the irq subsystem. 71da177e4SLinus Torvalds */ 81da177e4SLinus Torvalds 997fd75b7SAndrew Morton #define pr_fmt(fmt) "genirq: " fmt 1097fd75b7SAndrew Morton 111da177e4SLinus Torvalds #include <linux/irq.h> 123aa551c9SThomas Gleixner #include <linux/kthread.h> 131da177e4SLinus Torvalds #include <linux/module.h> 141da177e4SLinus Torvalds #include <linux/random.h> 151da177e4SLinus Torvalds #include <linux/interrupt.h> 164001d8e8SThomas Gleixner #include <linux/irqdomain.h> 171aeb272cSRobert P. J. Day #include <linux/slab.h> 183aa551c9SThomas Gleixner #include <linux/sched.h> 198bd75c77SClark Williams #include <linux/sched/rt.h> 200881e7bdSIngo Molnar #include <linux/sched/task.h> 2111ea68f5SMing Lei #include <linux/sched/isolation.h> 22ae7e81c0SIngo Molnar #include <uapi/linux/sched/types.h> 234d1d61a6SOleg Nesterov #include <linux/task_work.h> 241da177e4SLinus Torvalds 251da177e4SLinus Torvalds #include "internals.h" 261da177e4SLinus Torvalds 27b6a32bbdSThomas Gleixner #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT) 2891cc470eSTanner Love DEFINE_STATIC_KEY_FALSE(force_irqthreads_key); 298d32a307SThomas Gleixner 308d32a307SThomas Gleixner static int __init setup_forced_irqthreads(char *arg) 318d32a307SThomas Gleixner { 3291cc470eSTanner Love static_branch_enable(&force_irqthreads_key); 338d32a307SThomas Gleixner return 0; 348d32a307SThomas Gleixner } 358d32a307SThomas Gleixner early_param("threadirqs", setup_forced_irqthreads); 368d32a307SThomas Gleixner #endif 378d32a307SThomas Gleixner 3862e04686SThomas Gleixner static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip) 391da177e4SLinus Torvalds { 4062e04686SThomas Gleixner struct irq_data *irqd = irq_desc_get_irq_data(desc); 4132f4125eSThomas Gleixner bool inprogress; 421da177e4SLinus Torvalds 43a98ce5c6SHerbert Xu do { 44a98ce5c6SHerbert Xu unsigned long flags; 45a98ce5c6SHerbert Xu 46a98ce5c6SHerbert Xu /* 47a98ce5c6SHerbert Xu * Wait until we're out of the critical section. This might 48a98ce5c6SHerbert Xu * give the wrong answer due to the lack of memory barriers. 49a98ce5c6SHerbert Xu */ 5032f4125eSThomas Gleixner while (irqd_irq_inprogress(&desc->irq_data)) 511da177e4SLinus Torvalds cpu_relax(); 52a98ce5c6SHerbert Xu 53a98ce5c6SHerbert Xu /* Ok, that indicated we're done: double-check carefully. */ 54239007b8SThomas Gleixner raw_spin_lock_irqsave(&desc->lock, flags); 5532f4125eSThomas Gleixner inprogress = irqd_irq_inprogress(&desc->irq_data); 5662e04686SThomas Gleixner 5762e04686SThomas Gleixner /* 5862e04686SThomas Gleixner * If requested and supported, check at the chip whether it 5962e04686SThomas Gleixner * is in flight at the hardware level, i.e. already pending 6062e04686SThomas Gleixner * in a CPU and waiting for service and acknowledge. 6162e04686SThomas Gleixner */ 6262e04686SThomas Gleixner if (!inprogress && sync_chip) { 6362e04686SThomas Gleixner /* 6462e04686SThomas Gleixner * Ignore the return code. inprogress is only updated 6562e04686SThomas Gleixner * when the chip supports it. 6662e04686SThomas Gleixner */ 6762e04686SThomas Gleixner __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE, 6862e04686SThomas Gleixner &inprogress); 6962e04686SThomas Gleixner } 70239007b8SThomas Gleixner raw_spin_unlock_irqrestore(&desc->lock, flags); 71a98ce5c6SHerbert Xu 72a98ce5c6SHerbert Xu /* Oops, that failed? */ 7332f4125eSThomas Gleixner } while (inprogress); 7418258f72SThomas Gleixner } 753aa551c9SThomas Gleixner 7618258f72SThomas Gleixner /** 7718258f72SThomas Gleixner * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) 7818258f72SThomas Gleixner * @irq: interrupt number to wait for 7918258f72SThomas Gleixner * 8018258f72SThomas Gleixner * This function waits for any pending hard IRQ handlers for this 8118258f72SThomas Gleixner * interrupt to complete before returning. If you use this 8218258f72SThomas Gleixner * function while holding a resource the IRQ handler may need you 8318258f72SThomas Gleixner * will deadlock. It does not take associated threaded handlers 8418258f72SThomas Gleixner * into account. 8518258f72SThomas Gleixner * 8618258f72SThomas Gleixner * Do not use this for shutdown scenarios where you must be sure 8718258f72SThomas Gleixner * that all parts (hardirq and threaded handler) have completed. 8818258f72SThomas Gleixner * 8902cea395SPeter Zijlstra * Returns: false if a threaded handler is active. 9002cea395SPeter Zijlstra * 9118258f72SThomas Gleixner * This function may be called - with care - from IRQ context. 9262e04686SThomas Gleixner * 9362e04686SThomas Gleixner * It does not check whether there is an interrupt in flight at the 9462e04686SThomas Gleixner * hardware level, but not serviced yet, as this might deadlock when 9562e04686SThomas Gleixner * called with interrupts disabled and the target CPU of the interrupt 9662e04686SThomas Gleixner * is the current CPU. 973aa551c9SThomas Gleixner */ 9802cea395SPeter Zijlstra bool synchronize_hardirq(unsigned int irq) 9918258f72SThomas Gleixner { 10018258f72SThomas Gleixner struct irq_desc *desc = irq_to_desc(irq); 10118258f72SThomas Gleixner 10202cea395SPeter Zijlstra if (desc) { 10362e04686SThomas Gleixner __synchronize_hardirq(desc, false); 10402cea395SPeter Zijlstra return !atomic_read(&desc->threads_active); 10502cea395SPeter Zijlstra } 10602cea395SPeter Zijlstra 10702cea395SPeter Zijlstra return true; 10818258f72SThomas Gleixner } 10918258f72SThomas Gleixner EXPORT_SYMBOL(synchronize_hardirq); 11018258f72SThomas Gleixner 11118258f72SThomas Gleixner /** 11218258f72SThomas Gleixner * synchronize_irq - wait for pending IRQ handlers (on other CPUs) 11318258f72SThomas Gleixner * @irq: interrupt number to wait for 11418258f72SThomas Gleixner * 11518258f72SThomas Gleixner * This function waits for any pending IRQ handlers for this interrupt 11618258f72SThomas Gleixner * to complete before returning. If you use this function while 11718258f72SThomas Gleixner * holding a resource the IRQ handler may need you will deadlock. 11818258f72SThomas Gleixner * 1191d21f2afSThomas Gleixner * Can only be called from preemptible code as it might sleep when 1201d21f2afSThomas Gleixner * an interrupt thread is associated to @irq. 12162e04686SThomas Gleixner * 12262e04686SThomas Gleixner * It optionally makes sure (when the irq chip supports that method) 12362e04686SThomas Gleixner * that the interrupt is not pending in any CPU and waiting for 12462e04686SThomas Gleixner * service. 12518258f72SThomas Gleixner */ 12618258f72SThomas Gleixner void synchronize_irq(unsigned int irq) 12718258f72SThomas Gleixner { 12818258f72SThomas Gleixner struct irq_desc *desc = irq_to_desc(irq); 12918258f72SThomas Gleixner 13018258f72SThomas Gleixner if (desc) { 13162e04686SThomas Gleixner __synchronize_hardirq(desc, true); 13218258f72SThomas Gleixner /* 13318258f72SThomas Gleixner * We made sure that no hardirq handler is 13418258f72SThomas Gleixner * running. Now verify that no threaded handlers are 13518258f72SThomas Gleixner * active. 13618258f72SThomas Gleixner */ 13718258f72SThomas Gleixner wait_event(desc->wait_for_threads, 13818258f72SThomas Gleixner !atomic_read(&desc->threads_active)); 13918258f72SThomas Gleixner } 1401da177e4SLinus Torvalds } 1411da177e4SLinus Torvalds EXPORT_SYMBOL(synchronize_irq); 1421da177e4SLinus Torvalds 1433aa551c9SThomas Gleixner #ifdef CONFIG_SMP 1443aa551c9SThomas Gleixner cpumask_var_t irq_default_affinity; 1453aa551c9SThomas Gleixner 1469c255583SThomas Gleixner static bool __irq_can_set_affinity(struct irq_desc *desc) 147e019c249SJiang Liu { 148e019c249SJiang Liu if (!desc || !irqd_can_balance(&desc->irq_data) || 149e019c249SJiang Liu !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) 1509c255583SThomas Gleixner return false; 1519c255583SThomas Gleixner return true; 152e019c249SJiang Liu } 153e019c249SJiang Liu 154771ee3b0SThomas Gleixner /** 155771ee3b0SThomas Gleixner * irq_can_set_affinity - Check if the affinity of a given irq can be set 156771ee3b0SThomas Gleixner * @irq: Interrupt to check 157771ee3b0SThomas Gleixner * 158771ee3b0SThomas Gleixner */ 159771ee3b0SThomas Gleixner int irq_can_set_affinity(unsigned int irq) 160771ee3b0SThomas Gleixner { 161e019c249SJiang Liu return __irq_can_set_affinity(irq_to_desc(irq)); 162771ee3b0SThomas Gleixner } 163771ee3b0SThomas Gleixner 164591d2fb0SThomas Gleixner /** 1659c255583SThomas Gleixner * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space 1669c255583SThomas Gleixner * @irq: Interrupt to check 1679c255583SThomas Gleixner * 1689c255583SThomas Gleixner * Like irq_can_set_affinity() above, but additionally checks for the 1699c255583SThomas Gleixner * AFFINITY_MANAGED flag. 1709c255583SThomas Gleixner */ 1719c255583SThomas Gleixner bool irq_can_set_affinity_usr(unsigned int irq) 1729c255583SThomas Gleixner { 1739c255583SThomas Gleixner struct irq_desc *desc = irq_to_desc(irq); 1749c255583SThomas Gleixner 1759c255583SThomas Gleixner return __irq_can_set_affinity(desc) && 1769c255583SThomas Gleixner !irqd_affinity_is_managed(&desc->irq_data); 1779c255583SThomas Gleixner } 1789c255583SThomas Gleixner 1799c255583SThomas Gleixner /** 180591d2fb0SThomas Gleixner * irq_set_thread_affinity - Notify irq threads to adjust affinity 1815c982c58SKrzysztof Kozlowski * @desc: irq descriptor which has affinity changed 182591d2fb0SThomas Gleixner * 183591d2fb0SThomas Gleixner * We just set IRQTF_AFFINITY and delegate the affinity setting 184591d2fb0SThomas Gleixner * to the interrupt thread itself. We can not call 185591d2fb0SThomas Gleixner * set_cpus_allowed_ptr() here as we hold desc->lock and this 186591d2fb0SThomas Gleixner * code can be called from hard interrupt context. 187591d2fb0SThomas Gleixner */ 188591d2fb0SThomas Gleixner void irq_set_thread_affinity(struct irq_desc *desc) 1893aa551c9SThomas Gleixner { 190f944b5a7SDaniel Lezcano struct irqaction *action; 1913aa551c9SThomas Gleixner 192f944b5a7SDaniel Lezcano for_each_action_of_desc(desc, action) 1933aa551c9SThomas Gleixner if (action->thread) 194591d2fb0SThomas Gleixner set_bit(IRQTF_AFFINITY, &action->thread_flags); 1953aa551c9SThomas Gleixner } 1963aa551c9SThomas Gleixner 197baedb87dSThomas Gleixner #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK 19819e1d4e9SThomas Gleixner static void irq_validate_effective_affinity(struct irq_data *data) 19919e1d4e9SThomas Gleixner { 20019e1d4e9SThomas Gleixner const struct cpumask *m = irq_data_get_effective_affinity_mask(data); 20119e1d4e9SThomas Gleixner struct irq_chip *chip = irq_data_get_irq_chip(data); 20219e1d4e9SThomas Gleixner 20319e1d4e9SThomas Gleixner if (!cpumask_empty(m)) 20419e1d4e9SThomas Gleixner return; 20519e1d4e9SThomas Gleixner pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n", 20619e1d4e9SThomas Gleixner chip->name, data->irq); 20719e1d4e9SThomas Gleixner } 208baedb87dSThomas Gleixner #else 209baedb87dSThomas Gleixner static inline void irq_validate_effective_affinity(struct irq_data *data) { } 210baedb87dSThomas Gleixner #endif 211baedb87dSThomas Gleixner 212818b0f3bSJiang Liu int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, 213818b0f3bSJiang Liu bool force) 214818b0f3bSJiang Liu { 215818b0f3bSJiang Liu struct irq_desc *desc = irq_data_to_desc(data); 216818b0f3bSJiang Liu struct irq_chip *chip = irq_data_get_irq_chip(data); 21733de0aa4SMarc Zyngier const struct cpumask *prog_mask; 218818b0f3bSJiang Liu int ret; 219818b0f3bSJiang Liu 22033de0aa4SMarc Zyngier static DEFINE_RAW_SPINLOCK(tmp_mask_lock); 22133de0aa4SMarc Zyngier static struct cpumask tmp_mask; 22233de0aa4SMarc Zyngier 223e43b3b58SThomas Gleixner if (!chip || !chip->irq_set_affinity) 224e43b3b58SThomas Gleixner return -EINVAL; 225e43b3b58SThomas Gleixner 22633de0aa4SMarc Zyngier raw_spin_lock(&tmp_mask_lock); 22711ea68f5SMing Lei /* 22811ea68f5SMing Lei * If this is a managed interrupt and housekeeping is enabled on 22911ea68f5SMing Lei * it check whether the requested affinity mask intersects with 23011ea68f5SMing Lei * a housekeeping CPU. If so, then remove the isolated CPUs from 23111ea68f5SMing Lei * the mask and just keep the housekeeping CPU(s). This prevents 23211ea68f5SMing Lei * the affinity setter from routing the interrupt to an isolated 23311ea68f5SMing Lei * CPU to avoid that I/O submitted from a housekeeping CPU causes 23411ea68f5SMing Lei * interrupts on an isolated one. 23511ea68f5SMing Lei * 23611ea68f5SMing Lei * If the masks do not intersect or include online CPU(s) then 23711ea68f5SMing Lei * keep the requested mask. The isolated target CPUs are only 23811ea68f5SMing Lei * receiving interrupts when the I/O operation was submitted 23911ea68f5SMing Lei * directly from them. 24011ea68f5SMing Lei * 24111ea68f5SMing Lei * If all housekeeping CPUs in the affinity mask are offline, the 24211ea68f5SMing Lei * interrupt will be migrated by the CPU hotplug code once a 24311ea68f5SMing Lei * housekeeping CPU which belongs to the affinity mask comes 24411ea68f5SMing Lei * online. 24511ea68f5SMing Lei */ 24611ea68f5SMing Lei if (irqd_affinity_is_managed(data) && 24704d4e665SFrederic Weisbecker housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) { 24833de0aa4SMarc Zyngier const struct cpumask *hk_mask; 24911ea68f5SMing Lei 25004d4e665SFrederic Weisbecker hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ); 25111ea68f5SMing Lei 25211ea68f5SMing Lei cpumask_and(&tmp_mask, mask, hk_mask); 25311ea68f5SMing Lei if (!cpumask_intersects(&tmp_mask, cpu_online_mask)) 25411ea68f5SMing Lei prog_mask = mask; 25511ea68f5SMing Lei else 25611ea68f5SMing Lei prog_mask = &tmp_mask; 25711ea68f5SMing Lei } else { 25833de0aa4SMarc Zyngier prog_mask = mask; 25911ea68f5SMing Lei } 26033de0aa4SMarc Zyngier 261c48c8b82SMarc Zyngier /* 262c48c8b82SMarc Zyngier * Make sure we only provide online CPUs to the irqchip, 263c48c8b82SMarc Zyngier * unless we are being asked to force the affinity (in which 264c48c8b82SMarc Zyngier * case we do as we are told). 265c48c8b82SMarc Zyngier */ 26633de0aa4SMarc Zyngier cpumask_and(&tmp_mask, prog_mask, cpu_online_mask); 267c48c8b82SMarc Zyngier if (!force && !cpumask_empty(&tmp_mask)) 26833de0aa4SMarc Zyngier ret = chip->irq_set_affinity(data, &tmp_mask, force); 269c48c8b82SMarc Zyngier else if (force) 270c48c8b82SMarc Zyngier ret = chip->irq_set_affinity(data, mask, force); 27133de0aa4SMarc Zyngier else 27233de0aa4SMarc Zyngier ret = -EINVAL; 27333de0aa4SMarc Zyngier 27433de0aa4SMarc Zyngier raw_spin_unlock(&tmp_mask_lock); 27533de0aa4SMarc Zyngier 276818b0f3bSJiang Liu switch (ret) { 277818b0f3bSJiang Liu case IRQ_SET_MASK_OK: 2782cb62547SJiang Liu case IRQ_SET_MASK_OK_DONE: 2799df872faSJiang Liu cpumask_copy(desc->irq_common_data.affinity, mask); 280df561f66SGustavo A. R. Silva fallthrough; 281818b0f3bSJiang Liu case IRQ_SET_MASK_OK_NOCOPY: 28219e1d4e9SThomas Gleixner irq_validate_effective_affinity(data); 283818b0f3bSJiang Liu irq_set_thread_affinity(desc); 284818b0f3bSJiang Liu ret = 0; 285818b0f3bSJiang Liu } 286818b0f3bSJiang Liu 287818b0f3bSJiang Liu return ret; 288818b0f3bSJiang Liu } 289818b0f3bSJiang Liu 29012f47073SThomas Gleixner #ifdef CONFIG_GENERIC_PENDING_IRQ 29112f47073SThomas Gleixner static inline int irq_set_affinity_pending(struct irq_data *data, 29212f47073SThomas Gleixner const struct cpumask *dest) 29312f47073SThomas Gleixner { 29412f47073SThomas Gleixner struct irq_desc *desc = irq_data_to_desc(data); 29512f47073SThomas Gleixner 29612f47073SThomas Gleixner irqd_set_move_pending(data); 29712f47073SThomas Gleixner irq_copy_pending(desc, dest); 29812f47073SThomas Gleixner return 0; 29912f47073SThomas Gleixner } 30012f47073SThomas Gleixner #else 30112f47073SThomas Gleixner static inline int irq_set_affinity_pending(struct irq_data *data, 30212f47073SThomas Gleixner const struct cpumask *dest) 30312f47073SThomas Gleixner { 30412f47073SThomas Gleixner return -EBUSY; 30512f47073SThomas Gleixner } 30612f47073SThomas Gleixner #endif 30712f47073SThomas Gleixner 30812f47073SThomas Gleixner static int irq_try_set_affinity(struct irq_data *data, 30912f47073SThomas Gleixner const struct cpumask *dest, bool force) 31012f47073SThomas Gleixner { 31112f47073SThomas Gleixner int ret = irq_do_set_affinity(data, dest, force); 31212f47073SThomas Gleixner 31312f47073SThomas Gleixner /* 31412f47073SThomas Gleixner * In case that the underlying vector management is busy and the 31512f47073SThomas Gleixner * architecture supports the generic pending mechanism then utilize 31612f47073SThomas Gleixner * this to avoid returning an error to user space. 31712f47073SThomas Gleixner */ 31812f47073SThomas Gleixner if (ret == -EBUSY && !force) 31912f47073SThomas Gleixner ret = irq_set_affinity_pending(data, dest); 32012f47073SThomas Gleixner return ret; 32112f47073SThomas Gleixner } 32212f47073SThomas Gleixner 323baedb87dSThomas Gleixner static bool irq_set_affinity_deactivated(struct irq_data *data, 324baedb87dSThomas Gleixner const struct cpumask *mask, bool force) 325baedb87dSThomas Gleixner { 326baedb87dSThomas Gleixner struct irq_desc *desc = irq_data_to_desc(data); 327baedb87dSThomas Gleixner 328baedb87dSThomas Gleixner /* 329f0c7bacaSThomas Gleixner * Handle irq chips which can handle affinity only in activated 330f0c7bacaSThomas Gleixner * state correctly 331f0c7bacaSThomas Gleixner * 332baedb87dSThomas Gleixner * If the interrupt is not yet activated, just store the affinity 333baedb87dSThomas Gleixner * mask and do not call the chip driver at all. On activation the 334baedb87dSThomas Gleixner * driver has to make sure anyway that the interrupt is in a 335a359f757SIngo Molnar * usable state so startup works. 336baedb87dSThomas Gleixner */ 337f0c7bacaSThomas Gleixner if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || 338f0c7bacaSThomas Gleixner irqd_is_activated(data) || !irqd_affinity_on_activate(data)) 339baedb87dSThomas Gleixner return false; 340baedb87dSThomas Gleixner 341baedb87dSThomas Gleixner cpumask_copy(desc->irq_common_data.affinity, mask); 342*61030630SSamuel Holland irq_data_update_effective_affinity(data, mask); 343baedb87dSThomas Gleixner irqd_set(data, IRQD_AFFINITY_SET); 344baedb87dSThomas Gleixner return true; 345baedb87dSThomas Gleixner } 346baedb87dSThomas Gleixner 34701f8fa4fSThomas Gleixner int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, 34801f8fa4fSThomas Gleixner bool force) 349c2d0c555SDavid Daney { 350c2d0c555SDavid Daney struct irq_chip *chip = irq_data_get_irq_chip(data); 351c2d0c555SDavid Daney struct irq_desc *desc = irq_data_to_desc(data); 352c2d0c555SDavid Daney int ret = 0; 353c2d0c555SDavid Daney 354c2d0c555SDavid Daney if (!chip || !chip->irq_set_affinity) 355c2d0c555SDavid Daney return -EINVAL; 356c2d0c555SDavid Daney 357baedb87dSThomas Gleixner if (irq_set_affinity_deactivated(data, mask, force)) 358baedb87dSThomas Gleixner return 0; 359baedb87dSThomas Gleixner 36012f47073SThomas Gleixner if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) { 36112f47073SThomas Gleixner ret = irq_try_set_affinity(data, mask, force); 362c2d0c555SDavid Daney } else { 363c2d0c555SDavid Daney irqd_set_move_pending(data); 364c2d0c555SDavid Daney irq_copy_pending(desc, mask); 365c2d0c555SDavid Daney } 366c2d0c555SDavid Daney 367c2d0c555SDavid Daney if (desc->affinity_notify) { 368c2d0c555SDavid Daney kref_get(&desc->affinity_notify->kref); 369df81dfcfSEdward Cree if (!schedule_work(&desc->affinity_notify->work)) { 370df81dfcfSEdward Cree /* Work was already scheduled, drop our extra ref */ 371df81dfcfSEdward Cree kref_put(&desc->affinity_notify->kref, 372df81dfcfSEdward Cree desc->affinity_notify->release); 373df81dfcfSEdward Cree } 374c2d0c555SDavid Daney } 375c2d0c555SDavid Daney irqd_set(data, IRQD_AFFINITY_SET); 376c2d0c555SDavid Daney 377c2d0c555SDavid Daney return ret; 378c2d0c555SDavid Daney } 379c2d0c555SDavid Daney 3801d3aec89SJohn Garry /** 3811d3aec89SJohn Garry * irq_update_affinity_desc - Update affinity management for an interrupt 3821d3aec89SJohn Garry * @irq: The interrupt number to update 3831d3aec89SJohn Garry * @affinity: Pointer to the affinity descriptor 3841d3aec89SJohn Garry * 3851d3aec89SJohn Garry * This interface can be used to configure the affinity management of 3861d3aec89SJohn Garry * interrupts which have been allocated already. 3871d3aec89SJohn Garry * 3881d3aec89SJohn Garry * There are certain limitations on when it may be used - attempts to use it 3891d3aec89SJohn Garry * for when the kernel is configured for generic IRQ reservation mode (in 3901d3aec89SJohn Garry * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with 3911d3aec89SJohn Garry * managed/non-managed interrupt accounting. In addition, attempts to use it on 3921d3aec89SJohn Garry * an interrupt which is already started or which has already been configured 3931d3aec89SJohn Garry * as managed will also fail, as these mean invalid init state or double init. 3941d3aec89SJohn Garry */ 3951d3aec89SJohn Garry int irq_update_affinity_desc(unsigned int irq, 3961d3aec89SJohn Garry struct irq_affinity_desc *affinity) 3971d3aec89SJohn Garry { 3981d3aec89SJohn Garry struct irq_desc *desc; 3991d3aec89SJohn Garry unsigned long flags; 4001d3aec89SJohn Garry bool activated; 4011d3aec89SJohn Garry int ret = 0; 4021d3aec89SJohn Garry 4031d3aec89SJohn Garry /* 4041d3aec89SJohn Garry * Supporting this with the reservation scheme used by x86 needs 4051d3aec89SJohn Garry * some more thought. Fail it for now. 4061d3aec89SJohn Garry */ 4071d3aec89SJohn Garry if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE)) 4081d3aec89SJohn Garry return -EOPNOTSUPP; 4091d3aec89SJohn Garry 4101d3aec89SJohn Garry desc = irq_get_desc_buslock(irq, &flags, 0); 4111d3aec89SJohn Garry if (!desc) 4121d3aec89SJohn Garry return -EINVAL; 4131d3aec89SJohn Garry 4141d3aec89SJohn Garry /* Requires the interrupt to be shut down */ 4151d3aec89SJohn Garry if (irqd_is_started(&desc->irq_data)) { 4161d3aec89SJohn Garry ret = -EBUSY; 4171d3aec89SJohn Garry goto out_unlock; 4181d3aec89SJohn Garry } 4191d3aec89SJohn Garry 4201d3aec89SJohn Garry /* Interrupts which are already managed cannot be modified */ 4211d3aec89SJohn Garry if (irqd_affinity_is_managed(&desc->irq_data)) { 4221d3aec89SJohn Garry ret = -EBUSY; 4231d3aec89SJohn Garry goto out_unlock; 4241d3aec89SJohn Garry } 4251d3aec89SJohn Garry 4261d3aec89SJohn Garry /* 4271d3aec89SJohn Garry * Deactivate the interrupt. That's required to undo 4281d3aec89SJohn Garry * anything an earlier activation has established. 4291d3aec89SJohn Garry */ 4301d3aec89SJohn Garry activated = irqd_is_activated(&desc->irq_data); 4311d3aec89SJohn Garry if (activated) 4321d3aec89SJohn Garry irq_domain_deactivate_irq(&desc->irq_data); 4331d3aec89SJohn Garry 4341d3aec89SJohn Garry if (affinity->is_managed) { 4351d3aec89SJohn Garry irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED); 4361d3aec89SJohn Garry irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN); 4371d3aec89SJohn Garry } 4381d3aec89SJohn Garry 4391d3aec89SJohn Garry cpumask_copy(desc->irq_common_data.affinity, &affinity->mask); 4401d3aec89SJohn Garry 4411d3aec89SJohn Garry /* Restore the activation state */ 4421d3aec89SJohn Garry if (activated) 4431d3aec89SJohn Garry irq_domain_activate_irq(&desc->irq_data, false); 4441d3aec89SJohn Garry 4451d3aec89SJohn Garry out_unlock: 4461d3aec89SJohn Garry irq_put_desc_busunlock(desc, flags); 4471d3aec89SJohn Garry return ret; 4481d3aec89SJohn Garry } 4491d3aec89SJohn Garry 4504d80d6caSThomas Gleixner static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, 4514d80d6caSThomas Gleixner bool force) 452771ee3b0SThomas Gleixner { 45308678b08SYinghai Lu struct irq_desc *desc = irq_to_desc(irq); 454f6d87f4bSThomas Gleixner unsigned long flags; 455c2d0c555SDavid Daney int ret; 456771ee3b0SThomas Gleixner 457c2d0c555SDavid Daney if (!desc) 458771ee3b0SThomas Gleixner return -EINVAL; 459771ee3b0SThomas Gleixner 460239007b8SThomas Gleixner raw_spin_lock_irqsave(&desc->lock, flags); 46101f8fa4fSThomas Gleixner ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); 462239007b8SThomas Gleixner raw_spin_unlock_irqrestore(&desc->lock, flags); 4631fa46f1fSThomas Gleixner return ret; 464771ee3b0SThomas Gleixner } 465771ee3b0SThomas Gleixner 4664d80d6caSThomas Gleixner /** 4674d80d6caSThomas Gleixner * irq_set_affinity - Set the irq affinity of a given irq 4684d80d6caSThomas Gleixner * @irq: Interrupt to set affinity 4694d80d6caSThomas Gleixner * @cpumask: cpumask 4704d80d6caSThomas Gleixner * 4714d80d6caSThomas Gleixner * Fails if cpumask does not contain an online CPU 4724d80d6caSThomas Gleixner */ 4734d80d6caSThomas Gleixner int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) 4744d80d6caSThomas Gleixner { 4754d80d6caSThomas Gleixner return __irq_set_affinity(irq, cpumask, false); 4764d80d6caSThomas Gleixner } 4774d80d6caSThomas Gleixner EXPORT_SYMBOL_GPL(irq_set_affinity); 4784d80d6caSThomas Gleixner 4794d80d6caSThomas Gleixner /** 4804d80d6caSThomas Gleixner * irq_force_affinity - Force the irq affinity of a given irq 4814d80d6caSThomas Gleixner * @irq: Interrupt to set affinity 4824d80d6caSThomas Gleixner * @cpumask: cpumask 4834d80d6caSThomas Gleixner * 4844d80d6caSThomas Gleixner * Same as irq_set_affinity, but without checking the mask against 4854d80d6caSThomas Gleixner * online cpus. 4864d80d6caSThomas Gleixner * 4874d80d6caSThomas Gleixner * Solely for low level cpu hotplug code, where we need to make per 4884d80d6caSThomas Gleixner * cpu interrupts affine before the cpu becomes online. 4894d80d6caSThomas Gleixner */ 4904d80d6caSThomas Gleixner int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) 4914d80d6caSThomas Gleixner { 4924d80d6caSThomas Gleixner return __irq_set_affinity(irq, cpumask, true); 4934d80d6caSThomas Gleixner } 4944d80d6caSThomas Gleixner EXPORT_SYMBOL_GPL(irq_force_affinity); 4954d80d6caSThomas Gleixner 49665c7cdedSThomas Gleixner int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m, 49765c7cdedSThomas Gleixner bool setaffinity) 498e7a297b0SPeter P Waskiewicz Jr { 499e7a297b0SPeter P Waskiewicz Jr unsigned long flags; 50031d9d9b6SMarc Zyngier struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 501e7a297b0SPeter P Waskiewicz Jr 502e7a297b0SPeter P Waskiewicz Jr if (!desc) 503e7a297b0SPeter P Waskiewicz Jr return -EINVAL; 504e7a297b0SPeter P Waskiewicz Jr desc->affinity_hint = m; 50502725e74SThomas Gleixner irq_put_desc_unlock(desc, flags); 50665c7cdedSThomas Gleixner if (m && setaffinity) 507e2e64a93SJesse Brandeburg __irq_set_affinity(irq, m, false); 508e7a297b0SPeter P Waskiewicz Jr return 0; 509e7a297b0SPeter P Waskiewicz Jr } 51065c7cdedSThomas Gleixner EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint); 511e7a297b0SPeter P Waskiewicz Jr 512cd7eab44SBen Hutchings static void irq_affinity_notify(struct work_struct *work) 513cd7eab44SBen Hutchings { 514cd7eab44SBen Hutchings struct irq_affinity_notify *notify = 515cd7eab44SBen Hutchings container_of(work, struct irq_affinity_notify, work); 516cd7eab44SBen Hutchings struct irq_desc *desc = irq_to_desc(notify->irq); 517cd7eab44SBen Hutchings cpumask_var_t cpumask; 518cd7eab44SBen Hutchings unsigned long flags; 519cd7eab44SBen Hutchings 5201fa46f1fSThomas Gleixner if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) 521cd7eab44SBen Hutchings goto out; 522cd7eab44SBen Hutchings 523cd7eab44SBen Hutchings raw_spin_lock_irqsave(&desc->lock, flags); 5240ef5ca1eSThomas Gleixner if (irq_move_pending(&desc->irq_data)) 5251fa46f1fSThomas Gleixner irq_get_pending(cpumask, desc); 526cd7eab44SBen Hutchings else 5279df872faSJiang Liu cpumask_copy(cpumask, desc->irq_common_data.affinity); 528cd7eab44SBen Hutchings raw_spin_unlock_irqrestore(&desc->lock, flags); 529cd7eab44SBen Hutchings 530cd7eab44SBen Hutchings notify->notify(notify, cpumask); 531cd7eab44SBen Hutchings 532cd7eab44SBen Hutchings free_cpumask_var(cpumask); 533cd7eab44SBen Hutchings out: 534cd7eab44SBen Hutchings kref_put(¬ify->kref, notify->release); 535cd7eab44SBen Hutchings } 536cd7eab44SBen Hutchings 537cd7eab44SBen Hutchings /** 538cd7eab44SBen Hutchings * irq_set_affinity_notifier - control notification of IRQ affinity changes 539cd7eab44SBen Hutchings * @irq: Interrupt for which to enable/disable notification 540cd7eab44SBen Hutchings * @notify: Context for notification, or %NULL to disable 541cd7eab44SBen Hutchings * notification. Function pointers must be initialised; 542cd7eab44SBen Hutchings * the other fields will be initialised by this function. 543cd7eab44SBen Hutchings * 544cd7eab44SBen Hutchings * Must be called in process context. Notification may only be enabled 545cd7eab44SBen Hutchings * after the IRQ is allocated and must be disabled before the IRQ is 546cd7eab44SBen Hutchings * freed using free_irq(). 547cd7eab44SBen Hutchings */ 548cd7eab44SBen Hutchings int 549cd7eab44SBen Hutchings irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) 550cd7eab44SBen Hutchings { 551cd7eab44SBen Hutchings struct irq_desc *desc = irq_to_desc(irq); 552cd7eab44SBen Hutchings struct irq_affinity_notify *old_notify; 553cd7eab44SBen Hutchings unsigned long flags; 554cd7eab44SBen Hutchings 555cd7eab44SBen Hutchings /* The release function is promised process context */ 556cd7eab44SBen Hutchings might_sleep(); 557cd7eab44SBen Hutchings 558b525903cSJulien Thierry if (!desc || desc->istate & IRQS_NMI) 559cd7eab44SBen Hutchings return -EINVAL; 560cd7eab44SBen Hutchings 561cd7eab44SBen Hutchings /* Complete initialisation of *notify */ 562cd7eab44SBen Hutchings if (notify) { 563cd7eab44SBen Hutchings notify->irq = irq; 564cd7eab44SBen Hutchings kref_init(¬ify->kref); 565cd7eab44SBen Hutchings INIT_WORK(¬ify->work, irq_affinity_notify); 566cd7eab44SBen Hutchings } 567cd7eab44SBen Hutchings 568cd7eab44SBen Hutchings raw_spin_lock_irqsave(&desc->lock, flags); 569cd7eab44SBen Hutchings old_notify = desc->affinity_notify; 570cd7eab44SBen Hutchings desc->affinity_notify = notify; 571cd7eab44SBen Hutchings raw_spin_unlock_irqrestore(&desc->lock, flags); 572cd7eab44SBen Hutchings 57359c39840SPrasad Sodagudi if (old_notify) { 574df81dfcfSEdward Cree if (cancel_work_sync(&old_notify->work)) { 575df81dfcfSEdward Cree /* Pending work had a ref, put that one too */ 576df81dfcfSEdward Cree kref_put(&old_notify->kref, old_notify->release); 577df81dfcfSEdward Cree } 578cd7eab44SBen Hutchings kref_put(&old_notify->kref, old_notify->release); 57959c39840SPrasad Sodagudi } 580cd7eab44SBen Hutchings 581cd7eab44SBen Hutchings return 0; 582cd7eab44SBen Hutchings } 583cd7eab44SBen Hutchings EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); 584cd7eab44SBen Hutchings 58518404756SMax Krasnyansky #ifndef CONFIG_AUTO_IRQ_AFFINITY 58618404756SMax Krasnyansky /* 58718404756SMax Krasnyansky * Generic version of the affinity autoselector. 58818404756SMax Krasnyansky */ 58943564bd9SThomas Gleixner int irq_setup_affinity(struct irq_desc *desc) 59018404756SMax Krasnyansky { 591569bda8dSThomas Gleixner struct cpumask *set = irq_default_affinity; 592cba4235eSThomas Gleixner int ret, node = irq_desc_get_node(desc); 593cba4235eSThomas Gleixner static DEFINE_RAW_SPINLOCK(mask_lock); 594cba4235eSThomas Gleixner static struct cpumask mask; 595569bda8dSThomas Gleixner 596b008207cSThomas Gleixner /* Excludes PER_CPU and NO_BALANCE interrupts */ 597e019c249SJiang Liu if (!__irq_can_set_affinity(desc)) 59818404756SMax Krasnyansky return 0; 59918404756SMax Krasnyansky 600cba4235eSThomas Gleixner raw_spin_lock(&mask_lock); 601f6d87f4bSThomas Gleixner /* 6029332ef9dSMasahiro Yamada * Preserve the managed affinity setting and a userspace affinity 60306ee6d57SThomas Gleixner * setup, but make sure that one of the targets is online. 604f6d87f4bSThomas Gleixner */ 60506ee6d57SThomas Gleixner if (irqd_affinity_is_managed(&desc->irq_data) || 60606ee6d57SThomas Gleixner irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { 6079df872faSJiang Liu if (cpumask_intersects(desc->irq_common_data.affinity, 608569bda8dSThomas Gleixner cpu_online_mask)) 6099df872faSJiang Liu set = desc->irq_common_data.affinity; 6100c6f8a8bSThomas Gleixner else 6112bdd1055SThomas Gleixner irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); 6122bdd1055SThomas Gleixner } 61318404756SMax Krasnyansky 614cba4235eSThomas Gleixner cpumask_and(&mask, cpu_online_mask, set); 615bddda606SSrinivas Ramana if (cpumask_empty(&mask)) 616bddda606SSrinivas Ramana cpumask_copy(&mask, cpu_online_mask); 617bddda606SSrinivas Ramana 618241fc640SPrarit Bhargava if (node != NUMA_NO_NODE) { 619241fc640SPrarit Bhargava const struct cpumask *nodemask = cpumask_of_node(node); 620241fc640SPrarit Bhargava 621241fc640SPrarit Bhargava /* make sure at least one of the cpus in nodemask is online */ 622cba4235eSThomas Gleixner if (cpumask_intersects(&mask, nodemask)) 623cba4235eSThomas Gleixner cpumask_and(&mask, &mask, nodemask); 624241fc640SPrarit Bhargava } 625cba4235eSThomas Gleixner ret = irq_do_set_affinity(&desc->irq_data, &mask, false); 626cba4235eSThomas Gleixner raw_spin_unlock(&mask_lock); 627cba4235eSThomas Gleixner return ret; 62818404756SMax Krasnyansky } 629f6d87f4bSThomas Gleixner #else 630a8a98eacSJiang Liu /* Wrapper for ALPHA specific affinity selector magic */ 631cba4235eSThomas Gleixner int irq_setup_affinity(struct irq_desc *desc) 632f6d87f4bSThomas Gleixner { 633cba4235eSThomas Gleixner return irq_select_affinity(irq_desc_get_irq(desc)); 634f6d87f4bSThomas Gleixner } 635cba6437aSThomas Gleixner #endif /* CONFIG_AUTO_IRQ_AFFINITY */ 636cba6437aSThomas Gleixner #endif /* CONFIG_SMP */ 63718404756SMax Krasnyansky 6381da177e4SLinus Torvalds 639fcf1ae2fSFeng Wu /** 640fcf1ae2fSFeng Wu * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt 641fcf1ae2fSFeng Wu * @irq: interrupt number to set affinity 642250a53d6SChristoffer Dall * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU 643250a53d6SChristoffer Dall * specific data for percpu_devid interrupts 644fcf1ae2fSFeng Wu * 645fcf1ae2fSFeng Wu * This function uses the vCPU specific data to set the vCPU 646fcf1ae2fSFeng Wu * affinity for an irq. The vCPU specific data is passed from 647fcf1ae2fSFeng Wu * outside, such as KVM. One example code path is as below: 648fcf1ae2fSFeng Wu * KVM -> IOMMU -> irq_set_vcpu_affinity(). 649fcf1ae2fSFeng Wu */ 650fcf1ae2fSFeng Wu int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info) 651fcf1ae2fSFeng Wu { 652fcf1ae2fSFeng Wu unsigned long flags; 653fcf1ae2fSFeng Wu struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 654fcf1ae2fSFeng Wu struct irq_data *data; 655fcf1ae2fSFeng Wu struct irq_chip *chip; 656fcf1ae2fSFeng Wu int ret = -ENOSYS; 657fcf1ae2fSFeng Wu 658fcf1ae2fSFeng Wu if (!desc) 659fcf1ae2fSFeng Wu return -EINVAL; 660fcf1ae2fSFeng Wu 661fcf1ae2fSFeng Wu data = irq_desc_get_irq_data(desc); 6620abce64aSMarc Zyngier do { 663fcf1ae2fSFeng Wu chip = irq_data_get_irq_chip(data); 664fcf1ae2fSFeng Wu if (chip && chip->irq_set_vcpu_affinity) 6650abce64aSMarc Zyngier break; 6660abce64aSMarc Zyngier #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 6670abce64aSMarc Zyngier data = data->parent_data; 6680abce64aSMarc Zyngier #else 6690abce64aSMarc Zyngier data = NULL; 6700abce64aSMarc Zyngier #endif 6710abce64aSMarc Zyngier } while (data); 6720abce64aSMarc Zyngier 6730abce64aSMarc Zyngier if (data) 674fcf1ae2fSFeng Wu ret = chip->irq_set_vcpu_affinity(data, vcpu_info); 675fcf1ae2fSFeng Wu irq_put_desc_unlock(desc, flags); 676fcf1ae2fSFeng Wu 677fcf1ae2fSFeng Wu return ret; 678fcf1ae2fSFeng Wu } 679fcf1ae2fSFeng Wu EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity); 680fcf1ae2fSFeng Wu 68179ff1cdaSJiang Liu void __disable_irq(struct irq_desc *desc) 6820a0c5168SRafael J. Wysocki { 6833aae994fSThomas Gleixner if (!desc->depth++) 68487923470SThomas Gleixner irq_disable(desc); 6850a0c5168SRafael J. Wysocki } 6860a0c5168SRafael J. Wysocki 68702725e74SThomas Gleixner static int __disable_irq_nosync(unsigned int irq) 68802725e74SThomas Gleixner { 68902725e74SThomas Gleixner unsigned long flags; 69031d9d9b6SMarc Zyngier struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 69102725e74SThomas Gleixner 69202725e74SThomas Gleixner if (!desc) 69302725e74SThomas Gleixner return -EINVAL; 69479ff1cdaSJiang Liu __disable_irq(desc); 69502725e74SThomas Gleixner irq_put_desc_busunlock(desc, flags); 69602725e74SThomas Gleixner return 0; 69702725e74SThomas Gleixner } 69802725e74SThomas Gleixner 6991da177e4SLinus Torvalds /** 7001da177e4SLinus Torvalds * disable_irq_nosync - disable an irq without waiting 7011da177e4SLinus Torvalds * @irq: Interrupt to disable 7021da177e4SLinus Torvalds * 7031da177e4SLinus Torvalds * Disable the selected interrupt line. Disables and Enables are 7041da177e4SLinus Torvalds * nested. 7051da177e4SLinus Torvalds * Unlike disable_irq(), this function does not ensure existing 7061da177e4SLinus Torvalds * instances of the IRQ handler have completed before returning. 7071da177e4SLinus Torvalds * 7081da177e4SLinus Torvalds * This function may be called from IRQ context. 7091da177e4SLinus Torvalds */ 7101da177e4SLinus Torvalds void disable_irq_nosync(unsigned int irq) 7111da177e4SLinus Torvalds { 71202725e74SThomas Gleixner __disable_irq_nosync(irq); 7131da177e4SLinus Torvalds } 7141da177e4SLinus Torvalds EXPORT_SYMBOL(disable_irq_nosync); 7151da177e4SLinus Torvalds 7161da177e4SLinus Torvalds /** 7171da177e4SLinus Torvalds * disable_irq - disable an irq and wait for completion 7181da177e4SLinus Torvalds * @irq: Interrupt to disable 7191da177e4SLinus Torvalds * 7201da177e4SLinus Torvalds * Disable the selected interrupt line. Enables and Disables are 7211da177e4SLinus Torvalds * nested. 7221da177e4SLinus Torvalds * This function waits for any pending IRQ handlers for this interrupt 7231da177e4SLinus Torvalds * to complete before returning. If you use this function while 7241da177e4SLinus Torvalds * holding a resource the IRQ handler may need you will deadlock. 7251da177e4SLinus Torvalds * 7261da177e4SLinus Torvalds * This function may be called - with care - from IRQ context. 7271da177e4SLinus Torvalds */ 7281da177e4SLinus Torvalds void disable_irq(unsigned int irq) 7291da177e4SLinus Torvalds { 73002725e74SThomas Gleixner if (!__disable_irq_nosync(irq)) 7311da177e4SLinus Torvalds synchronize_irq(irq); 7321da177e4SLinus Torvalds } 7331da177e4SLinus Torvalds EXPORT_SYMBOL(disable_irq); 7341da177e4SLinus Torvalds 73502cea395SPeter Zijlstra /** 73602cea395SPeter Zijlstra * disable_hardirq - disables an irq and waits for hardirq completion 73702cea395SPeter Zijlstra * @irq: Interrupt to disable 73802cea395SPeter Zijlstra * 73902cea395SPeter Zijlstra * Disable the selected interrupt line. Enables and Disables are 74002cea395SPeter Zijlstra * nested. 74102cea395SPeter Zijlstra * This function waits for any pending hard IRQ handlers for this 74202cea395SPeter Zijlstra * interrupt to complete before returning. If you use this function while 74302cea395SPeter Zijlstra * holding a resource the hard IRQ handler may need you will deadlock. 74402cea395SPeter Zijlstra * 74502cea395SPeter Zijlstra * When used to optimistically disable an interrupt from atomic context 74602cea395SPeter Zijlstra * the return value must be checked. 74702cea395SPeter Zijlstra * 74802cea395SPeter Zijlstra * Returns: false if a threaded handler is active. 74902cea395SPeter Zijlstra * 75002cea395SPeter Zijlstra * This function may be called - with care - from IRQ context. 75102cea395SPeter Zijlstra */ 75202cea395SPeter Zijlstra bool disable_hardirq(unsigned int irq) 75302cea395SPeter Zijlstra { 75402cea395SPeter Zijlstra if (!__disable_irq_nosync(irq)) 75502cea395SPeter Zijlstra return synchronize_hardirq(irq); 75602cea395SPeter Zijlstra 75702cea395SPeter Zijlstra return false; 75802cea395SPeter Zijlstra } 75902cea395SPeter Zijlstra EXPORT_SYMBOL_GPL(disable_hardirq); 76002cea395SPeter Zijlstra 761b525903cSJulien Thierry /** 762b525903cSJulien Thierry * disable_nmi_nosync - disable an nmi without waiting 763b525903cSJulien Thierry * @irq: Interrupt to disable 764b525903cSJulien Thierry * 765b525903cSJulien Thierry * Disable the selected interrupt line. Disables and enables are 766b525903cSJulien Thierry * nested. 767b525903cSJulien Thierry * The interrupt to disable must have been requested through request_nmi. 768b525903cSJulien Thierry * Unlike disable_nmi(), this function does not ensure existing 769b525903cSJulien Thierry * instances of the IRQ handler have completed before returning. 770b525903cSJulien Thierry */ 771b525903cSJulien Thierry void disable_nmi_nosync(unsigned int irq) 772b525903cSJulien Thierry { 773b525903cSJulien Thierry disable_irq_nosync(irq); 774b525903cSJulien Thierry } 775b525903cSJulien Thierry 77679ff1cdaSJiang Liu void __enable_irq(struct irq_desc *desc) 7771adb0850SThomas Gleixner { 7781adb0850SThomas Gleixner switch (desc->depth) { 7791adb0850SThomas Gleixner case 0: 7800a0c5168SRafael J. Wysocki err_out: 78179ff1cdaSJiang Liu WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", 78279ff1cdaSJiang Liu irq_desc_get_irq(desc)); 7831adb0850SThomas Gleixner break; 7841adb0850SThomas Gleixner case 1: { 785c531e836SThomas Gleixner if (desc->istate & IRQS_SUSPENDED) 7860a0c5168SRafael J. Wysocki goto err_out; 7871adb0850SThomas Gleixner /* Prevent probing on this irq: */ 7881ccb4e61SThomas Gleixner irq_settings_set_noprobe(desc); 789201d7f47SThomas Gleixner /* 790201d7f47SThomas Gleixner * Call irq_startup() not irq_enable() here because the 791201d7f47SThomas Gleixner * interrupt might be marked NOAUTOEN. So irq_startup() 792201d7f47SThomas Gleixner * needs to be invoked when it gets enabled the first 793201d7f47SThomas Gleixner * time. If it was already started up, then irq_startup() 794201d7f47SThomas Gleixner * will invoke irq_enable() under the hood. 795201d7f47SThomas Gleixner */ 796c942cee4SThomas Gleixner irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE); 797201d7f47SThomas Gleixner break; 7981adb0850SThomas Gleixner } 7991adb0850SThomas Gleixner default: 8001adb0850SThomas Gleixner desc->depth--; 8011adb0850SThomas Gleixner } 8021adb0850SThomas Gleixner } 8031adb0850SThomas Gleixner 8041da177e4SLinus Torvalds /** 8051da177e4SLinus Torvalds * enable_irq - enable handling of an irq 8061da177e4SLinus Torvalds * @irq: Interrupt to enable 8071da177e4SLinus Torvalds * 8081da177e4SLinus Torvalds * Undoes the effect of one call to disable_irq(). If this 8091da177e4SLinus Torvalds * matches the last disable, processing of interrupts on this 8101da177e4SLinus Torvalds * IRQ line is re-enabled. 8111da177e4SLinus Torvalds * 81270aedd24SThomas Gleixner * This function may be called from IRQ context only when 8136b8ff312SThomas Gleixner * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! 8141da177e4SLinus Torvalds */ 8151da177e4SLinus Torvalds void enable_irq(unsigned int irq) 8161da177e4SLinus Torvalds { 8171da177e4SLinus Torvalds unsigned long flags; 81831d9d9b6SMarc Zyngier struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 8191da177e4SLinus Torvalds 8207d94f7caSYinghai Lu if (!desc) 821c2b5a251SMatthew Wilcox return; 82250f7c032SThomas Gleixner if (WARN(!desc->irq_data.chip, 8232656c366SThomas Gleixner KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) 82402725e74SThomas Gleixner goto out; 8252656c366SThomas Gleixner 82679ff1cdaSJiang Liu __enable_irq(desc); 82702725e74SThomas Gleixner out: 82802725e74SThomas Gleixner irq_put_desc_busunlock(desc, flags); 8291da177e4SLinus Torvalds } 8301da177e4SLinus Torvalds EXPORT_SYMBOL(enable_irq); 8311da177e4SLinus Torvalds 832b525903cSJulien Thierry /** 833b525903cSJulien Thierry * enable_nmi - enable handling of an nmi 834b525903cSJulien Thierry * @irq: Interrupt to enable 835b525903cSJulien Thierry * 836b525903cSJulien Thierry * The interrupt to enable must have been requested through request_nmi. 837b525903cSJulien Thierry * Undoes the effect of one call to disable_nmi(). If this 838b525903cSJulien Thierry * matches the last disable, processing of interrupts on this 839b525903cSJulien Thierry * IRQ line is re-enabled. 840b525903cSJulien Thierry */ 841b525903cSJulien Thierry void enable_nmi(unsigned int irq) 842b525903cSJulien Thierry { 843b525903cSJulien Thierry enable_irq(irq); 844b525903cSJulien Thierry } 845b525903cSJulien Thierry 8460c5d1eb7SDavid Brownell static int set_irq_wake_real(unsigned int irq, unsigned int on) 8472db87321SUwe Kleine-König { 84808678b08SYinghai Lu struct irq_desc *desc = irq_to_desc(irq); 8492db87321SUwe Kleine-König int ret = -ENXIO; 8502db87321SUwe Kleine-König 85160f96b41SSantosh Shilimkar if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) 85260f96b41SSantosh Shilimkar return 0; 85360f96b41SSantosh Shilimkar 8542f7e99bbSThomas Gleixner if (desc->irq_data.chip->irq_set_wake) 8552f7e99bbSThomas Gleixner ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); 8562db87321SUwe Kleine-König 8572db87321SUwe Kleine-König return ret; 8582db87321SUwe Kleine-König } 8592db87321SUwe Kleine-König 860ba9a2331SThomas Gleixner /** 861a0cd9ca2SThomas Gleixner * irq_set_irq_wake - control irq power management wakeup 862ba9a2331SThomas Gleixner * @irq: interrupt to control 863ba9a2331SThomas Gleixner * @on: enable/disable power management wakeup 864ba9a2331SThomas Gleixner * 86515a647ebSDavid Brownell * Enable/disable power management wakeup mode, which is 86615a647ebSDavid Brownell * disabled by default. Enables and disables must match, 86715a647ebSDavid Brownell * just as they match for non-wakeup mode support. 86815a647ebSDavid Brownell * 86915a647ebSDavid Brownell * Wakeup mode lets this IRQ wake the system from sleep 87015a647ebSDavid Brownell * states like "suspend to RAM". 871f9f21ceaSStephen Boyd * 872f9f21ceaSStephen Boyd * Note: irq enable/disable state is completely orthogonal 873f9f21ceaSStephen Boyd * to the enable/disable state of irq wake. An irq can be 874f9f21ceaSStephen Boyd * disabled with disable_irq() and still wake the system as 875f9f21ceaSStephen Boyd * long as the irq has wake enabled. If this does not hold, 876f9f21ceaSStephen Boyd * then the underlying irq chip and the related driver need 877f9f21ceaSStephen Boyd * to be investigated. 878ba9a2331SThomas Gleixner */ 879a0cd9ca2SThomas Gleixner int irq_set_irq_wake(unsigned int irq, unsigned int on) 880ba9a2331SThomas Gleixner { 881ba9a2331SThomas Gleixner unsigned long flags; 88231d9d9b6SMarc Zyngier struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 8832db87321SUwe Kleine-König int ret = 0; 884ba9a2331SThomas Gleixner 88513863a66SJesper Juhl if (!desc) 88613863a66SJesper Juhl return -EINVAL; 88713863a66SJesper Juhl 888b525903cSJulien Thierry /* Don't use NMIs as wake up interrupts please */ 889b525903cSJulien Thierry if (desc->istate & IRQS_NMI) { 890b525903cSJulien Thierry ret = -EINVAL; 891b525903cSJulien Thierry goto out_unlock; 892b525903cSJulien Thierry } 893b525903cSJulien Thierry 89415a647ebSDavid Brownell /* wakeup-capable irqs can be shared between drivers that 89515a647ebSDavid Brownell * don't need to have the same sleep mode behaviors. 89615a647ebSDavid Brownell */ 89715a647ebSDavid Brownell if (on) { 8982db87321SUwe Kleine-König if (desc->wake_depth++ == 0) { 8992db87321SUwe Kleine-König ret = set_irq_wake_real(irq, on); 9002db87321SUwe Kleine-König if (ret) 9012db87321SUwe Kleine-König desc->wake_depth = 0; 90215a647ebSDavid Brownell else 9037f94226fSThomas Gleixner irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); 9042db87321SUwe Kleine-König } 90515a647ebSDavid Brownell } else { 90615a647ebSDavid Brownell if (desc->wake_depth == 0) { 9077a2c4770SArjan van de Ven WARN(1, "Unbalanced IRQ %d wake disable\n", irq); 9082db87321SUwe Kleine-König } else if (--desc->wake_depth == 0) { 9092db87321SUwe Kleine-König ret = set_irq_wake_real(irq, on); 9102db87321SUwe Kleine-König if (ret) 9112db87321SUwe Kleine-König desc->wake_depth = 1; 91215a647ebSDavid Brownell else 9137f94226fSThomas Gleixner irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); 91415a647ebSDavid Brownell } 9152db87321SUwe Kleine-König } 916b525903cSJulien Thierry 917b525903cSJulien Thierry out_unlock: 91802725e74SThomas Gleixner irq_put_desc_busunlock(desc, flags); 919ba9a2331SThomas Gleixner return ret; 920ba9a2331SThomas Gleixner } 921a0cd9ca2SThomas Gleixner EXPORT_SYMBOL(irq_set_irq_wake); 922ba9a2331SThomas Gleixner 9231da177e4SLinus Torvalds /* 9241da177e4SLinus Torvalds * Internal function that tells the architecture code whether a 9251da177e4SLinus Torvalds * particular irq has been exclusively allocated or is available 9261da177e4SLinus Torvalds * for driver use. 9271da177e4SLinus Torvalds */ 9281da177e4SLinus Torvalds int can_request_irq(unsigned int irq, unsigned long irqflags) 9291da177e4SLinus Torvalds { 930cc8c3b78SThomas Gleixner unsigned long flags; 93131d9d9b6SMarc Zyngier struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 93202725e74SThomas Gleixner int canrequest = 0; 9331da177e4SLinus Torvalds 9347d94f7caSYinghai Lu if (!desc) 9357d94f7caSYinghai Lu return 0; 9367d94f7caSYinghai Lu 93702725e74SThomas Gleixner if (irq_settings_can_request(desc)) { 9382779db8dSBen Hutchings if (!desc->action || 9392779db8dSBen Hutchings irqflags & desc->action->flags & IRQF_SHARED) 94002725e74SThomas Gleixner canrequest = 1; 94102725e74SThomas Gleixner } 94202725e74SThomas Gleixner irq_put_desc_unlock(desc, flags); 94302725e74SThomas Gleixner return canrequest; 9441da177e4SLinus Torvalds } 9451da177e4SLinus Torvalds 946a1ff541aSJiang Liu int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) 94782736f4dSUwe Kleine-König { 9486b8ff312SThomas Gleixner struct irq_chip *chip = desc->irq_data.chip; 949d4d5e089SThomas Gleixner int ret, unmask = 0; 95082736f4dSUwe Kleine-König 951b2ba2c30SThomas Gleixner if (!chip || !chip->irq_set_type) { 95282736f4dSUwe Kleine-König /* 95382736f4dSUwe Kleine-König * IRQF_TRIGGER_* but the PIC does not support multiple 95482736f4dSUwe Kleine-König * flow-types? 95582736f4dSUwe Kleine-König */ 956a1ff541aSJiang Liu pr_debug("No set_type function for IRQ %d (%s)\n", 957a1ff541aSJiang Liu irq_desc_get_irq(desc), 95882736f4dSUwe Kleine-König chip ? (chip->name ? : "unknown") : "unknown"); 95982736f4dSUwe Kleine-König return 0; 96082736f4dSUwe Kleine-König } 96182736f4dSUwe Kleine-König 962d4d5e089SThomas Gleixner if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { 96332f4125eSThomas Gleixner if (!irqd_irq_masked(&desc->irq_data)) 964d4d5e089SThomas Gleixner mask_irq(desc); 96532f4125eSThomas Gleixner if (!irqd_irq_disabled(&desc->irq_data)) 966d4d5e089SThomas Gleixner unmask = 1; 967d4d5e089SThomas Gleixner } 968d4d5e089SThomas Gleixner 96900b992deSAlexander Kuleshov /* Mask all flags except trigger mode */ 97000b992deSAlexander Kuleshov flags &= IRQ_TYPE_SENSE_MASK; 971b2ba2c30SThomas Gleixner ret = chip->irq_set_type(&desc->irq_data, flags); 97282736f4dSUwe Kleine-König 973876dbd4cSThomas Gleixner switch (ret) { 974876dbd4cSThomas Gleixner case IRQ_SET_MASK_OK: 9752cb62547SJiang Liu case IRQ_SET_MASK_OK_DONE: 976876dbd4cSThomas Gleixner irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); 977876dbd4cSThomas Gleixner irqd_set(&desc->irq_data, flags); 978df561f66SGustavo A. R. Silva fallthrough; 979876dbd4cSThomas Gleixner 980876dbd4cSThomas Gleixner case IRQ_SET_MASK_OK_NOCOPY: 981876dbd4cSThomas Gleixner flags = irqd_get_trigger_type(&desc->irq_data); 982876dbd4cSThomas Gleixner irq_settings_set_trigger_mask(desc, flags); 983876dbd4cSThomas Gleixner irqd_clear(&desc->irq_data, IRQD_LEVEL); 984876dbd4cSThomas Gleixner irq_settings_clr_level(desc); 985876dbd4cSThomas Gleixner if (flags & IRQ_TYPE_LEVEL_MASK) { 986876dbd4cSThomas Gleixner irq_settings_set_level(desc); 987876dbd4cSThomas Gleixner irqd_set(&desc->irq_data, IRQD_LEVEL); 988876dbd4cSThomas Gleixner } 98946732475SThomas Gleixner 990d4d5e089SThomas Gleixner ret = 0; 9918fff39e0SThomas Gleixner break; 992876dbd4cSThomas Gleixner default: 993d75f773cSSakari Ailus pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n", 994a1ff541aSJiang Liu flags, irq_desc_get_irq(desc), chip->irq_set_type); 9950c5d1eb7SDavid Brownell } 996d4d5e089SThomas Gleixner if (unmask) 997d4d5e089SThomas Gleixner unmask_irq(desc); 99882736f4dSUwe Kleine-König return ret; 99982736f4dSUwe Kleine-König } 100082736f4dSUwe Kleine-König 1001293a7a0aSThomas Gleixner #ifdef CONFIG_HARDIRQS_SW_RESEND 1002293a7a0aSThomas Gleixner int irq_set_parent(int irq, int parent_irq) 1003293a7a0aSThomas Gleixner { 1004293a7a0aSThomas Gleixner unsigned long flags; 1005293a7a0aSThomas Gleixner struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 1006293a7a0aSThomas Gleixner 1007293a7a0aSThomas Gleixner if (!desc) 1008293a7a0aSThomas Gleixner return -EINVAL; 1009293a7a0aSThomas Gleixner 1010293a7a0aSThomas Gleixner desc->parent_irq = parent_irq; 1011293a7a0aSThomas Gleixner 1012293a7a0aSThomas Gleixner irq_put_desc_unlock(desc, flags); 1013293a7a0aSThomas Gleixner return 0; 1014293a7a0aSThomas Gleixner } 10153118dac5SSudip Mukherjee EXPORT_SYMBOL_GPL(irq_set_parent); 1016293a7a0aSThomas Gleixner #endif 1017293a7a0aSThomas Gleixner 1018b25c340cSThomas Gleixner /* 1019b25c340cSThomas Gleixner * Default primary interrupt handler for threaded interrupts. Is 1020b25c340cSThomas Gleixner * assigned as primary handler when request_threaded_irq is called 1021b25c340cSThomas Gleixner * with handler == NULL. Useful for oneshot interrupts. 1022b25c340cSThomas Gleixner */ 1023b25c340cSThomas Gleixner static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) 1024b25c340cSThomas Gleixner { 1025b25c340cSThomas Gleixner return IRQ_WAKE_THREAD; 1026b25c340cSThomas Gleixner } 1027b25c340cSThomas Gleixner 1028399b5da2SThomas Gleixner /* 1029399b5da2SThomas Gleixner * Primary handler for nested threaded interrupts. Should never be 1030399b5da2SThomas Gleixner * called. 1031399b5da2SThomas Gleixner */ 1032399b5da2SThomas Gleixner static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) 1033399b5da2SThomas Gleixner { 1034399b5da2SThomas Gleixner WARN(1, "Primary handler called for nested irq %d\n", irq); 1035399b5da2SThomas Gleixner return IRQ_NONE; 1036399b5da2SThomas Gleixner } 1037399b5da2SThomas Gleixner 10382a1d3ab8SThomas Gleixner static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id) 10392a1d3ab8SThomas Gleixner { 10402a1d3ab8SThomas Gleixner WARN(1, "Secondary action handler called for irq %d\n", irq); 10412a1d3ab8SThomas Gleixner return IRQ_NONE; 10422a1d3ab8SThomas Gleixner } 10432a1d3ab8SThomas Gleixner 10443aa551c9SThomas Gleixner static int irq_wait_for_interrupt(struct irqaction *action) 10453aa551c9SThomas Gleixner { 1046519cc865SLukas Wunner for (;;) { 10473aa551c9SThomas Gleixner set_current_state(TASK_INTERRUPTIBLE); 1048f48fe81eSThomas Gleixner 1049519cc865SLukas Wunner if (kthread_should_stop()) { 1050519cc865SLukas Wunner /* may need to run one last time */ 1051519cc865SLukas Wunner if (test_and_clear_bit(IRQTF_RUNTHREAD, 1052519cc865SLukas Wunner &action->thread_flags)) { 1053519cc865SLukas Wunner __set_current_state(TASK_RUNNING); 1054519cc865SLukas Wunner return 0; 1055519cc865SLukas Wunner } 1056519cc865SLukas Wunner __set_current_state(TASK_RUNNING); 1057519cc865SLukas Wunner return -1; 1058519cc865SLukas Wunner } 1059550acb19SIdo Yariv 1060f48fe81eSThomas Gleixner if (test_and_clear_bit(IRQTF_RUNTHREAD, 1061f48fe81eSThomas Gleixner &action->thread_flags)) { 10623aa551c9SThomas Gleixner __set_current_state(TASK_RUNNING); 10633aa551c9SThomas Gleixner return 0; 1064f48fe81eSThomas Gleixner } 10653aa551c9SThomas Gleixner schedule(); 10663aa551c9SThomas Gleixner } 10673aa551c9SThomas Gleixner } 10683aa551c9SThomas Gleixner 1069b25c340cSThomas Gleixner /* 1070b25c340cSThomas Gleixner * Oneshot interrupts keep the irq line masked until the threaded 1071b25c340cSThomas Gleixner * handler finished. unmask if the interrupt has not been disabled and 1072b25c340cSThomas Gleixner * is marked MASKED. 1073b25c340cSThomas Gleixner */ 1074b5faba21SThomas Gleixner static void irq_finalize_oneshot(struct irq_desc *desc, 1075f3f79e38SAlexander Gordeev struct irqaction *action) 1076b25c340cSThomas Gleixner { 10772a1d3ab8SThomas Gleixner if (!(desc->istate & IRQS_ONESHOT) || 10782a1d3ab8SThomas Gleixner action->handler == irq_forced_secondary_handler) 1079b5faba21SThomas Gleixner return; 10800b1adaa0SThomas Gleixner again: 10813876ec9eSThomas Gleixner chip_bus_lock(desc); 1082239007b8SThomas Gleixner raw_spin_lock_irq(&desc->lock); 10830b1adaa0SThomas Gleixner 10840b1adaa0SThomas Gleixner /* 10850b1adaa0SThomas Gleixner * Implausible though it may be we need to protect us against 10860b1adaa0SThomas Gleixner * the following scenario: 10870b1adaa0SThomas Gleixner * 10880b1adaa0SThomas Gleixner * The thread is faster done than the hard interrupt handler 10890b1adaa0SThomas Gleixner * on the other CPU. If we unmask the irq line then the 10900b1adaa0SThomas Gleixner * interrupt can come in again and masks the line, leaves due 1091009b4c3bSThomas Gleixner * to IRQS_INPROGRESS and the irq line is masked forever. 1092b5faba21SThomas Gleixner * 1093b5faba21SThomas Gleixner * This also serializes the state of shared oneshot handlers 1094a359f757SIngo Molnar * versus "desc->threads_oneshot |= action->thread_mask;" in 1095b5faba21SThomas Gleixner * irq_wake_thread(). See the comment there which explains the 1096b5faba21SThomas Gleixner * serialization. 10970b1adaa0SThomas Gleixner */ 109832f4125eSThomas Gleixner if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { 10990b1adaa0SThomas Gleixner raw_spin_unlock_irq(&desc->lock); 11003876ec9eSThomas Gleixner chip_bus_sync_unlock(desc); 11010b1adaa0SThomas Gleixner cpu_relax(); 11020b1adaa0SThomas Gleixner goto again; 11030b1adaa0SThomas Gleixner } 11040b1adaa0SThomas Gleixner 1105b5faba21SThomas Gleixner /* 1106b5faba21SThomas Gleixner * Now check again, whether the thread should run. Otherwise 1107b5faba21SThomas Gleixner * we would clear the threads_oneshot bit of this thread which 1108b5faba21SThomas Gleixner * was just set. 1109b5faba21SThomas Gleixner */ 1110f3f79e38SAlexander Gordeev if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) 1111b5faba21SThomas Gleixner goto out_unlock; 1112b5faba21SThomas Gleixner 1113b5faba21SThomas Gleixner desc->threads_oneshot &= ~action->thread_mask; 1114b5faba21SThomas Gleixner 111532f4125eSThomas Gleixner if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && 111632f4125eSThomas Gleixner irqd_irq_masked(&desc->irq_data)) 1117328a4978SThomas Gleixner unmask_threaded_irq(desc); 111832f4125eSThomas Gleixner 1119b5faba21SThomas Gleixner out_unlock: 1120239007b8SThomas Gleixner raw_spin_unlock_irq(&desc->lock); 11213876ec9eSThomas Gleixner chip_bus_sync_unlock(desc); 1122b25c340cSThomas Gleixner } 1123b25c340cSThomas Gleixner 112461f38261SBruno Premont #ifdef CONFIG_SMP 11253aa551c9SThomas Gleixner /* 1126b04c644eSChuansheng Liu * Check whether we need to change the affinity of the interrupt thread. 1127591d2fb0SThomas Gleixner */ 1128591d2fb0SThomas Gleixner static void 1129591d2fb0SThomas Gleixner irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) 1130591d2fb0SThomas Gleixner { 1131591d2fb0SThomas Gleixner cpumask_var_t mask; 113204aa530eSThomas Gleixner bool valid = true; 1133591d2fb0SThomas Gleixner 1134591d2fb0SThomas Gleixner if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) 1135591d2fb0SThomas Gleixner return; 1136591d2fb0SThomas Gleixner 1137591d2fb0SThomas Gleixner /* 1138591d2fb0SThomas Gleixner * In case we are out of memory we set IRQTF_AFFINITY again and 1139591d2fb0SThomas Gleixner * try again next time 1140591d2fb0SThomas Gleixner */ 1141591d2fb0SThomas Gleixner if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { 1142591d2fb0SThomas Gleixner set_bit(IRQTF_AFFINITY, &action->thread_flags); 1143591d2fb0SThomas Gleixner return; 1144591d2fb0SThomas Gleixner } 1145591d2fb0SThomas Gleixner 1146239007b8SThomas Gleixner raw_spin_lock_irq(&desc->lock); 114704aa530eSThomas Gleixner /* 114804aa530eSThomas Gleixner * This code is triggered unconditionally. Check the affinity 114904aa530eSThomas Gleixner * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. 115004aa530eSThomas Gleixner */ 1151cbf86999SThomas Gleixner if (cpumask_available(desc->irq_common_data.affinity)) { 1152cbf86999SThomas Gleixner const struct cpumask *m; 1153cbf86999SThomas Gleixner 1154cbf86999SThomas Gleixner m = irq_data_get_effective_affinity_mask(&desc->irq_data); 1155cbf86999SThomas Gleixner cpumask_copy(mask, m); 1156cbf86999SThomas Gleixner } else { 115704aa530eSThomas Gleixner valid = false; 1158cbf86999SThomas Gleixner } 1159239007b8SThomas Gleixner raw_spin_unlock_irq(&desc->lock); 1160591d2fb0SThomas Gleixner 116104aa530eSThomas Gleixner if (valid) 1162591d2fb0SThomas Gleixner set_cpus_allowed_ptr(current, mask); 1163591d2fb0SThomas Gleixner free_cpumask_var(mask); 1164591d2fb0SThomas Gleixner } 116561f38261SBruno Premont #else 116661f38261SBruno Premont static inline void 116761f38261SBruno Premont irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } 116861f38261SBruno Premont #endif 1169591d2fb0SThomas Gleixner 1170591d2fb0SThomas Gleixner /* 1171c5f48c0aSIngo Molnar * Interrupts which are not explicitly requested as threaded 11728d32a307SThomas Gleixner * interrupts rely on the implicit bh/preempt disable of the hard irq 11738d32a307SThomas Gleixner * context. So we need to disable bh here to avoid deadlocks and other 11748d32a307SThomas Gleixner * side effects. 11758d32a307SThomas Gleixner */ 11763a43e05fSSebastian Andrzej Siewior static irqreturn_t 11778d32a307SThomas Gleixner irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) 11788d32a307SThomas Gleixner { 11793a43e05fSSebastian Andrzej Siewior irqreturn_t ret; 11803a43e05fSSebastian Andrzej Siewior 11818d32a307SThomas Gleixner local_bh_disable(); 118281e2073cSThomas Gleixner if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 118381e2073cSThomas Gleixner local_irq_disable(); 11843a43e05fSSebastian Andrzej Siewior ret = action->thread_fn(action->irq, action->dev_id); 1185746a923bSLukas Wunner if (ret == IRQ_HANDLED) 1186746a923bSLukas Wunner atomic_inc(&desc->threads_handled); 1187746a923bSLukas Wunner 1188f3f79e38SAlexander Gordeev irq_finalize_oneshot(desc, action); 118981e2073cSThomas Gleixner if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 119081e2073cSThomas Gleixner local_irq_enable(); 11918d32a307SThomas Gleixner local_bh_enable(); 11923a43e05fSSebastian Andrzej Siewior return ret; 11938d32a307SThomas Gleixner } 11948d32a307SThomas Gleixner 11958d32a307SThomas Gleixner /* 1196f788e7bfSXie XiuQi * Interrupts explicitly requested as threaded interrupts want to be 11975c982c58SKrzysztof Kozlowski * preemptible - many of them need to sleep and wait for slow busses to 11988d32a307SThomas Gleixner * complete. 11998d32a307SThomas Gleixner */ 12003a43e05fSSebastian Andrzej Siewior static irqreturn_t irq_thread_fn(struct irq_desc *desc, 12013a43e05fSSebastian Andrzej Siewior struct irqaction *action) 12028d32a307SThomas Gleixner { 12033a43e05fSSebastian Andrzej Siewior irqreturn_t ret; 12043a43e05fSSebastian Andrzej Siewior 12053a43e05fSSebastian Andrzej Siewior ret = action->thread_fn(action->irq, action->dev_id); 1206746a923bSLukas Wunner if (ret == IRQ_HANDLED) 1207746a923bSLukas Wunner atomic_inc(&desc->threads_handled); 1208746a923bSLukas Wunner 1209f3f79e38SAlexander Gordeev irq_finalize_oneshot(desc, action); 12103a43e05fSSebastian Andrzej Siewior return ret; 12118d32a307SThomas Gleixner } 12128d32a307SThomas Gleixner 12137140ea19SIdo Yariv static void wake_threads_waitq(struct irq_desc *desc) 12147140ea19SIdo Yariv { 1215c685689fSChuansheng Liu if (atomic_dec_and_test(&desc->threads_active)) 12167140ea19SIdo Yariv wake_up(&desc->wait_for_threads); 12177140ea19SIdo Yariv } 12187140ea19SIdo Yariv 121967d12145SAl Viro static void irq_thread_dtor(struct callback_head *unused) 12204d1d61a6SOleg Nesterov { 12214d1d61a6SOleg Nesterov struct task_struct *tsk = current; 12224d1d61a6SOleg Nesterov struct irq_desc *desc; 12234d1d61a6SOleg Nesterov struct irqaction *action; 12244d1d61a6SOleg Nesterov 12254d1d61a6SOleg Nesterov if (WARN_ON_ONCE(!(current->flags & PF_EXITING))) 12264d1d61a6SOleg Nesterov return; 12274d1d61a6SOleg Nesterov 12284d1d61a6SOleg Nesterov action = kthread_data(tsk); 12294d1d61a6SOleg Nesterov 1230fb21affaSLinus Torvalds pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", 123119af395dSAlan Cox tsk->comm, tsk->pid, action->irq); 12324d1d61a6SOleg Nesterov 12334d1d61a6SOleg Nesterov 12344d1d61a6SOleg Nesterov desc = irq_to_desc(action->irq); 12354d1d61a6SOleg Nesterov /* 12364d1d61a6SOleg Nesterov * If IRQTF_RUNTHREAD is set, we need to decrement 12374d1d61a6SOleg Nesterov * desc->threads_active and wake possible waiters. 12384d1d61a6SOleg Nesterov */ 12394d1d61a6SOleg Nesterov if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) 12404d1d61a6SOleg Nesterov wake_threads_waitq(desc); 12414d1d61a6SOleg Nesterov 12424d1d61a6SOleg Nesterov /* Prevent a stale desc->threads_oneshot */ 12434d1d61a6SOleg Nesterov irq_finalize_oneshot(desc, action); 12444d1d61a6SOleg Nesterov } 12454d1d61a6SOleg Nesterov 12462a1d3ab8SThomas Gleixner static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action) 12472a1d3ab8SThomas Gleixner { 12482a1d3ab8SThomas Gleixner struct irqaction *secondary = action->secondary; 12492a1d3ab8SThomas Gleixner 12502a1d3ab8SThomas Gleixner if (WARN_ON_ONCE(!secondary)) 12512a1d3ab8SThomas Gleixner return; 12522a1d3ab8SThomas Gleixner 12532a1d3ab8SThomas Gleixner raw_spin_lock_irq(&desc->lock); 12542a1d3ab8SThomas Gleixner __irq_wake_thread(desc, secondary); 12552a1d3ab8SThomas Gleixner raw_spin_unlock_irq(&desc->lock); 12562a1d3ab8SThomas Gleixner } 12572a1d3ab8SThomas Gleixner 12588d32a307SThomas Gleixner /* 12598707898eSThomas Pfaff * Internal function to notify that a interrupt thread is ready. 12608707898eSThomas Pfaff */ 12618707898eSThomas Pfaff static void irq_thread_set_ready(struct irq_desc *desc, 12628707898eSThomas Pfaff struct irqaction *action) 12638707898eSThomas Pfaff { 12648707898eSThomas Pfaff set_bit(IRQTF_READY, &action->thread_flags); 12658707898eSThomas Pfaff wake_up(&desc->wait_for_threads); 12668707898eSThomas Pfaff } 12678707898eSThomas Pfaff 12688707898eSThomas Pfaff /* 12698707898eSThomas Pfaff * Internal function to wake up a interrupt thread and wait until it is 12708707898eSThomas Pfaff * ready. 12718707898eSThomas Pfaff */ 12728707898eSThomas Pfaff static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc, 12738707898eSThomas Pfaff struct irqaction *action) 12748707898eSThomas Pfaff { 12758707898eSThomas Pfaff if (!action || !action->thread) 12768707898eSThomas Pfaff return; 12778707898eSThomas Pfaff 12788707898eSThomas Pfaff wake_up_process(action->thread); 12798707898eSThomas Pfaff wait_event(desc->wait_for_threads, 12808707898eSThomas Pfaff test_bit(IRQTF_READY, &action->thread_flags)); 12818707898eSThomas Pfaff } 12828707898eSThomas Pfaff 12838707898eSThomas Pfaff /* 12843aa551c9SThomas Gleixner * Interrupt handler thread 12853aa551c9SThomas Gleixner */ 12863aa551c9SThomas Gleixner static int irq_thread(void *data) 12873aa551c9SThomas Gleixner { 128867d12145SAl Viro struct callback_head on_exit_work; 12893aa551c9SThomas Gleixner struct irqaction *action = data; 12903aa551c9SThomas Gleixner struct irq_desc *desc = irq_to_desc(action->irq); 12913a43e05fSSebastian Andrzej Siewior irqreturn_t (*handler_fn)(struct irq_desc *desc, 12923a43e05fSSebastian Andrzej Siewior struct irqaction *action); 12933aa551c9SThomas Gleixner 12948707898eSThomas Pfaff irq_thread_set_ready(desc, action); 12958707898eSThomas Pfaff 1296e739f98bSThomas Gleixner sched_set_fifo(current); 1297e739f98bSThomas Gleixner 129891cc470eSTanner Love if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD, 12998d32a307SThomas Gleixner &action->thread_flags)) 13008d32a307SThomas Gleixner handler_fn = irq_forced_thread_fn; 13018d32a307SThomas Gleixner else 13028d32a307SThomas Gleixner handler_fn = irq_thread_fn; 13038d32a307SThomas Gleixner 130441f9d29fSAl Viro init_task_work(&on_exit_work, irq_thread_dtor); 130591989c70SJens Axboe task_work_add(current, &on_exit_work, TWA_NONE); 13063aa551c9SThomas Gleixner 1307f3de44edSSankara Muthukrishnan irq_thread_check_affinity(desc, action); 1308f3de44edSSankara Muthukrishnan 13093aa551c9SThomas Gleixner while (!irq_wait_for_interrupt(action)) { 13107140ea19SIdo Yariv irqreturn_t action_ret; 13113aa551c9SThomas Gleixner 1312591d2fb0SThomas Gleixner irq_thread_check_affinity(desc, action); 1313591d2fb0SThomas Gleixner 13143a43e05fSSebastian Andrzej Siewior action_ret = handler_fn(desc, action); 13152a1d3ab8SThomas Gleixner if (action_ret == IRQ_WAKE_THREAD) 13162a1d3ab8SThomas Gleixner irq_wake_secondary(desc, action); 13177140ea19SIdo Yariv 13187140ea19SIdo Yariv wake_threads_waitq(desc); 13193aa551c9SThomas Gleixner } 13203aa551c9SThomas Gleixner 13217140ea19SIdo Yariv /* 13227140ea19SIdo Yariv * This is the regular exit path. __free_irq() is stopping the 13237140ea19SIdo Yariv * thread via kthread_stop() after calling 1324519cc865SLukas Wunner * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the 1325836557bdSLukas Wunner * oneshot mask bit can be set. 13263aa551c9SThomas Gleixner */ 13274d1d61a6SOleg Nesterov task_work_cancel(current, irq_thread_dtor); 13283aa551c9SThomas Gleixner return 0; 13293aa551c9SThomas Gleixner } 13303aa551c9SThomas Gleixner 1331a92444c6SThomas Gleixner /** 1332a92444c6SThomas Gleixner * irq_wake_thread - wake the irq thread for the action identified by dev_id 1333a92444c6SThomas Gleixner * @irq: Interrupt line 1334a92444c6SThomas Gleixner * @dev_id: Device identity for which the thread should be woken 1335a92444c6SThomas Gleixner * 1336a92444c6SThomas Gleixner */ 1337a92444c6SThomas Gleixner void irq_wake_thread(unsigned int irq, void *dev_id) 1338a92444c6SThomas Gleixner { 1339a92444c6SThomas Gleixner struct irq_desc *desc = irq_to_desc(irq); 1340a92444c6SThomas Gleixner struct irqaction *action; 1341a92444c6SThomas Gleixner unsigned long flags; 1342a92444c6SThomas Gleixner 1343a92444c6SThomas Gleixner if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) 1344a92444c6SThomas Gleixner return; 1345a92444c6SThomas Gleixner 1346a92444c6SThomas Gleixner raw_spin_lock_irqsave(&desc->lock, flags); 1347f944b5a7SDaniel Lezcano for_each_action_of_desc(desc, action) { 1348a92444c6SThomas Gleixner if (action->dev_id == dev_id) { 1349a92444c6SThomas Gleixner if (action->thread) 1350a92444c6SThomas Gleixner __irq_wake_thread(desc, action); 1351a92444c6SThomas Gleixner break; 1352a92444c6SThomas Gleixner } 1353a92444c6SThomas Gleixner } 1354a92444c6SThomas Gleixner raw_spin_unlock_irqrestore(&desc->lock, flags); 1355a92444c6SThomas Gleixner } 1356a92444c6SThomas Gleixner EXPORT_SYMBOL_GPL(irq_wake_thread); 1357a92444c6SThomas Gleixner 13582a1d3ab8SThomas Gleixner static int irq_setup_forced_threading(struct irqaction *new) 13598d32a307SThomas Gleixner { 136091cc470eSTanner Love if (!force_irqthreads()) 13612a1d3ab8SThomas Gleixner return 0; 13628d32a307SThomas Gleixner if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) 13632a1d3ab8SThomas Gleixner return 0; 13648d32a307SThomas Gleixner 1365d1f0301bSThomas Gleixner /* 1366d1f0301bSThomas Gleixner * No further action required for interrupts which are requested as 1367d1f0301bSThomas Gleixner * threaded interrupts already 1368d1f0301bSThomas Gleixner */ 1369d1f0301bSThomas Gleixner if (new->handler == irq_default_primary_handler) 1370d1f0301bSThomas Gleixner return 0; 1371d1f0301bSThomas Gleixner 13728d32a307SThomas Gleixner new->flags |= IRQF_ONESHOT; 13738d32a307SThomas Gleixner 13742a1d3ab8SThomas Gleixner /* 13752a1d3ab8SThomas Gleixner * Handle the case where we have a real primary handler and a 13762a1d3ab8SThomas Gleixner * thread handler. We force thread them as well by creating a 13772a1d3ab8SThomas Gleixner * secondary action. 13782a1d3ab8SThomas Gleixner */ 1379d1f0301bSThomas Gleixner if (new->handler && new->thread_fn) { 13802a1d3ab8SThomas Gleixner /* Allocate the secondary action */ 13812a1d3ab8SThomas Gleixner new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 13822a1d3ab8SThomas Gleixner if (!new->secondary) 13832a1d3ab8SThomas Gleixner return -ENOMEM; 13842a1d3ab8SThomas Gleixner new->secondary->handler = irq_forced_secondary_handler; 13852a1d3ab8SThomas Gleixner new->secondary->thread_fn = new->thread_fn; 13862a1d3ab8SThomas Gleixner new->secondary->dev_id = new->dev_id; 13872a1d3ab8SThomas Gleixner new->secondary->irq = new->irq; 13882a1d3ab8SThomas Gleixner new->secondary->name = new->name; 13892a1d3ab8SThomas Gleixner } 13902a1d3ab8SThomas Gleixner /* Deal with the primary handler */ 13918d32a307SThomas Gleixner set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); 13928d32a307SThomas Gleixner new->thread_fn = new->handler; 13938d32a307SThomas Gleixner new->handler = irq_default_primary_handler; 13942a1d3ab8SThomas Gleixner return 0; 13958d32a307SThomas Gleixner } 13968d32a307SThomas Gleixner 1397c1bacbaeSThomas Gleixner static int irq_request_resources(struct irq_desc *desc) 1398c1bacbaeSThomas Gleixner { 1399c1bacbaeSThomas Gleixner struct irq_data *d = &desc->irq_data; 1400c1bacbaeSThomas Gleixner struct irq_chip *c = d->chip; 1401c1bacbaeSThomas Gleixner 1402c1bacbaeSThomas Gleixner return c->irq_request_resources ? c->irq_request_resources(d) : 0; 1403c1bacbaeSThomas Gleixner } 1404c1bacbaeSThomas Gleixner 1405c1bacbaeSThomas Gleixner static void irq_release_resources(struct irq_desc *desc) 1406c1bacbaeSThomas Gleixner { 1407c1bacbaeSThomas Gleixner struct irq_data *d = &desc->irq_data; 1408c1bacbaeSThomas Gleixner struct irq_chip *c = d->chip; 1409c1bacbaeSThomas Gleixner 1410c1bacbaeSThomas Gleixner if (c->irq_release_resources) 1411c1bacbaeSThomas Gleixner c->irq_release_resources(d); 1412c1bacbaeSThomas Gleixner } 1413c1bacbaeSThomas Gleixner 1414b525903cSJulien Thierry static bool irq_supports_nmi(struct irq_desc *desc) 1415b525903cSJulien Thierry { 1416b525903cSJulien Thierry struct irq_data *d = irq_desc_get_irq_data(desc); 1417b525903cSJulien Thierry 1418b525903cSJulien Thierry #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 1419b525903cSJulien Thierry /* Only IRQs directly managed by the root irqchip can be set as NMI */ 1420b525903cSJulien Thierry if (d->parent_data) 1421b525903cSJulien Thierry return false; 1422b525903cSJulien Thierry #endif 1423b525903cSJulien Thierry /* Don't support NMIs for chips behind a slow bus */ 1424b525903cSJulien Thierry if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock) 1425b525903cSJulien Thierry return false; 1426b525903cSJulien Thierry 1427b525903cSJulien Thierry return d->chip->flags & IRQCHIP_SUPPORTS_NMI; 1428b525903cSJulien Thierry } 1429b525903cSJulien Thierry 1430b525903cSJulien Thierry static int irq_nmi_setup(struct irq_desc *desc) 1431b525903cSJulien Thierry { 1432b525903cSJulien Thierry struct irq_data *d = irq_desc_get_irq_data(desc); 1433b525903cSJulien Thierry struct irq_chip *c = d->chip; 1434b525903cSJulien Thierry 1435b525903cSJulien Thierry return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL; 1436b525903cSJulien Thierry } 1437b525903cSJulien Thierry 1438b525903cSJulien Thierry static void irq_nmi_teardown(struct irq_desc *desc) 1439b525903cSJulien Thierry { 1440b525903cSJulien Thierry struct irq_data *d = irq_desc_get_irq_data(desc); 1441b525903cSJulien Thierry struct irq_chip *c = d->chip; 1442b525903cSJulien Thierry 1443b525903cSJulien Thierry if (c->irq_nmi_teardown) 1444b525903cSJulien Thierry c->irq_nmi_teardown(d); 1445b525903cSJulien Thierry } 1446b525903cSJulien Thierry 14472a1d3ab8SThomas Gleixner static int 14482a1d3ab8SThomas Gleixner setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) 14492a1d3ab8SThomas Gleixner { 14502a1d3ab8SThomas Gleixner struct task_struct *t; 14512a1d3ab8SThomas Gleixner 14522a1d3ab8SThomas Gleixner if (!secondary) { 14532a1d3ab8SThomas Gleixner t = kthread_create(irq_thread, new, "irq/%d-%s", irq, 14542a1d3ab8SThomas Gleixner new->name); 14552a1d3ab8SThomas Gleixner } else { 14562a1d3ab8SThomas Gleixner t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq, 14572a1d3ab8SThomas Gleixner new->name); 14582a1d3ab8SThomas Gleixner } 14592a1d3ab8SThomas Gleixner 14602a1d3ab8SThomas Gleixner if (IS_ERR(t)) 14612a1d3ab8SThomas Gleixner return PTR_ERR(t); 14622a1d3ab8SThomas Gleixner 14632a1d3ab8SThomas Gleixner /* 14642a1d3ab8SThomas Gleixner * We keep the reference to the task struct even if 14652a1d3ab8SThomas Gleixner * the thread dies to avoid that the interrupt code 14662a1d3ab8SThomas Gleixner * references an already freed task_struct. 14672a1d3ab8SThomas Gleixner */ 14687b3c92b8SMatthew Wilcox (Oracle) new->thread = get_task_struct(t); 14692a1d3ab8SThomas Gleixner /* 14702a1d3ab8SThomas Gleixner * Tell the thread to set its affinity. This is 14712a1d3ab8SThomas Gleixner * important for shared interrupt handlers as we do 14722a1d3ab8SThomas Gleixner * not invoke setup_affinity() for the secondary 14732a1d3ab8SThomas Gleixner * handlers as everything is already set up. Even for 14742a1d3ab8SThomas Gleixner * interrupts marked with IRQF_NO_BALANCE this is 14752a1d3ab8SThomas Gleixner * correct as we want the thread to move to the cpu(s) 14762a1d3ab8SThomas Gleixner * on which the requesting code placed the interrupt. 14772a1d3ab8SThomas Gleixner */ 14782a1d3ab8SThomas Gleixner set_bit(IRQTF_AFFINITY, &new->thread_flags); 14792a1d3ab8SThomas Gleixner return 0; 14802a1d3ab8SThomas Gleixner } 14812a1d3ab8SThomas Gleixner 14821da177e4SLinus Torvalds /* 14831da177e4SLinus Torvalds * Internal function to register an irqaction - typically used to 14841da177e4SLinus Torvalds * allocate special interrupts that are part of the architecture. 148519d39a38SThomas Gleixner * 148619d39a38SThomas Gleixner * Locking rules: 148719d39a38SThomas Gleixner * 148819d39a38SThomas Gleixner * desc->request_mutex Provides serialization against a concurrent free_irq() 148919d39a38SThomas Gleixner * chip_bus_lock Provides serialization for slow bus operations 149019d39a38SThomas Gleixner * desc->lock Provides serialization against hard interrupts 149119d39a38SThomas Gleixner * 149219d39a38SThomas Gleixner * chip_bus_lock and desc->lock are sufficient for all other management and 149319d39a38SThomas Gleixner * interrupt related functions. desc->request_mutex solely serializes 149419d39a38SThomas Gleixner * request/free_irq(). 14951da177e4SLinus Torvalds */ 1496d3c60047SThomas Gleixner static int 1497d3c60047SThomas Gleixner __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) 14981da177e4SLinus Torvalds { 1499f17c7545SIngo Molnar struct irqaction *old, **old_ptr; 1500b5faba21SThomas Gleixner unsigned long flags, thread_mask = 0; 15013b8249e7SThomas Gleixner int ret, nested, shared = 0; 15021da177e4SLinus Torvalds 15037d94f7caSYinghai Lu if (!desc) 1504c2b5a251SMatthew Wilcox return -EINVAL; 1505c2b5a251SMatthew Wilcox 15066b8ff312SThomas Gleixner if (desc->irq_data.chip == &no_irq_chip) 15071da177e4SLinus Torvalds return -ENOSYS; 1508b6873807SSebastian Andrzej Siewior if (!try_module_get(desc->owner)) 1509b6873807SSebastian Andrzej Siewior return -ENODEV; 15101da177e4SLinus Torvalds 15112a1d3ab8SThomas Gleixner new->irq = irq; 15122a1d3ab8SThomas Gleixner 15131da177e4SLinus Torvalds /* 15144b357daeSJon Hunter * If the trigger type is not specified by the caller, 15154b357daeSJon Hunter * then use the default for this interrupt. 15164b357daeSJon Hunter */ 15174b357daeSJon Hunter if (!(new->flags & IRQF_TRIGGER_MASK)) 15184b357daeSJon Hunter new->flags |= irqd_get_trigger_type(&desc->irq_data); 15194b357daeSJon Hunter 15204b357daeSJon Hunter /* 1521399b5da2SThomas Gleixner * Check whether the interrupt nests into another interrupt 1522399b5da2SThomas Gleixner * thread. 15233aa551c9SThomas Gleixner */ 15241ccb4e61SThomas Gleixner nested = irq_settings_is_nested_thread(desc); 1525399b5da2SThomas Gleixner if (nested) { 1526b6873807SSebastian Andrzej Siewior if (!new->thread_fn) { 1527b6873807SSebastian Andrzej Siewior ret = -EINVAL; 1528b6873807SSebastian Andrzej Siewior goto out_mput; 1529b6873807SSebastian Andrzej Siewior } 1530399b5da2SThomas Gleixner /* 1531399b5da2SThomas Gleixner * Replace the primary handler which was provided from 1532399b5da2SThomas Gleixner * the driver for non nested interrupt handling by the 1533399b5da2SThomas Gleixner * dummy function which warns when called. 1534399b5da2SThomas Gleixner */ 1535399b5da2SThomas Gleixner new->handler = irq_nested_primary_handler; 15368d32a307SThomas Gleixner } else { 15372a1d3ab8SThomas Gleixner if (irq_settings_can_thread(desc)) { 15382a1d3ab8SThomas Gleixner ret = irq_setup_forced_threading(new); 15392a1d3ab8SThomas Gleixner if (ret) 15402a1d3ab8SThomas Gleixner goto out_mput; 15412a1d3ab8SThomas Gleixner } 1542399b5da2SThomas Gleixner } 1543399b5da2SThomas Gleixner 1544399b5da2SThomas Gleixner /* 1545399b5da2SThomas Gleixner * Create a handler thread when a thread function is supplied 1546399b5da2SThomas Gleixner * and the interrupt does not nest into another interrupt 1547399b5da2SThomas Gleixner * thread. 1548399b5da2SThomas Gleixner */ 1549399b5da2SThomas Gleixner if (new->thread_fn && !nested) { 15502a1d3ab8SThomas Gleixner ret = setup_irq_thread(new, irq, false); 15512a1d3ab8SThomas Gleixner if (ret) 1552b6873807SSebastian Andrzej Siewior goto out_mput; 15532a1d3ab8SThomas Gleixner if (new->secondary) { 15542a1d3ab8SThomas Gleixner ret = setup_irq_thread(new->secondary, irq, true); 15552a1d3ab8SThomas Gleixner if (ret) 15562a1d3ab8SThomas Gleixner goto out_thread; 1557b6873807SSebastian Andrzej Siewior } 15583aa551c9SThomas Gleixner } 15593aa551c9SThomas Gleixner 15603aa551c9SThomas Gleixner /* 1561dc9b229aSThomas Gleixner * Drivers are often written to work w/o knowledge about the 1562dc9b229aSThomas Gleixner * underlying irq chip implementation, so a request for a 1563dc9b229aSThomas Gleixner * threaded irq without a primary hard irq context handler 1564dc9b229aSThomas Gleixner * requires the ONESHOT flag to be set. Some irq chips like 1565dc9b229aSThomas Gleixner * MSI based interrupts are per se one shot safe. Check the 1566dc9b229aSThomas Gleixner * chip flags, so we can avoid the unmask dance at the end of 1567dc9b229aSThomas Gleixner * the threaded handler for those. 1568dc9b229aSThomas Gleixner */ 1569dc9b229aSThomas Gleixner if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) 1570dc9b229aSThomas Gleixner new->flags &= ~IRQF_ONESHOT; 1571dc9b229aSThomas Gleixner 157219d39a38SThomas Gleixner /* 157319d39a38SThomas Gleixner * Protects against a concurrent __free_irq() call which might wait 1574519cc865SLukas Wunner * for synchronize_hardirq() to complete without holding the optional 1575836557bdSLukas Wunner * chip bus lock and desc->lock. Also protects against handing out 1576836557bdSLukas Wunner * a recycled oneshot thread_mask bit while it's still in use by 1577836557bdSLukas Wunner * its previous owner. 157819d39a38SThomas Gleixner */ 15799114014cSThomas Gleixner mutex_lock(&desc->request_mutex); 158019d39a38SThomas Gleixner 158119d39a38SThomas Gleixner /* 158219d39a38SThomas Gleixner * Acquire bus lock as the irq_request_resources() callback below 158319d39a38SThomas Gleixner * might rely on the serialization or the magic power management 158419d39a38SThomas Gleixner * functions which are abusing the irq_bus_lock() callback, 158519d39a38SThomas Gleixner */ 158619d39a38SThomas Gleixner chip_bus_lock(desc); 158719d39a38SThomas Gleixner 158819d39a38SThomas Gleixner /* First installed action requests resources. */ 158946e48e25SThomas Gleixner if (!desc->action) { 159046e48e25SThomas Gleixner ret = irq_request_resources(desc); 159146e48e25SThomas Gleixner if (ret) { 159246e48e25SThomas Gleixner pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", 159346e48e25SThomas Gleixner new->name, irq, desc->irq_data.chip->name); 159419d39a38SThomas Gleixner goto out_bus_unlock; 159546e48e25SThomas Gleixner } 159646e48e25SThomas Gleixner } 15979114014cSThomas Gleixner 1598dc9b229aSThomas Gleixner /* 15991da177e4SLinus Torvalds * The following block of code has to be executed atomically 160019d39a38SThomas Gleixner * protected against a concurrent interrupt and any of the other 160119d39a38SThomas Gleixner * management calls which are not serialized via 160219d39a38SThomas Gleixner * desc->request_mutex or the optional bus lock. 16031da177e4SLinus Torvalds */ 1604239007b8SThomas Gleixner raw_spin_lock_irqsave(&desc->lock, flags); 1605f17c7545SIngo Molnar old_ptr = &desc->action; 1606f17c7545SIngo Molnar old = *old_ptr; 160706fcb0c6SIngo Molnar if (old) { 1608e76de9f8SThomas Gleixner /* 1609e76de9f8SThomas Gleixner * Can't share interrupts unless both agree to and are 1610e76de9f8SThomas Gleixner * the same type (level, edge, polarity). So both flag 16113cca53b0SThomas Gleixner * fields must have IRQF_SHARED set and the bits which 16129d591eddSThomas Gleixner * set the trigger type must match. Also all must 16139d591eddSThomas Gleixner * agree on ONESHOT. 1614b525903cSJulien Thierry * Interrupt lines used for NMIs cannot be shared. 1615e76de9f8SThomas Gleixner */ 16164f8413a3SMarc Zyngier unsigned int oldtype; 16174f8413a3SMarc Zyngier 1618b525903cSJulien Thierry if (desc->istate & IRQS_NMI) { 1619b525903cSJulien Thierry pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n", 1620b525903cSJulien Thierry new->name, irq, desc->irq_data.chip->name); 1621b525903cSJulien Thierry ret = -EINVAL; 1622b525903cSJulien Thierry goto out_unlock; 1623b525903cSJulien Thierry } 1624b525903cSJulien Thierry 16254f8413a3SMarc Zyngier /* 16264f8413a3SMarc Zyngier * If nobody did set the configuration before, inherit 16274f8413a3SMarc Zyngier * the one provided by the requester. 16284f8413a3SMarc Zyngier */ 16294f8413a3SMarc Zyngier if (irqd_trigger_type_was_set(&desc->irq_data)) { 16304f8413a3SMarc Zyngier oldtype = irqd_get_trigger_type(&desc->irq_data); 16314f8413a3SMarc Zyngier } else { 16324f8413a3SMarc Zyngier oldtype = new->flags & IRQF_TRIGGER_MASK; 16334f8413a3SMarc Zyngier irqd_set_trigger_type(&desc->irq_data, oldtype); 16344f8413a3SMarc Zyngier } 1635382bd4deSHans de Goede 16363cca53b0SThomas Gleixner if (!((old->flags & new->flags) & IRQF_SHARED) || 1637382bd4deSHans de Goede (oldtype != (new->flags & IRQF_TRIGGER_MASK)) || 1638f5d89470SThomas Gleixner ((old->flags ^ new->flags) & IRQF_ONESHOT)) 1639f5163427SDimitri Sivanich goto mismatch; 1640f5163427SDimitri Sivanich 1641f5163427SDimitri Sivanich /* All handlers must agree on per-cpuness */ 16423cca53b0SThomas Gleixner if ((old->flags & IRQF_PERCPU) != 16433cca53b0SThomas Gleixner (new->flags & IRQF_PERCPU)) 1644f5163427SDimitri Sivanich goto mismatch; 16451da177e4SLinus Torvalds 16461da177e4SLinus Torvalds /* add new interrupt at end of irq queue */ 16471da177e4SLinus Torvalds do { 164852abb700SThomas Gleixner /* 164952abb700SThomas Gleixner * Or all existing action->thread_mask bits, 165052abb700SThomas Gleixner * so we can find the next zero bit for this 165152abb700SThomas Gleixner * new action. 165252abb700SThomas Gleixner */ 1653b5faba21SThomas Gleixner thread_mask |= old->thread_mask; 1654f17c7545SIngo Molnar old_ptr = &old->next; 1655f17c7545SIngo Molnar old = *old_ptr; 16561da177e4SLinus Torvalds } while (old); 16571da177e4SLinus Torvalds shared = 1; 16581da177e4SLinus Torvalds } 16591da177e4SLinus Torvalds 1660b5faba21SThomas Gleixner /* 166152abb700SThomas Gleixner * Setup the thread mask for this irqaction for ONESHOT. For 166252abb700SThomas Gleixner * !ONESHOT irqs the thread mask is 0 so we can avoid a 166352abb700SThomas Gleixner * conditional in irq_wake_thread(). 1664b5faba21SThomas Gleixner */ 166552abb700SThomas Gleixner if (new->flags & IRQF_ONESHOT) { 166652abb700SThomas Gleixner /* 166752abb700SThomas Gleixner * Unlikely to have 32 resp 64 irqs sharing one line, 166852abb700SThomas Gleixner * but who knows. 166952abb700SThomas Gleixner */ 167052abb700SThomas Gleixner if (thread_mask == ~0UL) { 1671b5faba21SThomas Gleixner ret = -EBUSY; 1672cba4235eSThomas Gleixner goto out_unlock; 1673b5faba21SThomas Gleixner } 167452abb700SThomas Gleixner /* 167552abb700SThomas Gleixner * The thread_mask for the action is or'ed to 167652abb700SThomas Gleixner * desc->thread_active to indicate that the 167752abb700SThomas Gleixner * IRQF_ONESHOT thread handler has been woken, but not 167852abb700SThomas Gleixner * yet finished. The bit is cleared when a thread 167952abb700SThomas Gleixner * completes. When all threads of a shared interrupt 168052abb700SThomas Gleixner * line have completed desc->threads_active becomes 168152abb700SThomas Gleixner * zero and the interrupt line is unmasked. See 168252abb700SThomas Gleixner * handle.c:irq_wake_thread() for further information. 168352abb700SThomas Gleixner * 168452abb700SThomas Gleixner * If no thread is woken by primary (hard irq context) 168552abb700SThomas Gleixner * interrupt handlers, then desc->threads_active is 168652abb700SThomas Gleixner * also checked for zero to unmask the irq line in the 168752abb700SThomas Gleixner * affected hard irq flow handlers 168852abb700SThomas Gleixner * (handle_[fasteoi|level]_irq). 168952abb700SThomas Gleixner * 169052abb700SThomas Gleixner * The new action gets the first zero bit of 169152abb700SThomas Gleixner * thread_mask assigned. See the loop above which or's 169252abb700SThomas Gleixner * all existing action->thread_mask bits. 169352abb700SThomas Gleixner */ 1694ffc661c9SRasmus Villemoes new->thread_mask = 1UL << ffz(thread_mask); 16951c6c6952SThomas Gleixner 1696dc9b229aSThomas Gleixner } else if (new->handler == irq_default_primary_handler && 1697dc9b229aSThomas Gleixner !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { 16981c6c6952SThomas Gleixner /* 16991c6c6952SThomas Gleixner * The interrupt was requested with handler = NULL, so 17001c6c6952SThomas Gleixner * we use the default primary handler for it. But it 17011c6c6952SThomas Gleixner * does not have the oneshot flag set. In combination 17021c6c6952SThomas Gleixner * with level interrupts this is deadly, because the 17031c6c6952SThomas Gleixner * default primary handler just wakes the thread, then 17041c6c6952SThomas Gleixner * the irq lines is reenabled, but the device still 17051c6c6952SThomas Gleixner * has the level irq asserted. Rinse and repeat.... 17061c6c6952SThomas Gleixner * 17071c6c6952SThomas Gleixner * While this works for edge type interrupts, we play 17081c6c6952SThomas Gleixner * it safe and reject unconditionally because we can't 17091c6c6952SThomas Gleixner * say for sure which type this interrupt really 17101c6c6952SThomas Gleixner * has. The type flags are unreliable as the 17111c6c6952SThomas Gleixner * underlying chip implementation can override them. 17121c6c6952SThomas Gleixner */ 1713025af39bSLuca Ceresoli pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n", 1714025af39bSLuca Ceresoli new->name, irq); 17151c6c6952SThomas Gleixner ret = -EINVAL; 1716cba4235eSThomas Gleixner goto out_unlock; 171752abb700SThomas Gleixner } 1718b5faba21SThomas Gleixner 17191da177e4SLinus Torvalds if (!shared) { 172082736f4dSUwe Kleine-König /* Setup the type (level, edge polarity) if configured: */ 172182736f4dSUwe Kleine-König if (new->flags & IRQF_TRIGGER_MASK) { 1722a1ff541aSJiang Liu ret = __irq_set_trigger(desc, 1723f2b662daSDavid Brownell new->flags & IRQF_TRIGGER_MASK); 172482736f4dSUwe Kleine-König 172519d39a38SThomas Gleixner if (ret) 1726cba4235eSThomas Gleixner goto out_unlock; 1727091738a2SThomas Gleixner } 1728f75d222bSAhmed S. Darwish 1729c942cee4SThomas Gleixner /* 1730c942cee4SThomas Gleixner * Activate the interrupt. That activation must happen 1731c942cee4SThomas Gleixner * independently of IRQ_NOAUTOEN. request_irq() can fail 1732c942cee4SThomas Gleixner * and the callers are supposed to handle 1733c942cee4SThomas Gleixner * that. enable_irq() of an interrupt requested with 1734c942cee4SThomas Gleixner * IRQ_NOAUTOEN is not supposed to fail. The activation 1735c942cee4SThomas Gleixner * keeps it in shutdown mode, it merily associates 1736c942cee4SThomas Gleixner * resources if necessary and if that's not possible it 1737c942cee4SThomas Gleixner * fails. Interrupts which are in managed shutdown mode 1738c942cee4SThomas Gleixner * will simply ignore that activation request. 1739c942cee4SThomas Gleixner */ 1740c942cee4SThomas Gleixner ret = irq_activate(desc); 1741c942cee4SThomas Gleixner if (ret) 1742c942cee4SThomas Gleixner goto out_unlock; 1743c942cee4SThomas Gleixner 1744009b4c3bSThomas Gleixner desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ 174532f4125eSThomas Gleixner IRQS_ONESHOT | IRQS_WAITING); 174632f4125eSThomas Gleixner irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); 174794d39e1fSThomas Gleixner 1748a005677bSThomas Gleixner if (new->flags & IRQF_PERCPU) { 1749a005677bSThomas Gleixner irqd_set(&desc->irq_data, IRQD_PER_CPU); 1750a005677bSThomas Gleixner irq_settings_set_per_cpu(desc); 1751c2b1063eSThomas Gleixner if (new->flags & IRQF_NO_DEBUG) 1752c2b1063eSThomas Gleixner irq_settings_set_no_debug(desc); 1753a005677bSThomas Gleixner } 17546a58fb3bSThomas Gleixner 1755c2b1063eSThomas Gleixner if (noirqdebug) 1756c2b1063eSThomas Gleixner irq_settings_set_no_debug(desc); 1757c2b1063eSThomas Gleixner 1758b25c340cSThomas Gleixner if (new->flags & IRQF_ONESHOT) 17593d67baecSThomas Gleixner desc->istate |= IRQS_ONESHOT; 1760b25c340cSThomas Gleixner 17612e051552SThomas Gleixner /* Exclude IRQ from balancing if requested */ 17622e051552SThomas Gleixner if (new->flags & IRQF_NOBALANCING) { 17632e051552SThomas Gleixner irq_settings_set_no_balancing(desc); 17642e051552SThomas Gleixner irqd_set(&desc->irq_data, IRQD_NO_BALANCING); 17652e051552SThomas Gleixner } 17662e051552SThomas Gleixner 1767cbe16f35SBarry Song if (!(new->flags & IRQF_NO_AUTOEN) && 1768cbe16f35SBarry Song irq_settings_can_autoenable(desc)) { 17694cde9c6bSThomas Gleixner irq_startup(desc, IRQ_RESEND, IRQ_START_COND); 177004c848d3SThomas Gleixner } else { 177104c848d3SThomas Gleixner /* 177204c848d3SThomas Gleixner * Shared interrupts do not go well with disabling 177304c848d3SThomas Gleixner * auto enable. The sharing interrupt might request 177404c848d3SThomas Gleixner * it while it's still disabled and then wait for 177504c848d3SThomas Gleixner * interrupts forever. 177604c848d3SThomas Gleixner */ 177704c848d3SThomas Gleixner WARN_ON_ONCE(new->flags & IRQF_SHARED); 1778e76de9f8SThomas Gleixner /* Undo nested disables: */ 1779e76de9f8SThomas Gleixner desc->depth = 1; 178004c848d3SThomas Gleixner } 178118404756SMax Krasnyansky 1782876dbd4cSThomas Gleixner } else if (new->flags & IRQF_TRIGGER_MASK) { 1783876dbd4cSThomas Gleixner unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; 17847ee7e87dSThomas Gleixner unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); 1785876dbd4cSThomas Gleixner 1786876dbd4cSThomas Gleixner if (nmsk != omsk) 1787876dbd4cSThomas Gleixner /* hope the handler works with current trigger mode */ 1788a395d6a7SJoe Perches pr_warn("irq %d uses trigger mode %u; requested %u\n", 17897ee7e87dSThomas Gleixner irq, omsk, nmsk); 179094d39e1fSThomas Gleixner } 179182736f4dSUwe Kleine-König 1792f17c7545SIngo Molnar *old_ptr = new; 179382736f4dSUwe Kleine-König 1794cab303beSThomas Gleixner irq_pm_install_action(desc, new); 1795cab303beSThomas Gleixner 17968528b0f1SLinus Torvalds /* Reset broken irq detection when installing new handler */ 17978528b0f1SLinus Torvalds desc->irq_count = 0; 17988528b0f1SLinus Torvalds desc->irqs_unhandled = 0; 17991adb0850SThomas Gleixner 18001adb0850SThomas Gleixner /* 18011adb0850SThomas Gleixner * Check whether we disabled the irq via the spurious handler 18021adb0850SThomas Gleixner * before. Reenable it and give it another chance. 18031adb0850SThomas Gleixner */ 18047acdd53eSThomas Gleixner if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { 18057acdd53eSThomas Gleixner desc->istate &= ~IRQS_SPURIOUS_DISABLED; 180679ff1cdaSJiang Liu __enable_irq(desc); 18071adb0850SThomas Gleixner } 18081adb0850SThomas Gleixner 1809239007b8SThomas Gleixner raw_spin_unlock_irqrestore(&desc->lock, flags); 18103a90795eSThomas Gleixner chip_bus_sync_unlock(desc); 18119114014cSThomas Gleixner mutex_unlock(&desc->request_mutex); 18121da177e4SLinus Torvalds 1813b2d3d61aSDaniel Lezcano irq_setup_timings(desc, new); 1814b2d3d61aSDaniel Lezcano 18158707898eSThomas Pfaff wake_up_and_wait_for_irq_thread_ready(desc, new); 18168707898eSThomas Pfaff wake_up_and_wait_for_irq_thread_ready(desc, new->secondary); 181769ab8494SThomas Gleixner 18182c6927a3SYinghai Lu register_irq_proc(irq, desc); 18191da177e4SLinus Torvalds new->dir = NULL; 18201da177e4SLinus Torvalds register_handler_proc(irq, new); 18211da177e4SLinus Torvalds return 0; 1822f5163427SDimitri Sivanich 1823f5163427SDimitri Sivanich mismatch: 18243cca53b0SThomas Gleixner if (!(new->flags & IRQF_PROBE_SHARED)) { 182597fd75b7SAndrew Morton pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n", 1826f5d89470SThomas Gleixner irq, new->flags, new->name, old->flags, old->name); 1827f5d89470SThomas Gleixner #ifdef CONFIG_DEBUG_SHIRQ 1828f5163427SDimitri Sivanich dump_stack(); 18293f050447SAlan Cox #endif 1830f5d89470SThomas Gleixner } 18313aa551c9SThomas Gleixner ret = -EBUSY; 18323aa551c9SThomas Gleixner 1833cba4235eSThomas Gleixner out_unlock: 18341c389795SDan Carpenter raw_spin_unlock_irqrestore(&desc->lock, flags); 18353b8249e7SThomas Gleixner 183646e48e25SThomas Gleixner if (!desc->action) 183746e48e25SThomas Gleixner irq_release_resources(desc); 183819d39a38SThomas Gleixner out_bus_unlock: 183919d39a38SThomas Gleixner chip_bus_sync_unlock(desc); 18409114014cSThomas Gleixner mutex_unlock(&desc->request_mutex); 18419114014cSThomas Gleixner 18423aa551c9SThomas Gleixner out_thread: 18433aa551c9SThomas Gleixner if (new->thread) { 18443aa551c9SThomas Gleixner struct task_struct *t = new->thread; 18453aa551c9SThomas Gleixner 18463aa551c9SThomas Gleixner new->thread = NULL; 18473aa551c9SThomas Gleixner kthread_stop(t); 18483aa551c9SThomas Gleixner put_task_struct(t); 18493aa551c9SThomas Gleixner } 18502a1d3ab8SThomas Gleixner if (new->secondary && new->secondary->thread) { 18512a1d3ab8SThomas Gleixner struct task_struct *t = new->secondary->thread; 18522a1d3ab8SThomas Gleixner 18532a1d3ab8SThomas Gleixner new->secondary->thread = NULL; 18542a1d3ab8SThomas Gleixner kthread_stop(t); 18552a1d3ab8SThomas Gleixner put_task_struct(t); 18562a1d3ab8SThomas Gleixner } 1857b6873807SSebastian Andrzej Siewior out_mput: 1858b6873807SSebastian Andrzej Siewior module_put(desc->owner); 18593aa551c9SThomas Gleixner return ret; 18601da177e4SLinus Torvalds } 18611da177e4SLinus Torvalds 1862cbf94f06SMagnus Damm /* 1863cbf94f06SMagnus Damm * Internal function to unregister an irqaction - used to free 1864cbf94f06SMagnus Damm * regular and special interrupts that are part of the architecture. 18651da177e4SLinus Torvalds */ 186683ac4ca9SUwe Kleine König static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) 18671da177e4SLinus Torvalds { 186883ac4ca9SUwe Kleine König unsigned irq = desc->irq_data.irq; 1869f17c7545SIngo Molnar struct irqaction *action, **action_ptr; 18701da177e4SLinus Torvalds unsigned long flags; 18711da177e4SLinus Torvalds 1872ae88a23bSIngo Molnar WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); 18737d94f7caSYinghai Lu 18749114014cSThomas Gleixner mutex_lock(&desc->request_mutex); 1875abc7e40cSThomas Gleixner chip_bus_lock(desc); 1876239007b8SThomas Gleixner raw_spin_lock_irqsave(&desc->lock, flags); 1877ae88a23bSIngo Molnar 1878ae88a23bSIngo Molnar /* 1879ae88a23bSIngo Molnar * There can be multiple actions per IRQ descriptor, find the right 1880ae88a23bSIngo Molnar * one based on the dev_id: 1881ae88a23bSIngo Molnar */ 1882f17c7545SIngo Molnar action_ptr = &desc->action; 18831da177e4SLinus Torvalds for (;;) { 1884f17c7545SIngo Molnar action = *action_ptr; 18851da177e4SLinus Torvalds 1886ae88a23bSIngo Molnar if (!action) { 1887ae88a23bSIngo Molnar WARN(1, "Trying to free already-free IRQ %d\n", irq); 1888239007b8SThomas Gleixner raw_spin_unlock_irqrestore(&desc->lock, flags); 1889abc7e40cSThomas Gleixner chip_bus_sync_unlock(desc); 189019d39a38SThomas Gleixner mutex_unlock(&desc->request_mutex); 1891f21cfb25SMagnus Damm return NULL; 1892ae88a23bSIngo Molnar } 18931da177e4SLinus Torvalds 18948316e381SIngo Molnar if (action->dev_id == dev_id) 1895ae88a23bSIngo Molnar break; 1896f17c7545SIngo Molnar action_ptr = &action->next; 1897ae88a23bSIngo Molnar } 1898ae88a23bSIngo Molnar 1899ae88a23bSIngo Molnar /* Found it - now remove it from the list of entries: */ 1900f17c7545SIngo Molnar *action_ptr = action->next; 1901dbce706eSPaolo 'Blaisorblade' Giarrusso 1902cab303beSThomas Gleixner irq_pm_remove_action(desc, action); 1903cab303beSThomas Gleixner 1904ae88a23bSIngo Molnar /* If this was the last handler, shut down the IRQ line: */ 1905c1bacbaeSThomas Gleixner if (!desc->action) { 1906e9849777SThomas Gleixner irq_settings_clr_disable_unlazy(desc); 19074001d8e8SThomas Gleixner /* Only shutdown. Deactivate after synchronize_hardirq() */ 190846999238SThomas Gleixner irq_shutdown(desc); 1909c1bacbaeSThomas Gleixner } 19103aa551c9SThomas Gleixner 1911e7a297b0SPeter P Waskiewicz Jr #ifdef CONFIG_SMP 1912e7a297b0SPeter P Waskiewicz Jr /* make sure affinity_hint is cleaned up */ 1913e7a297b0SPeter P Waskiewicz Jr if (WARN_ON_ONCE(desc->affinity_hint)) 1914e7a297b0SPeter P Waskiewicz Jr desc->affinity_hint = NULL; 1915e7a297b0SPeter P Waskiewicz Jr #endif 1916e7a297b0SPeter P Waskiewicz Jr 1917239007b8SThomas Gleixner raw_spin_unlock_irqrestore(&desc->lock, flags); 191819d39a38SThomas Gleixner /* 191919d39a38SThomas Gleixner * Drop bus_lock here so the changes which were done in the chip 192019d39a38SThomas Gleixner * callbacks above are synced out to the irq chips which hang 1921519cc865SLukas Wunner * behind a slow bus (I2C, SPI) before calling synchronize_hardirq(). 192219d39a38SThomas Gleixner * 192319d39a38SThomas Gleixner * Aside of that the bus_lock can also be taken from the threaded 192419d39a38SThomas Gleixner * handler in irq_finalize_oneshot() which results in a deadlock 1925519cc865SLukas Wunner * because kthread_stop() would wait forever for the thread to 192619d39a38SThomas Gleixner * complete, which is blocked on the bus lock. 192719d39a38SThomas Gleixner * 192819d39a38SThomas Gleixner * The still held desc->request_mutex() protects against a 192919d39a38SThomas Gleixner * concurrent request_irq() of this irq so the release of resources 193019d39a38SThomas Gleixner * and timing data is properly serialized. 193119d39a38SThomas Gleixner */ 1932abc7e40cSThomas Gleixner chip_bus_sync_unlock(desc); 1933ae88a23bSIngo Molnar 19341da177e4SLinus Torvalds unregister_handler_proc(irq, action); 19351da177e4SLinus Torvalds 193662e04686SThomas Gleixner /* 193762e04686SThomas Gleixner * Make sure it's not being used on another CPU and if the chip 193862e04686SThomas Gleixner * supports it also make sure that there is no (not yet serviced) 193962e04686SThomas Gleixner * interrupt in flight at the hardware level. 194062e04686SThomas Gleixner */ 194162e04686SThomas Gleixner __synchronize_hardirq(desc, true); 1942ae88a23bSIngo Molnar 19431d99493bSDavid Woodhouse #ifdef CONFIG_DEBUG_SHIRQ 19441d99493bSDavid Woodhouse /* 1945ae88a23bSIngo Molnar * It's a shared IRQ -- the driver ought to be prepared for an IRQ 1946ae88a23bSIngo Molnar * event to happen even now it's being freed, so let's make sure that 1947ae88a23bSIngo Molnar * is so by doing an extra call to the handler .... 1948ae88a23bSIngo Molnar * 1949ae88a23bSIngo Molnar * ( We do this after actually deregistering it, to make sure that a 19500a13ec0bSJonathan Neuschäfer * 'real' IRQ doesn't run in parallel with our fake. ) 19511d99493bSDavid Woodhouse */ 19521d99493bSDavid Woodhouse if (action->flags & IRQF_SHARED) { 19531d99493bSDavid Woodhouse local_irq_save(flags); 19541d99493bSDavid Woodhouse action->handler(irq, dev_id); 19551d99493bSDavid Woodhouse local_irq_restore(flags); 19561d99493bSDavid Woodhouse } 19571d99493bSDavid Woodhouse #endif 19582d860ad7SLinus Torvalds 1959519cc865SLukas Wunner /* 1960519cc865SLukas Wunner * The action has already been removed above, but the thread writes 1961519cc865SLukas Wunner * its oneshot mask bit when it completes. Though request_mutex is 1962519cc865SLukas Wunner * held across this which prevents __setup_irq() from handing out 1963519cc865SLukas Wunner * the same bit to a newly requested action. 1964519cc865SLukas Wunner */ 19652d860ad7SLinus Torvalds if (action->thread) { 19662d860ad7SLinus Torvalds kthread_stop(action->thread); 19672d860ad7SLinus Torvalds put_task_struct(action->thread); 19682a1d3ab8SThomas Gleixner if (action->secondary && action->secondary->thread) { 19692a1d3ab8SThomas Gleixner kthread_stop(action->secondary->thread); 19702a1d3ab8SThomas Gleixner put_task_struct(action->secondary->thread); 19712a1d3ab8SThomas Gleixner } 19722d860ad7SLinus Torvalds } 19732d860ad7SLinus Torvalds 197419d39a38SThomas Gleixner /* Last action releases resources */ 19752343877fSThomas Gleixner if (!desc->action) { 197619d39a38SThomas Gleixner /* 1977a359f757SIngo Molnar * Reacquire bus lock as irq_release_resources() might 197819d39a38SThomas Gleixner * require it to deallocate resources over the slow bus. 197919d39a38SThomas Gleixner */ 198019d39a38SThomas Gleixner chip_bus_lock(desc); 19814001d8e8SThomas Gleixner /* 19824001d8e8SThomas Gleixner * There is no interrupt on the fly anymore. Deactivate it 19834001d8e8SThomas Gleixner * completely. 19844001d8e8SThomas Gleixner */ 19854001d8e8SThomas Gleixner raw_spin_lock_irqsave(&desc->lock, flags); 19864001d8e8SThomas Gleixner irq_domain_deactivate_irq(&desc->irq_data); 19874001d8e8SThomas Gleixner raw_spin_unlock_irqrestore(&desc->lock, flags); 19884001d8e8SThomas Gleixner 198946e48e25SThomas Gleixner irq_release_resources(desc); 199019d39a38SThomas Gleixner chip_bus_sync_unlock(desc); 19912343877fSThomas Gleixner irq_remove_timings(desc); 19922343877fSThomas Gleixner } 199346e48e25SThomas Gleixner 19949114014cSThomas Gleixner mutex_unlock(&desc->request_mutex); 19959114014cSThomas Gleixner 1996be45beb2SJon Hunter irq_chip_pm_put(&desc->irq_data); 1997b6873807SSebastian Andrzej Siewior module_put(desc->owner); 19982a1d3ab8SThomas Gleixner kfree(action->secondary); 1999f21cfb25SMagnus Damm return action; 2000f21cfb25SMagnus Damm } 20011da177e4SLinus Torvalds 20021da177e4SLinus Torvalds /** 2003f21cfb25SMagnus Damm * free_irq - free an interrupt allocated with request_irq 20041da177e4SLinus Torvalds * @irq: Interrupt line to free 20051da177e4SLinus Torvalds * @dev_id: Device identity to free 20061da177e4SLinus Torvalds * 20071da177e4SLinus Torvalds * Remove an interrupt handler. The handler is removed and if the 20081da177e4SLinus Torvalds * interrupt line is no longer in use by any driver it is disabled. 20091da177e4SLinus Torvalds * On a shared IRQ the caller must ensure the interrupt is disabled 20101da177e4SLinus Torvalds * on the card it drives before calling this function. The function 20111da177e4SLinus Torvalds * does not return until any executing interrupts for this IRQ 20121da177e4SLinus Torvalds * have completed. 20131da177e4SLinus Torvalds * 20141da177e4SLinus Torvalds * This function must not be called from interrupt context. 201525ce4be7SChristoph Hellwig * 201625ce4be7SChristoph Hellwig * Returns the devname argument passed to request_irq. 20171da177e4SLinus Torvalds */ 201825ce4be7SChristoph Hellwig const void *free_irq(unsigned int irq, void *dev_id) 20191da177e4SLinus Torvalds { 202070aedd24SThomas Gleixner struct irq_desc *desc = irq_to_desc(irq); 202125ce4be7SChristoph Hellwig struct irqaction *action; 202225ce4be7SChristoph Hellwig const char *devname; 202370aedd24SThomas Gleixner 202431d9d9b6SMarc Zyngier if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) 202525ce4be7SChristoph Hellwig return NULL; 202670aedd24SThomas Gleixner 2027cd7eab44SBen Hutchings #ifdef CONFIG_SMP 2028cd7eab44SBen Hutchings if (WARN_ON(desc->affinity_notify)) 2029cd7eab44SBen Hutchings desc->affinity_notify = NULL; 2030cd7eab44SBen Hutchings #endif 2031cd7eab44SBen Hutchings 203283ac4ca9SUwe Kleine König action = __free_irq(desc, dev_id); 20332827a418SAlexandru Moise 20342827a418SAlexandru Moise if (!action) 20352827a418SAlexandru Moise return NULL; 20362827a418SAlexandru Moise 203725ce4be7SChristoph Hellwig devname = action->name; 203825ce4be7SChristoph Hellwig kfree(action); 203925ce4be7SChristoph Hellwig return devname; 20401da177e4SLinus Torvalds } 20411da177e4SLinus Torvalds EXPORT_SYMBOL(free_irq); 20421da177e4SLinus Torvalds 2043b525903cSJulien Thierry /* This function must be called with desc->lock held */ 2044b525903cSJulien Thierry static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc) 2045b525903cSJulien Thierry { 2046b525903cSJulien Thierry const char *devname = NULL; 2047b525903cSJulien Thierry 2048b525903cSJulien Thierry desc->istate &= ~IRQS_NMI; 2049b525903cSJulien Thierry 2050b525903cSJulien Thierry if (!WARN_ON(desc->action == NULL)) { 2051b525903cSJulien Thierry irq_pm_remove_action(desc, desc->action); 2052b525903cSJulien Thierry devname = desc->action->name; 2053b525903cSJulien Thierry unregister_handler_proc(irq, desc->action); 2054b525903cSJulien Thierry 2055b525903cSJulien Thierry kfree(desc->action); 2056b525903cSJulien Thierry desc->action = NULL; 2057b525903cSJulien Thierry } 2058b525903cSJulien Thierry 2059b525903cSJulien Thierry irq_settings_clr_disable_unlazy(desc); 20604001d8e8SThomas Gleixner irq_shutdown_and_deactivate(desc); 2061b525903cSJulien Thierry 2062b525903cSJulien Thierry irq_release_resources(desc); 2063b525903cSJulien Thierry 2064b525903cSJulien Thierry irq_chip_pm_put(&desc->irq_data); 2065b525903cSJulien Thierry module_put(desc->owner); 2066b525903cSJulien Thierry 2067b525903cSJulien Thierry return devname; 2068b525903cSJulien Thierry } 2069b525903cSJulien Thierry 2070b525903cSJulien Thierry const void *free_nmi(unsigned int irq, void *dev_id) 2071b525903cSJulien Thierry { 2072b525903cSJulien Thierry struct irq_desc *desc = irq_to_desc(irq); 2073b525903cSJulien Thierry unsigned long flags; 2074b525903cSJulien Thierry const void *devname; 2075b525903cSJulien Thierry 2076b525903cSJulien Thierry if (!desc || WARN_ON(!(desc->istate & IRQS_NMI))) 2077b525903cSJulien Thierry return NULL; 2078b525903cSJulien Thierry 2079b525903cSJulien Thierry if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) 2080b525903cSJulien Thierry return NULL; 2081b525903cSJulien Thierry 2082b525903cSJulien Thierry /* NMI still enabled */ 2083b525903cSJulien Thierry if (WARN_ON(desc->depth == 0)) 2084b525903cSJulien Thierry disable_nmi_nosync(irq); 2085b525903cSJulien Thierry 2086b525903cSJulien Thierry raw_spin_lock_irqsave(&desc->lock, flags); 2087b525903cSJulien Thierry 2088b525903cSJulien Thierry irq_nmi_teardown(desc); 2089b525903cSJulien Thierry devname = __cleanup_nmi(irq, desc); 2090b525903cSJulien Thierry 2091b525903cSJulien Thierry raw_spin_unlock_irqrestore(&desc->lock, flags); 2092b525903cSJulien Thierry 2093b525903cSJulien Thierry return devname; 2094b525903cSJulien Thierry } 2095b525903cSJulien Thierry 20961da177e4SLinus Torvalds /** 20973aa551c9SThomas Gleixner * request_threaded_irq - allocate an interrupt line 20981da177e4SLinus Torvalds * @irq: Interrupt line to allocate 20993aa551c9SThomas Gleixner * @handler: Function to be called when the IRQ occurs. 210061377ec1SJoel Savitz * Primary handler for threaded interrupts. 210161377ec1SJoel Savitz * If handler is NULL and thread_fn != NULL 210261377ec1SJoel Savitz * the default primary handler is installed. 21033aa551c9SThomas Gleixner * @thread_fn: Function called from the irq handler thread 21043aa551c9SThomas Gleixner * If NULL, no irq thread is created 21051da177e4SLinus Torvalds * @irqflags: Interrupt type flags 21061da177e4SLinus Torvalds * @devname: An ascii name for the claiming device 21071da177e4SLinus Torvalds * @dev_id: A cookie passed back to the handler function 21081da177e4SLinus Torvalds * 21091da177e4SLinus Torvalds * This call allocates interrupt resources and enables the 21101da177e4SLinus Torvalds * interrupt line and IRQ handling. From the point this 21111da177e4SLinus Torvalds * call is made your handler function may be invoked. Since 21121da177e4SLinus Torvalds * your handler function must clear any interrupt the board 21131da177e4SLinus Torvalds * raises, you must take care both to initialise your hardware 21141da177e4SLinus Torvalds * and to set up the interrupt handler in the right order. 21151da177e4SLinus Torvalds * 21163aa551c9SThomas Gleixner * If you want to set up a threaded irq handler for your device 21176d21af4fSJavi Merino * then you need to supply @handler and @thread_fn. @handler is 21183aa551c9SThomas Gleixner * still called in hard interrupt context and has to check 21193aa551c9SThomas Gleixner * whether the interrupt originates from the device. If yes it 21203aa551c9SThomas Gleixner * needs to disable the interrupt on the device and return 212139a2eddbSSteven Rostedt * IRQ_WAKE_THREAD which will wake up the handler thread and run 21223aa551c9SThomas Gleixner * @thread_fn. This split handler design is necessary to support 21233aa551c9SThomas Gleixner * shared interrupts. 21243aa551c9SThomas Gleixner * 21251da177e4SLinus Torvalds * Dev_id must be globally unique. Normally the address of the 21261da177e4SLinus Torvalds * device data structure is used as the cookie. Since the handler 21271da177e4SLinus Torvalds * receives this value it makes sense to use it. 21281da177e4SLinus Torvalds * 21291da177e4SLinus Torvalds * If your interrupt is shared you must pass a non NULL dev_id 21301da177e4SLinus Torvalds * as this is required when freeing the interrupt. 21311da177e4SLinus Torvalds * 21321da177e4SLinus Torvalds * Flags: 21331da177e4SLinus Torvalds * 21343cca53b0SThomas Gleixner * IRQF_SHARED Interrupt is shared 21350c5d1eb7SDavid Brownell * IRQF_TRIGGER_* Specify active edge(s) or level 213604c2721dSThomas Gleixner * IRQF_ONESHOT Run thread_fn with interrupt line masked 21371da177e4SLinus Torvalds */ 21383aa551c9SThomas Gleixner int request_threaded_irq(unsigned int irq, irq_handler_t handler, 21393aa551c9SThomas Gleixner irq_handler_t thread_fn, unsigned long irqflags, 21403aa551c9SThomas Gleixner const char *devname, void *dev_id) 21411da177e4SLinus Torvalds { 21421da177e4SLinus Torvalds struct irqaction *action; 214308678b08SYinghai Lu struct irq_desc *desc; 2144d3c60047SThomas Gleixner int retval; 21451da177e4SLinus Torvalds 2146e237a551SChen Fan if (irq == IRQ_NOTCONNECTED) 2147e237a551SChen Fan return -ENOTCONN; 2148e237a551SChen Fan 2149470c6623SDavid Brownell /* 21501da177e4SLinus Torvalds * Sanity-check: shared interrupts must pass in a real dev-ID, 21511da177e4SLinus Torvalds * otherwise we'll have trouble later trying to figure out 21521da177e4SLinus Torvalds * which interrupt is which (messes up the interrupt freeing 21531da177e4SLinus Torvalds * logic etc). 215417f48034SRafael J. Wysocki * 2155cbe16f35SBarry Song * Also shared interrupts do not go well with disabling auto enable. 2156cbe16f35SBarry Song * The sharing interrupt might request it while it's still disabled 2157cbe16f35SBarry Song * and then wait for interrupts forever. 2158cbe16f35SBarry Song * 215917f48034SRafael J. Wysocki * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and 216017f48034SRafael J. Wysocki * it cannot be set along with IRQF_NO_SUSPEND. 21611da177e4SLinus Torvalds */ 216217f48034SRafael J. Wysocki if (((irqflags & IRQF_SHARED) && !dev_id) || 2163cbe16f35SBarry Song ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) || 216417f48034SRafael J. Wysocki (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) || 216517f48034SRafael J. Wysocki ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND))) 21661da177e4SLinus Torvalds return -EINVAL; 21677d94f7caSYinghai Lu 2168cb5bc832SYinghai Lu desc = irq_to_desc(irq); 21697d94f7caSYinghai Lu if (!desc) 21701da177e4SLinus Torvalds return -EINVAL; 21717d94f7caSYinghai Lu 217231d9d9b6SMarc Zyngier if (!irq_settings_can_request(desc) || 217331d9d9b6SMarc Zyngier WARN_ON(irq_settings_is_per_cpu_devid(desc))) 21746550c775SThomas Gleixner return -EINVAL; 2175b25c340cSThomas Gleixner 2176b25c340cSThomas Gleixner if (!handler) { 2177b25c340cSThomas Gleixner if (!thread_fn) 21781da177e4SLinus Torvalds return -EINVAL; 2179b25c340cSThomas Gleixner handler = irq_default_primary_handler; 2180b25c340cSThomas Gleixner } 21811da177e4SLinus Torvalds 218245535732SThomas Gleixner action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 21831da177e4SLinus Torvalds if (!action) 21841da177e4SLinus Torvalds return -ENOMEM; 21851da177e4SLinus Torvalds 21861da177e4SLinus Torvalds action->handler = handler; 21873aa551c9SThomas Gleixner action->thread_fn = thread_fn; 21881da177e4SLinus Torvalds action->flags = irqflags; 21891da177e4SLinus Torvalds action->name = devname; 21901da177e4SLinus Torvalds action->dev_id = dev_id; 21911da177e4SLinus Torvalds 2192be45beb2SJon Hunter retval = irq_chip_pm_get(&desc->irq_data); 21934396f46cSShawn Lin if (retval < 0) { 21944396f46cSShawn Lin kfree(action); 2195be45beb2SJon Hunter return retval; 21964396f46cSShawn Lin } 2197be45beb2SJon Hunter 2198d3c60047SThomas Gleixner retval = __setup_irq(irq, desc, action); 219970aedd24SThomas Gleixner 22002a1d3ab8SThomas Gleixner if (retval) { 2201be45beb2SJon Hunter irq_chip_pm_put(&desc->irq_data); 22022a1d3ab8SThomas Gleixner kfree(action->secondary); 2203377bf1e4SAnton Vorontsov kfree(action); 22042a1d3ab8SThomas Gleixner } 2205377bf1e4SAnton Vorontsov 22066d83f94dSThomas Gleixner #ifdef CONFIG_DEBUG_SHIRQ_FIXME 22076ce51c43SLuis Henriques if (!retval && (irqflags & IRQF_SHARED)) { 2208a304e1b8SDavid Woodhouse /* 2209a304e1b8SDavid Woodhouse * It's a shared IRQ -- the driver ought to be prepared for it 2210a304e1b8SDavid Woodhouse * to happen immediately, so let's make sure.... 2211377bf1e4SAnton Vorontsov * We disable the irq to make sure that a 'real' IRQ doesn't 2212377bf1e4SAnton Vorontsov * run in parallel with our fake. 2213a304e1b8SDavid Woodhouse */ 2214a304e1b8SDavid Woodhouse unsigned long flags; 2215a304e1b8SDavid Woodhouse 2216377bf1e4SAnton Vorontsov disable_irq(irq); 2217a304e1b8SDavid Woodhouse local_irq_save(flags); 2218377bf1e4SAnton Vorontsov 2219a304e1b8SDavid Woodhouse handler(irq, dev_id); 2220377bf1e4SAnton Vorontsov 2221a304e1b8SDavid Woodhouse local_irq_restore(flags); 2222377bf1e4SAnton Vorontsov enable_irq(irq); 2223a304e1b8SDavid Woodhouse } 2224a304e1b8SDavid Woodhouse #endif 22251da177e4SLinus Torvalds return retval; 22261da177e4SLinus Torvalds } 22273aa551c9SThomas Gleixner EXPORT_SYMBOL(request_threaded_irq); 2228ae731f8dSMarc Zyngier 2229ae731f8dSMarc Zyngier /** 2230ae731f8dSMarc Zyngier * request_any_context_irq - allocate an interrupt line 2231ae731f8dSMarc Zyngier * @irq: Interrupt line to allocate 2232ae731f8dSMarc Zyngier * @handler: Function to be called when the IRQ occurs. 2233ae731f8dSMarc Zyngier * Threaded handler for threaded interrupts. 2234ae731f8dSMarc Zyngier * @flags: Interrupt type flags 2235ae731f8dSMarc Zyngier * @name: An ascii name for the claiming device 2236ae731f8dSMarc Zyngier * @dev_id: A cookie passed back to the handler function 2237ae731f8dSMarc Zyngier * 2238ae731f8dSMarc Zyngier * This call allocates interrupt resources and enables the 2239ae731f8dSMarc Zyngier * interrupt line and IRQ handling. It selects either a 2240ae731f8dSMarc Zyngier * hardirq or threaded handling method depending on the 2241ae731f8dSMarc Zyngier * context. 2242ae731f8dSMarc Zyngier * 2243ae731f8dSMarc Zyngier * On failure, it returns a negative value. On success, 2244ae731f8dSMarc Zyngier * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. 2245ae731f8dSMarc Zyngier */ 2246ae731f8dSMarc Zyngier int request_any_context_irq(unsigned int irq, irq_handler_t handler, 2247ae731f8dSMarc Zyngier unsigned long flags, const char *name, void *dev_id) 2248ae731f8dSMarc Zyngier { 2249e237a551SChen Fan struct irq_desc *desc; 2250ae731f8dSMarc Zyngier int ret; 2251ae731f8dSMarc Zyngier 2252e237a551SChen Fan if (irq == IRQ_NOTCONNECTED) 2253e237a551SChen Fan return -ENOTCONN; 2254e237a551SChen Fan 2255e237a551SChen Fan desc = irq_to_desc(irq); 2256ae731f8dSMarc Zyngier if (!desc) 2257ae731f8dSMarc Zyngier return -EINVAL; 2258ae731f8dSMarc Zyngier 22591ccb4e61SThomas Gleixner if (irq_settings_is_nested_thread(desc)) { 2260ae731f8dSMarc Zyngier ret = request_threaded_irq(irq, NULL, handler, 2261ae731f8dSMarc Zyngier flags, name, dev_id); 2262ae731f8dSMarc Zyngier return !ret ? IRQC_IS_NESTED : ret; 2263ae731f8dSMarc Zyngier } 2264ae731f8dSMarc Zyngier 2265ae731f8dSMarc Zyngier ret = request_irq(irq, handler, flags, name, dev_id); 2266ae731f8dSMarc Zyngier return !ret ? IRQC_IS_HARDIRQ : ret; 2267ae731f8dSMarc Zyngier } 2268ae731f8dSMarc Zyngier EXPORT_SYMBOL_GPL(request_any_context_irq); 226931d9d9b6SMarc Zyngier 2270b525903cSJulien Thierry /** 2271b525903cSJulien Thierry * request_nmi - allocate an interrupt line for NMI delivery 2272b525903cSJulien Thierry * @irq: Interrupt line to allocate 2273b525903cSJulien Thierry * @handler: Function to be called when the IRQ occurs. 2274b525903cSJulien Thierry * Threaded handler for threaded interrupts. 2275b525903cSJulien Thierry * @irqflags: Interrupt type flags 2276b525903cSJulien Thierry * @name: An ascii name for the claiming device 2277b525903cSJulien Thierry * @dev_id: A cookie passed back to the handler function 2278b525903cSJulien Thierry * 2279b525903cSJulien Thierry * This call allocates interrupt resources and enables the 2280b525903cSJulien Thierry * interrupt line and IRQ handling. It sets up the IRQ line 2281b525903cSJulien Thierry * to be handled as an NMI. 2282b525903cSJulien Thierry * 2283b525903cSJulien Thierry * An interrupt line delivering NMIs cannot be shared and IRQ handling 2284b525903cSJulien Thierry * cannot be threaded. 2285b525903cSJulien Thierry * 2286b525903cSJulien Thierry * Interrupt lines requested for NMI delivering must produce per cpu 2287b525903cSJulien Thierry * interrupts and have auto enabling setting disabled. 2288b525903cSJulien Thierry * 2289b525903cSJulien Thierry * Dev_id must be globally unique. Normally the address of the 2290b525903cSJulien Thierry * device data structure is used as the cookie. Since the handler 2291b525903cSJulien Thierry * receives this value it makes sense to use it. 2292b525903cSJulien Thierry * 2293b525903cSJulien Thierry * If the interrupt line cannot be used to deliver NMIs, function 2294b525903cSJulien Thierry * will fail and return a negative value. 2295b525903cSJulien Thierry */ 2296b525903cSJulien Thierry int request_nmi(unsigned int irq, irq_handler_t handler, 2297b525903cSJulien Thierry unsigned long irqflags, const char *name, void *dev_id) 2298b525903cSJulien Thierry { 2299b525903cSJulien Thierry struct irqaction *action; 2300b525903cSJulien Thierry struct irq_desc *desc; 2301b525903cSJulien Thierry unsigned long flags; 2302b525903cSJulien Thierry int retval; 2303b525903cSJulien Thierry 2304b525903cSJulien Thierry if (irq == IRQ_NOTCONNECTED) 2305b525903cSJulien Thierry return -ENOTCONN; 2306b525903cSJulien Thierry 2307b525903cSJulien Thierry /* NMI cannot be shared, used for Polling */ 2308b525903cSJulien Thierry if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL)) 2309b525903cSJulien Thierry return -EINVAL; 2310b525903cSJulien Thierry 2311b525903cSJulien Thierry if (!(irqflags & IRQF_PERCPU)) 2312b525903cSJulien Thierry return -EINVAL; 2313b525903cSJulien Thierry 2314b525903cSJulien Thierry if (!handler) 2315b525903cSJulien Thierry return -EINVAL; 2316b525903cSJulien Thierry 2317b525903cSJulien Thierry desc = irq_to_desc(irq); 2318b525903cSJulien Thierry 2319cbe16f35SBarry Song if (!desc || (irq_settings_can_autoenable(desc) && 2320cbe16f35SBarry Song !(irqflags & IRQF_NO_AUTOEN)) || 2321b525903cSJulien Thierry !irq_settings_can_request(desc) || 2322b525903cSJulien Thierry WARN_ON(irq_settings_is_per_cpu_devid(desc)) || 2323b525903cSJulien Thierry !irq_supports_nmi(desc)) 2324b525903cSJulien Thierry return -EINVAL; 2325b525903cSJulien Thierry 2326b525903cSJulien Thierry action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 2327b525903cSJulien Thierry if (!action) 2328b525903cSJulien Thierry return -ENOMEM; 2329b525903cSJulien Thierry 2330b525903cSJulien Thierry action->handler = handler; 2331b525903cSJulien Thierry action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING; 2332b525903cSJulien Thierry action->name = name; 2333b525903cSJulien Thierry action->dev_id = dev_id; 2334b525903cSJulien Thierry 2335b525903cSJulien Thierry retval = irq_chip_pm_get(&desc->irq_data); 2336b525903cSJulien Thierry if (retval < 0) 2337b525903cSJulien Thierry goto err_out; 2338b525903cSJulien Thierry 2339b525903cSJulien Thierry retval = __setup_irq(irq, desc, action); 2340b525903cSJulien Thierry if (retval) 2341b525903cSJulien Thierry goto err_irq_setup; 2342b525903cSJulien Thierry 2343b525903cSJulien Thierry raw_spin_lock_irqsave(&desc->lock, flags); 2344b525903cSJulien Thierry 2345b525903cSJulien Thierry /* Setup NMI state */ 2346b525903cSJulien Thierry desc->istate |= IRQS_NMI; 2347b525903cSJulien Thierry retval = irq_nmi_setup(desc); 2348b525903cSJulien Thierry if (retval) { 2349b525903cSJulien Thierry __cleanup_nmi(irq, desc); 2350b525903cSJulien Thierry raw_spin_unlock_irqrestore(&desc->lock, flags); 2351b525903cSJulien Thierry return -EINVAL; 2352b525903cSJulien Thierry } 2353b525903cSJulien Thierry 2354b525903cSJulien Thierry raw_spin_unlock_irqrestore(&desc->lock, flags); 2355b525903cSJulien Thierry 2356b525903cSJulien Thierry return 0; 2357b525903cSJulien Thierry 2358b525903cSJulien Thierry err_irq_setup: 2359b525903cSJulien Thierry irq_chip_pm_put(&desc->irq_data); 2360b525903cSJulien Thierry err_out: 2361b525903cSJulien Thierry kfree(action); 2362b525903cSJulien Thierry 2363b525903cSJulien Thierry return retval; 2364b525903cSJulien Thierry } 2365b525903cSJulien Thierry 23661e7c5fd2SMarc Zyngier void enable_percpu_irq(unsigned int irq, unsigned int type) 236731d9d9b6SMarc Zyngier { 236831d9d9b6SMarc Zyngier unsigned int cpu = smp_processor_id(); 236931d9d9b6SMarc Zyngier unsigned long flags; 237031d9d9b6SMarc Zyngier struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); 237131d9d9b6SMarc Zyngier 237231d9d9b6SMarc Zyngier if (!desc) 237331d9d9b6SMarc Zyngier return; 237431d9d9b6SMarc Zyngier 2375f35ad083SMarc Zyngier /* 2376f35ad083SMarc Zyngier * If the trigger type is not specified by the caller, then 2377f35ad083SMarc Zyngier * use the default for this interrupt. 2378f35ad083SMarc Zyngier */ 23791e7c5fd2SMarc Zyngier type &= IRQ_TYPE_SENSE_MASK; 2380f35ad083SMarc Zyngier if (type == IRQ_TYPE_NONE) 2381f35ad083SMarc Zyngier type = irqd_get_trigger_type(&desc->irq_data); 2382f35ad083SMarc Zyngier 23831e7c5fd2SMarc Zyngier if (type != IRQ_TYPE_NONE) { 23841e7c5fd2SMarc Zyngier int ret; 23851e7c5fd2SMarc Zyngier 2386a1ff541aSJiang Liu ret = __irq_set_trigger(desc, type); 23871e7c5fd2SMarc Zyngier 23881e7c5fd2SMarc Zyngier if (ret) { 238932cffddeSThomas Gleixner WARN(1, "failed to set type for IRQ%d\n", irq); 23901e7c5fd2SMarc Zyngier goto out; 23911e7c5fd2SMarc Zyngier } 23921e7c5fd2SMarc Zyngier } 23931e7c5fd2SMarc Zyngier 239431d9d9b6SMarc Zyngier irq_percpu_enable(desc, cpu); 23951e7c5fd2SMarc Zyngier out: 239631d9d9b6SMarc Zyngier irq_put_desc_unlock(desc, flags); 239731d9d9b6SMarc Zyngier } 239836a5df85SChris Metcalf EXPORT_SYMBOL_GPL(enable_percpu_irq); 239931d9d9b6SMarc Zyngier 24004b078c3fSJulien Thierry void enable_percpu_nmi(unsigned int irq, unsigned int type) 24014b078c3fSJulien Thierry { 24024b078c3fSJulien Thierry enable_percpu_irq(irq, type); 24034b078c3fSJulien Thierry } 24044b078c3fSJulien Thierry 2405f0cb3220SThomas Petazzoni /** 2406f0cb3220SThomas Petazzoni * irq_percpu_is_enabled - Check whether the per cpu irq is enabled 2407f0cb3220SThomas Petazzoni * @irq: Linux irq number to check for 2408f0cb3220SThomas Petazzoni * 2409f0cb3220SThomas Petazzoni * Must be called from a non migratable context. Returns the enable 2410f0cb3220SThomas Petazzoni * state of a per cpu interrupt on the current cpu. 2411f0cb3220SThomas Petazzoni */ 2412f0cb3220SThomas Petazzoni bool irq_percpu_is_enabled(unsigned int irq) 2413f0cb3220SThomas Petazzoni { 2414f0cb3220SThomas Petazzoni unsigned int cpu = smp_processor_id(); 2415f0cb3220SThomas Petazzoni struct irq_desc *desc; 2416f0cb3220SThomas Petazzoni unsigned long flags; 2417f0cb3220SThomas Petazzoni bool is_enabled; 2418f0cb3220SThomas Petazzoni 2419f0cb3220SThomas Petazzoni desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); 2420f0cb3220SThomas Petazzoni if (!desc) 2421f0cb3220SThomas Petazzoni return false; 2422f0cb3220SThomas Petazzoni 2423f0cb3220SThomas Petazzoni is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); 2424f0cb3220SThomas Petazzoni irq_put_desc_unlock(desc, flags); 2425f0cb3220SThomas Petazzoni 2426f0cb3220SThomas Petazzoni return is_enabled; 2427f0cb3220SThomas Petazzoni } 2428f0cb3220SThomas Petazzoni EXPORT_SYMBOL_GPL(irq_percpu_is_enabled); 2429f0cb3220SThomas Petazzoni 243031d9d9b6SMarc Zyngier void disable_percpu_irq(unsigned int irq) 243131d9d9b6SMarc Zyngier { 243231d9d9b6SMarc Zyngier unsigned int cpu = smp_processor_id(); 243331d9d9b6SMarc Zyngier unsigned long flags; 243431d9d9b6SMarc Zyngier struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); 243531d9d9b6SMarc Zyngier 243631d9d9b6SMarc Zyngier if (!desc) 243731d9d9b6SMarc Zyngier return; 243831d9d9b6SMarc Zyngier 243931d9d9b6SMarc Zyngier irq_percpu_disable(desc, cpu); 244031d9d9b6SMarc Zyngier irq_put_desc_unlock(desc, flags); 244131d9d9b6SMarc Zyngier } 244236a5df85SChris Metcalf EXPORT_SYMBOL_GPL(disable_percpu_irq); 244331d9d9b6SMarc Zyngier 24444b078c3fSJulien Thierry void disable_percpu_nmi(unsigned int irq) 24454b078c3fSJulien Thierry { 24464b078c3fSJulien Thierry disable_percpu_irq(irq); 24474b078c3fSJulien Thierry } 24484b078c3fSJulien Thierry 244931d9d9b6SMarc Zyngier /* 245031d9d9b6SMarc Zyngier * Internal function to unregister a percpu irqaction. 245131d9d9b6SMarc Zyngier */ 245231d9d9b6SMarc Zyngier static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id) 245331d9d9b6SMarc Zyngier { 245431d9d9b6SMarc Zyngier struct irq_desc *desc = irq_to_desc(irq); 245531d9d9b6SMarc Zyngier struct irqaction *action; 245631d9d9b6SMarc Zyngier unsigned long flags; 245731d9d9b6SMarc Zyngier 245831d9d9b6SMarc Zyngier WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); 245931d9d9b6SMarc Zyngier 246031d9d9b6SMarc Zyngier if (!desc) 246131d9d9b6SMarc Zyngier return NULL; 246231d9d9b6SMarc Zyngier 246331d9d9b6SMarc Zyngier raw_spin_lock_irqsave(&desc->lock, flags); 246431d9d9b6SMarc Zyngier 246531d9d9b6SMarc Zyngier action = desc->action; 246631d9d9b6SMarc Zyngier if (!action || action->percpu_dev_id != dev_id) { 246731d9d9b6SMarc Zyngier WARN(1, "Trying to free already-free IRQ %d\n", irq); 246831d9d9b6SMarc Zyngier goto bad; 246931d9d9b6SMarc Zyngier } 247031d9d9b6SMarc Zyngier 247131d9d9b6SMarc Zyngier if (!cpumask_empty(desc->percpu_enabled)) { 247231d9d9b6SMarc Zyngier WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", 247331d9d9b6SMarc Zyngier irq, cpumask_first(desc->percpu_enabled)); 247431d9d9b6SMarc Zyngier goto bad; 247531d9d9b6SMarc Zyngier } 247631d9d9b6SMarc Zyngier 247731d9d9b6SMarc Zyngier /* Found it - now remove it from the list of entries: */ 247831d9d9b6SMarc Zyngier desc->action = NULL; 247931d9d9b6SMarc Zyngier 24804b078c3fSJulien Thierry desc->istate &= ~IRQS_NMI; 24814b078c3fSJulien Thierry 248231d9d9b6SMarc Zyngier raw_spin_unlock_irqrestore(&desc->lock, flags); 248331d9d9b6SMarc Zyngier 248431d9d9b6SMarc Zyngier unregister_handler_proc(irq, action); 248531d9d9b6SMarc Zyngier 2486be45beb2SJon Hunter irq_chip_pm_put(&desc->irq_data); 248731d9d9b6SMarc Zyngier module_put(desc->owner); 248831d9d9b6SMarc Zyngier return action; 248931d9d9b6SMarc Zyngier 249031d9d9b6SMarc Zyngier bad: 249131d9d9b6SMarc Zyngier raw_spin_unlock_irqrestore(&desc->lock, flags); 249231d9d9b6SMarc Zyngier return NULL; 249331d9d9b6SMarc Zyngier } 249431d9d9b6SMarc Zyngier 249531d9d9b6SMarc Zyngier /** 249631d9d9b6SMarc Zyngier * remove_percpu_irq - free a per-cpu interrupt 249731d9d9b6SMarc Zyngier * @irq: Interrupt line to free 249831d9d9b6SMarc Zyngier * @act: irqaction for the interrupt 249931d9d9b6SMarc Zyngier * 250031d9d9b6SMarc Zyngier * Used to remove interrupts statically setup by the early boot process. 250131d9d9b6SMarc Zyngier */ 250231d9d9b6SMarc Zyngier void remove_percpu_irq(unsigned int irq, struct irqaction *act) 250331d9d9b6SMarc Zyngier { 250431d9d9b6SMarc Zyngier struct irq_desc *desc = irq_to_desc(irq); 250531d9d9b6SMarc Zyngier 250631d9d9b6SMarc Zyngier if (desc && irq_settings_is_per_cpu_devid(desc)) 250731d9d9b6SMarc Zyngier __free_percpu_irq(irq, act->percpu_dev_id); 250831d9d9b6SMarc Zyngier } 250931d9d9b6SMarc Zyngier 251031d9d9b6SMarc Zyngier /** 251131d9d9b6SMarc Zyngier * free_percpu_irq - free an interrupt allocated with request_percpu_irq 251231d9d9b6SMarc Zyngier * @irq: Interrupt line to free 251331d9d9b6SMarc Zyngier * @dev_id: Device identity to free 251431d9d9b6SMarc Zyngier * 251531d9d9b6SMarc Zyngier * Remove a percpu interrupt handler. The handler is removed, but 251631d9d9b6SMarc Zyngier * the interrupt line is not disabled. This must be done on each 251731d9d9b6SMarc Zyngier * CPU before calling this function. The function does not return 251831d9d9b6SMarc Zyngier * until any executing interrupts for this IRQ have completed. 251931d9d9b6SMarc Zyngier * 252031d9d9b6SMarc Zyngier * This function must not be called from interrupt context. 252131d9d9b6SMarc Zyngier */ 252231d9d9b6SMarc Zyngier void free_percpu_irq(unsigned int irq, void __percpu *dev_id) 252331d9d9b6SMarc Zyngier { 252431d9d9b6SMarc Zyngier struct irq_desc *desc = irq_to_desc(irq); 252531d9d9b6SMarc Zyngier 252631d9d9b6SMarc Zyngier if (!desc || !irq_settings_is_per_cpu_devid(desc)) 252731d9d9b6SMarc Zyngier return; 252831d9d9b6SMarc Zyngier 252931d9d9b6SMarc Zyngier chip_bus_lock(desc); 253031d9d9b6SMarc Zyngier kfree(__free_percpu_irq(irq, dev_id)); 253131d9d9b6SMarc Zyngier chip_bus_sync_unlock(desc); 253231d9d9b6SMarc Zyngier } 2533aec2e2adSMaxime Ripard EXPORT_SYMBOL_GPL(free_percpu_irq); 253431d9d9b6SMarc Zyngier 25354b078c3fSJulien Thierry void free_percpu_nmi(unsigned int irq, void __percpu *dev_id) 25364b078c3fSJulien Thierry { 25374b078c3fSJulien Thierry struct irq_desc *desc = irq_to_desc(irq); 25384b078c3fSJulien Thierry 25394b078c3fSJulien Thierry if (!desc || !irq_settings_is_per_cpu_devid(desc)) 25404b078c3fSJulien Thierry return; 25414b078c3fSJulien Thierry 25424b078c3fSJulien Thierry if (WARN_ON(!(desc->istate & IRQS_NMI))) 25434b078c3fSJulien Thierry return; 25444b078c3fSJulien Thierry 25454b078c3fSJulien Thierry kfree(__free_percpu_irq(irq, dev_id)); 25464b078c3fSJulien Thierry } 25474b078c3fSJulien Thierry 254831d9d9b6SMarc Zyngier /** 254931d9d9b6SMarc Zyngier * setup_percpu_irq - setup a per-cpu interrupt 255031d9d9b6SMarc Zyngier * @irq: Interrupt line to setup 255131d9d9b6SMarc Zyngier * @act: irqaction for the interrupt 255231d9d9b6SMarc Zyngier * 255331d9d9b6SMarc Zyngier * Used to statically setup per-cpu interrupts in the early boot process. 255431d9d9b6SMarc Zyngier */ 255531d9d9b6SMarc Zyngier int setup_percpu_irq(unsigned int irq, struct irqaction *act) 255631d9d9b6SMarc Zyngier { 255731d9d9b6SMarc Zyngier struct irq_desc *desc = irq_to_desc(irq); 255831d9d9b6SMarc Zyngier int retval; 255931d9d9b6SMarc Zyngier 256031d9d9b6SMarc Zyngier if (!desc || !irq_settings_is_per_cpu_devid(desc)) 256131d9d9b6SMarc Zyngier return -EINVAL; 2562be45beb2SJon Hunter 2563be45beb2SJon Hunter retval = irq_chip_pm_get(&desc->irq_data); 2564be45beb2SJon Hunter if (retval < 0) 2565be45beb2SJon Hunter return retval; 2566be45beb2SJon Hunter 256731d9d9b6SMarc Zyngier retval = __setup_irq(irq, desc, act); 256831d9d9b6SMarc Zyngier 2569be45beb2SJon Hunter if (retval) 2570be45beb2SJon Hunter irq_chip_pm_put(&desc->irq_data); 2571be45beb2SJon Hunter 257231d9d9b6SMarc Zyngier return retval; 257331d9d9b6SMarc Zyngier } 257431d9d9b6SMarc Zyngier 257531d9d9b6SMarc Zyngier /** 2576c80081b9SDaniel Lezcano * __request_percpu_irq - allocate a percpu interrupt line 257731d9d9b6SMarc Zyngier * @irq: Interrupt line to allocate 257831d9d9b6SMarc Zyngier * @handler: Function to be called when the IRQ occurs. 2579c80081b9SDaniel Lezcano * @flags: Interrupt type flags (IRQF_TIMER only) 258031d9d9b6SMarc Zyngier * @devname: An ascii name for the claiming device 258131d9d9b6SMarc Zyngier * @dev_id: A percpu cookie passed back to the handler function 258231d9d9b6SMarc Zyngier * 2583a1b7febdSMaxime Ripard * This call allocates interrupt resources and enables the 2584a1b7febdSMaxime Ripard * interrupt on the local CPU. If the interrupt is supposed to be 2585a1b7febdSMaxime Ripard * enabled on other CPUs, it has to be done on each CPU using 2586a1b7febdSMaxime Ripard * enable_percpu_irq(). 258731d9d9b6SMarc Zyngier * 258831d9d9b6SMarc Zyngier * Dev_id must be globally unique. It is a per-cpu variable, and 258931d9d9b6SMarc Zyngier * the handler gets called with the interrupted CPU's instance of 259031d9d9b6SMarc Zyngier * that variable. 259131d9d9b6SMarc Zyngier */ 2592c80081b9SDaniel Lezcano int __request_percpu_irq(unsigned int irq, irq_handler_t handler, 2593c80081b9SDaniel Lezcano unsigned long flags, const char *devname, 2594c80081b9SDaniel Lezcano void __percpu *dev_id) 259531d9d9b6SMarc Zyngier { 259631d9d9b6SMarc Zyngier struct irqaction *action; 259731d9d9b6SMarc Zyngier struct irq_desc *desc; 259831d9d9b6SMarc Zyngier int retval; 259931d9d9b6SMarc Zyngier 260031d9d9b6SMarc Zyngier if (!dev_id) 260131d9d9b6SMarc Zyngier return -EINVAL; 260231d9d9b6SMarc Zyngier 260331d9d9b6SMarc Zyngier desc = irq_to_desc(irq); 260431d9d9b6SMarc Zyngier if (!desc || !irq_settings_can_request(desc) || 260531d9d9b6SMarc Zyngier !irq_settings_is_per_cpu_devid(desc)) 260631d9d9b6SMarc Zyngier return -EINVAL; 260731d9d9b6SMarc Zyngier 2608c80081b9SDaniel Lezcano if (flags && flags != IRQF_TIMER) 2609c80081b9SDaniel Lezcano return -EINVAL; 2610c80081b9SDaniel Lezcano 261131d9d9b6SMarc Zyngier action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 261231d9d9b6SMarc Zyngier if (!action) 261331d9d9b6SMarc Zyngier return -ENOMEM; 261431d9d9b6SMarc Zyngier 261531d9d9b6SMarc Zyngier action->handler = handler; 2616c80081b9SDaniel Lezcano action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND; 261731d9d9b6SMarc Zyngier action->name = devname; 261831d9d9b6SMarc Zyngier action->percpu_dev_id = dev_id; 261931d9d9b6SMarc Zyngier 2620be45beb2SJon Hunter retval = irq_chip_pm_get(&desc->irq_data); 26214396f46cSShawn Lin if (retval < 0) { 26224396f46cSShawn Lin kfree(action); 2623be45beb2SJon Hunter return retval; 26244396f46cSShawn Lin } 2625be45beb2SJon Hunter 262631d9d9b6SMarc Zyngier retval = __setup_irq(irq, desc, action); 262731d9d9b6SMarc Zyngier 2628be45beb2SJon Hunter if (retval) { 2629be45beb2SJon Hunter irq_chip_pm_put(&desc->irq_data); 263031d9d9b6SMarc Zyngier kfree(action); 2631be45beb2SJon Hunter } 263231d9d9b6SMarc Zyngier 263331d9d9b6SMarc Zyngier return retval; 263431d9d9b6SMarc Zyngier } 2635c80081b9SDaniel Lezcano EXPORT_SYMBOL_GPL(__request_percpu_irq); 26361b7047edSMarc Zyngier 26371b7047edSMarc Zyngier /** 26384b078c3fSJulien Thierry * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery 26394b078c3fSJulien Thierry * @irq: Interrupt line to allocate 26404b078c3fSJulien Thierry * @handler: Function to be called when the IRQ occurs. 26414b078c3fSJulien Thierry * @name: An ascii name for the claiming device 26424b078c3fSJulien Thierry * @dev_id: A percpu cookie passed back to the handler function 26434b078c3fSJulien Thierry * 26444b078c3fSJulien Thierry * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs 2645a5186694SJulien Thierry * have to be setup on each CPU by calling prepare_percpu_nmi() before 2646a5186694SJulien Thierry * being enabled on the same CPU by using enable_percpu_nmi(). 26474b078c3fSJulien Thierry * 26484b078c3fSJulien Thierry * Dev_id must be globally unique. It is a per-cpu variable, and 26494b078c3fSJulien Thierry * the handler gets called with the interrupted CPU's instance of 26504b078c3fSJulien Thierry * that variable. 26514b078c3fSJulien Thierry * 26524b078c3fSJulien Thierry * Interrupt lines requested for NMI delivering should have auto enabling 26534b078c3fSJulien Thierry * setting disabled. 26544b078c3fSJulien Thierry * 26554b078c3fSJulien Thierry * If the interrupt line cannot be used to deliver NMIs, function 26564b078c3fSJulien Thierry * will fail returning a negative value. 26574b078c3fSJulien Thierry */ 26584b078c3fSJulien Thierry int request_percpu_nmi(unsigned int irq, irq_handler_t handler, 26594b078c3fSJulien Thierry const char *name, void __percpu *dev_id) 26604b078c3fSJulien Thierry { 26614b078c3fSJulien Thierry struct irqaction *action; 26624b078c3fSJulien Thierry struct irq_desc *desc; 26634b078c3fSJulien Thierry unsigned long flags; 26644b078c3fSJulien Thierry int retval; 26654b078c3fSJulien Thierry 26664b078c3fSJulien Thierry if (!handler) 26674b078c3fSJulien Thierry return -EINVAL; 26684b078c3fSJulien Thierry 26694b078c3fSJulien Thierry desc = irq_to_desc(irq); 26704b078c3fSJulien Thierry 26714b078c3fSJulien Thierry if (!desc || !irq_settings_can_request(desc) || 26724b078c3fSJulien Thierry !irq_settings_is_per_cpu_devid(desc) || 26734b078c3fSJulien Thierry irq_settings_can_autoenable(desc) || 26744b078c3fSJulien Thierry !irq_supports_nmi(desc)) 26754b078c3fSJulien Thierry return -EINVAL; 26764b078c3fSJulien Thierry 26774b078c3fSJulien Thierry /* The line cannot already be NMI */ 26784b078c3fSJulien Thierry if (desc->istate & IRQS_NMI) 26794b078c3fSJulien Thierry return -EINVAL; 26804b078c3fSJulien Thierry 26814b078c3fSJulien Thierry action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 26824b078c3fSJulien Thierry if (!action) 26834b078c3fSJulien Thierry return -ENOMEM; 26844b078c3fSJulien Thierry 26854b078c3fSJulien Thierry action->handler = handler; 26864b078c3fSJulien Thierry action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD 26874b078c3fSJulien Thierry | IRQF_NOBALANCING; 26884b078c3fSJulien Thierry action->name = name; 26894b078c3fSJulien Thierry action->percpu_dev_id = dev_id; 26904b078c3fSJulien Thierry 26914b078c3fSJulien Thierry retval = irq_chip_pm_get(&desc->irq_data); 26924b078c3fSJulien Thierry if (retval < 0) 26934b078c3fSJulien Thierry goto err_out; 26944b078c3fSJulien Thierry 26954b078c3fSJulien Thierry retval = __setup_irq(irq, desc, action); 26964b078c3fSJulien Thierry if (retval) 26974b078c3fSJulien Thierry goto err_irq_setup; 26984b078c3fSJulien Thierry 26994b078c3fSJulien Thierry raw_spin_lock_irqsave(&desc->lock, flags); 27004b078c3fSJulien Thierry desc->istate |= IRQS_NMI; 27014b078c3fSJulien Thierry raw_spin_unlock_irqrestore(&desc->lock, flags); 27024b078c3fSJulien Thierry 27034b078c3fSJulien Thierry return 0; 27044b078c3fSJulien Thierry 27054b078c3fSJulien Thierry err_irq_setup: 27064b078c3fSJulien Thierry irq_chip_pm_put(&desc->irq_data); 27074b078c3fSJulien Thierry err_out: 27084b078c3fSJulien Thierry kfree(action); 27094b078c3fSJulien Thierry 27104b078c3fSJulien Thierry return retval; 27114b078c3fSJulien Thierry } 27124b078c3fSJulien Thierry 27134b078c3fSJulien Thierry /** 27144b078c3fSJulien Thierry * prepare_percpu_nmi - performs CPU local setup for NMI delivery 27154b078c3fSJulien Thierry * @irq: Interrupt line to prepare for NMI delivery 27164b078c3fSJulien Thierry * 27174b078c3fSJulien Thierry * This call prepares an interrupt line to deliver NMI on the current CPU, 27184b078c3fSJulien Thierry * before that interrupt line gets enabled with enable_percpu_nmi(). 27194b078c3fSJulien Thierry * 27204b078c3fSJulien Thierry * As a CPU local operation, this should be called from non-preemptible 27214b078c3fSJulien Thierry * context. 27224b078c3fSJulien Thierry * 27234b078c3fSJulien Thierry * If the interrupt line cannot be used to deliver NMIs, function 27244b078c3fSJulien Thierry * will fail returning a negative value. 27254b078c3fSJulien Thierry */ 27264b078c3fSJulien Thierry int prepare_percpu_nmi(unsigned int irq) 27274b078c3fSJulien Thierry { 27284b078c3fSJulien Thierry unsigned long flags; 27294b078c3fSJulien Thierry struct irq_desc *desc; 27304b078c3fSJulien Thierry int ret = 0; 27314b078c3fSJulien Thierry 27324b078c3fSJulien Thierry WARN_ON(preemptible()); 27334b078c3fSJulien Thierry 27344b078c3fSJulien Thierry desc = irq_get_desc_lock(irq, &flags, 27354b078c3fSJulien Thierry IRQ_GET_DESC_CHECK_PERCPU); 27364b078c3fSJulien Thierry if (!desc) 27374b078c3fSJulien Thierry return -EINVAL; 27384b078c3fSJulien Thierry 27394b078c3fSJulien Thierry if (WARN(!(desc->istate & IRQS_NMI), 27404b078c3fSJulien Thierry KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n", 27414b078c3fSJulien Thierry irq)) { 27424b078c3fSJulien Thierry ret = -EINVAL; 27434b078c3fSJulien Thierry goto out; 27444b078c3fSJulien Thierry } 27454b078c3fSJulien Thierry 27464b078c3fSJulien Thierry ret = irq_nmi_setup(desc); 27474b078c3fSJulien Thierry if (ret) { 27484b078c3fSJulien Thierry pr_err("Failed to setup NMI delivery: irq %u\n", irq); 27494b078c3fSJulien Thierry goto out; 27504b078c3fSJulien Thierry } 27514b078c3fSJulien Thierry 27524b078c3fSJulien Thierry out: 27534b078c3fSJulien Thierry irq_put_desc_unlock(desc, flags); 27544b078c3fSJulien Thierry return ret; 27554b078c3fSJulien Thierry } 27564b078c3fSJulien Thierry 27574b078c3fSJulien Thierry /** 27584b078c3fSJulien Thierry * teardown_percpu_nmi - undoes NMI setup of IRQ line 27594b078c3fSJulien Thierry * @irq: Interrupt line from which CPU local NMI configuration should be 27604b078c3fSJulien Thierry * removed 27614b078c3fSJulien Thierry * 27624b078c3fSJulien Thierry * This call undoes the setup done by prepare_percpu_nmi(). 27634b078c3fSJulien Thierry * 27644b078c3fSJulien Thierry * IRQ line should not be enabled for the current CPU. 27654b078c3fSJulien Thierry * 27664b078c3fSJulien Thierry * As a CPU local operation, this should be called from non-preemptible 27674b078c3fSJulien Thierry * context. 27684b078c3fSJulien Thierry */ 27694b078c3fSJulien Thierry void teardown_percpu_nmi(unsigned int irq) 27704b078c3fSJulien Thierry { 27714b078c3fSJulien Thierry unsigned long flags; 27724b078c3fSJulien Thierry struct irq_desc *desc; 27734b078c3fSJulien Thierry 27744b078c3fSJulien Thierry WARN_ON(preemptible()); 27754b078c3fSJulien Thierry 27764b078c3fSJulien Thierry desc = irq_get_desc_lock(irq, &flags, 27774b078c3fSJulien Thierry IRQ_GET_DESC_CHECK_PERCPU); 27784b078c3fSJulien Thierry if (!desc) 27794b078c3fSJulien Thierry return; 27804b078c3fSJulien Thierry 27814b078c3fSJulien Thierry if (WARN_ON(!(desc->istate & IRQS_NMI))) 27824b078c3fSJulien Thierry goto out; 27834b078c3fSJulien Thierry 27844b078c3fSJulien Thierry irq_nmi_teardown(desc); 27854b078c3fSJulien Thierry out: 27864b078c3fSJulien Thierry irq_put_desc_unlock(desc, flags); 27874b078c3fSJulien Thierry } 27884b078c3fSJulien Thierry 278962e04686SThomas Gleixner int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which, 279062e04686SThomas Gleixner bool *state) 279162e04686SThomas Gleixner { 279262e04686SThomas Gleixner struct irq_chip *chip; 279362e04686SThomas Gleixner int err = -EINVAL; 279462e04686SThomas Gleixner 279562e04686SThomas Gleixner do { 279662e04686SThomas Gleixner chip = irq_data_get_irq_chip(data); 27971d0326f3SMarek Vasut if (WARN_ON_ONCE(!chip)) 27981d0326f3SMarek Vasut return -ENODEV; 279962e04686SThomas Gleixner if (chip->irq_get_irqchip_state) 280062e04686SThomas Gleixner break; 280162e04686SThomas Gleixner #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 280262e04686SThomas Gleixner data = data->parent_data; 280362e04686SThomas Gleixner #else 280462e04686SThomas Gleixner data = NULL; 280562e04686SThomas Gleixner #endif 280662e04686SThomas Gleixner } while (data); 280762e04686SThomas Gleixner 280862e04686SThomas Gleixner if (data) 280962e04686SThomas Gleixner err = chip->irq_get_irqchip_state(data, which, state); 281062e04686SThomas Gleixner return err; 281162e04686SThomas Gleixner } 281262e04686SThomas Gleixner 28134b078c3fSJulien Thierry /** 28141b7047edSMarc Zyngier * irq_get_irqchip_state - returns the irqchip state of a interrupt. 28151b7047edSMarc Zyngier * @irq: Interrupt line that is forwarded to a VM 28161b7047edSMarc Zyngier * @which: One of IRQCHIP_STATE_* the caller wants to know about 28175c982c58SKrzysztof Kozlowski * @state: a pointer to a boolean where the state is to be stored 28181b7047edSMarc Zyngier * 28191b7047edSMarc Zyngier * This call snapshots the internal irqchip state of an 28201b7047edSMarc Zyngier * interrupt, returning into @state the bit corresponding to 28211b7047edSMarc Zyngier * stage @which 28221b7047edSMarc Zyngier * 28231b7047edSMarc Zyngier * This function should be called with preemption disabled if the 28241b7047edSMarc Zyngier * interrupt controller has per-cpu registers. 28251b7047edSMarc Zyngier */ 28261b7047edSMarc Zyngier int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, 28271b7047edSMarc Zyngier bool *state) 28281b7047edSMarc Zyngier { 28291b7047edSMarc Zyngier struct irq_desc *desc; 28301b7047edSMarc Zyngier struct irq_data *data; 28311b7047edSMarc Zyngier unsigned long flags; 28321b7047edSMarc Zyngier int err = -EINVAL; 28331b7047edSMarc Zyngier 28341b7047edSMarc Zyngier desc = irq_get_desc_buslock(irq, &flags, 0); 28351b7047edSMarc Zyngier if (!desc) 28361b7047edSMarc Zyngier return err; 28371b7047edSMarc Zyngier 28381b7047edSMarc Zyngier data = irq_desc_get_irq_data(desc); 28391b7047edSMarc Zyngier 284062e04686SThomas Gleixner err = __irq_get_irqchip_state(data, which, state); 28411b7047edSMarc Zyngier 28421b7047edSMarc Zyngier irq_put_desc_busunlock(desc, flags); 28431b7047edSMarc Zyngier return err; 28441b7047edSMarc Zyngier } 28451ee4fb3eSBjorn Andersson EXPORT_SYMBOL_GPL(irq_get_irqchip_state); 28461b7047edSMarc Zyngier 28471b7047edSMarc Zyngier /** 28481b7047edSMarc Zyngier * irq_set_irqchip_state - set the state of a forwarded interrupt. 28491b7047edSMarc Zyngier * @irq: Interrupt line that is forwarded to a VM 28501b7047edSMarc Zyngier * @which: State to be restored (one of IRQCHIP_STATE_*) 28511b7047edSMarc Zyngier * @val: Value corresponding to @which 28521b7047edSMarc Zyngier * 28531b7047edSMarc Zyngier * This call sets the internal irqchip state of an interrupt, 28541b7047edSMarc Zyngier * depending on the value of @which. 28551b7047edSMarc Zyngier * 2856e1a6af4bSJosh Cartwright * This function should be called with migration disabled if the 28571b7047edSMarc Zyngier * interrupt controller has per-cpu registers. 28581b7047edSMarc Zyngier */ 28591b7047edSMarc Zyngier int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, 28601b7047edSMarc Zyngier bool val) 28611b7047edSMarc Zyngier { 28621b7047edSMarc Zyngier struct irq_desc *desc; 28631b7047edSMarc Zyngier struct irq_data *data; 28641b7047edSMarc Zyngier struct irq_chip *chip; 28651b7047edSMarc Zyngier unsigned long flags; 28661b7047edSMarc Zyngier int err = -EINVAL; 28671b7047edSMarc Zyngier 28681b7047edSMarc Zyngier desc = irq_get_desc_buslock(irq, &flags, 0); 28691b7047edSMarc Zyngier if (!desc) 28701b7047edSMarc Zyngier return err; 28711b7047edSMarc Zyngier 28721b7047edSMarc Zyngier data = irq_desc_get_irq_data(desc); 28731b7047edSMarc Zyngier 28741b7047edSMarc Zyngier do { 28751b7047edSMarc Zyngier chip = irq_data_get_irq_chip(data); 2876f107cee9SGuenter Roeck if (WARN_ON_ONCE(!chip)) { 2877f107cee9SGuenter Roeck err = -ENODEV; 2878f107cee9SGuenter Roeck goto out_unlock; 2879f107cee9SGuenter Roeck } 28801b7047edSMarc Zyngier if (chip->irq_set_irqchip_state) 28811b7047edSMarc Zyngier break; 28821b7047edSMarc Zyngier #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 28831b7047edSMarc Zyngier data = data->parent_data; 28841b7047edSMarc Zyngier #else 28851b7047edSMarc Zyngier data = NULL; 28861b7047edSMarc Zyngier #endif 28871b7047edSMarc Zyngier } while (data); 28881b7047edSMarc Zyngier 28891b7047edSMarc Zyngier if (data) 28901b7047edSMarc Zyngier err = chip->irq_set_irqchip_state(data, which, val); 28911b7047edSMarc Zyngier 2892f107cee9SGuenter Roeck out_unlock: 28931b7047edSMarc Zyngier irq_put_desc_busunlock(desc, flags); 28941b7047edSMarc Zyngier return err; 28951b7047edSMarc Zyngier } 28961ee4fb3eSBjorn Andersson EXPORT_SYMBOL_GPL(irq_set_irqchip_state); 2897a313357eSThomas Gleixner 2898a313357eSThomas Gleixner /** 2899a313357eSThomas Gleixner * irq_has_action - Check whether an interrupt is requested 2900a313357eSThomas Gleixner * @irq: The linux irq number 2901a313357eSThomas Gleixner * 2902a313357eSThomas Gleixner * Returns: A snapshot of the current state 2903a313357eSThomas Gleixner */ 2904a313357eSThomas Gleixner bool irq_has_action(unsigned int irq) 2905a313357eSThomas Gleixner { 2906a313357eSThomas Gleixner bool res; 2907a313357eSThomas Gleixner 2908a313357eSThomas Gleixner rcu_read_lock(); 2909a313357eSThomas Gleixner res = irq_desc_has_action(irq_to_desc(irq)); 2910a313357eSThomas Gleixner rcu_read_unlock(); 2911a313357eSThomas Gleixner return res; 2912a313357eSThomas Gleixner } 2913a313357eSThomas Gleixner EXPORT_SYMBOL_GPL(irq_has_action); 2914fdd02963SThomas Gleixner 2915fdd02963SThomas Gleixner /** 2916fdd02963SThomas Gleixner * irq_check_status_bit - Check whether bits in the irq descriptor status are set 2917fdd02963SThomas Gleixner * @irq: The linux irq number 2918fdd02963SThomas Gleixner * @bitmask: The bitmask to evaluate 2919fdd02963SThomas Gleixner * 2920fdd02963SThomas Gleixner * Returns: True if one of the bits in @bitmask is set 2921fdd02963SThomas Gleixner */ 2922fdd02963SThomas Gleixner bool irq_check_status_bit(unsigned int irq, unsigned int bitmask) 2923fdd02963SThomas Gleixner { 2924fdd02963SThomas Gleixner struct irq_desc *desc; 2925fdd02963SThomas Gleixner bool res = false; 2926fdd02963SThomas Gleixner 2927fdd02963SThomas Gleixner rcu_read_lock(); 2928fdd02963SThomas Gleixner desc = irq_to_desc(irq); 2929fdd02963SThomas Gleixner if (desc) 2930fdd02963SThomas Gleixner res = !!(desc->status_use_accessors & bitmask); 2931fdd02963SThomas Gleixner rcu_read_unlock(); 2932fdd02963SThomas Gleixner return res; 2933fdd02963SThomas Gleixner } 2934ce09ccc5SThomas Gleixner EXPORT_SYMBOL_GPL(irq_check_status_bit); 2935