xref: /openbmc/linux/kernel/irq/manage.c (revision 61377ec144574313ebfbf31685895a7b9b9b7a9a)
152a65ff5SThomas Gleixner // SPDX-License-Identifier: GPL-2.0
21da177e4SLinus Torvalds /*
3a34db9b2SIngo Molnar  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4a34db9b2SIngo Molnar  * Copyright (C) 2005-2006 Thomas Gleixner
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  * This file contains driver APIs to the irq subsystem.
71da177e4SLinus Torvalds  */
81da177e4SLinus Torvalds 
997fd75b7SAndrew Morton #define pr_fmt(fmt) "genirq: " fmt
1097fd75b7SAndrew Morton 
111da177e4SLinus Torvalds #include <linux/irq.h>
123aa551c9SThomas Gleixner #include <linux/kthread.h>
131da177e4SLinus Torvalds #include <linux/module.h>
141da177e4SLinus Torvalds #include <linux/random.h>
151da177e4SLinus Torvalds #include <linux/interrupt.h>
164001d8e8SThomas Gleixner #include <linux/irqdomain.h>
171aeb272cSRobert P. J. Day #include <linux/slab.h>
183aa551c9SThomas Gleixner #include <linux/sched.h>
198bd75c77SClark Williams #include <linux/sched/rt.h>
200881e7bdSIngo Molnar #include <linux/sched/task.h>
2111ea68f5SMing Lei #include <linux/sched/isolation.h>
22ae7e81c0SIngo Molnar #include <uapi/linux/sched/types.h>
234d1d61a6SOleg Nesterov #include <linux/task_work.h>
241da177e4SLinus Torvalds 
251da177e4SLinus Torvalds #include "internals.h"
261da177e4SLinus Torvalds 
27b6a32bbdSThomas Gleixner #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
288d32a307SThomas Gleixner __read_mostly bool force_irqthreads;
2947b82e88SSebastian Andrzej Siewior EXPORT_SYMBOL_GPL(force_irqthreads);
308d32a307SThomas Gleixner 
318d32a307SThomas Gleixner static int __init setup_forced_irqthreads(char *arg)
328d32a307SThomas Gleixner {
338d32a307SThomas Gleixner 	force_irqthreads = true;
348d32a307SThomas Gleixner 	return 0;
358d32a307SThomas Gleixner }
368d32a307SThomas Gleixner early_param("threadirqs", setup_forced_irqthreads);
378d32a307SThomas Gleixner #endif
388d32a307SThomas Gleixner 
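/*
 * Forced threading is selected on the kernel command line, e.g. a boot
 * entry along these lines (a sketch, the other parameters are arbitrary):
 *
 *	linux /vmlinuz root=/dev/sda1 threadirqs
 *
 * which makes setup_forced_irqthreads() above set force_irqthreads.
 */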
3962e04686SThomas Gleixner static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
401da177e4SLinus Torvalds {
4162e04686SThomas Gleixner 	struct irq_data *irqd = irq_desc_get_irq_data(desc);
4232f4125eSThomas Gleixner 	bool inprogress;
431da177e4SLinus Torvalds 
44a98ce5c6SHerbert Xu 	do {
45a98ce5c6SHerbert Xu 		unsigned long flags;
46a98ce5c6SHerbert Xu 
47a98ce5c6SHerbert Xu 		/*
48a98ce5c6SHerbert Xu 		 * Wait until we're out of the critical section.  This might
49a98ce5c6SHerbert Xu 		 * give the wrong answer due to the lack of memory barriers.
50a98ce5c6SHerbert Xu 		 */
5132f4125eSThomas Gleixner 		while (irqd_irq_inprogress(&desc->irq_data))
521da177e4SLinus Torvalds 			cpu_relax();
53a98ce5c6SHerbert Xu 
54a98ce5c6SHerbert Xu 		/* Ok, that indicated we're done: double-check carefully. */
55239007b8SThomas Gleixner 		raw_spin_lock_irqsave(&desc->lock, flags);
5632f4125eSThomas Gleixner 		inprogress = irqd_irq_inprogress(&desc->irq_data);
5762e04686SThomas Gleixner 
5862e04686SThomas Gleixner 		/*
5962e04686SThomas Gleixner 		 * If requested and supported, check at the chip whether it
6062e04686SThomas Gleixner 		 * is in flight at the hardware level, i.e. already pending
6162e04686SThomas Gleixner 		 * in a CPU and waiting for service and acknowledge.
6262e04686SThomas Gleixner 		 */
6362e04686SThomas Gleixner 		if (!inprogress && sync_chip) {
6462e04686SThomas Gleixner 			/*
6562e04686SThomas Gleixner 			 * Ignore the return code. inprogress is only updated
6662e04686SThomas Gleixner 			 * when the chip supports it.
6762e04686SThomas Gleixner 			 */
6862e04686SThomas Gleixner 			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
6962e04686SThomas Gleixner 						&inprogress);
7062e04686SThomas Gleixner 		}
71239007b8SThomas Gleixner 		raw_spin_unlock_irqrestore(&desc->lock, flags);
72a98ce5c6SHerbert Xu 
73a98ce5c6SHerbert Xu 		/* Oops, that failed? */
7432f4125eSThomas Gleixner 	} while (inprogress);
7518258f72SThomas Gleixner }
763aa551c9SThomas Gleixner 
7718258f72SThomas Gleixner /**
7818258f72SThomas Gleixner  *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
7918258f72SThomas Gleixner  *	@irq: interrupt number to wait for
8018258f72SThomas Gleixner  *
8118258f72SThomas Gleixner  *	This function waits for any pending hard IRQ handlers for this
8218258f72SThomas Gleixner  *	interrupt to complete before returning. If you use this
8318258f72SThomas Gleixner  *	function while holding a resource the IRQ handler may need you
 8418258f72SThomas Gleixner  *	function while holding a resource the IRQ handler may need, you
8518258f72SThomas Gleixner  *	into account.
8618258f72SThomas Gleixner  *
8718258f72SThomas Gleixner  *	Do not use this for shutdown scenarios where you must be sure
8818258f72SThomas Gleixner  *	that all parts (hardirq and threaded handler) have completed.
8918258f72SThomas Gleixner  *
9002cea395SPeter Zijlstra  *	Returns: false if a threaded handler is active.
9102cea395SPeter Zijlstra  *
9218258f72SThomas Gleixner  *	This function may be called - with care - from IRQ context.
9362e04686SThomas Gleixner  *
9462e04686SThomas Gleixner  *	It does not check whether there is an interrupt in flight at the
9562e04686SThomas Gleixner  *	hardware level, but not serviced yet, as this might deadlock when
9662e04686SThomas Gleixner  *	called with interrupts disabled and the target CPU of the interrupt
9762e04686SThomas Gleixner  *	is the current CPU.
983aa551c9SThomas Gleixner  */
9902cea395SPeter Zijlstra bool synchronize_hardirq(unsigned int irq)
10018258f72SThomas Gleixner {
10118258f72SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
10218258f72SThomas Gleixner 
10302cea395SPeter Zijlstra 	if (desc) {
10462e04686SThomas Gleixner 		__synchronize_hardirq(desc, false);
10502cea395SPeter Zijlstra 		return !atomic_read(&desc->threads_active);
10602cea395SPeter Zijlstra 	}
10702cea395SPeter Zijlstra 
10802cea395SPeter Zijlstra 	return true;
10918258f72SThomas Gleixner }
11018258f72SThomas Gleixner EXPORT_SYMBOL(synchronize_hardirq);
11118258f72SThomas Gleixner 
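/*
 * Usage sketch (hypothetical driver structure "foo" whose interrupt was
 * set up with request_threaded_irq()), quiescing only the hard handler
 * and checking whether the threaded part is still running:
 *
 *	disable_irq_nosync(foo->irq);
 *	if (!synchronize_hardirq(foo->irq))
 *		pr_debug("foo: threaded handler still running\n");
 *
 * The threaded handler, if any, has to be waited for separately from
 * sleepable context, e.g. via synchronize_irq().
 */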
11218258f72SThomas Gleixner /**
11318258f72SThomas Gleixner  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
11418258f72SThomas Gleixner  *	@irq: interrupt number to wait for
11518258f72SThomas Gleixner  *
11618258f72SThomas Gleixner  *	This function waits for any pending IRQ handlers for this interrupt
11718258f72SThomas Gleixner  *	to complete before returning. If you use this function while
 11818258f72SThomas Gleixner  *	holding a resource the IRQ handler may need, you will deadlock.
11918258f72SThomas Gleixner  *
1201d21f2afSThomas Gleixner  *	Can only be called from preemptible code as it might sleep when
1211d21f2afSThomas Gleixner  *	an interrupt thread is associated to @irq.
12262e04686SThomas Gleixner  *
12362e04686SThomas Gleixner  *	It optionally makes sure (when the irq chip supports that method)
12462e04686SThomas Gleixner  *	that the interrupt is not pending in any CPU and waiting for
12562e04686SThomas Gleixner  *	service.
12618258f72SThomas Gleixner  */
12718258f72SThomas Gleixner void synchronize_irq(unsigned int irq)
12818258f72SThomas Gleixner {
12918258f72SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
13018258f72SThomas Gleixner 
13118258f72SThomas Gleixner 	if (desc) {
13262e04686SThomas Gleixner 		__synchronize_hardirq(desc, true);
13318258f72SThomas Gleixner 		/*
13418258f72SThomas Gleixner 		 * We made sure that no hardirq handler is
13518258f72SThomas Gleixner 		 * running. Now verify that no threaded handlers are
13618258f72SThomas Gleixner 		 * active.
13718258f72SThomas Gleixner 		 */
13818258f72SThomas Gleixner 		wait_event(desc->wait_for_threads,
13918258f72SThomas Gleixner 			   !atomic_read(&desc->threads_active));
14018258f72SThomas Gleixner 	}
1411da177e4SLinus Torvalds }
1421da177e4SLinus Torvalds EXPORT_SYMBOL(synchronize_irq);
1431da177e4SLinus Torvalds 
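/*
 * Usage sketch (hypothetical driver structure "foo"; its handler checks
 * foo->shutting_down first and bails out early):
 *
 *	static void foo_stop(struct foo *foo)
 *	{
 *		foo->shutting_down = true;
 *		synchronize_irq(foo->irq);
 *		// neither hardirq nor thread touches foo->rx_buf anymore
 *		kfree(foo->rx_buf);
 *	}
 *
 * This must run in sleepable context, see the comment above.
 */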
1443aa551c9SThomas Gleixner #ifdef CONFIG_SMP
1453aa551c9SThomas Gleixner cpumask_var_t irq_default_affinity;
1463aa551c9SThomas Gleixner 
1479c255583SThomas Gleixner static bool __irq_can_set_affinity(struct irq_desc *desc)
148e019c249SJiang Liu {
149e019c249SJiang Liu 	if (!desc || !irqd_can_balance(&desc->irq_data) ||
150e019c249SJiang Liu 	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
1519c255583SThomas Gleixner 		return false;
1529c255583SThomas Gleixner 	return true;
153e019c249SJiang Liu }
154e019c249SJiang Liu 
155771ee3b0SThomas Gleixner /**
156771ee3b0SThomas Gleixner  *	irq_can_set_affinity - Check if the affinity of a given irq can be set
157771ee3b0SThomas Gleixner  *	@irq:		Interrupt to check
158771ee3b0SThomas Gleixner  *
159771ee3b0SThomas Gleixner  */
160771ee3b0SThomas Gleixner int irq_can_set_affinity(unsigned int irq)
161771ee3b0SThomas Gleixner {
162e019c249SJiang Liu 	return __irq_can_set_affinity(irq_to_desc(irq));
163771ee3b0SThomas Gleixner }
164771ee3b0SThomas Gleixner 
165591d2fb0SThomas Gleixner /**
 1669c255583SThomas Gleixner  * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
1679c255583SThomas Gleixner  * @irq:	Interrupt to check
1689c255583SThomas Gleixner  *
1699c255583SThomas Gleixner  * Like irq_can_set_affinity() above, but additionally checks for the
1709c255583SThomas Gleixner  * AFFINITY_MANAGED flag.
1719c255583SThomas Gleixner  */
1729c255583SThomas Gleixner bool irq_can_set_affinity_usr(unsigned int irq)
1739c255583SThomas Gleixner {
1749c255583SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
1759c255583SThomas Gleixner 
1769c255583SThomas Gleixner 	return __irq_can_set_affinity(desc) &&
1779c255583SThomas Gleixner 		!irqd_affinity_is_managed(&desc->irq_data);
1789c255583SThomas Gleixner }
1799c255583SThomas Gleixner 
1809c255583SThomas Gleixner /**
181591d2fb0SThomas Gleixner  *	irq_set_thread_affinity - Notify irq threads to adjust affinity
1825c982c58SKrzysztof Kozlowski  *	@desc:		irq descriptor which has affinity changed
183591d2fb0SThomas Gleixner  *
184591d2fb0SThomas Gleixner  *	We just set IRQTF_AFFINITY and delegate the affinity setting
185591d2fb0SThomas Gleixner  *	to the interrupt thread itself. We can not call
186591d2fb0SThomas Gleixner  *	set_cpus_allowed_ptr() here as we hold desc->lock and this
187591d2fb0SThomas Gleixner  *	code can be called from hard interrupt context.
188591d2fb0SThomas Gleixner  */
189591d2fb0SThomas Gleixner void irq_set_thread_affinity(struct irq_desc *desc)
1903aa551c9SThomas Gleixner {
191f944b5a7SDaniel Lezcano 	struct irqaction *action;
1923aa551c9SThomas Gleixner 
193f944b5a7SDaniel Lezcano 	for_each_action_of_desc(desc, action)
1943aa551c9SThomas Gleixner 		if (action->thread)
195591d2fb0SThomas Gleixner 			set_bit(IRQTF_AFFINITY, &action->thread_flags);
1963aa551c9SThomas Gleixner }
1973aa551c9SThomas Gleixner 
198baedb87dSThomas Gleixner #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
19919e1d4e9SThomas Gleixner static void irq_validate_effective_affinity(struct irq_data *data)
20019e1d4e9SThomas Gleixner {
20119e1d4e9SThomas Gleixner 	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
20219e1d4e9SThomas Gleixner 	struct irq_chip *chip = irq_data_get_irq_chip(data);
20319e1d4e9SThomas Gleixner 
20419e1d4e9SThomas Gleixner 	if (!cpumask_empty(m))
20519e1d4e9SThomas Gleixner 		return;
20619e1d4e9SThomas Gleixner 	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
20719e1d4e9SThomas Gleixner 		     chip->name, data->irq);
20819e1d4e9SThomas Gleixner }
20919e1d4e9SThomas Gleixner 
210baedb87dSThomas Gleixner static inline void irq_init_effective_affinity(struct irq_data *data,
211baedb87dSThomas Gleixner 					       const struct cpumask *mask)
212baedb87dSThomas Gleixner {
213baedb87dSThomas Gleixner 	cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
214baedb87dSThomas Gleixner }
215baedb87dSThomas Gleixner #else
216baedb87dSThomas Gleixner static inline void irq_validate_effective_affinity(struct irq_data *data) { }
217baedb87dSThomas Gleixner static inline void irq_init_effective_affinity(struct irq_data *data,
218baedb87dSThomas Gleixner 					       const struct cpumask *mask) { }
219baedb87dSThomas Gleixner #endif
220baedb87dSThomas Gleixner 
221818b0f3bSJiang Liu int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
222818b0f3bSJiang Liu 			bool force)
223818b0f3bSJiang Liu {
224818b0f3bSJiang Liu 	struct irq_desc *desc = irq_data_to_desc(data);
225818b0f3bSJiang Liu 	struct irq_chip *chip = irq_data_get_irq_chip(data);
226818b0f3bSJiang Liu 	int ret;
227818b0f3bSJiang Liu 
228e43b3b58SThomas Gleixner 	if (!chip || !chip->irq_set_affinity)
229e43b3b58SThomas Gleixner 		return -EINVAL;
230e43b3b58SThomas Gleixner 
23111ea68f5SMing Lei 	/*
23211ea68f5SMing Lei 	 * If this is a managed interrupt and housekeeping is enabled on
 23311ea68f5SMing Lei 	 * it, check whether the requested affinity mask intersects with
23411ea68f5SMing Lei 	 * a housekeeping CPU. If so, then remove the isolated CPUs from
23511ea68f5SMing Lei 	 * the mask and just keep the housekeeping CPU(s). This prevents
23611ea68f5SMing Lei 	 * the affinity setter from routing the interrupt to an isolated
23711ea68f5SMing Lei 	 * CPU to avoid that I/O submitted from a housekeeping CPU causes
 23811ea68f5SMing Lei 	 * CPU, so that I/O submitted from a housekeeping CPU does not cause
 23911ea68f5SMing Lei 	 * interrupts on an isolated one.
24011ea68f5SMing Lei 	 * If the masks do not intersect or include online CPU(s) then
24111ea68f5SMing Lei 	 * keep the requested mask. The isolated target CPUs are only
24211ea68f5SMing Lei 	 * receiving interrupts when the I/O operation was submitted
24311ea68f5SMing Lei 	 * directly from them.
24411ea68f5SMing Lei 	 *
24511ea68f5SMing Lei 	 * If all housekeeping CPUs in the affinity mask are offline, the
24611ea68f5SMing Lei 	 * interrupt will be migrated by the CPU hotplug code once a
24711ea68f5SMing Lei 	 * housekeeping CPU which belongs to the affinity mask comes
24811ea68f5SMing Lei 	 * online.
24911ea68f5SMing Lei 	 */
25011ea68f5SMing Lei 	if (irqd_affinity_is_managed(data) &&
25111ea68f5SMing Lei 	    housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
25211ea68f5SMing Lei 		const struct cpumask *hk_mask, *prog_mask;
25311ea68f5SMing Lei 
25411ea68f5SMing Lei 		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
25511ea68f5SMing Lei 		static struct cpumask tmp_mask;
25611ea68f5SMing Lei 
25711ea68f5SMing Lei 		hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
25811ea68f5SMing Lei 
25911ea68f5SMing Lei 		raw_spin_lock(&tmp_mask_lock);
26011ea68f5SMing Lei 		cpumask_and(&tmp_mask, mask, hk_mask);
26111ea68f5SMing Lei 		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
26211ea68f5SMing Lei 			prog_mask = mask;
26311ea68f5SMing Lei 		else
26411ea68f5SMing Lei 			prog_mask = &tmp_mask;
26511ea68f5SMing Lei 		ret = chip->irq_set_affinity(data, prog_mask, force);
26611ea68f5SMing Lei 		raw_spin_unlock(&tmp_mask_lock);
26711ea68f5SMing Lei 	} else {
26801f8fa4fSThomas Gleixner 		ret = chip->irq_set_affinity(data, mask, force);
26911ea68f5SMing Lei 	}
270818b0f3bSJiang Liu 	switch (ret) {
271818b0f3bSJiang Liu 	case IRQ_SET_MASK_OK:
2722cb62547SJiang Liu 	case IRQ_SET_MASK_OK_DONE:
2739df872faSJiang Liu 		cpumask_copy(desc->irq_common_data.affinity, mask);
274df561f66SGustavo A. R. Silva 		fallthrough;
275818b0f3bSJiang Liu 	case IRQ_SET_MASK_OK_NOCOPY:
27619e1d4e9SThomas Gleixner 		irq_validate_effective_affinity(data);
277818b0f3bSJiang Liu 		irq_set_thread_affinity(desc);
278818b0f3bSJiang Liu 		ret = 0;
279818b0f3bSJiang Liu 	}
280818b0f3bSJiang Liu 
281818b0f3bSJiang Liu 	return ret;
282818b0f3bSJiang Liu }
283818b0f3bSJiang Liu 
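/*
 * Worked example for the housekeeping clipping above (made-up CPU
 * numbers): a managed interrupt requests affinity {2,3,8,9} while CPUs
 * 8 and 9 are isolated from managed interrupts. tmp_mask becomes {2,3};
 * if CPU 2 or 3 is online, the interrupt is programmed to {2,3} only.
 * If both are offline, the full mask {2,3,8,9} is programmed unchanged
 * and the CPU hotplug code migrates the interrupt later.
 */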
28412f47073SThomas Gleixner #ifdef CONFIG_GENERIC_PENDING_IRQ
28512f47073SThomas Gleixner static inline int irq_set_affinity_pending(struct irq_data *data,
28612f47073SThomas Gleixner 					   const struct cpumask *dest)
28712f47073SThomas Gleixner {
28812f47073SThomas Gleixner 	struct irq_desc *desc = irq_data_to_desc(data);
28912f47073SThomas Gleixner 
29012f47073SThomas Gleixner 	irqd_set_move_pending(data);
29112f47073SThomas Gleixner 	irq_copy_pending(desc, dest);
29212f47073SThomas Gleixner 	return 0;
29312f47073SThomas Gleixner }
29412f47073SThomas Gleixner #else
29512f47073SThomas Gleixner static inline int irq_set_affinity_pending(struct irq_data *data,
29612f47073SThomas Gleixner 					   const struct cpumask *dest)
29712f47073SThomas Gleixner {
29812f47073SThomas Gleixner 	return -EBUSY;
29912f47073SThomas Gleixner }
30012f47073SThomas Gleixner #endif
30112f47073SThomas Gleixner 
30212f47073SThomas Gleixner static int irq_try_set_affinity(struct irq_data *data,
30312f47073SThomas Gleixner 				const struct cpumask *dest, bool force)
30412f47073SThomas Gleixner {
30512f47073SThomas Gleixner 	int ret = irq_do_set_affinity(data, dest, force);
30612f47073SThomas Gleixner 
30712f47073SThomas Gleixner 	/*
 30812f47073SThomas Gleixner 	 * If the underlying vector management is busy and the
 30912f47073SThomas Gleixner 	 * architecture supports the generic pending mechanism, utilize
 31012f47073SThomas Gleixner 	 * it to avoid returning an error to user space.
31112f47073SThomas Gleixner 	 */
31212f47073SThomas Gleixner 	if (ret == -EBUSY && !force)
31312f47073SThomas Gleixner 		ret = irq_set_affinity_pending(data, dest);
31412f47073SThomas Gleixner 	return ret;
31512f47073SThomas Gleixner }
31612f47073SThomas Gleixner 
317baedb87dSThomas Gleixner static bool irq_set_affinity_deactivated(struct irq_data *data,
318baedb87dSThomas Gleixner 					 const struct cpumask *mask, bool force)
319baedb87dSThomas Gleixner {
320baedb87dSThomas Gleixner 	struct irq_desc *desc = irq_data_to_desc(data);
321baedb87dSThomas Gleixner 
322baedb87dSThomas Gleixner 	/*
323f0c7bacaSThomas Gleixner 	 * Handle irq chips which can handle affinity only in activated
324f0c7bacaSThomas Gleixner 	 * state correctly
325f0c7bacaSThomas Gleixner 	 *
326baedb87dSThomas Gleixner 	 * If the interrupt is not yet activated, just store the affinity
327baedb87dSThomas Gleixner 	 * mask and do not call the chip driver at all. On activation the
328baedb87dSThomas Gleixner 	 * driver has to make sure anyway that the interrupt is in a
329a359f757SIngo Molnar 	 * usable state so startup works.
330baedb87dSThomas Gleixner 	 */
331f0c7bacaSThomas Gleixner 	if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
332f0c7bacaSThomas Gleixner 	    irqd_is_activated(data) || !irqd_affinity_on_activate(data))
333baedb87dSThomas Gleixner 		return false;
334baedb87dSThomas Gleixner 
335baedb87dSThomas Gleixner 	cpumask_copy(desc->irq_common_data.affinity, mask);
336baedb87dSThomas Gleixner 	irq_init_effective_affinity(data, mask);
337baedb87dSThomas Gleixner 	irqd_set(data, IRQD_AFFINITY_SET);
338baedb87dSThomas Gleixner 	return true;
339baedb87dSThomas Gleixner }
340baedb87dSThomas Gleixner 
34101f8fa4fSThomas Gleixner int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
34201f8fa4fSThomas Gleixner 			    bool force)
343c2d0c555SDavid Daney {
344c2d0c555SDavid Daney 	struct irq_chip *chip = irq_data_get_irq_chip(data);
345c2d0c555SDavid Daney 	struct irq_desc *desc = irq_data_to_desc(data);
346c2d0c555SDavid Daney 	int ret = 0;
347c2d0c555SDavid Daney 
348c2d0c555SDavid Daney 	if (!chip || !chip->irq_set_affinity)
349c2d0c555SDavid Daney 		return -EINVAL;
350c2d0c555SDavid Daney 
351baedb87dSThomas Gleixner 	if (irq_set_affinity_deactivated(data, mask, force))
352baedb87dSThomas Gleixner 		return 0;
353baedb87dSThomas Gleixner 
35412f47073SThomas Gleixner 	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
35512f47073SThomas Gleixner 		ret = irq_try_set_affinity(data, mask, force);
356c2d0c555SDavid Daney 	} else {
357c2d0c555SDavid Daney 		irqd_set_move_pending(data);
358c2d0c555SDavid Daney 		irq_copy_pending(desc, mask);
359c2d0c555SDavid Daney 	}
360c2d0c555SDavid Daney 
361c2d0c555SDavid Daney 	if (desc->affinity_notify) {
362c2d0c555SDavid Daney 		kref_get(&desc->affinity_notify->kref);
363df81dfcfSEdward Cree 		if (!schedule_work(&desc->affinity_notify->work)) {
364df81dfcfSEdward Cree 			/* Work was already scheduled, drop our extra ref */
365df81dfcfSEdward Cree 			kref_put(&desc->affinity_notify->kref,
366df81dfcfSEdward Cree 				 desc->affinity_notify->release);
367df81dfcfSEdward Cree 		}
368c2d0c555SDavid Daney 	}
369c2d0c555SDavid Daney 	irqd_set(data, IRQD_AFFINITY_SET);
370c2d0c555SDavid Daney 
371c2d0c555SDavid Daney 	return ret;
372c2d0c555SDavid Daney }
373c2d0c555SDavid Daney 
3741d3aec89SJohn Garry /**
3751d3aec89SJohn Garry  * irq_update_affinity_desc - Update affinity management for an interrupt
3761d3aec89SJohn Garry  * @irq:	The interrupt number to update
3771d3aec89SJohn Garry  * @affinity:	Pointer to the affinity descriptor
3781d3aec89SJohn Garry  *
3791d3aec89SJohn Garry  * This interface can be used to configure the affinity management of
3801d3aec89SJohn Garry  * interrupts which have been allocated already.
3811d3aec89SJohn Garry  *
3821d3aec89SJohn Garry  * There are certain limitations on when it may be used - attempts to use it
3831d3aec89SJohn Garry  * for when the kernel is configured for generic IRQ reservation mode (in
 3841d3aec89SJohn Garry  * when the kernel is configured for generic IRQ reservation mode (in
3851d3aec89SJohn Garry  * managed/non-managed interrupt accounting. In addition, attempts to use it on
3861d3aec89SJohn Garry  * an interrupt which is already started or which has already been configured
3871d3aec89SJohn Garry  * as managed will also fail, as these mean invalid init state or double init.
3881d3aec89SJohn Garry  */
3891d3aec89SJohn Garry int irq_update_affinity_desc(unsigned int irq,
3901d3aec89SJohn Garry 			     struct irq_affinity_desc *affinity)
3911d3aec89SJohn Garry {
3921d3aec89SJohn Garry 	struct irq_desc *desc;
3931d3aec89SJohn Garry 	unsigned long flags;
3941d3aec89SJohn Garry 	bool activated;
3951d3aec89SJohn Garry 	int ret = 0;
3961d3aec89SJohn Garry 
3971d3aec89SJohn Garry 	/*
3981d3aec89SJohn Garry 	 * Supporting this with the reservation scheme used by x86 needs
3991d3aec89SJohn Garry 	 * some more thought. Fail it for now.
4001d3aec89SJohn Garry 	 */
4011d3aec89SJohn Garry 	if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
4021d3aec89SJohn Garry 		return -EOPNOTSUPP;
4031d3aec89SJohn Garry 
4041d3aec89SJohn Garry 	desc = irq_get_desc_buslock(irq, &flags, 0);
4051d3aec89SJohn Garry 	if (!desc)
4061d3aec89SJohn Garry 		return -EINVAL;
4071d3aec89SJohn Garry 
4081d3aec89SJohn Garry 	/* Requires the interrupt to be shut down */
4091d3aec89SJohn Garry 	if (irqd_is_started(&desc->irq_data)) {
4101d3aec89SJohn Garry 		ret = -EBUSY;
4111d3aec89SJohn Garry 		goto out_unlock;
4121d3aec89SJohn Garry 	}
4131d3aec89SJohn Garry 
4141d3aec89SJohn Garry 	/* Interrupts which are already managed cannot be modified */
4151d3aec89SJohn Garry 	if (irqd_affinity_is_managed(&desc->irq_data)) {
4161d3aec89SJohn Garry 		ret = -EBUSY;
4171d3aec89SJohn Garry 		goto out_unlock;
4181d3aec89SJohn Garry 	}
4191d3aec89SJohn Garry 
4201d3aec89SJohn Garry 	/*
4211d3aec89SJohn Garry 	 * Deactivate the interrupt. That's required to undo
4221d3aec89SJohn Garry 	 * anything an earlier activation has established.
4231d3aec89SJohn Garry 	 */
4241d3aec89SJohn Garry 	activated = irqd_is_activated(&desc->irq_data);
4251d3aec89SJohn Garry 	if (activated)
4261d3aec89SJohn Garry 		irq_domain_deactivate_irq(&desc->irq_data);
4271d3aec89SJohn Garry 
4281d3aec89SJohn Garry 	if (affinity->is_managed) {
4291d3aec89SJohn Garry 		irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
4301d3aec89SJohn Garry 		irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
4311d3aec89SJohn Garry 	}
4321d3aec89SJohn Garry 
4331d3aec89SJohn Garry 	cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
4341d3aec89SJohn Garry 
4351d3aec89SJohn Garry 	/* Restore the activation state */
4361d3aec89SJohn Garry 	if (activated)
4371d3aec89SJohn Garry 		irq_domain_activate_irq(&desc->irq_data, false);
4381d3aec89SJohn Garry 
4391d3aec89SJohn Garry out_unlock:
4401d3aec89SJohn Garry 	irq_put_desc_busunlock(desc, flags);
4411d3aec89SJohn Garry 	return ret;
4421d3aec89SJohn Garry }
4431d3aec89SJohn Garry 
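/*
 * Usage sketch (irq number and target CPU are made up), converting an
 * already allocated but not yet started interrupt into a managed one:
 *
 *	struct irq_affinity_desc ad = { .is_managed = 1 };
 *	int ret;
 *
 *	cpumask_copy(&ad.mask, cpumask_of(2));
 *	ret = irq_update_affinity_desc(irq, &ad);
 *
 * As enforced above, this fails with -EBUSY once the interrupt has been
 * started or is already managed.
 */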
4444d80d6caSThomas Gleixner static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
4454d80d6caSThomas Gleixner 			      bool force)
446771ee3b0SThomas Gleixner {
44708678b08SYinghai Lu 	struct irq_desc *desc = irq_to_desc(irq);
448f6d87f4bSThomas Gleixner 	unsigned long flags;
449c2d0c555SDavid Daney 	int ret;
450771ee3b0SThomas Gleixner 
451c2d0c555SDavid Daney 	if (!desc)
452771ee3b0SThomas Gleixner 		return -EINVAL;
453771ee3b0SThomas Gleixner 
454239007b8SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
45501f8fa4fSThomas Gleixner 	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
456239007b8SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
4571fa46f1fSThomas Gleixner 	return ret;
458771ee3b0SThomas Gleixner }
459771ee3b0SThomas Gleixner 
4604d80d6caSThomas Gleixner /**
4614d80d6caSThomas Gleixner  * irq_set_affinity - Set the irq affinity of a given irq
4624d80d6caSThomas Gleixner  * @irq:	Interrupt to set affinity
4634d80d6caSThomas Gleixner  * @cpumask:	cpumask
4644d80d6caSThomas Gleixner  *
4654d80d6caSThomas Gleixner  * Fails if cpumask does not contain an online CPU
4664d80d6caSThomas Gleixner  */
4674d80d6caSThomas Gleixner int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
4684d80d6caSThomas Gleixner {
4694d80d6caSThomas Gleixner 	return __irq_set_affinity(irq, cpumask, false);
4704d80d6caSThomas Gleixner }
4714d80d6caSThomas Gleixner EXPORT_SYMBOL_GPL(irq_set_affinity);
4724d80d6caSThomas Gleixner 
4734d80d6caSThomas Gleixner /**
4744d80d6caSThomas Gleixner  * irq_force_affinity - Force the irq affinity of a given irq
4754d80d6caSThomas Gleixner  * @irq:	Interrupt to set affinity
4764d80d6caSThomas Gleixner  * @cpumask:	cpumask
4774d80d6caSThomas Gleixner  *
4784d80d6caSThomas Gleixner  * Same as irq_set_affinity, but without checking the mask against
4794d80d6caSThomas Gleixner  * online cpus.
4804d80d6caSThomas Gleixner  *
4814d80d6caSThomas Gleixner  * Solely for low level cpu hotplug code, where we need to make per
4824d80d6caSThomas Gleixner  * cpu interrupts affine before the cpu becomes online.
4834d80d6caSThomas Gleixner  */
4844d80d6caSThomas Gleixner int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
4854d80d6caSThomas Gleixner {
4864d80d6caSThomas Gleixner 	return __irq_set_affinity(irq, cpumask, true);
4874d80d6caSThomas Gleixner }
4884d80d6caSThomas Gleixner EXPORT_SYMBOL_GPL(irq_force_affinity);
4894d80d6caSThomas Gleixner 
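/*
 * Usage sketch (irq number and CPU are made up):
 *
 *	ret = irq_set_affinity(irq, cpumask_of(1));
 *	if (ret)
 *		pr_warn("IRQ %u could not be moved to CPU1: %d\n", irq, ret);
 *
 * irq_force_affinity() is the same operation with force == true and is
 * meant solely for the low level CPU hotplug bring-up path noted above.
 */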
490e7a297b0SPeter P Waskiewicz Jr int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
491e7a297b0SPeter P Waskiewicz Jr {
492e7a297b0SPeter P Waskiewicz Jr 	unsigned long flags;
49331d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
494e7a297b0SPeter P Waskiewicz Jr 
495e7a297b0SPeter P Waskiewicz Jr 	if (!desc)
496e7a297b0SPeter P Waskiewicz Jr 		return -EINVAL;
497e7a297b0SPeter P Waskiewicz Jr 	desc->affinity_hint = m;
49802725e74SThomas Gleixner 	irq_put_desc_unlock(desc, flags);
499e2e64a93SJesse Brandeburg 	/* set the initial affinity to prevent every interrupt being on CPU0 */
5004fe7ffb7SJesse Brandeburg 	if (m)
501e2e64a93SJesse Brandeburg 		__irq_set_affinity(irq, m, false);
502e7a297b0SPeter P Waskiewicz Jr 	return 0;
503e7a297b0SPeter P Waskiewicz Jr }
504e7a297b0SPeter P Waskiewicz Jr EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
505e7a297b0SPeter P Waskiewicz Jr 
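/*
 * Usage sketch (hypothetical per-queue structure "q"):
 *
 *	// probe path: set the initial affinity and publish the hint
 *	irq_set_affinity_hint(q->irq, cpumask_of(q->cpu));
 *
 *	// remove path: the hint is typically cleared again before free_irq()
 *	irq_set_affinity_hint(q->irq, NULL);
 *
 * The hint shows up in /proc/irq/<n>/affinity_hint for user space
 * balancers such as irqbalance.
 */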
506cd7eab44SBen Hutchings static void irq_affinity_notify(struct work_struct *work)
507cd7eab44SBen Hutchings {
508cd7eab44SBen Hutchings 	struct irq_affinity_notify *notify =
509cd7eab44SBen Hutchings 		container_of(work, struct irq_affinity_notify, work);
510cd7eab44SBen Hutchings 	struct irq_desc *desc = irq_to_desc(notify->irq);
511cd7eab44SBen Hutchings 	cpumask_var_t cpumask;
512cd7eab44SBen Hutchings 	unsigned long flags;
513cd7eab44SBen Hutchings 
5141fa46f1fSThomas Gleixner 	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
515cd7eab44SBen Hutchings 		goto out;
516cd7eab44SBen Hutchings 
517cd7eab44SBen Hutchings 	raw_spin_lock_irqsave(&desc->lock, flags);
5180ef5ca1eSThomas Gleixner 	if (irq_move_pending(&desc->irq_data))
5191fa46f1fSThomas Gleixner 		irq_get_pending(cpumask, desc);
520cd7eab44SBen Hutchings 	else
5219df872faSJiang Liu 		cpumask_copy(cpumask, desc->irq_common_data.affinity);
522cd7eab44SBen Hutchings 	raw_spin_unlock_irqrestore(&desc->lock, flags);
523cd7eab44SBen Hutchings 
524cd7eab44SBen Hutchings 	notify->notify(notify, cpumask);
525cd7eab44SBen Hutchings 
526cd7eab44SBen Hutchings 	free_cpumask_var(cpumask);
527cd7eab44SBen Hutchings out:
528cd7eab44SBen Hutchings 	kref_put(&notify->kref, notify->release);
529cd7eab44SBen Hutchings }
530cd7eab44SBen Hutchings 
531cd7eab44SBen Hutchings /**
532cd7eab44SBen Hutchings  *	irq_set_affinity_notifier - control notification of IRQ affinity changes
533cd7eab44SBen Hutchings  *	@irq:		Interrupt for which to enable/disable notification
534cd7eab44SBen Hutchings  *	@notify:	Context for notification, or %NULL to disable
535cd7eab44SBen Hutchings  *			notification.  Function pointers must be initialised;
536cd7eab44SBen Hutchings  *			the other fields will be initialised by this function.
537cd7eab44SBen Hutchings  *
538cd7eab44SBen Hutchings  *	Must be called in process context.  Notification may only be enabled
539cd7eab44SBen Hutchings  *	after the IRQ is allocated and must be disabled before the IRQ is
540cd7eab44SBen Hutchings  *	freed using free_irq().
541cd7eab44SBen Hutchings  */
542cd7eab44SBen Hutchings int
543cd7eab44SBen Hutchings irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
544cd7eab44SBen Hutchings {
545cd7eab44SBen Hutchings 	struct irq_desc *desc = irq_to_desc(irq);
546cd7eab44SBen Hutchings 	struct irq_affinity_notify *old_notify;
547cd7eab44SBen Hutchings 	unsigned long flags;
548cd7eab44SBen Hutchings 
549cd7eab44SBen Hutchings 	/* The release function is promised process context */
550cd7eab44SBen Hutchings 	might_sleep();
551cd7eab44SBen Hutchings 
552b525903cSJulien Thierry 	if (!desc || desc->istate & IRQS_NMI)
553cd7eab44SBen Hutchings 		return -EINVAL;
554cd7eab44SBen Hutchings 
555cd7eab44SBen Hutchings 	/* Complete initialisation of *notify */
556cd7eab44SBen Hutchings 	if (notify) {
557cd7eab44SBen Hutchings 		notify->irq = irq;
558cd7eab44SBen Hutchings 		kref_init(&notify->kref);
559cd7eab44SBen Hutchings 		INIT_WORK(&notify->work, irq_affinity_notify);
560cd7eab44SBen Hutchings 	}
561cd7eab44SBen Hutchings 
562cd7eab44SBen Hutchings 	raw_spin_lock_irqsave(&desc->lock, flags);
563cd7eab44SBen Hutchings 	old_notify = desc->affinity_notify;
564cd7eab44SBen Hutchings 	desc->affinity_notify = notify;
565cd7eab44SBen Hutchings 	raw_spin_unlock_irqrestore(&desc->lock, flags);
566cd7eab44SBen Hutchings 
56759c39840SPrasad Sodagudi 	if (old_notify) {
568df81dfcfSEdward Cree 		if (cancel_work_sync(&old_notify->work)) {
569df81dfcfSEdward Cree 			/* Pending work had a ref, put that one too */
570df81dfcfSEdward Cree 			kref_put(&old_notify->kref, old_notify->release);
571df81dfcfSEdward Cree 		}
572cd7eab44SBen Hutchings 		kref_put(&old_notify->kref, old_notify->release);
57359c39840SPrasad Sodagudi 	}
574cd7eab44SBen Hutchings 
575cd7eab44SBen Hutchings 	return 0;
576cd7eab44SBen Hutchings }
577cd7eab44SBen Hutchings EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
578cd7eab44SBen Hutchings 
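/*
 * Usage sketch (hypothetical driver structure "foo" embedding the
 * notify block):
 *
 *	static void foo_affinity_changed(struct irq_affinity_notify *notify,
 *					 const cpumask_t *mask)
 *	{
 *		struct foo *foo = container_of(notify, struct foo, notify);
 *
 *		// repoint per-CPU resources at the new mask
 *	}
 *
 *	static void foo_affinity_release(struct kref *ref)
 *	{
 *		// last reference to the notify block dropped
 *	}
 *
 *	foo->notify.notify = foo_affinity_changed;
 *	foo->notify.release = foo_affinity_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->notify);
 *
 * Before free_irq() the notifier has to be torn down again with
 * irq_set_affinity_notifier(foo->irq, NULL), as required above.
 */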
57918404756SMax Krasnyansky #ifndef CONFIG_AUTO_IRQ_AFFINITY
58018404756SMax Krasnyansky /*
58118404756SMax Krasnyansky  * Generic version of the affinity autoselector.
58218404756SMax Krasnyansky  */
58343564bd9SThomas Gleixner int irq_setup_affinity(struct irq_desc *desc)
58418404756SMax Krasnyansky {
585569bda8dSThomas Gleixner 	struct cpumask *set = irq_default_affinity;
586cba4235eSThomas Gleixner 	int ret, node = irq_desc_get_node(desc);
587cba4235eSThomas Gleixner 	static DEFINE_RAW_SPINLOCK(mask_lock);
588cba4235eSThomas Gleixner 	static struct cpumask mask;
589569bda8dSThomas Gleixner 
590b008207cSThomas Gleixner 	/* Excludes PER_CPU and NO_BALANCE interrupts */
591e019c249SJiang Liu 	if (!__irq_can_set_affinity(desc))
59218404756SMax Krasnyansky 		return 0;
59318404756SMax Krasnyansky 
594cba4235eSThomas Gleixner 	raw_spin_lock(&mask_lock);
595f6d87f4bSThomas Gleixner 	/*
5969332ef9dSMasahiro Yamada 	 * Preserve the managed affinity setting and a userspace affinity
59706ee6d57SThomas Gleixner 	 * setup, but make sure that one of the targets is online.
598f6d87f4bSThomas Gleixner 	 */
59906ee6d57SThomas Gleixner 	if (irqd_affinity_is_managed(&desc->irq_data) ||
60006ee6d57SThomas Gleixner 	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
6019df872faSJiang Liu 		if (cpumask_intersects(desc->irq_common_data.affinity,
602569bda8dSThomas Gleixner 				       cpu_online_mask))
6039df872faSJiang Liu 			set = desc->irq_common_data.affinity;
6040c6f8a8bSThomas Gleixner 		else
6052bdd1055SThomas Gleixner 			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
6062bdd1055SThomas Gleixner 	}
60718404756SMax Krasnyansky 
608cba4235eSThomas Gleixner 	cpumask_and(&mask, cpu_online_mask, set);
609bddda606SSrinivas Ramana 	if (cpumask_empty(&mask))
610bddda606SSrinivas Ramana 		cpumask_copy(&mask, cpu_online_mask);
611bddda606SSrinivas Ramana 
612241fc640SPrarit Bhargava 	if (node != NUMA_NO_NODE) {
613241fc640SPrarit Bhargava 		const struct cpumask *nodemask = cpumask_of_node(node);
614241fc640SPrarit Bhargava 
615241fc640SPrarit Bhargava 		/* make sure at least one of the cpus in nodemask is online */
616cba4235eSThomas Gleixner 		if (cpumask_intersects(&mask, nodemask))
617cba4235eSThomas Gleixner 			cpumask_and(&mask, &mask, nodemask);
618241fc640SPrarit Bhargava 	}
619cba4235eSThomas Gleixner 	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
620cba4235eSThomas Gleixner 	raw_spin_unlock(&mask_lock);
621cba4235eSThomas Gleixner 	return ret;
62218404756SMax Krasnyansky }
623f6d87f4bSThomas Gleixner #else
624a8a98eacSJiang Liu /* Wrapper for ALPHA specific affinity selector magic */
625cba4235eSThomas Gleixner int irq_setup_affinity(struct irq_desc *desc)
626f6d87f4bSThomas Gleixner {
627cba4235eSThomas Gleixner 	return irq_select_affinity(irq_desc_get_irq(desc));
628f6d87f4bSThomas Gleixner }
629cba6437aSThomas Gleixner #endif /* CONFIG_AUTO_IRQ_AFFINITY */
630cba6437aSThomas Gleixner #endif /* CONFIG_SMP */
63118404756SMax Krasnyansky 
6321da177e4SLinus Torvalds 
633fcf1ae2fSFeng Wu /**
634fcf1ae2fSFeng Wu  *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
635fcf1ae2fSFeng Wu  *	@irq: interrupt number to set affinity
636250a53d6SChristoffer Dall  *	@vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
637250a53d6SChristoffer Dall  *	            specific data for percpu_devid interrupts
638fcf1ae2fSFeng Wu  *
639fcf1ae2fSFeng Wu  *	This function uses the vCPU specific data to set the vCPU
640fcf1ae2fSFeng Wu  *	affinity for an irq. The vCPU specific data is passed from
641fcf1ae2fSFeng Wu  *	outside, such as KVM. One example code path is as below:
642fcf1ae2fSFeng Wu  *	KVM -> IOMMU -> irq_set_vcpu_affinity().
643fcf1ae2fSFeng Wu  */
644fcf1ae2fSFeng Wu int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
645fcf1ae2fSFeng Wu {
646fcf1ae2fSFeng Wu 	unsigned long flags;
647fcf1ae2fSFeng Wu 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
648fcf1ae2fSFeng Wu 	struct irq_data *data;
649fcf1ae2fSFeng Wu 	struct irq_chip *chip;
650fcf1ae2fSFeng Wu 	int ret = -ENOSYS;
651fcf1ae2fSFeng Wu 
652fcf1ae2fSFeng Wu 	if (!desc)
653fcf1ae2fSFeng Wu 		return -EINVAL;
654fcf1ae2fSFeng Wu 
655fcf1ae2fSFeng Wu 	data = irq_desc_get_irq_data(desc);
6560abce64aSMarc Zyngier 	do {
657fcf1ae2fSFeng Wu 		chip = irq_data_get_irq_chip(data);
658fcf1ae2fSFeng Wu 		if (chip && chip->irq_set_vcpu_affinity)
6590abce64aSMarc Zyngier 			break;
6600abce64aSMarc Zyngier #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
6610abce64aSMarc Zyngier 		data = data->parent_data;
6620abce64aSMarc Zyngier #else
6630abce64aSMarc Zyngier 		data = NULL;
6640abce64aSMarc Zyngier #endif
6650abce64aSMarc Zyngier 	} while (data);
6660abce64aSMarc Zyngier 
6670abce64aSMarc Zyngier 	if (data)
668fcf1ae2fSFeng Wu 		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
669fcf1ae2fSFeng Wu 	irq_put_desc_unlock(desc, flags);
670fcf1ae2fSFeng Wu 
671fcf1ae2fSFeng Wu 	return ret;
672fcf1ae2fSFeng Wu }
673fcf1ae2fSFeng Wu EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
674fcf1ae2fSFeng Wu 
67579ff1cdaSJiang Liu void __disable_irq(struct irq_desc *desc)
6760a0c5168SRafael J. Wysocki {
6773aae994fSThomas Gleixner 	if (!desc->depth++)
67887923470SThomas Gleixner 		irq_disable(desc);
6790a0c5168SRafael J. Wysocki }
6800a0c5168SRafael J. Wysocki 
68102725e74SThomas Gleixner static int __disable_irq_nosync(unsigned int irq)
68202725e74SThomas Gleixner {
68302725e74SThomas Gleixner 	unsigned long flags;
68431d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
68502725e74SThomas Gleixner 
68602725e74SThomas Gleixner 	if (!desc)
68702725e74SThomas Gleixner 		return -EINVAL;
68879ff1cdaSJiang Liu 	__disable_irq(desc);
68902725e74SThomas Gleixner 	irq_put_desc_busunlock(desc, flags);
69002725e74SThomas Gleixner 	return 0;
69102725e74SThomas Gleixner }
69202725e74SThomas Gleixner 
6931da177e4SLinus Torvalds /**
6941da177e4SLinus Torvalds  *	disable_irq_nosync - disable an irq without waiting
6951da177e4SLinus Torvalds  *	@irq: Interrupt to disable
6961da177e4SLinus Torvalds  *
6971da177e4SLinus Torvalds  *	Disable the selected interrupt line.  Disables and Enables are
6981da177e4SLinus Torvalds  *	nested.
6991da177e4SLinus Torvalds  *	Unlike disable_irq(), this function does not ensure existing
7001da177e4SLinus Torvalds  *	instances of the IRQ handler have completed before returning.
7011da177e4SLinus Torvalds  *
7021da177e4SLinus Torvalds  *	This function may be called from IRQ context.
7031da177e4SLinus Torvalds  */
7041da177e4SLinus Torvalds void disable_irq_nosync(unsigned int irq)
7051da177e4SLinus Torvalds {
70602725e74SThomas Gleixner 	__disable_irq_nosync(irq);
7071da177e4SLinus Torvalds }
7081da177e4SLinus Torvalds EXPORT_SYMBOL(disable_irq_nosync);
7091da177e4SLinus Torvalds 
7101da177e4SLinus Torvalds /**
7111da177e4SLinus Torvalds  *	disable_irq - disable an irq and wait for completion
7121da177e4SLinus Torvalds  *	@irq: Interrupt to disable
7131da177e4SLinus Torvalds  *
7141da177e4SLinus Torvalds  *	Disable the selected interrupt line.  Enables and Disables are
7151da177e4SLinus Torvalds  *	nested.
7161da177e4SLinus Torvalds  *	This function waits for any pending IRQ handlers for this interrupt
7171da177e4SLinus Torvalds  *	to complete before returning. If you use this function while
 7181da177e4SLinus Torvalds  *	holding a resource the IRQ handler may need, you will deadlock.
7191da177e4SLinus Torvalds  *
7201da177e4SLinus Torvalds  *	This function may be called - with care - from IRQ context.
7211da177e4SLinus Torvalds  */
7221da177e4SLinus Torvalds void disable_irq(unsigned int irq)
7231da177e4SLinus Torvalds {
72402725e74SThomas Gleixner 	if (!__disable_irq_nosync(irq))
7251da177e4SLinus Torvalds 		synchronize_irq(irq);
7261da177e4SLinus Torvalds }
7271da177e4SLinus Torvalds EXPORT_SYMBOL(disable_irq);
7281da177e4SLinus Torvalds 
72902cea395SPeter Zijlstra /**
73002cea395SPeter Zijlstra  *	disable_hardirq - disables an irq and waits for hardirq completion
73102cea395SPeter Zijlstra  *	@irq: Interrupt to disable
73202cea395SPeter Zijlstra  *
73302cea395SPeter Zijlstra  *	Disable the selected interrupt line.  Enables and Disables are
73402cea395SPeter Zijlstra  *	nested.
73502cea395SPeter Zijlstra  *	This function waits for any pending hard IRQ handlers for this
73602cea395SPeter Zijlstra  *	interrupt to complete before returning. If you use this function while
 73702cea395SPeter Zijlstra  *	holding a resource the hard IRQ handler may need, you will deadlock.
73802cea395SPeter Zijlstra  *
73902cea395SPeter Zijlstra  *	When used to optimistically disable an interrupt from atomic context
74002cea395SPeter Zijlstra  *	the return value must be checked.
74102cea395SPeter Zijlstra  *
74202cea395SPeter Zijlstra  *	Returns: false if a threaded handler is active.
74302cea395SPeter Zijlstra  *
74402cea395SPeter Zijlstra  *	This function may be called - with care - from IRQ context.
74502cea395SPeter Zijlstra  */
74602cea395SPeter Zijlstra bool disable_hardirq(unsigned int irq)
74702cea395SPeter Zijlstra {
74802cea395SPeter Zijlstra 	if (!__disable_irq_nosync(irq))
74902cea395SPeter Zijlstra 		return synchronize_hardirq(irq);
75002cea395SPeter Zijlstra 
75102cea395SPeter Zijlstra 	return false;
75202cea395SPeter Zijlstra }
75302cea395SPeter Zijlstra EXPORT_SYMBOL_GPL(disable_hardirq);
75402cea395SPeter Zijlstra 
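/*
 * Usage sketch (hypothetical "foo"), optimistically quiescing from
 * atomic context:
 *
 *	if (disable_hardirq(foo->irq))
 *		foo_teardown(foo);			// no handler is running
 *	else
 *		schedule_work(&foo->teardown_work);	// thread still active
 *
 * In both branches the interrupt line stays disabled until a matching
 * enable_irq().
 */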
755b525903cSJulien Thierry /**
756b525903cSJulien Thierry  *	disable_nmi_nosync - disable an nmi without waiting
757b525903cSJulien Thierry  *	@irq: Interrupt to disable
758b525903cSJulien Thierry  *
759b525903cSJulien Thierry  *	Disable the selected interrupt line. Disables and enables are
760b525903cSJulien Thierry  *	nested.
761b525903cSJulien Thierry  *	The interrupt to disable must have been requested through request_nmi.
762b525903cSJulien Thierry  *	Unlike disable_nmi(), this function does not ensure existing
763b525903cSJulien Thierry  *	instances of the IRQ handler have completed before returning.
764b525903cSJulien Thierry  */
765b525903cSJulien Thierry void disable_nmi_nosync(unsigned int irq)
766b525903cSJulien Thierry {
767b525903cSJulien Thierry 	disable_irq_nosync(irq);
768b525903cSJulien Thierry }
769b525903cSJulien Thierry 
77079ff1cdaSJiang Liu void __enable_irq(struct irq_desc *desc)
7711adb0850SThomas Gleixner {
7721adb0850SThomas Gleixner 	switch (desc->depth) {
7731adb0850SThomas Gleixner 	case 0:
7740a0c5168SRafael J. Wysocki  err_out:
77579ff1cdaSJiang Liu 		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
77679ff1cdaSJiang Liu 		     irq_desc_get_irq(desc));
7771adb0850SThomas Gleixner 		break;
7781adb0850SThomas Gleixner 	case 1: {
779c531e836SThomas Gleixner 		if (desc->istate & IRQS_SUSPENDED)
7800a0c5168SRafael J. Wysocki 			goto err_out;
7811adb0850SThomas Gleixner 		/* Prevent probing on this irq: */
7821ccb4e61SThomas Gleixner 		irq_settings_set_noprobe(desc);
783201d7f47SThomas Gleixner 		/*
784201d7f47SThomas Gleixner 		 * Call irq_startup() not irq_enable() here because the
785201d7f47SThomas Gleixner 		 * interrupt might be marked NOAUTOEN. So irq_startup()
786201d7f47SThomas Gleixner 		 * needs to be invoked when it gets enabled the first
787201d7f47SThomas Gleixner 		 * time. If it was already started up, then irq_startup()
788201d7f47SThomas Gleixner 		 * will invoke irq_enable() under the hood.
789201d7f47SThomas Gleixner 		 */
790c942cee4SThomas Gleixner 		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
791201d7f47SThomas Gleixner 		break;
7921adb0850SThomas Gleixner 	}
7931adb0850SThomas Gleixner 	default:
7941adb0850SThomas Gleixner 		desc->depth--;
7951adb0850SThomas Gleixner 	}
7961adb0850SThomas Gleixner }
7971adb0850SThomas Gleixner 
7981da177e4SLinus Torvalds /**
7991da177e4SLinus Torvalds  *	enable_irq - enable handling of an irq
8001da177e4SLinus Torvalds  *	@irq: Interrupt to enable
8011da177e4SLinus Torvalds  *
8021da177e4SLinus Torvalds  *	Undoes the effect of one call to disable_irq().  If this
8031da177e4SLinus Torvalds  *	matches the last disable, processing of interrupts on this
8041da177e4SLinus Torvalds  *	IRQ line is re-enabled.
8051da177e4SLinus Torvalds  *
80670aedd24SThomas Gleixner  *	This function may be called from IRQ context only when
 8076b8ff312SThomas Gleixner  *	desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock are NULL!
8081da177e4SLinus Torvalds  */
8091da177e4SLinus Torvalds void enable_irq(unsigned int irq)
8101da177e4SLinus Torvalds {
8111da177e4SLinus Torvalds 	unsigned long flags;
81231d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
8131da177e4SLinus Torvalds 
8147d94f7caSYinghai Lu 	if (!desc)
815c2b5a251SMatthew Wilcox 		return;
81650f7c032SThomas Gleixner 	if (WARN(!desc->irq_data.chip,
8172656c366SThomas Gleixner 		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
81802725e74SThomas Gleixner 		goto out;
8192656c366SThomas Gleixner 
82079ff1cdaSJiang Liu 	__enable_irq(desc);
82102725e74SThomas Gleixner out:
82202725e74SThomas Gleixner 	irq_put_desc_busunlock(desc, flags);
8231da177e4SLinus Torvalds }
8241da177e4SLinus Torvalds EXPORT_SYMBOL(enable_irq);
8251da177e4SLinus Torvalds 
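/*
 * The nesting rule illustrated (irq number is made up):
 *
 *	disable_irq(irq);	// depth 0 -> 1, line gets masked
 *	disable_irq(irq);	// depth 1 -> 2
 *	enable_irq(irq);	// depth 2 -> 1, line stays masked
 *	enable_irq(irq);	// depth 1 -> 0, line is unmasked again
 *
 * A further enable_irq() at depth 0 triggers the "Unbalanced enable"
 * warning in __enable_irq() above.
 */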
826b525903cSJulien Thierry /**
827b525903cSJulien Thierry  *	enable_nmi - enable handling of an nmi
828b525903cSJulien Thierry  *	@irq: Interrupt to enable
829b525903cSJulien Thierry  *
830b525903cSJulien Thierry  *	The interrupt to enable must have been requested through request_nmi.
831b525903cSJulien Thierry  *	Undoes the effect of one call to disable_nmi(). If this
832b525903cSJulien Thierry  *	matches the last disable, processing of interrupts on this
833b525903cSJulien Thierry  *	IRQ line is re-enabled.
834b525903cSJulien Thierry  */
835b525903cSJulien Thierry void enable_nmi(unsigned int irq)
836b525903cSJulien Thierry {
837b525903cSJulien Thierry 	enable_irq(irq);
838b525903cSJulien Thierry }
839b525903cSJulien Thierry 
8400c5d1eb7SDavid Brownell static int set_irq_wake_real(unsigned int irq, unsigned int on)
8412db87321SUwe Kleine-König {
84208678b08SYinghai Lu 	struct irq_desc *desc = irq_to_desc(irq);
8432db87321SUwe Kleine-König 	int ret = -ENXIO;
8442db87321SUwe Kleine-König 
84560f96b41SSantosh Shilimkar 	if (irq_desc_get_chip(desc)->flags &  IRQCHIP_SKIP_SET_WAKE)
84660f96b41SSantosh Shilimkar 		return 0;
84760f96b41SSantosh Shilimkar 
8482f7e99bbSThomas Gleixner 	if (desc->irq_data.chip->irq_set_wake)
8492f7e99bbSThomas Gleixner 		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
8502db87321SUwe Kleine-König 
8512db87321SUwe Kleine-König 	return ret;
8522db87321SUwe Kleine-König }
8532db87321SUwe Kleine-König 
854ba9a2331SThomas Gleixner /**
855a0cd9ca2SThomas Gleixner  *	irq_set_irq_wake - control irq power management wakeup
856ba9a2331SThomas Gleixner  *	@irq:	interrupt to control
857ba9a2331SThomas Gleixner  *	@on:	enable/disable power management wakeup
858ba9a2331SThomas Gleixner  *
85915a647ebSDavid Brownell  *	Enable/disable power management wakeup mode, which is
86015a647ebSDavid Brownell  *	disabled by default.  Enables and disables must match,
86115a647ebSDavid Brownell  *	just as they match for non-wakeup mode support.
86215a647ebSDavid Brownell  *
86315a647ebSDavid Brownell  *	Wakeup mode lets this IRQ wake the system from sleep
86415a647ebSDavid Brownell  *	states like "suspend to RAM".
865f9f21ceaSStephen Boyd  *
866f9f21ceaSStephen Boyd  *	Note: irq enable/disable state is completely orthogonal
867f9f21ceaSStephen Boyd  *	to the enable/disable state of irq wake. An irq can be
868f9f21ceaSStephen Boyd  *	disabled with disable_irq() and still wake the system as
869f9f21ceaSStephen Boyd  *	long as the irq has wake enabled. If this does not hold,
870f9f21ceaSStephen Boyd  *	then the underlying irq chip and the related driver need
871f9f21ceaSStephen Boyd  *	to be investigated.
872ba9a2331SThomas Gleixner  */
873a0cd9ca2SThomas Gleixner int irq_set_irq_wake(unsigned int irq, unsigned int on)
874ba9a2331SThomas Gleixner {
875ba9a2331SThomas Gleixner 	unsigned long flags;
87631d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
8772db87321SUwe Kleine-König 	int ret = 0;
878ba9a2331SThomas Gleixner 
87913863a66SJesper Juhl 	if (!desc)
88013863a66SJesper Juhl 		return -EINVAL;
88113863a66SJesper Juhl 
882b525903cSJulien Thierry 	/* Don't use NMIs as wake up interrupts please */
883b525903cSJulien Thierry 	if (desc->istate & IRQS_NMI) {
884b525903cSJulien Thierry 		ret = -EINVAL;
885b525903cSJulien Thierry 		goto out_unlock;
886b525903cSJulien Thierry 	}
887b525903cSJulien Thierry 
88815a647ebSDavid Brownell 	/* wakeup-capable irqs can be shared between drivers that
88915a647ebSDavid Brownell 	 * don't need to have the same sleep mode behaviors.
89015a647ebSDavid Brownell 	 */
89115a647ebSDavid Brownell 	if (on) {
8922db87321SUwe Kleine-König 		if (desc->wake_depth++ == 0) {
8932db87321SUwe Kleine-König 			ret = set_irq_wake_real(irq, on);
8942db87321SUwe Kleine-König 			if (ret)
8952db87321SUwe Kleine-König 				desc->wake_depth = 0;
89615a647ebSDavid Brownell 			else
8977f94226fSThomas Gleixner 				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
8982db87321SUwe Kleine-König 		}
89915a647ebSDavid Brownell 	} else {
90015a647ebSDavid Brownell 		if (desc->wake_depth == 0) {
9017a2c4770SArjan van de Ven 			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
9022db87321SUwe Kleine-König 		} else if (--desc->wake_depth == 0) {
9032db87321SUwe Kleine-König 			ret = set_irq_wake_real(irq, on);
9042db87321SUwe Kleine-König 			if (ret)
9052db87321SUwe Kleine-König 				desc->wake_depth = 1;
90615a647ebSDavid Brownell 			else
9077f94226fSThomas Gleixner 				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
90815a647ebSDavid Brownell 		}
9092db87321SUwe Kleine-König 	}
910b525903cSJulien Thierry 
911b525903cSJulien Thierry out_unlock:
91202725e74SThomas Gleixner 	irq_put_desc_busunlock(desc, flags);
913ba9a2331SThomas Gleixner 	return ret;
914ba9a2331SThomas Gleixner }
915a0cd9ca2SThomas Gleixner EXPORT_SYMBOL(irq_set_irq_wake);
916ba9a2331SThomas Gleixner 
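/*
 * Usage sketch (hypothetical driver "foo" with system sleep callbacks):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 1);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 0);
 *		return 0;
 *	}
 *
 * The enable and disable calls must balance, as described above.
 */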
9171da177e4SLinus Torvalds /*
9181da177e4SLinus Torvalds  * Internal function that tells the architecture code whether a
9191da177e4SLinus Torvalds  * particular irq has been exclusively allocated or is available
9201da177e4SLinus Torvalds  * for driver use.
9211da177e4SLinus Torvalds  */
9221da177e4SLinus Torvalds int can_request_irq(unsigned int irq, unsigned long irqflags)
9231da177e4SLinus Torvalds {
924cc8c3b78SThomas Gleixner 	unsigned long flags;
92531d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
92602725e74SThomas Gleixner 	int canrequest = 0;
9271da177e4SLinus Torvalds 
9287d94f7caSYinghai Lu 	if (!desc)
9297d94f7caSYinghai Lu 		return 0;
9307d94f7caSYinghai Lu 
93102725e74SThomas Gleixner 	if (irq_settings_can_request(desc)) {
9322779db8dSBen Hutchings 		if (!desc->action ||
9332779db8dSBen Hutchings 		    irqflags & desc->action->flags & IRQF_SHARED)
93402725e74SThomas Gleixner 			canrequest = 1;
93502725e74SThomas Gleixner 	}
93602725e74SThomas Gleixner 	irq_put_desc_unlock(desc, flags);
93702725e74SThomas Gleixner 	return canrequest;
9381da177e4SLinus Torvalds }
9391da177e4SLinus Torvalds 
940a1ff541aSJiang Liu int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
94182736f4dSUwe Kleine-König {
9426b8ff312SThomas Gleixner 	struct irq_chip *chip = desc->irq_data.chip;
943d4d5e089SThomas Gleixner 	int ret, unmask = 0;
94482736f4dSUwe Kleine-König 
945b2ba2c30SThomas Gleixner 	if (!chip || !chip->irq_set_type) {
94682736f4dSUwe Kleine-König 		/*
94782736f4dSUwe Kleine-König 		 * IRQF_TRIGGER_* but the PIC does not support multiple
94882736f4dSUwe Kleine-König 		 * flow-types?
94982736f4dSUwe Kleine-König 		 */
950a1ff541aSJiang Liu 		pr_debug("No set_type function for IRQ %d (%s)\n",
951a1ff541aSJiang Liu 			 irq_desc_get_irq(desc),
95282736f4dSUwe Kleine-König 			 chip ? (chip->name ? : "unknown") : "unknown");
95382736f4dSUwe Kleine-König 		return 0;
95482736f4dSUwe Kleine-König 	}
95582736f4dSUwe Kleine-König 
956d4d5e089SThomas Gleixner 	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
95732f4125eSThomas Gleixner 		if (!irqd_irq_masked(&desc->irq_data))
958d4d5e089SThomas Gleixner 			mask_irq(desc);
95932f4125eSThomas Gleixner 		if (!irqd_irq_disabled(&desc->irq_data))
960d4d5e089SThomas Gleixner 			unmask = 1;
961d4d5e089SThomas Gleixner 	}
962d4d5e089SThomas Gleixner 
96300b992deSAlexander Kuleshov 	/* Mask all flags except trigger mode */
96400b992deSAlexander Kuleshov 	flags &= IRQ_TYPE_SENSE_MASK;
965b2ba2c30SThomas Gleixner 	ret = chip->irq_set_type(&desc->irq_data, flags);
96682736f4dSUwe Kleine-König 
967876dbd4cSThomas Gleixner 	switch (ret) {
968876dbd4cSThomas Gleixner 	case IRQ_SET_MASK_OK:
9692cb62547SJiang Liu 	case IRQ_SET_MASK_OK_DONE:
970876dbd4cSThomas Gleixner 		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
971876dbd4cSThomas Gleixner 		irqd_set(&desc->irq_data, flags);
972df561f66SGustavo A. R. Silva 		fallthrough;
973876dbd4cSThomas Gleixner 
974876dbd4cSThomas Gleixner 	case IRQ_SET_MASK_OK_NOCOPY:
975876dbd4cSThomas Gleixner 		flags = irqd_get_trigger_type(&desc->irq_data);
976876dbd4cSThomas Gleixner 		irq_settings_set_trigger_mask(desc, flags);
977876dbd4cSThomas Gleixner 		irqd_clear(&desc->irq_data, IRQD_LEVEL);
978876dbd4cSThomas Gleixner 		irq_settings_clr_level(desc);
979876dbd4cSThomas Gleixner 		if (flags & IRQ_TYPE_LEVEL_MASK) {
980876dbd4cSThomas Gleixner 			irq_settings_set_level(desc);
981876dbd4cSThomas Gleixner 			irqd_set(&desc->irq_data, IRQD_LEVEL);
982876dbd4cSThomas Gleixner 		}
98346732475SThomas Gleixner 
984d4d5e089SThomas Gleixner 		ret = 0;
9858fff39e0SThomas Gleixner 		break;
986876dbd4cSThomas Gleixner 	default:
987d75f773cSSakari Ailus 		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
988a1ff541aSJiang Liu 		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
9890c5d1eb7SDavid Brownell 	}
990d4d5e089SThomas Gleixner 	if (unmask)
991d4d5e089SThomas Gleixner 		unmask_irq(desc);
99282736f4dSUwe Kleine-König 	return ret;
99382736f4dSUwe Kleine-König }
99482736f4dSUwe Kleine-König 
995293a7a0aSThomas Gleixner #ifdef CONFIG_HARDIRQS_SW_RESEND
996293a7a0aSThomas Gleixner int irq_set_parent(int irq, int parent_irq)
997293a7a0aSThomas Gleixner {
998293a7a0aSThomas Gleixner 	unsigned long flags;
999293a7a0aSThomas Gleixner 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
1000293a7a0aSThomas Gleixner 
1001293a7a0aSThomas Gleixner 	if (!desc)
1002293a7a0aSThomas Gleixner 		return -EINVAL;
1003293a7a0aSThomas Gleixner 
1004293a7a0aSThomas Gleixner 	desc->parent_irq = parent_irq;
1005293a7a0aSThomas Gleixner 
1006293a7a0aSThomas Gleixner 	irq_put_desc_unlock(desc, flags);
1007293a7a0aSThomas Gleixner 	return 0;
1008293a7a0aSThomas Gleixner }
10093118dac5SSudip Mukherjee EXPORT_SYMBOL_GPL(irq_set_parent);
1010293a7a0aSThomas Gleixner #endif
1011293a7a0aSThomas Gleixner 
1012b25c340cSThomas Gleixner /*
 1013b25c340cSThomas Gleixner  * Default primary interrupt handler for threaded interrupts. It is
 1014b25c340cSThomas Gleixner  * assigned as the primary handler when request_threaded_irq() is called
1015b25c340cSThomas Gleixner  * with handler == NULL. Useful for oneshot interrupts.
1016b25c340cSThomas Gleixner  */
1017b25c340cSThomas Gleixner static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
1018b25c340cSThomas Gleixner {
1019b25c340cSThomas Gleixner 	return IRQ_WAKE_THREAD;
1020b25c340cSThomas Gleixner }
1021b25c340cSThomas Gleixner 
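/*
 * Usage sketch for a purely threaded interrupt ("foo_thread_fn" and the
 * cookie are hypothetical), which makes the core install
 * irq_default_primary_handler() above as the hard handler:
 *
 *	ret = request_threaded_irq(irq, NULL, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 */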
1022399b5da2SThomas Gleixner /*
1023399b5da2SThomas Gleixner  * Primary handler for nested threaded interrupts. Should never be
1024399b5da2SThomas Gleixner  * called.
1025399b5da2SThomas Gleixner  */
1026399b5da2SThomas Gleixner static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
1027399b5da2SThomas Gleixner {
1028399b5da2SThomas Gleixner 	WARN(1, "Primary handler called for nested irq %d\n", irq);
1029399b5da2SThomas Gleixner 	return IRQ_NONE;
1030399b5da2SThomas Gleixner }
1031399b5da2SThomas Gleixner 
10322a1d3ab8SThomas Gleixner static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
10332a1d3ab8SThomas Gleixner {
10342a1d3ab8SThomas Gleixner 	WARN(1, "Secondary action handler called for irq %d\n", irq);
10352a1d3ab8SThomas Gleixner 	return IRQ_NONE;
10362a1d3ab8SThomas Gleixner }
10372a1d3ab8SThomas Gleixner 
10383aa551c9SThomas Gleixner static int irq_wait_for_interrupt(struct irqaction *action)
10393aa551c9SThomas Gleixner {
1040519cc865SLukas Wunner 	for (;;) {
10413aa551c9SThomas Gleixner 		set_current_state(TASK_INTERRUPTIBLE);
1042f48fe81eSThomas Gleixner 
1043519cc865SLukas Wunner 		if (kthread_should_stop()) {
1044519cc865SLukas Wunner 			/* may need to run one last time */
1045519cc865SLukas Wunner 			if (test_and_clear_bit(IRQTF_RUNTHREAD,
1046519cc865SLukas Wunner 					       &action->thread_flags)) {
1047519cc865SLukas Wunner 				__set_current_state(TASK_RUNNING);
1048519cc865SLukas Wunner 				return 0;
1049519cc865SLukas Wunner 			}
1050519cc865SLukas Wunner 			__set_current_state(TASK_RUNNING);
1051519cc865SLukas Wunner 			return -1;
1052519cc865SLukas Wunner 		}
1053550acb19SIdo Yariv 
1054f48fe81eSThomas Gleixner 		if (test_and_clear_bit(IRQTF_RUNTHREAD,
1055f48fe81eSThomas Gleixner 				       &action->thread_flags)) {
10563aa551c9SThomas Gleixner 			__set_current_state(TASK_RUNNING);
10573aa551c9SThomas Gleixner 			return 0;
1058f48fe81eSThomas Gleixner 		}
10593aa551c9SThomas Gleixner 		schedule();
10603aa551c9SThomas Gleixner 	}
10613aa551c9SThomas Gleixner }
10623aa551c9SThomas Gleixner 
1063b25c340cSThomas Gleixner /*
1064b25c340cSThomas Gleixner  * Oneshot interrupts keep the irq line masked until the threaded
1065b25c340cSThomas Gleixner  * handler has finished. Unmask it if the interrupt has not been
1066b25c340cSThomas Gleixner  * disabled and is marked MASKED.
1067b25c340cSThomas Gleixner  */
1068b5faba21SThomas Gleixner static void irq_finalize_oneshot(struct irq_desc *desc,
1069f3f79e38SAlexander Gordeev 				 struct irqaction *action)
1070b25c340cSThomas Gleixner {
10712a1d3ab8SThomas Gleixner 	if (!(desc->istate & IRQS_ONESHOT) ||
10722a1d3ab8SThomas Gleixner 	    action->handler == irq_forced_secondary_handler)
1073b5faba21SThomas Gleixner 		return;
10740b1adaa0SThomas Gleixner again:
10753876ec9eSThomas Gleixner 	chip_bus_lock(desc);
1076239007b8SThomas Gleixner 	raw_spin_lock_irq(&desc->lock);
10770b1adaa0SThomas Gleixner 
10780b1adaa0SThomas Gleixner 	/*
10790b1adaa0SThomas Gleixner 	 * Implausible though it may be, we need to protect ourselves
10800b1adaa0SThomas Gleixner 	 * against the following scenario:
10810b1adaa0SThomas Gleixner 	 *
10820b1adaa0SThomas Gleixner 	 * The thread finishes before the hard interrupt handler on the
10830b1adaa0SThomas Gleixner 	 * other CPU does. If we unmask the irq line then the interrupt
10840b1adaa0SThomas Gleixner 	 * can come in again, mask the line and leave due to
1085009b4c3bSThomas Gleixner 	 * IRQS_INPROGRESS, and the irq line stays masked forever.
1086b5faba21SThomas Gleixner 	 *
1087b5faba21SThomas Gleixner 	 * This also serializes the state of shared oneshot handlers
1088a359f757SIngo Molnar 	 * versus "desc->threads_oneshot |= action->thread_mask;" in
1089b5faba21SThomas Gleixner 	 * irq_wake_thread(). See the comment there which explains the
1090b5faba21SThomas Gleixner 	 * serialization.
10910b1adaa0SThomas Gleixner 	 */
109232f4125eSThomas Gleixner 	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
10930b1adaa0SThomas Gleixner 		raw_spin_unlock_irq(&desc->lock);
10943876ec9eSThomas Gleixner 		chip_bus_sync_unlock(desc);
10950b1adaa0SThomas Gleixner 		cpu_relax();
10960b1adaa0SThomas Gleixner 		goto again;
10970b1adaa0SThomas Gleixner 	}
10980b1adaa0SThomas Gleixner 
1099b5faba21SThomas Gleixner 	/*
1100b5faba21SThomas Gleixner 	 * Now check again whether the thread should run. Otherwise
1101b5faba21SThomas Gleixner 	 * we would clear the threads_oneshot bit of this thread which
1102b5faba21SThomas Gleixner 	 * was just set.
1103b5faba21SThomas Gleixner 	 */
1104f3f79e38SAlexander Gordeev 	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1105b5faba21SThomas Gleixner 		goto out_unlock;
1106b5faba21SThomas Gleixner 
1107b5faba21SThomas Gleixner 	desc->threads_oneshot &= ~action->thread_mask;
1108b5faba21SThomas Gleixner 
110932f4125eSThomas Gleixner 	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
111032f4125eSThomas Gleixner 	    irqd_irq_masked(&desc->irq_data))
1111328a4978SThomas Gleixner 		unmask_threaded_irq(desc);
111232f4125eSThomas Gleixner 
1113b5faba21SThomas Gleixner out_unlock:
1114239007b8SThomas Gleixner 	raw_spin_unlock_irq(&desc->lock);
11153876ec9eSThomas Gleixner 	chip_bus_sync_unlock(desc);
1116b25c340cSThomas Gleixner }
1117b25c340cSThomas Gleixner 
111861f38261SBruno Premont #ifdef CONFIG_SMP
11193aa551c9SThomas Gleixner /*
1120b04c644eSChuansheng Liu  * Check whether we need to change the affinity of the interrupt thread.
1121591d2fb0SThomas Gleixner  */
1122591d2fb0SThomas Gleixner static void
1123591d2fb0SThomas Gleixner irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
1124591d2fb0SThomas Gleixner {
1125591d2fb0SThomas Gleixner 	cpumask_var_t mask;
112604aa530eSThomas Gleixner 	bool valid = true;
1127591d2fb0SThomas Gleixner 
1128591d2fb0SThomas Gleixner 	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
1129591d2fb0SThomas Gleixner 		return;
1130591d2fb0SThomas Gleixner 
1131591d2fb0SThomas Gleixner 	/*
1132591d2fb0SThomas Gleixner 	 * In case we are out of memory, set IRQTF_AFFINITY again and
1133591d2fb0SThomas Gleixner 	 * retry the next time the thread runs.
1134591d2fb0SThomas Gleixner 	 */
1135591d2fb0SThomas Gleixner 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1136591d2fb0SThomas Gleixner 		set_bit(IRQTF_AFFINITY, &action->thread_flags);
1137591d2fb0SThomas Gleixner 		return;
1138591d2fb0SThomas Gleixner 	}
1139591d2fb0SThomas Gleixner 
1140239007b8SThomas Gleixner 	raw_spin_lock_irq(&desc->lock);
114104aa530eSThomas Gleixner 	/*
114204aa530eSThomas Gleixner 	 * This code is triggered unconditionally. Check the affinity
114304aa530eSThomas Gleixner 	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
114404aa530eSThomas Gleixner 	 */
1145cbf86999SThomas Gleixner 	if (cpumask_available(desc->irq_common_data.affinity)) {
1146cbf86999SThomas Gleixner 		const struct cpumask *m;
1147cbf86999SThomas Gleixner 
1148cbf86999SThomas Gleixner 		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1149cbf86999SThomas Gleixner 		cpumask_copy(mask, m);
1150cbf86999SThomas Gleixner 	} else {
115104aa530eSThomas Gleixner 		valid = false;
1152cbf86999SThomas Gleixner 	}
1153239007b8SThomas Gleixner 	raw_spin_unlock_irq(&desc->lock);
1154591d2fb0SThomas Gleixner 
115504aa530eSThomas Gleixner 	if (valid)
1156591d2fb0SThomas Gleixner 		set_cpus_allowed_ptr(current, mask);
1157591d2fb0SThomas Gleixner 	free_cpumask_var(mask);
1158591d2fb0SThomas Gleixner }
115961f38261SBruno Premont #else
116061f38261SBruno Premont static inline void
116161f38261SBruno Premont irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
116261f38261SBruno Premont #endif
1163591d2fb0SThomas Gleixner 
1164591d2fb0SThomas Gleixner /*
1165c5f48c0aSIngo Molnar  * Interrupts which are not explicitly requested as threaded
11668d32a307SThomas Gleixner  * interrupts rely on the implicit bh/preempt disable of the hard irq
11678d32a307SThomas Gleixner  * context. So we need to disable bh here to avoid deadlocks and other
11688d32a307SThomas Gleixner  * side effects.
11698d32a307SThomas Gleixner  */
11703a43e05fSSebastian Andrzej Siewior static irqreturn_t
11718d32a307SThomas Gleixner irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
11728d32a307SThomas Gleixner {
11733a43e05fSSebastian Andrzej Siewior 	irqreturn_t ret;
11743a43e05fSSebastian Andrzej Siewior 
11758d32a307SThomas Gleixner 	local_bh_disable();
117681e2073cSThomas Gleixner 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
117781e2073cSThomas Gleixner 		local_irq_disable();
11783a43e05fSSebastian Andrzej Siewior 	ret = action->thread_fn(action->irq, action->dev_id);
1179746a923bSLukas Wunner 	if (ret == IRQ_HANDLED)
1180746a923bSLukas Wunner 		atomic_inc(&desc->threads_handled);
1181746a923bSLukas Wunner 
1182f3f79e38SAlexander Gordeev 	irq_finalize_oneshot(desc, action);
118381e2073cSThomas Gleixner 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
118481e2073cSThomas Gleixner 		local_irq_enable();
11858d32a307SThomas Gleixner 	local_bh_enable();
11863a43e05fSSebastian Andrzej Siewior 	return ret;
11878d32a307SThomas Gleixner }
11888d32a307SThomas Gleixner 
11898d32a307SThomas Gleixner /*
1190f788e7bfSXie XiuQi  * Interrupts explicitly requested as threaded interrupts want to be
11915c982c58SKrzysztof Kozlowski  * preemptible - many of them need to sleep and wait for slow busses to
11928d32a307SThomas Gleixner  * complete.
11938d32a307SThomas Gleixner  */
11943a43e05fSSebastian Andrzej Siewior static irqreturn_t irq_thread_fn(struct irq_desc *desc,
11953a43e05fSSebastian Andrzej Siewior 		struct irqaction *action)
11968d32a307SThomas Gleixner {
11973a43e05fSSebastian Andrzej Siewior 	irqreturn_t ret;
11983a43e05fSSebastian Andrzej Siewior 
11993a43e05fSSebastian Andrzej Siewior 	ret = action->thread_fn(action->irq, action->dev_id);
1200746a923bSLukas Wunner 	if (ret == IRQ_HANDLED)
1201746a923bSLukas Wunner 		atomic_inc(&desc->threads_handled);
1202746a923bSLukas Wunner 
1203f3f79e38SAlexander Gordeev 	irq_finalize_oneshot(desc, action);
12043a43e05fSSebastian Andrzej Siewior 	return ret;
12058d32a307SThomas Gleixner }
12068d32a307SThomas Gleixner 
12077140ea19SIdo Yariv static void wake_threads_waitq(struct irq_desc *desc)
12087140ea19SIdo Yariv {
1209c685689fSChuansheng Liu 	if (atomic_dec_and_test(&desc->threads_active))
12107140ea19SIdo Yariv 		wake_up(&desc->wait_for_threads);
12117140ea19SIdo Yariv }
12127140ea19SIdo Yariv 
121367d12145SAl Viro static void irq_thread_dtor(struct callback_head *unused)
12144d1d61a6SOleg Nesterov {
12154d1d61a6SOleg Nesterov 	struct task_struct *tsk = current;
12164d1d61a6SOleg Nesterov 	struct irq_desc *desc;
12174d1d61a6SOleg Nesterov 	struct irqaction *action;
12184d1d61a6SOleg Nesterov 
12194d1d61a6SOleg Nesterov 	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
12204d1d61a6SOleg Nesterov 		return;
12214d1d61a6SOleg Nesterov 
12224d1d61a6SOleg Nesterov 	action = kthread_data(tsk);
12234d1d61a6SOleg Nesterov 
1224fb21affaSLinus Torvalds 	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
122519af395dSAlan Cox 	       tsk->comm, tsk->pid, action->irq);
12264d1d61a6SOleg Nesterov 
12274d1d61a6SOleg Nesterov 
12284d1d61a6SOleg Nesterov 	desc = irq_to_desc(action->irq);
12294d1d61a6SOleg Nesterov 	/*
12304d1d61a6SOleg Nesterov 	 * If IRQTF_RUNTHREAD is set, we need to decrement
12314d1d61a6SOleg Nesterov 	 * desc->threads_active and wake possible waiters.
12324d1d61a6SOleg Nesterov 	 */
12334d1d61a6SOleg Nesterov 	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
12344d1d61a6SOleg Nesterov 		wake_threads_waitq(desc);
12354d1d61a6SOleg Nesterov 
12364d1d61a6SOleg Nesterov 	/* Prevent a stale desc->threads_oneshot */
12374d1d61a6SOleg Nesterov 	irq_finalize_oneshot(desc, action);
12384d1d61a6SOleg Nesterov }
12394d1d61a6SOleg Nesterov 
12402a1d3ab8SThomas Gleixner static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
12412a1d3ab8SThomas Gleixner {
12422a1d3ab8SThomas Gleixner 	struct irqaction *secondary = action->secondary;
12432a1d3ab8SThomas Gleixner 
12442a1d3ab8SThomas Gleixner 	if (WARN_ON_ONCE(!secondary))
12452a1d3ab8SThomas Gleixner 		return;
12462a1d3ab8SThomas Gleixner 
12472a1d3ab8SThomas Gleixner 	raw_spin_lock_irq(&desc->lock);
12482a1d3ab8SThomas Gleixner 	__irq_wake_thread(desc, secondary);
12492a1d3ab8SThomas Gleixner 	raw_spin_unlock_irq(&desc->lock);
12502a1d3ab8SThomas Gleixner }
12512a1d3ab8SThomas Gleixner 
12528d32a307SThomas Gleixner /*
12533aa551c9SThomas Gleixner  * Interrupt handler thread
12543aa551c9SThomas Gleixner  */
12553aa551c9SThomas Gleixner static int irq_thread(void *data)
12563aa551c9SThomas Gleixner {
125767d12145SAl Viro 	struct callback_head on_exit_work;
12583aa551c9SThomas Gleixner 	struct irqaction *action = data;
12593aa551c9SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(action->irq);
12603a43e05fSSebastian Andrzej Siewior 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
12613a43e05fSSebastian Andrzej Siewior 			struct irqaction *action);
12623aa551c9SThomas Gleixner 
1263540b60e2SAlexander Gordeev 	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
12648d32a307SThomas Gleixner 					&action->thread_flags))
12658d32a307SThomas Gleixner 		handler_fn = irq_forced_thread_fn;
12668d32a307SThomas Gleixner 	else
12678d32a307SThomas Gleixner 		handler_fn = irq_thread_fn;
12688d32a307SThomas Gleixner 
126941f9d29fSAl Viro 	init_task_work(&on_exit_work, irq_thread_dtor);
127091989c70SJens Axboe 	task_work_add(current, &on_exit_work, TWA_NONE);
12713aa551c9SThomas Gleixner 
1272f3de44edSSankara Muthukrishnan 	irq_thread_check_affinity(desc, action);
1273f3de44edSSankara Muthukrishnan 
12743aa551c9SThomas Gleixner 	while (!irq_wait_for_interrupt(action)) {
12757140ea19SIdo Yariv 		irqreturn_t action_ret;
12763aa551c9SThomas Gleixner 
1277591d2fb0SThomas Gleixner 		irq_thread_check_affinity(desc, action);
1278591d2fb0SThomas Gleixner 
12793a43e05fSSebastian Andrzej Siewior 		action_ret = handler_fn(desc, action);
12802a1d3ab8SThomas Gleixner 		if (action_ret == IRQ_WAKE_THREAD)
12812a1d3ab8SThomas Gleixner 			irq_wake_secondary(desc, action);
12827140ea19SIdo Yariv 
12837140ea19SIdo Yariv 		wake_threads_waitq(desc);
12843aa551c9SThomas Gleixner 	}
12853aa551c9SThomas Gleixner 
12867140ea19SIdo Yariv 	/*
12877140ea19SIdo Yariv 	 * This is the regular exit path. __free_irq() is stopping the
12887140ea19SIdo Yariv 	 * thread via kthread_stop() after calling
1289519cc865SLukas Wunner 	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1290836557bdSLukas Wunner 	 * oneshot mask bit can be set.
12913aa551c9SThomas Gleixner 	 */
12924d1d61a6SOleg Nesterov 	task_work_cancel(current, irq_thread_dtor);
12933aa551c9SThomas Gleixner 	return 0;
12943aa551c9SThomas Gleixner }
12953aa551c9SThomas Gleixner 
1296a92444c6SThomas Gleixner /**
1297a92444c6SThomas Gleixner  *	irq_wake_thread - wake the irq thread for the action identified by dev_id
1298a92444c6SThomas Gleixner  *	@irq:		Interrupt line
1299a92444c6SThomas Gleixner  *	@dev_id:	Device identity for which the thread should be woken
1300a92444c6SThomas Gleixner  *
1301a92444c6SThomas Gleixner  */
1302a92444c6SThomas Gleixner void irq_wake_thread(unsigned int irq, void *dev_id)
1303a92444c6SThomas Gleixner {
1304a92444c6SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
1305a92444c6SThomas Gleixner 	struct irqaction *action;
1306a92444c6SThomas Gleixner 	unsigned long flags;
1307a92444c6SThomas Gleixner 
1308a92444c6SThomas Gleixner 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1309a92444c6SThomas Gleixner 		return;
1310a92444c6SThomas Gleixner 
1311a92444c6SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
1312f944b5a7SDaniel Lezcano 	for_each_action_of_desc(desc, action) {
1313a92444c6SThomas Gleixner 		if (action->dev_id == dev_id) {
1314a92444c6SThomas Gleixner 			if (action->thread)
1315a92444c6SThomas Gleixner 				__irq_wake_thread(desc, action);
1316a92444c6SThomas Gleixner 			break;
1317a92444c6SThomas Gleixner 		}
1318a92444c6SThomas Gleixner 	}
1319a92444c6SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1320a92444c6SThomas Gleixner }
1321a92444c6SThomas Gleixner EXPORT_SYMBOL_GPL(irq_wake_thread);
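
/*
 * Illustrative driver-side sketch, not part of this file: a driver that
 * registered a threaded handler can kick that thread from another context,
 * e.g. a recovery timer, without a hardware interrupt having fired. The
 * dev_id passed here must be the same cookie that was handed to
 * request_threaded_irq(). struct my_dev and the timer are assumed names.
 */
#include <linux/interrupt.h>
#include <linux/timer.h>

struct my_dev {
	int irq;
	struct timer_list recover_timer;
};

static void my_recover_timer_fn(struct timer_list *t)
{
	struct my_dev *dev = from_timer(dev, t, recover_timer);

	/* Run the threaded handler as if the interrupt had just fired */
	irq_wake_thread(dev->irq, dev);
}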
1322a92444c6SThomas Gleixner 
13232a1d3ab8SThomas Gleixner static int irq_setup_forced_threading(struct irqaction *new)
13248d32a307SThomas Gleixner {
13258d32a307SThomas Gleixner 	if (!force_irqthreads)
13262a1d3ab8SThomas Gleixner 		return 0;
13278d32a307SThomas Gleixner 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
13282a1d3ab8SThomas Gleixner 		return 0;
13298d32a307SThomas Gleixner 
1330d1f0301bSThomas Gleixner 	/*
1331d1f0301bSThomas Gleixner 	 * No further action required for interrupts which are requested as
1332d1f0301bSThomas Gleixner 	 * threaded interrupts already
1333d1f0301bSThomas Gleixner 	 */
1334d1f0301bSThomas Gleixner 	if (new->handler == irq_default_primary_handler)
1335d1f0301bSThomas Gleixner 		return 0;
1336d1f0301bSThomas Gleixner 
13378d32a307SThomas Gleixner 	new->flags |= IRQF_ONESHOT;
13388d32a307SThomas Gleixner 
13392a1d3ab8SThomas Gleixner 	/*
13402a1d3ab8SThomas Gleixner 	 * Handle the case where we have a real primary handler and a
13412a1d3ab8SThomas Gleixner 	 * thread handler. We force-thread them as well by creating a
13422a1d3ab8SThomas Gleixner 	 * secondary action.
13432a1d3ab8SThomas Gleixner 	 */
1344d1f0301bSThomas Gleixner 	if (new->handler && new->thread_fn) {
13452a1d3ab8SThomas Gleixner 		/* Allocate the secondary action */
13462a1d3ab8SThomas Gleixner 		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
13472a1d3ab8SThomas Gleixner 		if (!new->secondary)
13482a1d3ab8SThomas Gleixner 			return -ENOMEM;
13492a1d3ab8SThomas Gleixner 		new->secondary->handler = irq_forced_secondary_handler;
13502a1d3ab8SThomas Gleixner 		new->secondary->thread_fn = new->thread_fn;
13512a1d3ab8SThomas Gleixner 		new->secondary->dev_id = new->dev_id;
13522a1d3ab8SThomas Gleixner 		new->secondary->irq = new->irq;
13532a1d3ab8SThomas Gleixner 		new->secondary->name = new->name;
13542a1d3ab8SThomas Gleixner 	}
13552a1d3ab8SThomas Gleixner 	/* Deal with the primary handler */
13568d32a307SThomas Gleixner 	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
13578d32a307SThomas Gleixner 	new->thread_fn = new->handler;
13588d32a307SThomas Gleixner 	new->handler = irq_default_primary_handler;
13592a1d3ab8SThomas Gleixner 	return 0;
13608d32a307SThomas Gleixner }
13618d32a307SThomas Gleixner 
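
/*
 * Illustrative consequence of the transformation above, seen from a
 * driver's point of view (sketch; assumes a kernel booted with
 * "threadirqs" and a request without IRQF_NO_THREAD, IRQF_PERCPU or
 * IRQF_ONESHOT; my_hw_handler and my_dev are hypothetical names):
 */
#include <linux/interrupt.h>

static irqreturn_t my_hw_handler(int irq, void *dev_id)
{
	/* Written as a hard interrupt handler by the driver author */
	return IRQ_HANDLED;
}

static int my_request(int irq, void *my_dev)
{
	/*
	 * Under forced threading the core rewrites this request so that
	 * the primary handler becomes irq_default_primary_handler (which
	 * only wakes the thread), my_hw_handler becomes the thread_fn run
	 * in the irq thread with bh disabled, and IRQF_ONESHOT is set
	 * implicitly so the line stays masked until my_hw_handler returns.
	 */
	return request_irq(irq, my_hw_handler, 0, "my-device", my_dev);
}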
1362c1bacbaeSThomas Gleixner static int irq_request_resources(struct irq_desc *desc)
1363c1bacbaeSThomas Gleixner {
1364c1bacbaeSThomas Gleixner 	struct irq_data *d = &desc->irq_data;
1365c1bacbaeSThomas Gleixner 	struct irq_chip *c = d->chip;
1366c1bacbaeSThomas Gleixner 
1367c1bacbaeSThomas Gleixner 	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1368c1bacbaeSThomas Gleixner }
1369c1bacbaeSThomas Gleixner 
1370c1bacbaeSThomas Gleixner static void irq_release_resources(struct irq_desc *desc)
1371c1bacbaeSThomas Gleixner {
1372c1bacbaeSThomas Gleixner 	struct irq_data *d = &desc->irq_data;
1373c1bacbaeSThomas Gleixner 	struct irq_chip *c = d->chip;
1374c1bacbaeSThomas Gleixner 
1375c1bacbaeSThomas Gleixner 	if (c->irq_release_resources)
1376c1bacbaeSThomas Gleixner 		c->irq_release_resources(d);
1377c1bacbaeSThomas Gleixner }
1378c1bacbaeSThomas Gleixner 
1379b525903cSJulien Thierry static bool irq_supports_nmi(struct irq_desc *desc)
1380b525903cSJulien Thierry {
1381b525903cSJulien Thierry 	struct irq_data *d = irq_desc_get_irq_data(desc);
1382b525903cSJulien Thierry 
1383b525903cSJulien Thierry #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1384b525903cSJulien Thierry 	/* Only IRQs directly managed by the root irqchip can be set as NMI */
1385b525903cSJulien Thierry 	if (d->parent_data)
1386b525903cSJulien Thierry 		return false;
1387b525903cSJulien Thierry #endif
1388b525903cSJulien Thierry 	/* Don't support NMIs for chips behind a slow bus */
1389b525903cSJulien Thierry 	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1390b525903cSJulien Thierry 		return false;
1391b525903cSJulien Thierry 
1392b525903cSJulien Thierry 	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1393b525903cSJulien Thierry }
1394b525903cSJulien Thierry 
1395b525903cSJulien Thierry static int irq_nmi_setup(struct irq_desc *desc)
1396b525903cSJulien Thierry {
1397b525903cSJulien Thierry 	struct irq_data *d = irq_desc_get_irq_data(desc);
1398b525903cSJulien Thierry 	struct irq_chip *c = d->chip;
1399b525903cSJulien Thierry 
1400b525903cSJulien Thierry 	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1401b525903cSJulien Thierry }
1402b525903cSJulien Thierry 
1403b525903cSJulien Thierry static void irq_nmi_teardown(struct irq_desc *desc)
1404b525903cSJulien Thierry {
1405b525903cSJulien Thierry 	struct irq_data *d = irq_desc_get_irq_data(desc);
1406b525903cSJulien Thierry 	struct irq_chip *c = d->chip;
1407b525903cSJulien Thierry 
1408b525903cSJulien Thierry 	if (c->irq_nmi_teardown)
1409b525903cSJulien Thierry 		c->irq_nmi_teardown(d);
1410b525903cSJulien Thierry }
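
/*
 * Illustrative irqchip-side sketch, not from this file: a root irqchip
 * opts into NMI delivery by setting IRQCHIP_SUPPORTS_NMI and supplying
 * the two callbacks invoked above to switch a line into and out of NMI
 * mode at the hardware level. my_root_chip and the register programming
 * it would perform are assumptions.
 */
#include <linux/irq.h>

static int my_root_nmi_setup(struct irq_data *d)
{
	/* Program the controller to deliver this line with NMI priority */
	return 0;
}

static void my_root_nmi_teardown(struct irq_data *d)
{
	/* Revert the line to normal interrupt delivery */
}

static struct irq_chip my_root_chip = {
	.name			= "my-root",
	.irq_nmi_setup		= my_root_nmi_setup,
	.irq_nmi_teardown	= my_root_nmi_teardown,
	.flags			= IRQCHIP_SUPPORTS_NMI,
};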
1411b525903cSJulien Thierry 
14122a1d3ab8SThomas Gleixner static int
14132a1d3ab8SThomas Gleixner setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
14142a1d3ab8SThomas Gleixner {
14152a1d3ab8SThomas Gleixner 	struct task_struct *t;
14162a1d3ab8SThomas Gleixner 
14172a1d3ab8SThomas Gleixner 	if (!secondary) {
14182a1d3ab8SThomas Gleixner 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
14192a1d3ab8SThomas Gleixner 				   new->name);
14202a1d3ab8SThomas Gleixner 	} else {
14212a1d3ab8SThomas Gleixner 		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
14222a1d3ab8SThomas Gleixner 				   new->name);
14232a1d3ab8SThomas Gleixner 	}
14242a1d3ab8SThomas Gleixner 
14252a1d3ab8SThomas Gleixner 	if (IS_ERR(t))
14262a1d3ab8SThomas Gleixner 		return PTR_ERR(t);
14272a1d3ab8SThomas Gleixner 
14287a40798cSPeter Zijlstra 	sched_set_fifo(t);
14292a1d3ab8SThomas Gleixner 
14302a1d3ab8SThomas Gleixner 	/*
14312a1d3ab8SThomas Gleixner 	 * We keep the reference to the task struct even if
14322a1d3ab8SThomas Gleixner 	 * the thread dies so that the interrupt code does not
14332a1d3ab8SThomas Gleixner 	 * reference an already freed task_struct.
14342a1d3ab8SThomas Gleixner 	 */
14357b3c92b8SMatthew Wilcox (Oracle) 	new->thread = get_task_struct(t);
14362a1d3ab8SThomas Gleixner 	/*
14372a1d3ab8SThomas Gleixner 	 * Tell the thread to set its affinity. This is
14382a1d3ab8SThomas Gleixner 	 * important for shared interrupt handlers as we do
14392a1d3ab8SThomas Gleixner 	 * not invoke setup_affinity() for the secondary
14402a1d3ab8SThomas Gleixner 	 * handlers as everything is already set up. Even for
14412a1d3ab8SThomas Gleixner 	 * interrupts marked with IRQF_NOBALANCING this is
14422a1d3ab8SThomas Gleixner 	 * correct as we want the thread to move to the cpu(s)
14432a1d3ab8SThomas Gleixner 	 * on which the requesting code placed the interrupt.
14442a1d3ab8SThomas Gleixner 	 */
14452a1d3ab8SThomas Gleixner 	set_bit(IRQTF_AFFINITY, &new->thread_flags);
14462a1d3ab8SThomas Gleixner 	return 0;
14472a1d3ab8SThomas Gleixner }
14482a1d3ab8SThomas Gleixner 
14491da177e4SLinus Torvalds /*
14501da177e4SLinus Torvalds  * Internal function to register an irqaction - typically used to
14511da177e4SLinus Torvalds  * allocate special interrupts that are part of the architecture.
145219d39a38SThomas Gleixner  *
145319d39a38SThomas Gleixner  * Locking rules:
145419d39a38SThomas Gleixner  *
145519d39a38SThomas Gleixner  * desc->request_mutex	Provides serialization against a concurrent free_irq()
145619d39a38SThomas Gleixner  *   chip_bus_lock	Provides serialization for slow bus operations
145719d39a38SThomas Gleixner  *     desc->lock	Provides serialization against hard interrupts
145819d39a38SThomas Gleixner  *
145919d39a38SThomas Gleixner  * chip_bus_lock and desc->lock are sufficient for all other management and
146019d39a38SThomas Gleixner  * interrupt related functions. desc->request_mutex solely serializes
146119d39a38SThomas Gleixner  * request/free_irq().
14621da177e4SLinus Torvalds  */
1463d3c60047SThomas Gleixner static int
1464d3c60047SThomas Gleixner __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
14651da177e4SLinus Torvalds {
1466f17c7545SIngo Molnar 	struct irqaction *old, **old_ptr;
1467b5faba21SThomas Gleixner 	unsigned long flags, thread_mask = 0;
14683b8249e7SThomas Gleixner 	int ret, nested, shared = 0;
14691da177e4SLinus Torvalds 
14707d94f7caSYinghai Lu 	if (!desc)
1471c2b5a251SMatthew Wilcox 		return -EINVAL;
1472c2b5a251SMatthew Wilcox 
14736b8ff312SThomas Gleixner 	if (desc->irq_data.chip == &no_irq_chip)
14741da177e4SLinus Torvalds 		return -ENOSYS;
1475b6873807SSebastian Andrzej Siewior 	if (!try_module_get(desc->owner))
1476b6873807SSebastian Andrzej Siewior 		return -ENODEV;
14771da177e4SLinus Torvalds 
14782a1d3ab8SThomas Gleixner 	new->irq = irq;
14792a1d3ab8SThomas Gleixner 
14801da177e4SLinus Torvalds 	/*
14814b357daeSJon Hunter 	 * If the trigger type is not specified by the caller,
14824b357daeSJon Hunter 	 * then use the default for this interrupt.
14834b357daeSJon Hunter 	 */
14844b357daeSJon Hunter 	if (!(new->flags & IRQF_TRIGGER_MASK))
14854b357daeSJon Hunter 		new->flags |= irqd_get_trigger_type(&desc->irq_data);
14864b357daeSJon Hunter 
14874b357daeSJon Hunter 	/*
1488399b5da2SThomas Gleixner 	 * Check whether the interrupt nests into another interrupt
1489399b5da2SThomas Gleixner 	 * thread.
14903aa551c9SThomas Gleixner 	 */
14911ccb4e61SThomas Gleixner 	nested = irq_settings_is_nested_thread(desc);
1492399b5da2SThomas Gleixner 	if (nested) {
1493b6873807SSebastian Andrzej Siewior 		if (!new->thread_fn) {
1494b6873807SSebastian Andrzej Siewior 			ret = -EINVAL;
1495b6873807SSebastian Andrzej Siewior 			goto out_mput;
1496b6873807SSebastian Andrzej Siewior 		}
1497399b5da2SThomas Gleixner 		/*
1498399b5da2SThomas Gleixner 		 * Replace the primary handler which was provided by
1499399b5da2SThomas Gleixner 		 * the driver for non-nested interrupt handling with the
1500399b5da2SThomas Gleixner 		 * dummy function which warns when called.
1501399b5da2SThomas Gleixner 		 */
1502399b5da2SThomas Gleixner 		new->handler = irq_nested_primary_handler;
15038d32a307SThomas Gleixner 	} else {
15042a1d3ab8SThomas Gleixner 		if (irq_settings_can_thread(desc)) {
15052a1d3ab8SThomas Gleixner 			ret = irq_setup_forced_threading(new);
15062a1d3ab8SThomas Gleixner 			if (ret)
15072a1d3ab8SThomas Gleixner 				goto out_mput;
15082a1d3ab8SThomas Gleixner 		}
1509399b5da2SThomas Gleixner 	}
1510399b5da2SThomas Gleixner 
1511399b5da2SThomas Gleixner 	/*
1512399b5da2SThomas Gleixner 	 * Create a handler thread when a thread function is supplied
1513399b5da2SThomas Gleixner 	 * and the interrupt does not nest into another interrupt
1514399b5da2SThomas Gleixner 	 * thread.
1515399b5da2SThomas Gleixner 	 */
1516399b5da2SThomas Gleixner 	if (new->thread_fn && !nested) {
15172a1d3ab8SThomas Gleixner 		ret = setup_irq_thread(new, irq, false);
15182a1d3ab8SThomas Gleixner 		if (ret)
1519b6873807SSebastian Andrzej Siewior 			goto out_mput;
15202a1d3ab8SThomas Gleixner 		if (new->secondary) {
15212a1d3ab8SThomas Gleixner 			ret = setup_irq_thread(new->secondary, irq, true);
15222a1d3ab8SThomas Gleixner 			if (ret)
15232a1d3ab8SThomas Gleixner 				goto out_thread;
1524b6873807SSebastian Andrzej Siewior 		}
15253aa551c9SThomas Gleixner 	}
15263aa551c9SThomas Gleixner 
15273aa551c9SThomas Gleixner 	/*
1528dc9b229aSThomas Gleixner 	 * Drivers are often written to work w/o knowledge about the
1529dc9b229aSThomas Gleixner 	 * underlying irq chip implementation, so a request for a
1530dc9b229aSThomas Gleixner 	 * threaded irq without a primary hard irq context handler
1531dc9b229aSThomas Gleixner 	 * requires the ONESHOT flag to be set. Some irq chips like
1532dc9b229aSThomas Gleixner 	 * MSI based interrupts are per se one shot safe. Check the
1533dc9b229aSThomas Gleixner 	 * chip flags, so we can avoid the unmask dance at the end of
1534dc9b229aSThomas Gleixner 	 * the threaded handler for those.
1535dc9b229aSThomas Gleixner 	 */
1536dc9b229aSThomas Gleixner 	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1537dc9b229aSThomas Gleixner 		new->flags &= ~IRQF_ONESHOT;
1538dc9b229aSThomas Gleixner 
153919d39a38SThomas Gleixner 	/*
154019d39a38SThomas Gleixner 	 * Protects against a concurrent __free_irq() call which might wait
1541519cc865SLukas Wunner 	 * for synchronize_hardirq() to complete without holding the optional
1542836557bdSLukas Wunner 	 * chip bus lock and desc->lock. Also protects against handing out
1543836557bdSLukas Wunner 	 * a recycled oneshot thread_mask bit while it's still in use by
1544836557bdSLukas Wunner 	 * its previous owner.
154519d39a38SThomas Gleixner 	 */
15469114014cSThomas Gleixner 	mutex_lock(&desc->request_mutex);
154719d39a38SThomas Gleixner 
154819d39a38SThomas Gleixner 	/*
154919d39a38SThomas Gleixner 	 * Acquire bus lock as the irq_request_resources() callback below
155019d39a38SThomas Gleixner 	 * might rely on the serialization or the magic power management
155119d39a38SThomas Gleixner 	 * functions which are abusing the irq_bus_lock() callback.
155219d39a38SThomas Gleixner 	 */
155319d39a38SThomas Gleixner 	chip_bus_lock(desc);
155419d39a38SThomas Gleixner 
155519d39a38SThomas Gleixner 	/* First installed action requests resources. */
155646e48e25SThomas Gleixner 	if (!desc->action) {
155746e48e25SThomas Gleixner 		ret = irq_request_resources(desc);
155846e48e25SThomas Gleixner 		if (ret) {
155946e48e25SThomas Gleixner 			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
156046e48e25SThomas Gleixner 			       new->name, irq, desc->irq_data.chip->name);
156119d39a38SThomas Gleixner 			goto out_bus_unlock;
156246e48e25SThomas Gleixner 		}
156346e48e25SThomas Gleixner 	}
15649114014cSThomas Gleixner 
1565dc9b229aSThomas Gleixner 	/*
15661da177e4SLinus Torvalds 	 * The following block of code has to be executed atomically
156719d39a38SThomas Gleixner 	 * protected against a concurrent interrupt and any of the other
156819d39a38SThomas Gleixner 	 * management calls which are not serialized via
156919d39a38SThomas Gleixner 	 * desc->request_mutex or the optional bus lock.
15701da177e4SLinus Torvalds 	 */
1571239007b8SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
1572f17c7545SIngo Molnar 	old_ptr = &desc->action;
1573f17c7545SIngo Molnar 	old = *old_ptr;
157406fcb0c6SIngo Molnar 	if (old) {
1575e76de9f8SThomas Gleixner 		/*
1576e76de9f8SThomas Gleixner 		 * Can't share interrupts unless both agree to and are
1577e76de9f8SThomas Gleixner 		 * the same type (level, edge, polarity). So both flag
15783cca53b0SThomas Gleixner 		 * fields must have IRQF_SHARED set and the bits which
15799d591eddSThomas Gleixner 		 * set the trigger type must match. Also all must
15809d591eddSThomas Gleixner 		 * agree on ONESHOT.
1581b525903cSJulien Thierry 		 * Interrupt lines used for NMIs cannot be shared.
1582e76de9f8SThomas Gleixner 		 */
15834f8413a3SMarc Zyngier 		unsigned int oldtype;
15844f8413a3SMarc Zyngier 
1585b525903cSJulien Thierry 		if (desc->istate & IRQS_NMI) {
1586b525903cSJulien Thierry 			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1587b525903cSJulien Thierry 				new->name, irq, desc->irq_data.chip->name);
1588b525903cSJulien Thierry 			ret = -EINVAL;
1589b525903cSJulien Thierry 			goto out_unlock;
1590b525903cSJulien Thierry 		}
1591b525903cSJulien Thierry 
15924f8413a3SMarc Zyngier 		/*
15934f8413a3SMarc Zyngier 		 * If nobody set the trigger configuration before, inherit
15944f8413a3SMarc Zyngier 		 * the one provided by the requester.
15954f8413a3SMarc Zyngier 		 */
15964f8413a3SMarc Zyngier 		if (irqd_trigger_type_was_set(&desc->irq_data)) {
15974f8413a3SMarc Zyngier 			oldtype = irqd_get_trigger_type(&desc->irq_data);
15984f8413a3SMarc Zyngier 		} else {
15994f8413a3SMarc Zyngier 			oldtype = new->flags & IRQF_TRIGGER_MASK;
16004f8413a3SMarc Zyngier 			irqd_set_trigger_type(&desc->irq_data, oldtype);
16014f8413a3SMarc Zyngier 		}
1602382bd4deSHans de Goede 
16033cca53b0SThomas Gleixner 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
1604382bd4deSHans de Goede 		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1605f5d89470SThomas Gleixner 		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
1606f5163427SDimitri Sivanich 			goto mismatch;
1607f5163427SDimitri Sivanich 
1608f5163427SDimitri Sivanich 		/* All handlers must agree on per-cpuness */
16093cca53b0SThomas Gleixner 		if ((old->flags & IRQF_PERCPU) !=
16103cca53b0SThomas Gleixner 		    (new->flags & IRQF_PERCPU))
1611f5163427SDimitri Sivanich 			goto mismatch;
16121da177e4SLinus Torvalds 
16131da177e4SLinus Torvalds 		/* add new interrupt at end of irq queue */
16141da177e4SLinus Torvalds 		do {
161552abb700SThomas Gleixner 			/*
161652abb700SThomas Gleixner 			 * Or all existing action->thread_mask bits,
161752abb700SThomas Gleixner 			 * so we can find the next zero bit for this
161852abb700SThomas Gleixner 			 * new action.
161952abb700SThomas Gleixner 			 */
1620b5faba21SThomas Gleixner 			thread_mask |= old->thread_mask;
1621f17c7545SIngo Molnar 			old_ptr = &old->next;
1622f17c7545SIngo Molnar 			old = *old_ptr;
16231da177e4SLinus Torvalds 		} while (old);
16241da177e4SLinus Torvalds 		shared = 1;
16251da177e4SLinus Torvalds 	}
16261da177e4SLinus Torvalds 
1627b5faba21SThomas Gleixner 	/*
162852abb700SThomas Gleixner 	 * Set up the thread mask for this irqaction for ONESHOT. For
162952abb700SThomas Gleixner 	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
163052abb700SThomas Gleixner 	 * conditional in irq_wake_thread().
1631b5faba21SThomas Gleixner 	 */
163252abb700SThomas Gleixner 	if (new->flags & IRQF_ONESHOT) {
163352abb700SThomas Gleixner 		/*
163452abb700SThomas Gleixner 		 * Unlikely to have 32 (or 64 on 64-bit) irqs sharing one line,
163552abb700SThomas Gleixner 		 * but who knows.
163652abb700SThomas Gleixner 		 */
163752abb700SThomas Gleixner 		if (thread_mask == ~0UL) {
1638b5faba21SThomas Gleixner 			ret = -EBUSY;
1639cba4235eSThomas Gleixner 			goto out_unlock;
1640b5faba21SThomas Gleixner 		}
164152abb700SThomas Gleixner 		/*
164252abb700SThomas Gleixner 		 * The thread_mask for the action is or'ed to
164352abb700SThomas Gleixner 		 * desc->threads_active to indicate that the
164452abb700SThomas Gleixner 		 * IRQF_ONESHOT thread handler has been woken, but not
164552abb700SThomas Gleixner 		 * yet finished. The bit is cleared when a thread
164652abb700SThomas Gleixner 		 * completes. When all threads of a shared interrupt
164752abb700SThomas Gleixner 		 * line have completed, desc->threads_active becomes
164852abb700SThomas Gleixner 		 * zero and the interrupt line is unmasked. See
164952abb700SThomas Gleixner 		 * handle.c:irq_wake_thread() for further information.
165052abb700SThomas Gleixner 		 *
165152abb700SThomas Gleixner 		 * If no thread is woken by primary (hard irq context)
165252abb700SThomas Gleixner 		 * interrupt handlers, then desc->threads_active is
165352abb700SThomas Gleixner 		 * also checked for zero to unmask the irq line in the
165452abb700SThomas Gleixner 		 * affected hard irq flow handlers
165552abb700SThomas Gleixner 		 * (handle_[fasteoi|level]_irq).
165652abb700SThomas Gleixner 		 *
165752abb700SThomas Gleixner 		 * The new action gets the first zero bit of
165852abb700SThomas Gleixner 		 * thread_mask assigned. See the loop above which or's
165952abb700SThomas Gleixner 		 * all existing action->thread_mask bits.
166052abb700SThomas Gleixner 		 */
1661ffc661c9SRasmus Villemoes 		new->thread_mask = 1UL << ffz(thread_mask);
16621c6c6952SThomas Gleixner 
1663dc9b229aSThomas Gleixner 	} else if (new->handler == irq_default_primary_handler &&
1664dc9b229aSThomas Gleixner 		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
16651c6c6952SThomas Gleixner 		/*
16661c6c6952SThomas Gleixner 		 * The interrupt was requested with handler = NULL, so
16671c6c6952SThomas Gleixner 		 * we use the default primary handler for it. But it
16681c6c6952SThomas Gleixner 		 * does not have the oneshot flag set. In combination
16691c6c6952SThomas Gleixner 		 * with level interrupts this is deadly, because the
16701c6c6952SThomas Gleixner 		 * default primary handler just wakes the thread, then
16711c6c6952SThomas Gleixner 		 * the irq line is reenabled, but the device still
16721c6c6952SThomas Gleixner 		 * has the level irq asserted. Rinse and repeat....
16731c6c6952SThomas Gleixner 		 *
16741c6c6952SThomas Gleixner 		 * While this works for edge type interrupts, we play
16751c6c6952SThomas Gleixner 		 * it safe and reject unconditionally because we can't
16761c6c6952SThomas Gleixner 		 * say for sure which type this interrupt really
16771c6c6952SThomas Gleixner 		 * has. The type flags are unreliable as the
16781c6c6952SThomas Gleixner 		 * underlying chip implementation can override them.
16791c6c6952SThomas Gleixner 		 */
1680025af39bSLuca Ceresoli 		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1681025af39bSLuca Ceresoli 		       new->name, irq);
16821c6c6952SThomas Gleixner 		ret = -EINVAL;
1683cba4235eSThomas Gleixner 		goto out_unlock;
168452abb700SThomas Gleixner 	}
1685b5faba21SThomas Gleixner 
16861da177e4SLinus Torvalds 	if (!shared) {
16873aa551c9SThomas Gleixner 		init_waitqueue_head(&desc->wait_for_threads);
16883aa551c9SThomas Gleixner 
168982736f4dSUwe Kleine-König 		/* Setup the type (level, edge polarity) if configured: */
169082736f4dSUwe Kleine-König 		if (new->flags & IRQF_TRIGGER_MASK) {
1691a1ff541aSJiang Liu 			ret = __irq_set_trigger(desc,
1692f2b662daSDavid Brownell 						new->flags & IRQF_TRIGGER_MASK);
169382736f4dSUwe Kleine-König 
169419d39a38SThomas Gleixner 			if (ret)
1695cba4235eSThomas Gleixner 				goto out_unlock;
1696091738a2SThomas Gleixner 		}
1697f75d222bSAhmed S. Darwish 
1698c942cee4SThomas Gleixner 		/*
1699c942cee4SThomas Gleixner 		 * Activate the interrupt. That activation must happen
1700c942cee4SThomas Gleixner 		 * independently of IRQ_NOAUTOEN. request_irq() can fail
1701c942cee4SThomas Gleixner 		 * and the callers are supposed to handle
1702c942cee4SThomas Gleixner 		 * that. enable_irq() of an interrupt requested with
1703c942cee4SThomas Gleixner 		 * IRQ_NOAUTOEN is not supposed to fail. The activation
1704c942cee4SThomas Gleixner 		 * keeps it in shutdown mode; it merely associates
1705c942cee4SThomas Gleixner 		 * resources if necessary and if that's not possible it
1706c942cee4SThomas Gleixner 		 * fails. Interrupts which are in managed shutdown mode
1707c942cee4SThomas Gleixner 		 * will simply ignore that activation request.
1708c942cee4SThomas Gleixner 		 */
1709c942cee4SThomas Gleixner 		ret = irq_activate(desc);
1710c942cee4SThomas Gleixner 		if (ret)
1711c942cee4SThomas Gleixner 			goto out_unlock;
1712c942cee4SThomas Gleixner 
1713009b4c3bSThomas Gleixner 		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
171432f4125eSThomas Gleixner 				  IRQS_ONESHOT | IRQS_WAITING);
171532f4125eSThomas Gleixner 		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
171694d39e1fSThomas Gleixner 
1717a005677bSThomas Gleixner 		if (new->flags & IRQF_PERCPU) {
1718a005677bSThomas Gleixner 			irqd_set(&desc->irq_data, IRQD_PER_CPU);
1719a005677bSThomas Gleixner 			irq_settings_set_per_cpu(desc);
1720c2b1063eSThomas Gleixner 			if (new->flags & IRQF_NO_DEBUG)
1721c2b1063eSThomas Gleixner 				irq_settings_set_no_debug(desc);
1722a005677bSThomas Gleixner 		}
17236a58fb3bSThomas Gleixner 
1724c2b1063eSThomas Gleixner 		if (noirqdebug)
1725c2b1063eSThomas Gleixner 			irq_settings_set_no_debug(desc);
1726c2b1063eSThomas Gleixner 
1727b25c340cSThomas Gleixner 		if (new->flags & IRQF_ONESHOT)
17283d67baecSThomas Gleixner 			desc->istate |= IRQS_ONESHOT;
1729b25c340cSThomas Gleixner 
17302e051552SThomas Gleixner 		/* Exclude IRQ from balancing if requested */
17312e051552SThomas Gleixner 		if (new->flags & IRQF_NOBALANCING) {
17322e051552SThomas Gleixner 			irq_settings_set_no_balancing(desc);
17332e051552SThomas Gleixner 			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
17342e051552SThomas Gleixner 		}
17352e051552SThomas Gleixner 
1736cbe16f35SBarry Song 		if (!(new->flags & IRQF_NO_AUTOEN) &&
1737cbe16f35SBarry Song 		    irq_settings_can_autoenable(desc)) {
17384cde9c6bSThomas Gleixner 			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
173904c848d3SThomas Gleixner 		} else {
174004c848d3SThomas Gleixner 			/*
174104c848d3SThomas Gleixner 			 * Shared interrupts do not go well with disabling
174204c848d3SThomas Gleixner 			 * auto enable. The sharing partner might request
174304c848d3SThomas Gleixner 			 * the irq while it is still disabled and then wait for
174404c848d3SThomas Gleixner 			 * interrupts forever.
174504c848d3SThomas Gleixner 			 */
174604c848d3SThomas Gleixner 			WARN_ON_ONCE(new->flags & IRQF_SHARED);
1747e76de9f8SThomas Gleixner 			/* Undo nested disables: */
1748e76de9f8SThomas Gleixner 			desc->depth = 1;
174904c848d3SThomas Gleixner 		}
175018404756SMax Krasnyansky 
1751876dbd4cSThomas Gleixner 	} else if (new->flags & IRQF_TRIGGER_MASK) {
1752876dbd4cSThomas Gleixner 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
17537ee7e87dSThomas Gleixner 		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1754876dbd4cSThomas Gleixner 
1755876dbd4cSThomas Gleixner 		if (nmsk != omsk)
1756876dbd4cSThomas Gleixner 			/* hope the handler works with current trigger mode */
1757a395d6a7SJoe Perches 			pr_warn("irq %d uses trigger mode %u; requested %u\n",
17587ee7e87dSThomas Gleixner 				irq, omsk, nmsk);
175994d39e1fSThomas Gleixner 	}
176082736f4dSUwe Kleine-König 
1761f17c7545SIngo Molnar 	*old_ptr = new;
176282736f4dSUwe Kleine-König 
1763cab303beSThomas Gleixner 	irq_pm_install_action(desc, new);
1764cab303beSThomas Gleixner 
17658528b0f1SLinus Torvalds 	/* Reset broken irq detection when installing new handler */
17668528b0f1SLinus Torvalds 	desc->irq_count = 0;
17678528b0f1SLinus Torvalds 	desc->irqs_unhandled = 0;
17681adb0850SThomas Gleixner 
17691adb0850SThomas Gleixner 	/*
17701adb0850SThomas Gleixner 	 * Check whether we disabled the irq via the spurious handler
17711adb0850SThomas Gleixner 	 * before. Reenable it and give it another chance.
17721adb0850SThomas Gleixner 	 */
17737acdd53eSThomas Gleixner 	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
17747acdd53eSThomas Gleixner 		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
177579ff1cdaSJiang Liu 		__enable_irq(desc);
17761adb0850SThomas Gleixner 	}
17771adb0850SThomas Gleixner 
1778239007b8SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
17793a90795eSThomas Gleixner 	chip_bus_sync_unlock(desc);
17809114014cSThomas Gleixner 	mutex_unlock(&desc->request_mutex);
17811da177e4SLinus Torvalds 
1782b2d3d61aSDaniel Lezcano 	irq_setup_timings(desc, new);
1783b2d3d61aSDaniel Lezcano 
178469ab8494SThomas Gleixner 	/*
178569ab8494SThomas Gleixner 	 * Strictly no need to wake it up, but hung_task complains
178669ab8494SThomas Gleixner 	 * when no hard interrupt wakes the thread up.
178769ab8494SThomas Gleixner 	 */
178869ab8494SThomas Gleixner 	if (new->thread)
178969ab8494SThomas Gleixner 		wake_up_process(new->thread);
17902a1d3ab8SThomas Gleixner 	if (new->secondary)
17912a1d3ab8SThomas Gleixner 		wake_up_process(new->secondary->thread);
179269ab8494SThomas Gleixner 
17932c6927a3SYinghai Lu 	register_irq_proc(irq, desc);
17941da177e4SLinus Torvalds 	new->dir = NULL;
17951da177e4SLinus Torvalds 	register_handler_proc(irq, new);
17961da177e4SLinus Torvalds 	return 0;
1797f5163427SDimitri Sivanich 
1798f5163427SDimitri Sivanich mismatch:
17993cca53b0SThomas Gleixner 	if (!(new->flags & IRQF_PROBE_SHARED)) {
180097fd75b7SAndrew Morton 		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1801f5d89470SThomas Gleixner 		       irq, new->flags, new->name, old->flags, old->name);
1802f5d89470SThomas Gleixner #ifdef CONFIG_DEBUG_SHIRQ
1803f5163427SDimitri Sivanich 		dump_stack();
18043f050447SAlan Cox #endif
1805f5d89470SThomas Gleixner 	}
18063aa551c9SThomas Gleixner 	ret = -EBUSY;
18073aa551c9SThomas Gleixner 
1808cba4235eSThomas Gleixner out_unlock:
18091c389795SDan Carpenter 	raw_spin_unlock_irqrestore(&desc->lock, flags);
18103b8249e7SThomas Gleixner 
181146e48e25SThomas Gleixner 	if (!desc->action)
181246e48e25SThomas Gleixner 		irq_release_resources(desc);
181319d39a38SThomas Gleixner out_bus_unlock:
181419d39a38SThomas Gleixner 	chip_bus_sync_unlock(desc);
18159114014cSThomas Gleixner 	mutex_unlock(&desc->request_mutex);
18169114014cSThomas Gleixner 
18173aa551c9SThomas Gleixner out_thread:
18183aa551c9SThomas Gleixner 	if (new->thread) {
18193aa551c9SThomas Gleixner 		struct task_struct *t = new->thread;
18203aa551c9SThomas Gleixner 
18213aa551c9SThomas Gleixner 		new->thread = NULL;
18223aa551c9SThomas Gleixner 		kthread_stop(t);
18233aa551c9SThomas Gleixner 		put_task_struct(t);
18243aa551c9SThomas Gleixner 	}
18252a1d3ab8SThomas Gleixner 	if (new->secondary && new->secondary->thread) {
18262a1d3ab8SThomas Gleixner 		struct task_struct *t = new->secondary->thread;
18272a1d3ab8SThomas Gleixner 
18282a1d3ab8SThomas Gleixner 		new->secondary->thread = NULL;
18292a1d3ab8SThomas Gleixner 		kthread_stop(t);
18302a1d3ab8SThomas Gleixner 		put_task_struct(t);
18312a1d3ab8SThomas Gleixner 	}
1832b6873807SSebastian Andrzej Siewior out_mput:
1833b6873807SSebastian Andrzej Siewior 	module_put(desc->owner);
18343aa551c9SThomas Gleixner 	return ret;
18351da177e4SLinus Torvalds }
18361da177e4SLinus Torvalds 
1837cbf94f06SMagnus Damm /*
1838cbf94f06SMagnus Damm  * Internal function to unregister an irqaction - used to free
1839cbf94f06SMagnus Damm  * regular and special interrupts that are part of the architecture.
18401da177e4SLinus Torvalds  */
184183ac4ca9SUwe Kleine König static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
18421da177e4SLinus Torvalds {
184383ac4ca9SUwe Kleine König 	unsigned irq = desc->irq_data.irq;
1844f17c7545SIngo Molnar 	struct irqaction *action, **action_ptr;
18451da177e4SLinus Torvalds 	unsigned long flags;
18461da177e4SLinus Torvalds 
1847ae88a23bSIngo Molnar 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
18487d94f7caSYinghai Lu 
18499114014cSThomas Gleixner 	mutex_lock(&desc->request_mutex);
1850abc7e40cSThomas Gleixner 	chip_bus_lock(desc);
1851239007b8SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
1852ae88a23bSIngo Molnar 
1853ae88a23bSIngo Molnar 	/*
1854ae88a23bSIngo Molnar 	 * There can be multiple actions per IRQ descriptor, find the right
1855ae88a23bSIngo Molnar 	 * one based on the dev_id:
1856ae88a23bSIngo Molnar 	 */
1857f17c7545SIngo Molnar 	action_ptr = &desc->action;
18581da177e4SLinus Torvalds 	for (;;) {
1859f17c7545SIngo Molnar 		action = *action_ptr;
18601da177e4SLinus Torvalds 
1861ae88a23bSIngo Molnar 		if (!action) {
1862ae88a23bSIngo Molnar 			WARN(1, "Trying to free already-free IRQ %d\n", irq);
1863239007b8SThomas Gleixner 			raw_spin_unlock_irqrestore(&desc->lock, flags);
1864abc7e40cSThomas Gleixner 			chip_bus_sync_unlock(desc);
186519d39a38SThomas Gleixner 			mutex_unlock(&desc->request_mutex);
1866f21cfb25SMagnus Damm 			return NULL;
1867ae88a23bSIngo Molnar 		}
18681da177e4SLinus Torvalds 
18698316e381SIngo Molnar 		if (action->dev_id == dev_id)
1870ae88a23bSIngo Molnar 			break;
1871f17c7545SIngo Molnar 		action_ptr = &action->next;
1872ae88a23bSIngo Molnar 	}
1873ae88a23bSIngo Molnar 
1874ae88a23bSIngo Molnar 	/* Found it - now remove it from the list of entries: */
1875f17c7545SIngo Molnar 	*action_ptr = action->next;
1876dbce706eSPaolo 'Blaisorblade' Giarrusso 
1877cab303beSThomas Gleixner 	irq_pm_remove_action(desc, action);
1878cab303beSThomas Gleixner 
1879ae88a23bSIngo Molnar 	/* If this was the last handler, shut down the IRQ line: */
1880c1bacbaeSThomas Gleixner 	if (!desc->action) {
1881e9849777SThomas Gleixner 		irq_settings_clr_disable_unlazy(desc);
18824001d8e8SThomas Gleixner 		/* Only shutdown. Deactivate after synchronize_hardirq() */
188346999238SThomas Gleixner 		irq_shutdown(desc);
1884c1bacbaeSThomas Gleixner 	}
18853aa551c9SThomas Gleixner 
1886e7a297b0SPeter P Waskiewicz Jr #ifdef CONFIG_SMP
1887e7a297b0SPeter P Waskiewicz Jr 	/* make sure affinity_hint is cleaned up */
1888e7a297b0SPeter P Waskiewicz Jr 	if (WARN_ON_ONCE(desc->affinity_hint))
1889e7a297b0SPeter P Waskiewicz Jr 		desc->affinity_hint = NULL;
1890e7a297b0SPeter P Waskiewicz Jr #endif
1891e7a297b0SPeter P Waskiewicz Jr 
1892239007b8SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
189319d39a38SThomas Gleixner 	/*
189419d39a38SThomas Gleixner 	 * Drop bus_lock here so the changes which were done in the chip
189519d39a38SThomas Gleixner 	 * callbacks above are synced out to the irq chips which hang
1896519cc865SLukas Wunner 	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
189719d39a38SThomas Gleixner 	 *
189819d39a38SThomas Gleixner 	 * Aside from that, the bus_lock can also be taken from the threaded
189919d39a38SThomas Gleixner 	 * handler in irq_finalize_oneshot() which results in a deadlock
1900519cc865SLukas Wunner 	 * because kthread_stop() would wait forever for the thread to
190119d39a38SThomas Gleixner 	 * complete, which is blocked on the bus lock.
190219d39a38SThomas Gleixner 	 *
190319d39a38SThomas Gleixner 	 * The still held desc->request_mutex protects against a
190419d39a38SThomas Gleixner 	 * concurrent request_irq() of this irq so the release of resources
190519d39a38SThomas Gleixner 	 * and timing data is properly serialized.
190619d39a38SThomas Gleixner 	 */
1907abc7e40cSThomas Gleixner 	chip_bus_sync_unlock(desc);
1908ae88a23bSIngo Molnar 
19091da177e4SLinus Torvalds 	unregister_handler_proc(irq, action);
19101da177e4SLinus Torvalds 
191162e04686SThomas Gleixner 	/*
191262e04686SThomas Gleixner 	 * Make sure it's not being used on another CPU and if the chip
191362e04686SThomas Gleixner 	 * supports it also make sure that there is no (not yet serviced)
191462e04686SThomas Gleixner 	 * interrupt in flight at the hardware level.
191562e04686SThomas Gleixner 	 */
191662e04686SThomas Gleixner 	__synchronize_hardirq(desc, true);
1917ae88a23bSIngo Molnar 
19181d99493bSDavid Woodhouse #ifdef CONFIG_DEBUG_SHIRQ
19191d99493bSDavid Woodhouse 	/*
1920ae88a23bSIngo Molnar 	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1921ae88a23bSIngo Molnar 	 * event to happen even now that it's being freed, so let's make sure that
1922ae88a23bSIngo Molnar 	 * is so by doing an extra call to the handler ....
1923ae88a23bSIngo Molnar 	 *
1924ae88a23bSIngo Molnar 	 * ( We do this after actually deregistering it, to make sure that a
19250a13ec0bSJonathan Neuschäfer 	 *   'real' IRQ doesn't run in parallel with our fake. )
19261d99493bSDavid Woodhouse 	 */
19271d99493bSDavid Woodhouse 	if (action->flags & IRQF_SHARED) {
19281d99493bSDavid Woodhouse 		local_irq_save(flags);
19291d99493bSDavid Woodhouse 		action->handler(irq, dev_id);
19301d99493bSDavid Woodhouse 		local_irq_restore(flags);
19311d99493bSDavid Woodhouse 	}
19321d99493bSDavid Woodhouse #endif
19332d860ad7SLinus Torvalds 
1934519cc865SLukas Wunner 	/*
1935519cc865SLukas Wunner 	 * The action has already been removed above, but the thread writes
1936519cc865SLukas Wunner 	 * its oneshot mask bit when it completes. However, request_mutex is
1937519cc865SLukas Wunner 	 * held across this, which prevents __setup_irq() from handing out
1938519cc865SLukas Wunner 	 * the same bit to a newly requested action.
1939519cc865SLukas Wunner 	 */
19402d860ad7SLinus Torvalds 	if (action->thread) {
19412d860ad7SLinus Torvalds 		kthread_stop(action->thread);
19422d860ad7SLinus Torvalds 		put_task_struct(action->thread);
19432a1d3ab8SThomas Gleixner 		if (action->secondary && action->secondary->thread) {
19442a1d3ab8SThomas Gleixner 			kthread_stop(action->secondary->thread);
19452a1d3ab8SThomas Gleixner 			put_task_struct(action->secondary->thread);
19462a1d3ab8SThomas Gleixner 		}
19472d860ad7SLinus Torvalds 	}
19482d860ad7SLinus Torvalds 
194919d39a38SThomas Gleixner 	/* Last action releases resources */
19502343877fSThomas Gleixner 	if (!desc->action) {
195119d39a38SThomas Gleixner 		/*
1952a359f757SIngo Molnar 		 * Reacquire bus lock as irq_release_resources() might
195319d39a38SThomas Gleixner 		 * require it to deallocate resources over the slow bus.
195419d39a38SThomas Gleixner 		 */
195519d39a38SThomas Gleixner 		chip_bus_lock(desc);
19564001d8e8SThomas Gleixner 		/*
19574001d8e8SThomas Gleixner 		 * There is no interrupt on the fly anymore. Deactivate it
19584001d8e8SThomas Gleixner 		 * completely.
19594001d8e8SThomas Gleixner 		 */
19604001d8e8SThomas Gleixner 		raw_spin_lock_irqsave(&desc->lock, flags);
19614001d8e8SThomas Gleixner 		irq_domain_deactivate_irq(&desc->irq_data);
19624001d8e8SThomas Gleixner 		raw_spin_unlock_irqrestore(&desc->lock, flags);
19634001d8e8SThomas Gleixner 
196446e48e25SThomas Gleixner 		irq_release_resources(desc);
196519d39a38SThomas Gleixner 		chip_bus_sync_unlock(desc);
19662343877fSThomas Gleixner 		irq_remove_timings(desc);
19672343877fSThomas Gleixner 	}
196846e48e25SThomas Gleixner 
19699114014cSThomas Gleixner 	mutex_unlock(&desc->request_mutex);
19709114014cSThomas Gleixner 
1971be45beb2SJon Hunter 	irq_chip_pm_put(&desc->irq_data);
1972b6873807SSebastian Andrzej Siewior 	module_put(desc->owner);
19732a1d3ab8SThomas Gleixner 	kfree(action->secondary);
1974f21cfb25SMagnus Damm 	return action;
1975f21cfb25SMagnus Damm }
19761da177e4SLinus Torvalds 
19771da177e4SLinus Torvalds /**
1978f21cfb25SMagnus Damm  *	free_irq - free an interrupt allocated with request_irq
19791da177e4SLinus Torvalds  *	@irq: Interrupt line to free
19801da177e4SLinus Torvalds  *	@dev_id: Device identity to free
19811da177e4SLinus Torvalds  *
19821da177e4SLinus Torvalds  *	Remove an interrupt handler. The handler is removed and if the
19831da177e4SLinus Torvalds  *	interrupt line is no longer in use by any driver it is disabled.
19841da177e4SLinus Torvalds  *	On a shared IRQ the caller must ensure the interrupt is disabled
19851da177e4SLinus Torvalds  *	on the card it drives before calling this function. The function
19861da177e4SLinus Torvalds  *	does not return until any executing interrupts for this IRQ
19871da177e4SLinus Torvalds  *	have completed.
19881da177e4SLinus Torvalds  *
19891da177e4SLinus Torvalds  *	This function must not be called from interrupt context.
199025ce4be7SChristoph Hellwig  *
199125ce4be7SChristoph Hellwig  *	Returns the devname argument passed to request_irq.
19921da177e4SLinus Torvalds  */
199325ce4be7SChristoph Hellwig const void *free_irq(unsigned int irq, void *dev_id)
19941da177e4SLinus Torvalds {
199570aedd24SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
199625ce4be7SChristoph Hellwig 	struct irqaction *action;
199725ce4be7SChristoph Hellwig 	const char *devname;
199870aedd24SThomas Gleixner 
199931d9d9b6SMarc Zyngier 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
200025ce4be7SChristoph Hellwig 		return NULL;
200170aedd24SThomas Gleixner 
2002cd7eab44SBen Hutchings #ifdef CONFIG_SMP
2003cd7eab44SBen Hutchings 	if (WARN_ON(desc->affinity_notify))
2004cd7eab44SBen Hutchings 		desc->affinity_notify = NULL;
2005cd7eab44SBen Hutchings #endif
2006cd7eab44SBen Hutchings 
200783ac4ca9SUwe Kleine König 	action = __free_irq(desc, dev_id);
20082827a418SAlexandru Moise 
20092827a418SAlexandru Moise 	if (!action)
20102827a418SAlexandru Moise 		return NULL;
20112827a418SAlexandru Moise 
201225ce4be7SChristoph Hellwig 	devname = action->name;
201325ce4be7SChristoph Hellwig 	kfree(action);
201425ce4be7SChristoph Hellwig 	return devname;
20151da177e4SLinus Torvalds }
20161da177e4SLinus Torvalds EXPORT_SYMBOL(free_irq);
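
/*
 * Illustrative request/free pairing, not part of this file: a threaded
 * handler split into a quick hard-irq check and a sleeping thread part,
 * released again with the same dev_id cookie. my_quick_check, my_slow_work
 * and my_dev are hypothetical names.
 */
#include <linux/interrupt.h>

static irqreturn_t my_quick_check(int irq, void *dev_id)
{
	/*
	 * Hard irq context: verify (e.g. with a fast register read) that
	 * our device raised the line; IRQ_NONE lets shared-irq partners run.
	 */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_slow_work(int irq, void *dev_id)
{
	/* irq thread: may sleep, e.g. talk to the device over I2C */
	return IRQ_HANDLED;
}

static int my_attach(int irq, void *my_dev)
{
	/* ONESHOT keeps a level-triggered line masked until my_slow_work returns */
	return request_threaded_irq(irq, my_quick_check, my_slow_work,
				    IRQF_ONESHOT, "my-device", my_dev);
}

static void my_detach(int irq, void *my_dev)
{
	/* Must use the same dev_id; free_irq() waits for running handlers */
	free_irq(irq, my_dev);
}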
20171da177e4SLinus Torvalds 
2018b525903cSJulien Thierry /* This function must be called with desc->lock held */
2019b525903cSJulien Thierry static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
2020b525903cSJulien Thierry {
2021b525903cSJulien Thierry 	const char *devname = NULL;
2022b525903cSJulien Thierry 
2023b525903cSJulien Thierry 	desc->istate &= ~IRQS_NMI;
2024b525903cSJulien Thierry 
2025b525903cSJulien Thierry 	if (!WARN_ON(desc->action == NULL)) {
2026b525903cSJulien Thierry 		irq_pm_remove_action(desc, desc->action);
2027b525903cSJulien Thierry 		devname = desc->action->name;
2028b525903cSJulien Thierry 		unregister_handler_proc(irq, desc->action);
2029b525903cSJulien Thierry 
2030b525903cSJulien Thierry 		kfree(desc->action);
2031b525903cSJulien Thierry 		desc->action = NULL;
2032b525903cSJulien Thierry 	}
2033b525903cSJulien Thierry 
2034b525903cSJulien Thierry 	irq_settings_clr_disable_unlazy(desc);
20354001d8e8SThomas Gleixner 	irq_shutdown_and_deactivate(desc);
2036b525903cSJulien Thierry 
2037b525903cSJulien Thierry 	irq_release_resources(desc);
2038b525903cSJulien Thierry 
2039b525903cSJulien Thierry 	irq_chip_pm_put(&desc->irq_data);
2040b525903cSJulien Thierry 	module_put(desc->owner);
2041b525903cSJulien Thierry 
2042b525903cSJulien Thierry 	return devname;
2043b525903cSJulien Thierry }
2044b525903cSJulien Thierry 
2045b525903cSJulien Thierry const void *free_nmi(unsigned int irq, void *dev_id)
2046b525903cSJulien Thierry {
2047b525903cSJulien Thierry 	struct irq_desc *desc = irq_to_desc(irq);
2048b525903cSJulien Thierry 	unsigned long flags;
2049b525903cSJulien Thierry 	const void *devname;
2050b525903cSJulien Thierry 
2051b525903cSJulien Thierry 	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
2052b525903cSJulien Thierry 		return NULL;
2053b525903cSJulien Thierry 
2054b525903cSJulien Thierry 	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2055b525903cSJulien Thierry 		return NULL;
2056b525903cSJulien Thierry 
2057b525903cSJulien Thierry 	/* NMI still enabled */
2058b525903cSJulien Thierry 	if (WARN_ON(desc->depth == 0))
2059b525903cSJulien Thierry 		disable_nmi_nosync(irq);
2060b525903cSJulien Thierry 
2061b525903cSJulien Thierry 	raw_spin_lock_irqsave(&desc->lock, flags);
2062b525903cSJulien Thierry 
2063b525903cSJulien Thierry 	irq_nmi_teardown(desc);
2064b525903cSJulien Thierry 	devname = __cleanup_nmi(irq, desc);
2065b525903cSJulien Thierry 
2066b525903cSJulien Thierry 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2067b525903cSJulien Thierry 
2068b525903cSJulien Thierry 	return devname;
2069b525903cSJulien Thierry }
2070b525903cSJulien Thierry 
20711da177e4SLinus Torvalds /**
20723aa551c9SThomas Gleixner  *	request_threaded_irq - allocate an interrupt line
20731da177e4SLinus Torvalds  *	@irq: Interrupt line to allocate
20743aa551c9SThomas Gleixner  *	@handler: Function to be called when the IRQ occurs.
2075*61377ec1SJoel Savitz  *		  Primary handler for threaded interrupts.
2076*61377ec1SJoel Savitz  *		  If handler is NULL and thread_fn != NULL
2077*61377ec1SJoel Savitz  *		  the default primary handler is installed.
20783aa551c9SThomas Gleixner  *	@thread_fn: Function called from the irq handler thread
20793aa551c9SThomas Gleixner  *		    If NULL, no irq thread is created
20801da177e4SLinus Torvalds  *	@irqflags: Interrupt type flags
20811da177e4SLinus Torvalds  *	@devname: An ascii name for the claiming device
20821da177e4SLinus Torvalds  *	@dev_id: A cookie passed back to the handler function
20831da177e4SLinus Torvalds  *
20841da177e4SLinus Torvalds  *	This call allocates interrupt resources and enables the
20851da177e4SLinus Torvalds  *	interrupt line and IRQ handling. From the point this
20861da177e4SLinus Torvalds  *	call is made your handler function may be invoked. Since
20871da177e4SLinus Torvalds  *	your handler function must clear any interrupt the board
20881da177e4SLinus Torvalds  *	raises, you must take care both to initialise your hardware
20891da177e4SLinus Torvalds  *	and to set up the interrupt handler in the right order.
20901da177e4SLinus Torvalds  *
20913aa551c9SThomas Gleixner  *	If you want to set up a threaded irq handler for your device
20926d21af4fSJavi Merino  *	then you need to supply @handler and @thread_fn. @handler is
20933aa551c9SThomas Gleixner  *	still called in hard interrupt context and has to check
20943aa551c9SThomas Gleixner  *	whether the interrupt originates from the device. If yes it
20953aa551c9SThomas Gleixner  *	needs to disable the interrupt on the device and return
209639a2eddbSSteven Rostedt  *	IRQ_WAKE_THREAD which will wake up the handler thread and run
20973aa551c9SThomas Gleixner  *	@thread_fn. This split handler design is necessary to support
20983aa551c9SThomas Gleixner  *	shared interrupts.
20993aa551c9SThomas Gleixner  *
21001da177e4SLinus Torvalds  *	Dev_id must be globally unique. Normally the address of the
21011da177e4SLinus Torvalds  *	device data structure is used as the cookie. Since the handler
21021da177e4SLinus Torvalds  *	receives this value it makes sense to use it.
21031da177e4SLinus Torvalds  *
21041da177e4SLinus Torvalds  *	If your interrupt is shared you must pass a non NULL dev_id
21051da177e4SLinus Torvalds  *	as this is required when freeing the interrupt.
21061da177e4SLinus Torvalds  *
21071da177e4SLinus Torvalds  *	Flags:
21081da177e4SLinus Torvalds  *
21093cca53b0SThomas Gleixner  *	IRQF_SHARED		Interrupt is shared
21100c5d1eb7SDavid Brownell  *	IRQF_TRIGGER_*		Specify active edge(s) or level
2111*61377ec1SJoel Savitz  *	IRQF_ONESHOT		Do not unmask interrupt line until
2112*61377ec1SJoel Savitz  *				thread_fn returns
21131da177e4SLinus Torvalds  *
21141da177e4SLinus Torvalds  */
21153aa551c9SThomas Gleixner int request_threaded_irq(unsigned int irq, irq_handler_t handler,
21163aa551c9SThomas Gleixner 			 irq_handler_t thread_fn, unsigned long irqflags,
21173aa551c9SThomas Gleixner 			 const char *devname, void *dev_id)
21181da177e4SLinus Torvalds {
21191da177e4SLinus Torvalds 	struct irqaction *action;
212008678b08SYinghai Lu 	struct irq_desc *desc;
2121d3c60047SThomas Gleixner 	int retval;
21221da177e4SLinus Torvalds 
2123e237a551SChen Fan 	if (irq == IRQ_NOTCONNECTED)
2124e237a551SChen Fan 		return -ENOTCONN;
2125e237a551SChen Fan 
2126470c6623SDavid Brownell 	/*
21271da177e4SLinus Torvalds 	 * Sanity-check: shared interrupts must pass in a real dev-ID,
21281da177e4SLinus Torvalds 	 * otherwise we'll have trouble later trying to figure out
21291da177e4SLinus Torvalds 	 * which interrupt is which (messes up the interrupt freeing
21301da177e4SLinus Torvalds 	 * logic etc).
213117f48034SRafael J. Wysocki 	 *
2132cbe16f35SBarry Song 	 * Also, shared interrupts do not go well with disabling auto enable:
2133cbe16f35SBarry Song 	 * a sharing request might arrive while the line is still disabled
2134cbe16f35SBarry Song 	 * and the requester would then wait for interrupts forever.
2135cbe16f35SBarry Song 	 *
213617f48034SRafael J. Wysocki 	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
213717f48034SRafael J. Wysocki 	 * it cannot be set along with IRQF_NO_SUSPEND.
21381da177e4SLinus Torvalds 	 */
213917f48034SRafael J. Wysocki 	if (((irqflags & IRQF_SHARED) && !dev_id) ||
2140cbe16f35SBarry Song 	    ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
214117f48034SRafael J. Wysocki 	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
214217f48034SRafael J. Wysocki 	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
21431da177e4SLinus Torvalds 		return -EINVAL;
21447d94f7caSYinghai Lu 
2145cb5bc832SYinghai Lu 	desc = irq_to_desc(irq);
21467d94f7caSYinghai Lu 	if (!desc)
21471da177e4SLinus Torvalds 		return -EINVAL;
21487d94f7caSYinghai Lu 
214931d9d9b6SMarc Zyngier 	if (!irq_settings_can_request(desc) ||
215031d9d9b6SMarc Zyngier 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
21516550c775SThomas Gleixner 		return -EINVAL;
2152b25c340cSThomas Gleixner 
2153b25c340cSThomas Gleixner 	if (!handler) {
2154b25c340cSThomas Gleixner 		if (!thread_fn)
21551da177e4SLinus Torvalds 			return -EINVAL;
2156b25c340cSThomas Gleixner 		handler = irq_default_primary_handler;
2157b25c340cSThomas Gleixner 	}
21581da177e4SLinus Torvalds 
215945535732SThomas Gleixner 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
21601da177e4SLinus Torvalds 	if (!action)
21611da177e4SLinus Torvalds 		return -ENOMEM;
21621da177e4SLinus Torvalds 
21631da177e4SLinus Torvalds 	action->handler = handler;
21643aa551c9SThomas Gleixner 	action->thread_fn = thread_fn;
21651da177e4SLinus Torvalds 	action->flags = irqflags;
21661da177e4SLinus Torvalds 	action->name = devname;
21671da177e4SLinus Torvalds 	action->dev_id = dev_id;
21681da177e4SLinus Torvalds 
2169be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
21704396f46cSShawn Lin 	if (retval < 0) {
21714396f46cSShawn Lin 		kfree(action);
2172be45beb2SJon Hunter 		return retval;
21734396f46cSShawn Lin 	}
2174be45beb2SJon Hunter 
2175d3c60047SThomas Gleixner 	retval = __setup_irq(irq, desc, action);
217670aedd24SThomas Gleixner 
21772a1d3ab8SThomas Gleixner 	if (retval) {
2178be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
21792a1d3ab8SThomas Gleixner 		kfree(action->secondary);
2180377bf1e4SAnton Vorontsov 		kfree(action);
21812a1d3ab8SThomas Gleixner 	}
2182377bf1e4SAnton Vorontsov 
21836d83f94dSThomas Gleixner #ifdef CONFIG_DEBUG_SHIRQ_FIXME
21846ce51c43SLuis Henriques 	if (!retval && (irqflags & IRQF_SHARED)) {
2185a304e1b8SDavid Woodhouse 		/*
2186a304e1b8SDavid Woodhouse 		 * It's a shared IRQ -- the driver ought to be prepared for it
2187a304e1b8SDavid Woodhouse 		 * to happen immediately, so let's make sure....
2188377bf1e4SAnton Vorontsov 		 * We disable the irq to make sure that a 'real' IRQ doesn't
2189377bf1e4SAnton Vorontsov 		 * run in parallel with our fake.
2190a304e1b8SDavid Woodhouse 		 */
2191a304e1b8SDavid Woodhouse 		unsigned long flags;
2192a304e1b8SDavid Woodhouse 
2193377bf1e4SAnton Vorontsov 		disable_irq(irq);
2194a304e1b8SDavid Woodhouse 		local_irq_save(flags);
2195377bf1e4SAnton Vorontsov 
2196a304e1b8SDavid Woodhouse 		handler(irq, dev_id);
2197377bf1e4SAnton Vorontsov 
2198a304e1b8SDavid Woodhouse 		local_irq_restore(flags);
2199377bf1e4SAnton Vorontsov 		enable_irq(irq);
2200a304e1b8SDavid Woodhouse 	}
2201a304e1b8SDavid Woodhouse #endif
22021da177e4SLinus Torvalds 	return retval;
22031da177e4SLinus Torvalds }
22043aa551c9SThomas Gleixner EXPORT_SYMBOL(request_threaded_irq);
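
/*
 * Usage sketch for request_threaded_irq(): a hypothetical "foo" driver whose
 * primary handler only checks and silences the device, deferring the slow
 * work to the threaded handler. The foo_* names and register offsets are
 * assumptions made purely for illustration.
 */
#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>

struct foo_dev {
	void __iomem *regs;
	int irq;
};

static irqreturn_t foo_primary(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	/* Shared line: if the device did not assert it, let others handle it. */
	if (!(readl(foo->regs + 0x00) & BIT(0)))
		return IRQ_NONE;

	/* Silence the device, then hand off to the thread. */
	writel(BIT(0), foo->regs + 0x04);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	/* Runs in a sleepable kernel thread: mutexes, allocations, etc. are fine. */
	/* ... process the event ... */
	writel(BIT(0), foo->regs + 0x08);	/* re-arm the device interrupt */
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *foo)
{
	return request_threaded_irq(foo->irq, foo_primary, foo_thread,
				    IRQF_SHARED, "foo", foo);
}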
2205ae731f8dSMarc Zyngier 
2206ae731f8dSMarc Zyngier /**
2207ae731f8dSMarc Zyngier  *	request_any_context_irq - allocate an interrupt line
2208ae731f8dSMarc Zyngier  *	@irq: Interrupt line to allocate
2209ae731f8dSMarc Zyngier  *	@handler: Function to be called when the IRQ occurs.
2210ae731f8dSMarc Zyngier  *		  Threaded handler for threaded interrupts.
2211ae731f8dSMarc Zyngier  *	@flags: Interrupt type flags
2212ae731f8dSMarc Zyngier  *	@name: An ascii name for the claiming device
2213ae731f8dSMarc Zyngier  *	@dev_id: A cookie passed back to the handler function
2214ae731f8dSMarc Zyngier  *
2215ae731f8dSMarc Zyngier  *	This call allocates interrupt resources and enables the
2216ae731f8dSMarc Zyngier  *	interrupt line and IRQ handling. It selects either a
2217ae731f8dSMarc Zyngier  *	hardirq or threaded handling method depending on the
2218ae731f8dSMarc Zyngier  *	context.
2219ae731f8dSMarc Zyngier  *
2220ae731f8dSMarc Zyngier  *	On failure, it returns a negative value. On success,
2221ae731f8dSMarc Zyngier  *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
2222ae731f8dSMarc Zyngier  */
2223ae731f8dSMarc Zyngier int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2224ae731f8dSMarc Zyngier 			    unsigned long flags, const char *name, void *dev_id)
2225ae731f8dSMarc Zyngier {
2226e237a551SChen Fan 	struct irq_desc *desc;
2227ae731f8dSMarc Zyngier 	int ret;
2228ae731f8dSMarc Zyngier 
2229e237a551SChen Fan 	if (irq == IRQ_NOTCONNECTED)
2230e237a551SChen Fan 		return -ENOTCONN;
2231e237a551SChen Fan 
2232e237a551SChen Fan 	desc = irq_to_desc(irq);
2233ae731f8dSMarc Zyngier 	if (!desc)
2234ae731f8dSMarc Zyngier 		return -EINVAL;
2235ae731f8dSMarc Zyngier 
22361ccb4e61SThomas Gleixner 	if (irq_settings_is_nested_thread(desc)) {
2237ae731f8dSMarc Zyngier 		ret = request_threaded_irq(irq, NULL, handler,
2238ae731f8dSMarc Zyngier 					   flags, name, dev_id);
2239ae731f8dSMarc Zyngier 		return !ret ? IRQC_IS_NESTED : ret;
2240ae731f8dSMarc Zyngier 	}
2241ae731f8dSMarc Zyngier 
2242ae731f8dSMarc Zyngier 	ret = request_irq(irq, handler, flags, name, dev_id);
2243ae731f8dSMarc Zyngier 	return !ret ? IRQC_IS_HARDIRQ : ret;
2244ae731f8dSMarc Zyngier }
2245ae731f8dSMarc Zyngier EXPORT_SYMBOL_GPL(request_any_context_irq);
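
/*
 * Usage sketch for request_any_context_irq(): useful when the interrupt may
 * come either straight from an irqchip (hardirq) or from a nested-thread
 * parent such as an I2C GPIO expander. The positive return value reports
 * which context was chosen. The foo_* names are illustrative assumptions.
 */
#include <linux/interrupt.h>

struct foo_button {
	int irq;
	bool nested;
};

static irqreturn_t foo_button_isr(int irq, void *dev_id)
{
	struct foo_button *btn = dev_id;

	/* Keep it non-sleeping so it is valid in either context. */
	(void)btn;
	return IRQ_HANDLED;
}

static int foo_button_request(struct foo_button *btn)
{
	int ret;

	ret = request_any_context_irq(btn->irq, foo_button_isr,
				      IRQF_TRIGGER_FALLING, "foo-button", btn);
	if (ret < 0)
		return ret;

	/* On success ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED. */
	btn->nested = (ret == IRQC_IS_NESTED);
	return 0;
}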
224631d9d9b6SMarc Zyngier 
2247b525903cSJulien Thierry /**
2248b525903cSJulien Thierry  *	request_nmi - allocate an interrupt line for NMI delivery
2249b525903cSJulien Thierry  *	@irq: Interrupt line to allocate
2250b525903cSJulien Thierry  *	@handler: Function to be called when the IRQ occurs.
2251b525903cSJulien Thierry  *		  Threaded handler for threaded interrupts.
2252b525903cSJulien Thierry  *	@irqflags: Interrupt type flags
2253b525903cSJulien Thierry  *	@name: An ascii name for the claiming device
2254b525903cSJulien Thierry  *	@dev_id: A cookie passed back to the handler function
2255b525903cSJulien Thierry  *
2256b525903cSJulien Thierry  *	This call allocates interrupt resources and enables the
2257b525903cSJulien Thierry  *	interrupt line and IRQ handling. It sets up the IRQ line
2258b525903cSJulien Thierry  *	to be handled as an NMI.
2259b525903cSJulien Thierry  *
2260b525903cSJulien Thierry  *	An interrupt line delivering NMIs cannot be shared and IRQ handling
2261b525903cSJulien Thierry  *	cannot be threaded.
2262b525903cSJulien Thierry  *
2263b525903cSJulien Thierry  *	Interrupt lines requested for NMI delivery must produce per-CPU
2264b525903cSJulien Thierry  *	interrupts and have the auto enable setting disabled.
2265b525903cSJulien Thierry  *
2266b525903cSJulien Thierry  *	Dev_id must be globally unique. Normally the address of the
2267b525903cSJulien Thierry  *	device data structure is used as the cookie. Since the handler
2268b525903cSJulien Thierry  *	receives this value it makes sense to use it.
2269b525903cSJulien Thierry  *
2270b525903cSJulien Thierry  *	If the interrupt line cannot be used to deliver NMIs, the function
2271b525903cSJulien Thierry  *	will fail and return a negative value.
2272b525903cSJulien Thierry  */
2273b525903cSJulien Thierry int request_nmi(unsigned int irq, irq_handler_t handler,
2274b525903cSJulien Thierry 		unsigned long irqflags, const char *name, void *dev_id)
2275b525903cSJulien Thierry {
2276b525903cSJulien Thierry 	struct irqaction *action;
2277b525903cSJulien Thierry 	struct irq_desc *desc;
2278b525903cSJulien Thierry 	unsigned long flags;
2279b525903cSJulien Thierry 	int retval;
2280b525903cSJulien Thierry 
2281b525903cSJulien Thierry 	if (irq == IRQ_NOTCONNECTED)
2282b525903cSJulien Thierry 		return -ENOTCONN;
2283b525903cSJulien Thierry 
2284b525903cSJulien Thierry 	/* NMIs cannot be shared and cannot be used for polling */
2285b525903cSJulien Thierry 	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2286b525903cSJulien Thierry 		return -EINVAL;
2287b525903cSJulien Thierry 
2288b525903cSJulien Thierry 	if (!(irqflags & IRQF_PERCPU))
2289b525903cSJulien Thierry 		return -EINVAL;
2290b525903cSJulien Thierry 
2291b525903cSJulien Thierry 	if (!handler)
2292b525903cSJulien Thierry 		return -EINVAL;
2293b525903cSJulien Thierry 
2294b525903cSJulien Thierry 	desc = irq_to_desc(irq);
2295b525903cSJulien Thierry 
2296cbe16f35SBarry Song 	if (!desc || (irq_settings_can_autoenable(desc) &&
2297cbe16f35SBarry Song 	    !(irqflags & IRQF_NO_AUTOEN)) ||
2298b525903cSJulien Thierry 	    !irq_settings_can_request(desc) ||
2299b525903cSJulien Thierry 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2300b525903cSJulien Thierry 	    !irq_supports_nmi(desc))
2301b525903cSJulien Thierry 		return -EINVAL;
2302b525903cSJulien Thierry 
2303b525903cSJulien Thierry 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2304b525903cSJulien Thierry 	if (!action)
2305b525903cSJulien Thierry 		return -ENOMEM;
2306b525903cSJulien Thierry 
2307b525903cSJulien Thierry 	action->handler = handler;
2308b525903cSJulien Thierry 	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2309b525903cSJulien Thierry 	action->name = name;
2310b525903cSJulien Thierry 	action->dev_id = dev_id;
2311b525903cSJulien Thierry 
2312b525903cSJulien Thierry 	retval = irq_chip_pm_get(&desc->irq_data);
2313b525903cSJulien Thierry 	if (retval < 0)
2314b525903cSJulien Thierry 		goto err_out;
2315b525903cSJulien Thierry 
2316b525903cSJulien Thierry 	retval = __setup_irq(irq, desc, action);
2317b525903cSJulien Thierry 	if (retval)
2318b525903cSJulien Thierry 		goto err_irq_setup;
2319b525903cSJulien Thierry 
2320b525903cSJulien Thierry 	raw_spin_lock_irqsave(&desc->lock, flags);
2321b525903cSJulien Thierry 
2322b525903cSJulien Thierry 	/* Setup NMI state */
2323b525903cSJulien Thierry 	desc->istate |= IRQS_NMI;
2324b525903cSJulien Thierry 	retval = irq_nmi_setup(desc);
2325b525903cSJulien Thierry 	if (retval) {
2326b525903cSJulien Thierry 		__cleanup_nmi(irq, desc);
2327b525903cSJulien Thierry 		raw_spin_unlock_irqrestore(&desc->lock, flags);
2328b525903cSJulien Thierry 		return -EINVAL;
2329b525903cSJulien Thierry 	}
2330b525903cSJulien Thierry 
2331b525903cSJulien Thierry 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2332b525903cSJulien Thierry 
2333b525903cSJulien Thierry 	return 0;
2334b525903cSJulien Thierry 
2335b525903cSJulien Thierry err_irq_setup:
2336b525903cSJulien Thierry 	irq_chip_pm_put(&desc->irq_data);
2337b525903cSJulien Thierry err_out:
2338b525903cSJulien Thierry 	kfree(action);
2339b525903cSJulien Thierry 
2340b525903cSJulien Thierry 	return retval;
2341b525903cSJulien Thierry }
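
/*
 * Usage sketch for request_nmi(): only meaningful when the irqchip supports
 * NMI delivery (irq_supports_nmi()). The line cannot be shared, must carry
 * IRQF_PERCPU, and must not auto-enable; it is switched on afterwards with
 * enable_nmi() and released with free_nmi(). All names are illustrative.
 */
#include <linux/interrupt.h>

static irqreturn_t foo_nmi_handler(int irq, void *dev_id)
{
	/* NMI context: no locks, no sleeping, keep it minimal. */
	return IRQ_HANDLED;
}

static int foo_setup_nmi(unsigned int irq, void *cookie)
{
	int ret;

	ret = request_nmi(irq, foo_nmi_handler,
			  IRQF_PERCPU | IRQF_NO_AUTOEN, "foo-nmi", cookie);
	if (ret)
		return ret;

	enable_nmi(irq);	/* bring the NMI line up once it is set up */
	return 0;
}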
2342b525903cSJulien Thierry 
23431e7c5fd2SMarc Zyngier void enable_percpu_irq(unsigned int irq, unsigned int type)
234431d9d9b6SMarc Zyngier {
234531d9d9b6SMarc Zyngier 	unsigned int cpu = smp_processor_id();
234631d9d9b6SMarc Zyngier 	unsigned long flags;
234731d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
234831d9d9b6SMarc Zyngier 
234931d9d9b6SMarc Zyngier 	if (!desc)
235031d9d9b6SMarc Zyngier 		return;
235131d9d9b6SMarc Zyngier 
2352f35ad083SMarc Zyngier 	/*
2353f35ad083SMarc Zyngier 	 * If the trigger type is not specified by the caller, then
2354f35ad083SMarc Zyngier 	 * use the default for this interrupt.
2355f35ad083SMarc Zyngier 	 */
23561e7c5fd2SMarc Zyngier 	type &= IRQ_TYPE_SENSE_MASK;
2357f35ad083SMarc Zyngier 	if (type == IRQ_TYPE_NONE)
2358f35ad083SMarc Zyngier 		type = irqd_get_trigger_type(&desc->irq_data);
2359f35ad083SMarc Zyngier 
23601e7c5fd2SMarc Zyngier 	if (type != IRQ_TYPE_NONE) {
23611e7c5fd2SMarc Zyngier 		int ret;
23621e7c5fd2SMarc Zyngier 
2363a1ff541aSJiang Liu 		ret = __irq_set_trigger(desc, type);
23641e7c5fd2SMarc Zyngier 
23651e7c5fd2SMarc Zyngier 		if (ret) {
236632cffddeSThomas Gleixner 			WARN(1, "failed to set type for IRQ%d\n", irq);
23671e7c5fd2SMarc Zyngier 			goto out;
23681e7c5fd2SMarc Zyngier 		}
23691e7c5fd2SMarc Zyngier 	}
23701e7c5fd2SMarc Zyngier 
237131d9d9b6SMarc Zyngier 	irq_percpu_enable(desc, cpu);
23721e7c5fd2SMarc Zyngier out:
237331d9d9b6SMarc Zyngier 	irq_put_desc_unlock(desc, flags);
237431d9d9b6SMarc Zyngier }
237536a5df85SChris Metcalf EXPORT_SYMBOL_GPL(enable_percpu_irq);
237631d9d9b6SMarc Zyngier 
23774b078c3fSJulien Thierry void enable_percpu_nmi(unsigned int irq, unsigned int type)
23784b078c3fSJulien Thierry {
23794b078c3fSJulien Thierry 	enable_percpu_irq(irq, type);
23804b078c3fSJulien Thierry }
23814b078c3fSJulien Thierry 
2382f0cb3220SThomas Petazzoni /**
2383f0cb3220SThomas Petazzoni  * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
2384f0cb3220SThomas Petazzoni  * @irq:	Linux irq number to check for
2385f0cb3220SThomas Petazzoni  *
2386f0cb3220SThomas Petazzoni  * Must be called from a non-migratable context. Returns the enable
2387f0cb3220SThomas Petazzoni  * state of a per-CPU interrupt on the current CPU.
2388f0cb3220SThomas Petazzoni  */
2389f0cb3220SThomas Petazzoni bool irq_percpu_is_enabled(unsigned int irq)
2390f0cb3220SThomas Petazzoni {
2391f0cb3220SThomas Petazzoni 	unsigned int cpu = smp_processor_id();
2392f0cb3220SThomas Petazzoni 	struct irq_desc *desc;
2393f0cb3220SThomas Petazzoni 	unsigned long flags;
2394f0cb3220SThomas Petazzoni 	bool is_enabled;
2395f0cb3220SThomas Petazzoni 
2396f0cb3220SThomas Petazzoni 	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2397f0cb3220SThomas Petazzoni 	if (!desc)
2398f0cb3220SThomas Petazzoni 		return false;
2399f0cb3220SThomas Petazzoni 
2400f0cb3220SThomas Petazzoni 	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2401f0cb3220SThomas Petazzoni 	irq_put_desc_unlock(desc, flags);
2402f0cb3220SThomas Petazzoni 
2403f0cb3220SThomas Petazzoni 	return is_enabled;
2404f0cb3220SThomas Petazzoni }
2405f0cb3220SThomas Petazzoni EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
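
/*
 * Usage sketch for enable_percpu_irq()/irq_percpu_is_enabled(): a per-CPU
 * interrupt is enabled on each CPU individually, typically from CPU hotplug
 * callbacks that run on the CPU being brought up or taken down. The foo_*
 * names and the dynamic hotplug state are illustrative assumptions.
 */
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>

static unsigned int foo_ppi;	/* per-CPU interrupt number, assumed mapped */

static int foo_cpu_online(unsigned int cpu)
{
	/* The cpuhp thread is pinned to @cpu, so this is non-migratable. */
	if (!irq_percpu_is_enabled(foo_ppi))
		enable_percpu_irq(foo_ppi, IRQ_TYPE_NONE);
	return 0;
}

static int foo_cpu_offline(unsigned int cpu)
{
	disable_percpu_irq(foo_ppi);
	return 0;
}

static int foo_register_hotplug(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo/percpu:online",
				foo_cpu_online, foo_cpu_offline);
	return ret < 0 ? ret : 0;
}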
2406f0cb3220SThomas Petazzoni 
240731d9d9b6SMarc Zyngier void disable_percpu_irq(unsigned int irq)
240831d9d9b6SMarc Zyngier {
240931d9d9b6SMarc Zyngier 	unsigned int cpu = smp_processor_id();
241031d9d9b6SMarc Zyngier 	unsigned long flags;
241131d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
241231d9d9b6SMarc Zyngier 
241331d9d9b6SMarc Zyngier 	if (!desc)
241431d9d9b6SMarc Zyngier 		return;
241531d9d9b6SMarc Zyngier 
241631d9d9b6SMarc Zyngier 	irq_percpu_disable(desc, cpu);
241731d9d9b6SMarc Zyngier 	irq_put_desc_unlock(desc, flags);
241831d9d9b6SMarc Zyngier }
241936a5df85SChris Metcalf EXPORT_SYMBOL_GPL(disable_percpu_irq);
242031d9d9b6SMarc Zyngier 
24214b078c3fSJulien Thierry void disable_percpu_nmi(unsigned int irq)
24224b078c3fSJulien Thierry {
24234b078c3fSJulien Thierry 	disable_percpu_irq(irq);
24244b078c3fSJulien Thierry }
24254b078c3fSJulien Thierry 
242631d9d9b6SMarc Zyngier /*
242731d9d9b6SMarc Zyngier  * Internal function to unregister a percpu irqaction.
242831d9d9b6SMarc Zyngier  */
242931d9d9b6SMarc Zyngier static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
243031d9d9b6SMarc Zyngier {
243131d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
243231d9d9b6SMarc Zyngier 	struct irqaction *action;
243331d9d9b6SMarc Zyngier 	unsigned long flags;
243431d9d9b6SMarc Zyngier 
243531d9d9b6SMarc Zyngier 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
243631d9d9b6SMarc Zyngier 
243731d9d9b6SMarc Zyngier 	if (!desc)
243831d9d9b6SMarc Zyngier 		return NULL;
243931d9d9b6SMarc Zyngier 
244031d9d9b6SMarc Zyngier 	raw_spin_lock_irqsave(&desc->lock, flags);
244131d9d9b6SMarc Zyngier 
244231d9d9b6SMarc Zyngier 	action = desc->action;
244331d9d9b6SMarc Zyngier 	if (!action || action->percpu_dev_id != dev_id) {
244431d9d9b6SMarc Zyngier 		WARN(1, "Trying to free already-free IRQ %d\n", irq);
244531d9d9b6SMarc Zyngier 		goto bad;
244631d9d9b6SMarc Zyngier 	}
244731d9d9b6SMarc Zyngier 
244831d9d9b6SMarc Zyngier 	if (!cpumask_empty(desc->percpu_enabled)) {
244931d9d9b6SMarc Zyngier 		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
245031d9d9b6SMarc Zyngier 		     irq, cpumask_first(desc->percpu_enabled));
245131d9d9b6SMarc Zyngier 		goto bad;
245231d9d9b6SMarc Zyngier 	}
245331d9d9b6SMarc Zyngier 
245431d9d9b6SMarc Zyngier 	/* Found it - now remove it from the list of entries: */
245531d9d9b6SMarc Zyngier 	desc->action = NULL;
245631d9d9b6SMarc Zyngier 
24574b078c3fSJulien Thierry 	desc->istate &= ~IRQS_NMI;
24584b078c3fSJulien Thierry 
245931d9d9b6SMarc Zyngier 	raw_spin_unlock_irqrestore(&desc->lock, flags);
246031d9d9b6SMarc Zyngier 
246131d9d9b6SMarc Zyngier 	unregister_handler_proc(irq, action);
246231d9d9b6SMarc Zyngier 
2463be45beb2SJon Hunter 	irq_chip_pm_put(&desc->irq_data);
246431d9d9b6SMarc Zyngier 	module_put(desc->owner);
246531d9d9b6SMarc Zyngier 	return action;
246631d9d9b6SMarc Zyngier 
246731d9d9b6SMarc Zyngier bad:
246831d9d9b6SMarc Zyngier 	raw_spin_unlock_irqrestore(&desc->lock, flags);
246931d9d9b6SMarc Zyngier 	return NULL;
247031d9d9b6SMarc Zyngier }
247131d9d9b6SMarc Zyngier 
247231d9d9b6SMarc Zyngier /**
247331d9d9b6SMarc Zyngier  *	remove_percpu_irq - free a per-cpu interrupt
247431d9d9b6SMarc Zyngier  *	@irq: Interrupt line to free
247531d9d9b6SMarc Zyngier  *	@act: irqaction for the interrupt
247631d9d9b6SMarc Zyngier  *
247731d9d9b6SMarc Zyngier  * Used to remove interrupts statically set up by the early boot process.
247831d9d9b6SMarc Zyngier  */
247931d9d9b6SMarc Zyngier void remove_percpu_irq(unsigned int irq, struct irqaction *act)
248031d9d9b6SMarc Zyngier {
248131d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
248231d9d9b6SMarc Zyngier 
248331d9d9b6SMarc Zyngier 	if (desc && irq_settings_is_per_cpu_devid(desc))
248431d9d9b6SMarc Zyngier 	    __free_percpu_irq(irq, act->percpu_dev_id);
248531d9d9b6SMarc Zyngier }
248631d9d9b6SMarc Zyngier 
248731d9d9b6SMarc Zyngier /**
248831d9d9b6SMarc Zyngier  *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
248931d9d9b6SMarc Zyngier  *	@irq: Interrupt line to free
249031d9d9b6SMarc Zyngier  *	@dev_id: Device identity to free
249131d9d9b6SMarc Zyngier  *
249231d9d9b6SMarc Zyngier  *	Remove a percpu interrupt handler. The handler is removed, but
249331d9d9b6SMarc Zyngier  *	the interrupt line is not disabled. This must be done on each
249431d9d9b6SMarc Zyngier  *	CPU before calling this function. The function does not return
249531d9d9b6SMarc Zyngier  *	until any executing interrupts for this IRQ have completed.
249631d9d9b6SMarc Zyngier  *
249731d9d9b6SMarc Zyngier  *	This function must not be called from interrupt context.
249831d9d9b6SMarc Zyngier  */
249931d9d9b6SMarc Zyngier void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
250031d9d9b6SMarc Zyngier {
250131d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
250231d9d9b6SMarc Zyngier 
250331d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
250431d9d9b6SMarc Zyngier 		return;
250531d9d9b6SMarc Zyngier 
250631d9d9b6SMarc Zyngier 	chip_bus_lock(desc);
250731d9d9b6SMarc Zyngier 	kfree(__free_percpu_irq(irq, dev_id));
250831d9d9b6SMarc Zyngier 	chip_bus_sync_unlock(desc);
250931d9d9b6SMarc Zyngier }
2510aec2e2adSMaxime Ripard EXPORT_SYMBOL_GPL(free_percpu_irq);
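
/*
 * Usage sketch for free_percpu_irq(): the line has to be disabled on every
 * CPU first (disable_percpu_irq() is strictly CPU-local) before the handler
 * may be removed. on_each_cpu() is one way to do that; the foo_* names are
 * illustrative assumptions.
 */
#include <linux/interrupt.h>
#include <linux/smp.h>

static void foo_disable_ppi_local(void *info)
{
	disable_percpu_irq(*(unsigned int *)info);
}

static void foo_release_ppi(unsigned int irq, void __percpu *pcpu_dev)
{
	/* Run the disable on every CPU and wait for completion. */
	on_each_cpu(foo_disable_ppi_local, &irq, 1);
	free_percpu_irq(irq, pcpu_dev);
}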
251131d9d9b6SMarc Zyngier 
25124b078c3fSJulien Thierry void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
25134b078c3fSJulien Thierry {
25144b078c3fSJulien Thierry 	struct irq_desc *desc = irq_to_desc(irq);
25154b078c3fSJulien Thierry 
25164b078c3fSJulien Thierry 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
25174b078c3fSJulien Thierry 		return;
25184b078c3fSJulien Thierry 
25194b078c3fSJulien Thierry 	if (WARN_ON(!(desc->istate & IRQS_NMI)))
25204b078c3fSJulien Thierry 		return;
25214b078c3fSJulien Thierry 
25224b078c3fSJulien Thierry 	kfree(__free_percpu_irq(irq, dev_id));
25234b078c3fSJulien Thierry }
25244b078c3fSJulien Thierry 
252531d9d9b6SMarc Zyngier /**
252631d9d9b6SMarc Zyngier  *	setup_percpu_irq - setup a per-cpu interrupt
252731d9d9b6SMarc Zyngier  *	@irq: Interrupt line to setup
252831d9d9b6SMarc Zyngier  *	@act: irqaction for the interrupt
252931d9d9b6SMarc Zyngier  *
253031d9d9b6SMarc Zyngier  * Used to statically set up per-CPU interrupts in the early boot process.
253131d9d9b6SMarc Zyngier  */
253231d9d9b6SMarc Zyngier int setup_percpu_irq(unsigned int irq, struct irqaction *act)
253331d9d9b6SMarc Zyngier {
253431d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
253531d9d9b6SMarc Zyngier 	int retval;
253631d9d9b6SMarc Zyngier 
253731d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
253831d9d9b6SMarc Zyngier 		return -EINVAL;
2539be45beb2SJon Hunter 
2540be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
2541be45beb2SJon Hunter 	if (retval < 0)
2542be45beb2SJon Hunter 		return retval;
2543be45beb2SJon Hunter 
254431d9d9b6SMarc Zyngier 	retval = __setup_irq(irq, desc, act);
254531d9d9b6SMarc Zyngier 
2546be45beb2SJon Hunter 	if (retval)
2547be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
2548be45beb2SJon Hunter 
254931d9d9b6SMarc Zyngier 	return retval;
255031d9d9b6SMarc Zyngier }
255131d9d9b6SMarc Zyngier 
255231d9d9b6SMarc Zyngier /**
2553c80081b9SDaniel Lezcano  *	__request_percpu_irq - allocate a percpu interrupt line
255431d9d9b6SMarc Zyngier  *	@irq: Interrupt line to allocate
255531d9d9b6SMarc Zyngier  *	@handler: Function to be called when the IRQ occurs.
2556c80081b9SDaniel Lezcano  *	@flags: Interrupt type flags (IRQF_TIMER only)
255731d9d9b6SMarc Zyngier  *	@devname: An ascii name for the claiming device
255831d9d9b6SMarc Zyngier  *	@dev_id: A percpu cookie passed back to the handler function
255931d9d9b6SMarc Zyngier  *
2560a1b7febdSMaxime Ripard  *	This call allocates interrupt resources and enables the
2561a1b7febdSMaxime Ripard  *	interrupt on the local CPU. If the interrupt is supposed to be
2562a1b7febdSMaxime Ripard  *	enabled on other CPUs, it has to be done on each CPU using
2563a1b7febdSMaxime Ripard  *	enable_percpu_irq().
256431d9d9b6SMarc Zyngier  *
256531d9d9b6SMarc Zyngier  *	Dev_id must be globally unique. It is a per-cpu variable, and
256631d9d9b6SMarc Zyngier  *	the handler gets called with the interrupted CPU's instance of
256731d9d9b6SMarc Zyngier  *	that variable.
256831d9d9b6SMarc Zyngier  */
2569c80081b9SDaniel Lezcano int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2570c80081b9SDaniel Lezcano 			 unsigned long flags, const char *devname,
2571c80081b9SDaniel Lezcano 			 void __percpu *dev_id)
257231d9d9b6SMarc Zyngier {
257331d9d9b6SMarc Zyngier 	struct irqaction *action;
257431d9d9b6SMarc Zyngier 	struct irq_desc *desc;
257531d9d9b6SMarc Zyngier 	int retval;
257631d9d9b6SMarc Zyngier 
257731d9d9b6SMarc Zyngier 	if (!dev_id)
257831d9d9b6SMarc Zyngier 		return -EINVAL;
257931d9d9b6SMarc Zyngier 
258031d9d9b6SMarc Zyngier 	desc = irq_to_desc(irq);
258131d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_can_request(desc) ||
258231d9d9b6SMarc Zyngier 	    !irq_settings_is_per_cpu_devid(desc))
258331d9d9b6SMarc Zyngier 		return -EINVAL;
258431d9d9b6SMarc Zyngier 
2585c80081b9SDaniel Lezcano 	if (flags && flags != IRQF_TIMER)
2586c80081b9SDaniel Lezcano 		return -EINVAL;
2587c80081b9SDaniel Lezcano 
258831d9d9b6SMarc Zyngier 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
258931d9d9b6SMarc Zyngier 	if (!action)
259031d9d9b6SMarc Zyngier 		return -ENOMEM;
259131d9d9b6SMarc Zyngier 
259231d9d9b6SMarc Zyngier 	action->handler = handler;
2593c80081b9SDaniel Lezcano 	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
259431d9d9b6SMarc Zyngier 	action->name = devname;
259531d9d9b6SMarc Zyngier 	action->percpu_dev_id = dev_id;
259631d9d9b6SMarc Zyngier 
2597be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
25984396f46cSShawn Lin 	if (retval < 0) {
25994396f46cSShawn Lin 		kfree(action);
2600be45beb2SJon Hunter 		return retval;
26014396f46cSShawn Lin 	}
2602be45beb2SJon Hunter 
260331d9d9b6SMarc Zyngier 	retval = __setup_irq(irq, desc, action);
260431d9d9b6SMarc Zyngier 
2605be45beb2SJon Hunter 	if (retval) {
2606be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
260731d9d9b6SMarc Zyngier 		kfree(action);
2608be45beb2SJon Hunter 	}
260931d9d9b6SMarc Zyngier 
261031d9d9b6SMarc Zyngier 	return retval;
261131d9d9b6SMarc Zyngier }
2612c80081b9SDaniel Lezcano EXPORT_SYMBOL_GPL(__request_percpu_irq);
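
/*
 * Usage sketch for request_percpu_irq(), the flags == 0 wrapper around
 * __request_percpu_irq(): the cookie is a per-CPU pointer and the handler is
 * invoked with the interrupted CPU's instance of it. The foo_* names are
 * illustrative assumptions.
 */
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct foo_pcpu {
	u64 count;
};

static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu_state);

static irqreturn_t foo_ppi_handler(int irq, void *dev_id)
{
	/* dev_id is the current CPU's instance of foo_pcpu_state. */
	struct foo_pcpu *st = dev_id;

	st->count++;
	return IRQ_HANDLED;
}

static int foo_request_ppi(unsigned int irq)
{
	int ret;

	ret = request_percpu_irq(irq, foo_ppi_handler, "foo-ppi",
				 &foo_pcpu_state);
	if (ret)
		return ret;

	/* Still needs enable_percpu_irq() on each CPU, as described above. */
	return 0;
}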
26131b7047edSMarc Zyngier 
26141b7047edSMarc Zyngier /**
26154b078c3fSJulien Thierry  *	request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
26164b078c3fSJulien Thierry  *	@irq: Interrupt line to allocate
26174b078c3fSJulien Thierry  *	@handler: Function to be called when the IRQ occurs.
26184b078c3fSJulien Thierry  *	@name: An ascii name for the claiming device
26194b078c3fSJulien Thierry  *	@dev_id: A percpu cookie passed back to the handler function
26204b078c3fSJulien Thierry  *
26214b078c3fSJulien Thierry  *	This call allocates interrupt resources for a per-CPU NMI. Per-CPU NMIs
2622a5186694SJulien Thierry  *	have to be set up on each CPU by calling prepare_percpu_nmi() before
2623a5186694SJulien Thierry  *	being enabled on the same CPU by using enable_percpu_nmi().
26244b078c3fSJulien Thierry  *
26254b078c3fSJulien Thierry  *	Dev_id must be globally unique. It is a per-cpu variable, and
26264b078c3fSJulien Thierry  *	the handler gets called with the interrupted CPU's instance of
26274b078c3fSJulien Thierry  *	that variable.
26284b078c3fSJulien Thierry  *
26294b078c3fSJulien Thierry  *	Interrupt lines requested for NMI delivery should have the auto
26304b078c3fSJulien Thierry  *	enable setting disabled.
26314b078c3fSJulien Thierry  *
26324b078c3fSJulien Thierry  *	If the interrupt line cannot be used to deliver NMIs, the function
26334b078c3fSJulien Thierry  *	will fail, returning a negative value.
26344b078c3fSJulien Thierry  */
26354b078c3fSJulien Thierry int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
26364b078c3fSJulien Thierry 		       const char *name, void __percpu *dev_id)
26374b078c3fSJulien Thierry {
26384b078c3fSJulien Thierry 	struct irqaction *action;
26394b078c3fSJulien Thierry 	struct irq_desc *desc;
26404b078c3fSJulien Thierry 	unsigned long flags;
26414b078c3fSJulien Thierry 	int retval;
26424b078c3fSJulien Thierry 
26434b078c3fSJulien Thierry 	if (!handler)
26444b078c3fSJulien Thierry 		return -EINVAL;
26454b078c3fSJulien Thierry 
26464b078c3fSJulien Thierry 	desc = irq_to_desc(irq);
26474b078c3fSJulien Thierry 
26484b078c3fSJulien Thierry 	if (!desc || !irq_settings_can_request(desc) ||
26494b078c3fSJulien Thierry 	    !irq_settings_is_per_cpu_devid(desc) ||
26504b078c3fSJulien Thierry 	    irq_settings_can_autoenable(desc) ||
26514b078c3fSJulien Thierry 	    !irq_supports_nmi(desc))
26524b078c3fSJulien Thierry 		return -EINVAL;
26534b078c3fSJulien Thierry 
26544b078c3fSJulien Thierry 	/* The line cannot already be NMI */
26554b078c3fSJulien Thierry 	if (desc->istate & IRQS_NMI)
26564b078c3fSJulien Thierry 		return -EINVAL;
26574b078c3fSJulien Thierry 
26584b078c3fSJulien Thierry 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
26594b078c3fSJulien Thierry 	if (!action)
26604b078c3fSJulien Thierry 		return -ENOMEM;
26614b078c3fSJulien Thierry 
26624b078c3fSJulien Thierry 	action->handler = handler;
26634b078c3fSJulien Thierry 	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
26644b078c3fSJulien Thierry 		| IRQF_NOBALANCING;
26654b078c3fSJulien Thierry 	action->name = name;
26664b078c3fSJulien Thierry 	action->percpu_dev_id = dev_id;
26674b078c3fSJulien Thierry 
26684b078c3fSJulien Thierry 	retval = irq_chip_pm_get(&desc->irq_data);
26694b078c3fSJulien Thierry 	if (retval < 0)
26704b078c3fSJulien Thierry 		goto err_out;
26714b078c3fSJulien Thierry 
26724b078c3fSJulien Thierry 	retval = __setup_irq(irq, desc, action);
26734b078c3fSJulien Thierry 	if (retval)
26744b078c3fSJulien Thierry 		goto err_irq_setup;
26754b078c3fSJulien Thierry 
26764b078c3fSJulien Thierry 	raw_spin_lock_irqsave(&desc->lock, flags);
26774b078c3fSJulien Thierry 	desc->istate |= IRQS_NMI;
26784b078c3fSJulien Thierry 	raw_spin_unlock_irqrestore(&desc->lock, flags);
26794b078c3fSJulien Thierry 
26804b078c3fSJulien Thierry 	return 0;
26814b078c3fSJulien Thierry 
26824b078c3fSJulien Thierry err_irq_setup:
26834b078c3fSJulien Thierry 	irq_chip_pm_put(&desc->irq_data);
26844b078c3fSJulien Thierry err_out:
26854b078c3fSJulien Thierry 	kfree(action);
26864b078c3fSJulien Thierry 
26874b078c3fSJulien Thierry 	return retval;
26884b078c3fSJulien Thierry }
26894b078c3fSJulien Thierry 
26904b078c3fSJulien Thierry /**
26914b078c3fSJulien Thierry  *	prepare_percpu_nmi - performs CPU local setup for NMI delivery
26924b078c3fSJulien Thierry  *	@irq: Interrupt line to prepare for NMI delivery
26934b078c3fSJulien Thierry  *
26944b078c3fSJulien Thierry  *	This call prepares an interrupt line to deliver NMI on the current CPU,
26954b078c3fSJulien Thierry  *	before that interrupt line gets enabled with enable_percpu_nmi().
26964b078c3fSJulien Thierry  *
26974b078c3fSJulien Thierry  *	As a CPU local operation, this should be called from non-preemptible
26984b078c3fSJulien Thierry  *	context.
26994b078c3fSJulien Thierry  *
27004b078c3fSJulien Thierry  *	If the interrupt line cannot be used to deliver NMIs, the function
27014b078c3fSJulien Thierry  *	will fail, returning a negative value.
27024b078c3fSJulien Thierry  */
27034b078c3fSJulien Thierry int prepare_percpu_nmi(unsigned int irq)
27044b078c3fSJulien Thierry {
27054b078c3fSJulien Thierry 	unsigned long flags;
27064b078c3fSJulien Thierry 	struct irq_desc *desc;
27074b078c3fSJulien Thierry 	int ret = 0;
27084b078c3fSJulien Thierry 
27094b078c3fSJulien Thierry 	WARN_ON(preemptible());
27104b078c3fSJulien Thierry 
27114b078c3fSJulien Thierry 	desc = irq_get_desc_lock(irq, &flags,
27124b078c3fSJulien Thierry 				 IRQ_GET_DESC_CHECK_PERCPU);
27134b078c3fSJulien Thierry 	if (!desc)
27144b078c3fSJulien Thierry 		return -EINVAL;
27154b078c3fSJulien Thierry 
27164b078c3fSJulien Thierry 	if (WARN(!(desc->istate & IRQS_NMI),
27174b078c3fSJulien Thierry 		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
27184b078c3fSJulien Thierry 		 irq)) {
27194b078c3fSJulien Thierry 		ret = -EINVAL;
27204b078c3fSJulien Thierry 		goto out;
27214b078c3fSJulien Thierry 	}
27224b078c3fSJulien Thierry 
27234b078c3fSJulien Thierry 	ret = irq_nmi_setup(desc);
27244b078c3fSJulien Thierry 	if (ret) {
27254b078c3fSJulien Thierry 		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
27264b078c3fSJulien Thierry 		goto out;
27274b078c3fSJulien Thierry 	}
27284b078c3fSJulien Thierry 
27294b078c3fSJulien Thierry out:
27304b078c3fSJulien Thierry 	irq_put_desc_unlock(desc, flags);
27314b078c3fSJulien Thierry 	return ret;
27324b078c3fSJulien Thierry }
27334b078c3fSJulien Thierry 
27344b078c3fSJulien Thierry /**
27354b078c3fSJulien Thierry  *	teardown_percpu_nmi - undoes NMI setup of IRQ line
27364b078c3fSJulien Thierry  *	@irq: Interrupt line from which CPU local NMI configuration should be
27374b078c3fSJulien Thierry  *	      removed
27384b078c3fSJulien Thierry  *
27394b078c3fSJulien Thierry  *	This call undoes the setup done by prepare_percpu_nmi().
27404b078c3fSJulien Thierry  *
27414b078c3fSJulien Thierry  *	IRQ line should not be enabled for the current CPU.
27424b078c3fSJulien Thierry  *
27434b078c3fSJulien Thierry  *	As a CPU local operation, this should be called from non-preemptible
27444b078c3fSJulien Thierry  *	context.
27454b078c3fSJulien Thierry  */
27464b078c3fSJulien Thierry void teardown_percpu_nmi(unsigned int irq)
27474b078c3fSJulien Thierry {
27484b078c3fSJulien Thierry 	unsigned long flags;
27494b078c3fSJulien Thierry 	struct irq_desc *desc;
27504b078c3fSJulien Thierry 
27514b078c3fSJulien Thierry 	WARN_ON(preemptible());
27524b078c3fSJulien Thierry 
27534b078c3fSJulien Thierry 	desc = irq_get_desc_lock(irq, &flags,
27544b078c3fSJulien Thierry 				 IRQ_GET_DESC_CHECK_PERCPU);
27554b078c3fSJulien Thierry 	if (!desc)
27564b078c3fSJulien Thierry 		return;
27574b078c3fSJulien Thierry 
27584b078c3fSJulien Thierry 	if (WARN_ON(!(desc->istate & IRQS_NMI)))
27594b078c3fSJulien Thierry 		goto out;
27604b078c3fSJulien Thierry 
27614b078c3fSJulien Thierry 	irq_nmi_teardown(desc);
27624b078c3fSJulien Thierry out:
27634b078c3fSJulien Thierry 	irq_put_desc_unlock(desc, flags);
27644b078c3fSJulien Thierry }
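
/*
 * Lifecycle sketch for per-CPU NMIs, assuming an irqchip with NMI support
 * (for example a pseudo-NMI capable GIC): request once with
 * request_percpu_nmi(), then prepare and enable on each CPU, and tear down
 * in reverse order. The CPU-local calls must run on the target CPU with
 * preemption disabled, e.g. from CPU hotplug "starting"/"dying" callbacks.
 * All foo_* names are illustrative assumptions.
 */
#include <linux/interrupt.h>
#include <linux/percpu.h>

static unsigned int foo_nmi_ppi;	/* per-CPU interrupt number, assumed mapped */
static DEFINE_PER_CPU(unsigned long, foo_nmi_hits);

static irqreturn_t foo_percpu_nmi(int irq, void *dev_id)
{
	/* NMI context: strictly no locks, no sleeping. */
	(*(unsigned long *)dev_id)++;
	return IRQ_HANDLED;
}

static int foo_nmi_starting(unsigned int cpu)	/* runs on @cpu, preemption off */
{
	if (prepare_percpu_nmi(foo_nmi_ppi))
		return 0;	/* leave this CPU without the NMI */
	enable_percpu_nmi(foo_nmi_ppi, IRQ_TYPE_NONE);
	return 0;
}

static int foo_nmi_dying(unsigned int cpu)	/* runs on @cpu, preemption off */
{
	disable_percpu_nmi(foo_nmi_ppi);
	teardown_percpu_nmi(foo_nmi_ppi);
	return 0;
}

static int foo_nmi_init(unsigned int irq)
{
	foo_nmi_ppi = irq;
	return request_percpu_nmi(irq, foo_percpu_nmi, "foo-nmi",
				  &foo_nmi_hits);
}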
27654b078c3fSJulien Thierry 
276662e04686SThomas Gleixner int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
276762e04686SThomas Gleixner 			    bool *state)
276862e04686SThomas Gleixner {
276962e04686SThomas Gleixner 	struct irq_chip *chip;
277062e04686SThomas Gleixner 	int err = -EINVAL;
277162e04686SThomas Gleixner 
277262e04686SThomas Gleixner 	do {
277362e04686SThomas Gleixner 		chip = irq_data_get_irq_chip(data);
27741d0326f3SMarek Vasut 		if (WARN_ON_ONCE(!chip))
27751d0326f3SMarek Vasut 			return -ENODEV;
277662e04686SThomas Gleixner 		if (chip->irq_get_irqchip_state)
277762e04686SThomas Gleixner 			break;
277862e04686SThomas Gleixner #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
277962e04686SThomas Gleixner 		data = data->parent_data;
278062e04686SThomas Gleixner #else
278162e04686SThomas Gleixner 		data = NULL;
278262e04686SThomas Gleixner #endif
278362e04686SThomas Gleixner 	} while (data);
278462e04686SThomas Gleixner 
278562e04686SThomas Gleixner 	if (data)
278662e04686SThomas Gleixner 		err = chip->irq_get_irqchip_state(data, which, state);
278762e04686SThomas Gleixner 	return err;
278862e04686SThomas Gleixner }
278962e04686SThomas Gleixner 
27904b078c3fSJulien Thierry /**
27911b7047edSMarc Zyngier  *	irq_get_irqchip_state - returns the irqchip state of an interrupt.
27921b7047edSMarc Zyngier  *	@irq: Interrupt line that is forwarded to a VM
27931b7047edSMarc Zyngier  *	@which: One of IRQCHIP_STATE_* the caller wants to know about
27945c982c58SKrzysztof Kozlowski  *	@state: a pointer to a boolean where the state is to be stored
27951b7047edSMarc Zyngier  *
27961b7047edSMarc Zyngier  *	This call snapshots the internal irqchip state of an
27971b7047edSMarc Zyngier  *	interrupt, returning into @state the bit corresponding to
27981b7047edSMarc Zyngier  *	state @which.
27991b7047edSMarc Zyngier  *
28001b7047edSMarc Zyngier  *	This function should be called with preemption disabled if the
28011b7047edSMarc Zyngier  *	interrupt controller has per-cpu registers.
28021b7047edSMarc Zyngier  */
28031b7047edSMarc Zyngier int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
28041b7047edSMarc Zyngier 			  bool *state)
28051b7047edSMarc Zyngier {
28061b7047edSMarc Zyngier 	struct irq_desc *desc;
28071b7047edSMarc Zyngier 	struct irq_data *data;
28081b7047edSMarc Zyngier 	unsigned long flags;
28091b7047edSMarc Zyngier 	int err = -EINVAL;
28101b7047edSMarc Zyngier 
28111b7047edSMarc Zyngier 	desc = irq_get_desc_buslock(irq, &flags, 0);
28121b7047edSMarc Zyngier 	if (!desc)
28131b7047edSMarc Zyngier 		return err;
28141b7047edSMarc Zyngier 
28151b7047edSMarc Zyngier 	data = irq_desc_get_irq_data(desc);
28161b7047edSMarc Zyngier 
281762e04686SThomas Gleixner 	err = __irq_get_irqchip_state(data, which, state);
28181b7047edSMarc Zyngier 
28191b7047edSMarc Zyngier 	irq_put_desc_busunlock(desc, flags);
28201b7047edSMarc Zyngier 	return err;
28211b7047edSMarc Zyngier }
28221ee4fb3eSBjorn Andersson EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
28231b7047edSMarc Zyngier 
28241b7047edSMarc Zyngier /**
28251b7047edSMarc Zyngier  *	irq_set_irqchip_state - set the state of a forwarded interrupt.
28261b7047edSMarc Zyngier  *	@irq: Interrupt line that is forwarded to a VM
28271b7047edSMarc Zyngier  *	@which: State to be restored (one of IRQCHIP_STATE_*)
28281b7047edSMarc Zyngier  *	@val: Value corresponding to @which
28291b7047edSMarc Zyngier  *
28301b7047edSMarc Zyngier  *	This call sets the internal irqchip state of an interrupt,
28311b7047edSMarc Zyngier  *	depending on the value of @which.
28321b7047edSMarc Zyngier  *
28331b7047edSMarc Zyngier  *	This function should be called with preemption disabled if the
28341b7047edSMarc Zyngier  *	interrupt controller has per-cpu registers.
28351b7047edSMarc Zyngier  */
28361b7047edSMarc Zyngier int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
28371b7047edSMarc Zyngier 			  bool val)
28381b7047edSMarc Zyngier {
28391b7047edSMarc Zyngier 	struct irq_desc *desc;
28401b7047edSMarc Zyngier 	struct irq_data *data;
28411b7047edSMarc Zyngier 	struct irq_chip *chip;
28421b7047edSMarc Zyngier 	unsigned long flags;
28431b7047edSMarc Zyngier 	int err = -EINVAL;
28441b7047edSMarc Zyngier 
28451b7047edSMarc Zyngier 	desc = irq_get_desc_buslock(irq, &flags, 0);
28461b7047edSMarc Zyngier 	if (!desc)
28471b7047edSMarc Zyngier 		return err;
28481b7047edSMarc Zyngier 
28491b7047edSMarc Zyngier 	data = irq_desc_get_irq_data(desc);
28501b7047edSMarc Zyngier 
28511b7047edSMarc Zyngier 	do {
28521b7047edSMarc Zyngier 		chip = irq_data_get_irq_chip(data);
2853f107cee9SGuenter Roeck 		if (WARN_ON_ONCE(!chip)) {
2854f107cee9SGuenter Roeck 			err = -ENODEV;
2855f107cee9SGuenter Roeck 			goto out_unlock;
2856f107cee9SGuenter Roeck 		}
28571b7047edSMarc Zyngier 		if (chip->irq_set_irqchip_state)
28581b7047edSMarc Zyngier 			break;
28591b7047edSMarc Zyngier #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
28601b7047edSMarc Zyngier 		data = data->parent_data;
28611b7047edSMarc Zyngier #else
28621b7047edSMarc Zyngier 		data = NULL;
28631b7047edSMarc Zyngier #endif
28641b7047edSMarc Zyngier 	} while (data);
28651b7047edSMarc Zyngier 
28661b7047edSMarc Zyngier 	if (data)
28671b7047edSMarc Zyngier 		err = chip->irq_set_irqchip_state(data, which, val);
28681b7047edSMarc Zyngier 
2869f107cee9SGuenter Roeck out_unlock:
28701b7047edSMarc Zyngier 	irq_put_desc_busunlock(desc, flags);
28711b7047edSMarc Zyngier 	return err;
28721b7047edSMarc Zyngier }
28731ee4fb3eSBjorn Andersson EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
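
/*
 * Usage sketch for irq_get_irqchip_state()/irq_set_irqchip_state(): typical
 * for interrupts forwarded to a guest, where the hypervisor saves and later
 * restores the pending bit across a migration or reset. Names below are
 * illustrative assumptions.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>

static int foo_save_pending(unsigned int host_irq, bool *pending)
{
	return irq_get_irqchip_state(host_irq, IRQCHIP_STATE_PENDING, pending);
}

static int foo_restore_pending(unsigned int host_irq, bool pending)
{
	return irq_set_irqchip_state(host_irq, IRQCHIP_STATE_PENDING, pending);
}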
2874a313357eSThomas Gleixner 
2875a313357eSThomas Gleixner /**
2876a313357eSThomas Gleixner  * irq_has_action - Check whether an interrupt is requested
2877a313357eSThomas Gleixner  * @irq:	The linux irq number
2878a313357eSThomas Gleixner  *
2879a313357eSThomas Gleixner  * Returns: A snapshot of the current state
2880a313357eSThomas Gleixner  */
2881a313357eSThomas Gleixner bool irq_has_action(unsigned int irq)
2882a313357eSThomas Gleixner {
2883a313357eSThomas Gleixner 	bool res;
2884a313357eSThomas Gleixner 
2885a313357eSThomas Gleixner 	rcu_read_lock();
2886a313357eSThomas Gleixner 	res = irq_desc_has_action(irq_to_desc(irq));
2887a313357eSThomas Gleixner 	rcu_read_unlock();
2888a313357eSThomas Gleixner 	return res;
2889a313357eSThomas Gleixner }
2890a313357eSThomas Gleixner EXPORT_SYMBOL_GPL(irq_has_action);
2891fdd02963SThomas Gleixner 
2892fdd02963SThomas Gleixner /**
2893fdd02963SThomas Gleixner  * irq_check_status_bit - Check whether bits in the irq descriptor status are set
2894fdd02963SThomas Gleixner  * @irq:	The linux irq number
2895fdd02963SThomas Gleixner  * @bitmask:	The bitmask to evaluate
2896fdd02963SThomas Gleixner  *
2897fdd02963SThomas Gleixner  * Returns: True if one of the bits in @bitmask is set
2898fdd02963SThomas Gleixner  */
2899fdd02963SThomas Gleixner bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
2900fdd02963SThomas Gleixner {
2901fdd02963SThomas Gleixner 	struct irq_desc *desc;
2902fdd02963SThomas Gleixner 	bool res = false;
2903fdd02963SThomas Gleixner 
2904fdd02963SThomas Gleixner 	rcu_read_lock();
2905fdd02963SThomas Gleixner 	desc = irq_to_desc(irq);
2906fdd02963SThomas Gleixner 	if (desc)
2907fdd02963SThomas Gleixner 		res = !!(desc->status_use_accessors & bitmask);
2908fdd02963SThomas Gleixner 	rcu_read_unlock();
2909fdd02963SThomas Gleixner 	return res;
2910fdd02963SThomas Gleixner }
2911ce09ccc5SThomas Gleixner EXPORT_SYMBOL_GPL(irq_check_status_bit);
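
/*
 * Usage sketch for irq_has_action()/irq_check_status_bit(): lockless
 * snapshots suitable for diagnostics. IRQ_PER_CPU is one of the IRQ_* status
 * bits mirrored into the descriptor; this helper is purely illustrative.
 */
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/printk.h>

static void foo_dump_irq(unsigned int irq)
{
	pr_info("irq %u: %srequested, %sper-CPU\n", irq,
		irq_has_action(irq) ? "" : "not ",
		irq_check_status_bit(irq, IRQ_PER_CPU) ? "" : "not ");
}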
2912