xref: /openbmc/linux/kernel/irq/manage.c (revision c48c8b829d2b966a6649827426bcdba082ccf922)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
DEFINE_STATIC_KEY_FALSE(force_irqthreads_key);

static int __init setup_forced_irqthreads(char *arg)
{
	static_branch_enable(&force_irqthreads_key);
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
{
	struct irq_data *irqd = irq_desc_get_irq_data(desc);
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);

		/*
		 * If requested and supported, check at the chip whether it
		 * is in flight at the hardware level, i.e. already pending
		 * in a CPU and waiting for service and acknowledge.
		 */
		if (!inprogress && sync_chip) {
			/*
			 * Ignore the return code. inprogress is only updated
			 * when the chip supports it.
			 */
			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
						&inprogress);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this function
 *	while holding a resource the IRQ handler may need, you will
 *	deadlock. It does not take associated threaded handlers
 *	into account.
 *
 *	Do not use this for shutdown scenarios where you must be sure
 *	that all parts (hardirq and threaded handler) have completed.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 *
 *	It does not check whether there is an interrupt in flight at the
 *	hardware level, but not serviced yet, as this might deadlock when
 *	called with interrupts disabled and the target CPU of the interrupt
 *	is the current CPU.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, false);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);
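/*
 * Example (illustrative sketch, not from this file): a driver that has
 * masked its interrupt source at the device can wait for a hard IRQ
 * handler running on another CPU without blocking on the threaded
 * handler. "foo" and its members are hypothetical.
 *
 *	static void foo_quiesce(struct foo_dev *foo)
 *	{
 *		writel(0, foo->regs + FOO_IRQ_ENABLE);	// mask at the device
 *		if (!synchronize_hardirq(foo->irq)) {
 *			// A threaded handler is still active; only the
 *			// hardirq part is known to have completed.
 *		}
 *	}
 */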

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need, you will deadlock.
 *
 *	Can only be called from preemptible code as it might sleep when
 *	an interrupt thread is associated to @irq.
 *
 *	It optionally makes sure (when the irq chip supports that method)
 *	that the interrupt is not pending in any CPU and waiting for
 *	service.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, true);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
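/*
 * Example (illustrative sketch): the canonical teardown pattern. The
 * device is told to stop raising interrupts first, then synchronize_irq()
 * guarantees that both the hardirq and any threaded handler have finished
 * before shared state is freed. "foo" is hypothetical.
 *
 *	static void foo_shutdown(struct foo_dev *foo)
 *	{
 *		writel(0, foo->regs + FOO_IRQ_ENABLE);	// stop the source
 *		synchronize_irq(foo->irq);		// may sleep
 *		kfree(foo->dma_buf);			// now safe
 *	}
 */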

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static void irq_validate_effective_affinity(struct irq_data *data)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
}

static inline void irq_init_effective_affinity(struct irq_data *data,
					       const struct cpumask *mask)
{
	cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
}
#else
static inline void irq_validate_effective_affinity(struct irq_data *data) { }
static inline void irq_init_effective_affinity(struct irq_data *data,
					       const struct cpumask *mask) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	const struct cpumask *prog_mask;
	int ret;

	static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
	static struct cpumask tmp_mask;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	raw_spin_lock(&tmp_mask_lock);
	/*
	 * If this is a managed interrupt and housekeeping is enabled on
	 * it, check whether the requested affinity mask intersects with
	 * a housekeeping CPU. If so, then remove the isolated CPUs from
	 * the mask and just keep the housekeeping CPU(s). This prevents
	 * the affinity setter from routing the interrupt to an isolated
	 * CPU to avoid that I/O submitted from a housekeeping CPU causes
	 * interrupts on an isolated one.
	 *
	 * If the masks do not intersect or include online CPU(s) then
	 * keep the requested mask. The isolated target CPUs are only
	 * receiving interrupts when the I/O operation was submitted
	 * directly from them.
	 *
	 * If all housekeeping CPUs in the affinity mask are offline, the
	 * interrupt will be migrated by the CPU hotplug code once a
	 * housekeeping CPU which belongs to the affinity mask comes
	 * online.
	 */
	if (irqd_affinity_is_managed(data) &&
	    housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) {
		const struct cpumask *hk_mask;

		hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);

		cpumask_and(&tmp_mask, mask, hk_mask);
		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
			prog_mask = mask;
		else
			prog_mask = &tmp_mask;
	} else {
		prog_mask = mask;
	}

	/*
	 * Make sure we only provide online CPUs to the irqchip,
	 * unless we are being asked to force the affinity (in which
	 * case we do as we are told).
	 */
	cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
	if (!force && !cpumask_empty(&tmp_mask))
		ret = chip->irq_set_affinity(data, &tmp_mask, force);
	else if (force)
		ret = chip->irq_set_affinity(data, mask, force);
	else
		ret = -EINVAL;

	raw_spin_unlock(&tmp_mask_lock);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		fallthrough;
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism then utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}

static bool irq_set_affinity_deactivated(struct irq_data *data,
					 const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	/*
	 * Handle irq chips which can handle affinity only in activated
	 * state correctly
	 *
	 * If the interrupt is not yet activated, just store the affinity
	 * mask and do not call the chip driver at all. On activation the
	 * driver has to make sure anyway that the interrupt is in a
	 * usable state so startup works.
	 */
	if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
	    irqd_is_activated(data) || !irqd_affinity_on_activate(data))
		return false;

	cpumask_copy(desc->irq_common_data.affinity, mask);
	irq_init_effective_affinity(data, mask);
	irqd_set(data, IRQD_AFFINITY_SET);
	return true;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_set_affinity_deactivated(data, mask, force))
		return 0;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		if (!schedule_work(&desc->affinity_notify->work)) {
			/* Work was already scheduled, drop our extra ref */
			kref_put(&desc->affinity_notify->kref,
				 desc->affinity_notify->release);
		}
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

/**
 * irq_update_affinity_desc - Update affinity management for an interrupt
 * @irq:	The interrupt number to update
 * @affinity:	Pointer to the affinity descriptor
 *
 * This interface can be used to configure the affinity management of
 * interrupts which have been allocated already.
 *
 * There are certain limitations on when it may be used - attempts to use it
 * when the kernel is configured for generic IRQ reservation mode (in
 * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
 * managed/non-managed interrupt accounting. In addition, attempts to use it on
 * an interrupt which is already started or which has already been configured
 * as managed will also fail, as these mean invalid init state or double init.
 */
int irq_update_affinity_desc(unsigned int irq,
			     struct irq_affinity_desc *affinity)
{
	struct irq_desc *desc;
	unsigned long flags;
	bool activated;
	int ret = 0;

	/*
	 * Supporting this with the reservation scheme used by x86 needs
	 * some more thought. Fail it for now.
	 */
	if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
		return -EOPNOTSUPP;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return -EINVAL;

	/* Requires the interrupt to be shut down */
	if (irqd_is_started(&desc->irq_data)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/* Interrupts which are already managed cannot be modified */
	if (irqd_affinity_is_managed(&desc->irq_data)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * Deactivate the interrupt. That's required to undo
	 * anything an earlier activation has established.
	 */
	activated = irqd_is_activated(&desc->irq_data);
	if (activated)
		irq_domain_deactivate_irq(&desc->irq_data);

	if (affinity->is_managed) {
		irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
		irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
	}

	cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);

	/* Restore the activation state */
	if (activated)
		irq_domain_activate_irq(&desc->irq_data, false);

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
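/*
 * Example (illustrative sketch): converting an already-allocated but not
 * yet started interrupt into a managed one. "virq" and the CPU choice are
 * hypothetical.
 *
 *	struct irq_affinity_desc ad = { .is_managed = 1 };
 *
 *	cpumask_copy(&ad.mask, cpumask_of(2));
 *	ret = irq_update_affinity_desc(virq, &ad);
 *	// -EBUSY if already started or managed, -EOPNOTSUPP with
 *	// GENERIC_IRQ_RESERVATION_MODE (e.g. x86)
 */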

static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
			      bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}
EXPORT_SYMBOL_GPL(irq_set_affinity);
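/*
 * Example (illustrative sketch): spreading one queue interrupt per CPU, a
 * common pattern in multi-queue network/storage drivers. "foo" is
 * hypothetical.
 *
 *	for (i = 0; i < foo->nr_queues; i++) {
 *		int cpu = cpumask_local_spread(i, dev_to_node(foo->dev));
 *
 *		irq_set_affinity(foo->queue_irq[i], cpumask_of(cpu));
 *	}
 */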

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}
EXPORT_SYMBOL_GPL(irq_force_affinity);

int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
			      bool setaffinity)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	if (m && setaffinity)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
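/*
 * Example (illustrative sketch): drivers normally reach this through the
 * irq_set_affinity_hint()/irq_update_affinity_hint() wrappers in
 * <linux/interrupt.h>; the hint shows up in /proc/irq/<n>/affinity_hint
 * for tools such as irqbalance. "foo" is hypothetical.
 *
 *	// Hint only; user space decides whether to follow it.
 *	irq_update_affinity_hint(foo->irq, cpumask_of(cpu));
 *	...
 *	irq_update_affinity_hint(foo->irq, NULL);	// clear on teardown
 */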

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc || desc->istate & IRQS_NMI)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify) {
		if (cancel_work_sync(&old_notify->work)) {
			/* Pending work had a ref, put that one too */
			kref_put(&old_notify->kref, old_notify->release);
		}
		kref_put(&old_notify->kref, old_notify->release);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
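/*
 * Example (illustrative sketch): a driver that wants to track affinity
 * changes, e.g. to re-home per-queue data. Names are hypothetical; only
 * .notify and .release must be filled in before registration.
 *
 *	static void foo_notify(struct irq_affinity_notify *n,
 *			       const cpumask_t *mask)
 *	{
 *		struct foo_queue *q = container_of(n, struct foo_queue, an);
 *
 *		q->home_cpu = cpumask_first(mask);
 *	}
 *
 *	static void foo_release(struct kref *kref) { }
 *
 *	q->an.notify  = foo_notify;
 *	q->an.release = foo_release;
 *	irq_set_affinity_notifier(q->irq, &q->an);
 *	...
 *	irq_set_affinity_notifier(q->irq, NULL);	// before free_irq()
 */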

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpu_online_mask);

	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif /* CONFIG_AUTO_IRQ_AFFINITY */
#endif /* CONFIG_SMP */


/**
 *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 *	@irq: interrupt number to set affinity
 *	@vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 *	            specific data for percpu_devid interrupts
 *
 *	This function uses the vCPU specific data to set the vCPU
 *	affinity for an irq. The vCPU specific data is passed from
 *	outside, such as KVM. One example code path is as below:
 *	KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
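/*
 * Example (illustrative sketch): how a hypervisor-side caller might hand
 * chip-specific posting data down the hierarchy. The payload type is
 * defined by the irqchip (e.g. struct vcpu_data for x86 posted
 * interrupts); the values shown here are hypothetical.
 *
 *	struct vcpu_data vd = {
 *		.pi_desc_addr	= pi_desc_pa,
 *		.vector		= guest_vector,
 *	};
 *
 *	ret = irq_set_vcpu_affinity(host_irq, &vd);
 *	// -ENOSYS if no chip in the hierarchy supports it
 */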

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
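/*
 * Example (illustrative sketch): the nesting contract. Each disable_irq()
 * must be balanced by one enable_irq(); the line is only unmasked again
 * when the depth returns to zero. "foo" is hypothetical.
 *
 *	disable_irq(foo->irq);		// depth 0 -> 1, line masked
 *	disable_irq(foo->irq);		// depth 1 -> 2
 *	enable_irq(foo->irq);		// depth 2 -> 1, still masked
 *	enable_irq(foo->irq);		// depth 1 -> 0, line unmasked
 */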

/**
 *	disable_hardirq - disables an irq and waits for hardirq completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and disables are
 *	nested.
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this function while
 *	holding a resource the hard IRQ handler may need, you will deadlock.
 *
 *	When used to optimistically disable an interrupt from atomic context
 *	the return value must be checked.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
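/*
 * Example (illustrative sketch): optimistic disable from atomic context,
 * where disable_irq() is not allowed because it may sleep. "foo" is
 * hypothetical.
 *
 *	if (!disable_hardirq(foo->irq)) {
 *		// A threaded handler is still running; defer to a
 *		// sleeping disable_irq() outside the atomic section.
 *	}
 */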

/**
 *	disable_nmi_nosync - disable an nmi without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables and enables are
 *	nested.
 *	The interrupt to disable must have been requested through request_nmi.
 *	Unlike disable_nmi(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 */
void disable_nmi_nosync(unsigned int irq)
{
	disable_irq_nosync(irq);
}

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL!
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
8391da177e4SLinus Torvalds 
840b525903cSJulien Thierry /**
841b525903cSJulien Thierry  *	enable_nmi - enable handling of an nmi
842b525903cSJulien Thierry  *	@irq: Interrupt to enable
843b525903cSJulien Thierry  *
844b525903cSJulien Thierry  *	The interrupt to enable must have been requested through request_nmi.
845b525903cSJulien Thierry  *	Undoes the effect of one call to disable_nmi(). If this
846b525903cSJulien Thierry  *	matches the last disable, processing of interrupts on this
847b525903cSJulien Thierry  *	IRQ line is re-enabled.
848b525903cSJulien Thierry  */
849b525903cSJulien Thierry void enable_nmi(unsigned int irq)
850b525903cSJulien Thierry {
851b525903cSJulien Thierry 	enable_irq(irq);
852b525903cSJulien Thierry }
853b525903cSJulien Thierry 
8540c5d1eb7SDavid Brownell static int set_irq_wake_real(unsigned int irq, unsigned int on)
8552db87321SUwe Kleine-König {
85608678b08SYinghai Lu 	struct irq_desc *desc = irq_to_desc(irq);
8572db87321SUwe Kleine-König 	int ret = -ENXIO;
8582db87321SUwe Kleine-König 
85960f96b41SSantosh Shilimkar 	if (irq_desc_get_chip(desc)->flags &  IRQCHIP_SKIP_SET_WAKE)
86060f96b41SSantosh Shilimkar 		return 0;
86160f96b41SSantosh Shilimkar 
8622f7e99bbSThomas Gleixner 	if (desc->irq_data.chip->irq_set_wake)
8632f7e99bbSThomas Gleixner 		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
8642db87321SUwe Kleine-König 
8652db87321SUwe Kleine-König 	return ret;
8662db87321SUwe Kleine-König }
8672db87321SUwe Kleine-König 
868ba9a2331SThomas Gleixner /**
869a0cd9ca2SThomas Gleixner  *	irq_set_irq_wake - control irq power management wakeup
870ba9a2331SThomas Gleixner  *	@irq:	interrupt to control
871ba9a2331SThomas Gleixner  *	@on:	enable/disable power management wakeup
872ba9a2331SThomas Gleixner  *
87315a647ebSDavid Brownell  *	Enable/disable power management wakeup mode, which is
87415a647ebSDavid Brownell  *	disabled by default.  Enables and disables must match,
87515a647ebSDavid Brownell  *	just as they match for non-wakeup mode support.
87615a647ebSDavid Brownell  *
87715a647ebSDavid Brownell  *	Wakeup mode lets this IRQ wake the system from sleep
87815a647ebSDavid Brownell  *	states like "suspend to RAM".
879f9f21ceaSStephen Boyd  *
880f9f21ceaSStephen Boyd  *	Note: irq enable/disable state is completely orthogonal
881f9f21ceaSStephen Boyd  *	to the enable/disable state of irq wake. An irq can be
882f9f21ceaSStephen Boyd  *	disabled with disable_irq() and still wake the system as
883f9f21ceaSStephen Boyd  *	long as the irq has wake enabled. If this does not hold,
884f9f21ceaSStephen Boyd  *	then the underlying irq chip and the related driver need
885f9f21ceaSStephen Boyd  *	to be investigated.
886ba9a2331SThomas Gleixner  */
887a0cd9ca2SThomas Gleixner int irq_set_irq_wake(unsigned int irq, unsigned int on)
888ba9a2331SThomas Gleixner {
889ba9a2331SThomas Gleixner 	unsigned long flags;
89031d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
8912db87321SUwe Kleine-König 	int ret = 0;
892ba9a2331SThomas Gleixner 
89313863a66SJesper Juhl 	if (!desc)
89413863a66SJesper Juhl 		return -EINVAL;
89513863a66SJesper Juhl 
896b525903cSJulien Thierry 	/* Don't use NMIs as wake up interrupts please */
897b525903cSJulien Thierry 	if (desc->istate & IRQS_NMI) {
898b525903cSJulien Thierry 		ret = -EINVAL;
899b525903cSJulien Thierry 		goto out_unlock;
900b525903cSJulien Thierry 	}
901b525903cSJulien Thierry 
90215a647ebSDavid Brownell 	/* wakeup-capable irqs can be shared between drivers that
90315a647ebSDavid Brownell 	 * don't need to have the same sleep mode behaviors.
90415a647ebSDavid Brownell 	 */
90515a647ebSDavid Brownell 	if (on) {
9062db87321SUwe Kleine-König 		if (desc->wake_depth++ == 0) {
9072db87321SUwe Kleine-König 			ret = set_irq_wake_real(irq, on);
9082db87321SUwe Kleine-König 			if (ret)
9092db87321SUwe Kleine-König 				desc->wake_depth = 0;
91015a647ebSDavid Brownell 			else
9117f94226fSThomas Gleixner 				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
9122db87321SUwe Kleine-König 		}
91315a647ebSDavid Brownell 	} else {
91415a647ebSDavid Brownell 		if (desc->wake_depth == 0) {
9157a2c4770SArjan van de Ven 			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
9162db87321SUwe Kleine-König 		} else if (--desc->wake_depth == 0) {
9172db87321SUwe Kleine-König 			ret = set_irq_wake_real(irq, on);
9182db87321SUwe Kleine-König 			if (ret)
9192db87321SUwe Kleine-König 				desc->wake_depth = 1;
92015a647ebSDavid Brownell 			else
9217f94226fSThomas Gleixner 				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
92215a647ebSDavid Brownell 		}
9232db87321SUwe Kleine-König 	}
924b525903cSJulien Thierry 
925b525903cSJulien Thierry out_unlock:
92602725e74SThomas Gleixner 	irq_put_desc_busunlock(desc, flags);
927ba9a2331SThomas Gleixner 	return ret;
928ba9a2331SThomas Gleixner }
929a0cd9ca2SThomas Gleixner EXPORT_SYMBOL(irq_set_irq_wake);
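/*
 * Example (illustrative sketch): the usual pattern via the
 * enable_irq_wake()/disable_irq_wake() wrappers from <linux/interrupt.h>,
 * typically in a driver's suspend/resume callbacks. "foo" is hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */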

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		fallthrough;

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
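/*
 * Example (illustrative sketch): an irqchip driver for a cascaded
 * controller records the parent line so the software resend mechanism can
 * retrigger through it. Names are hypothetical.
 *
 *	for (i = 0; i < FOO_NR_IRQS; i++) {
 *		int child = irq_create_mapping(foo->domain, i);
 *
 *		irq_set_parent(child, foo->parent_irq);
 *	}
 */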
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}
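/*
 * Example (illustrative sketch): requesting a purely threaded handler.
 * With handler == NULL this default primary handler is installed, so
 * IRQF_ONESHOT is normally mandatory to keep the line masked until the
 * thread has run. "foo" is hypothetical.
 *
 *	ret = request_threaded_irq(foo->irq, NULL, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 */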
1035b25c340cSThomas Gleixner 
1036399b5da2SThomas Gleixner /*
1037399b5da2SThomas Gleixner  * Primary handler for nested threaded interrupts. Should never be
1038399b5da2SThomas Gleixner  * called.
1039399b5da2SThomas Gleixner  */
1040399b5da2SThomas Gleixner static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
1041399b5da2SThomas Gleixner {
1042399b5da2SThomas Gleixner 	WARN(1, "Primary handler called for nested irq %d\n", irq);
1043399b5da2SThomas Gleixner 	return IRQ_NONE;
1044399b5da2SThomas Gleixner }
1045399b5da2SThomas Gleixner 
10462a1d3ab8SThomas Gleixner static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
10472a1d3ab8SThomas Gleixner {
10482a1d3ab8SThomas Gleixner 	WARN(1, "Secondary action handler called for irq %d\n", irq);
10492a1d3ab8SThomas Gleixner 	return IRQ_NONE;
10502a1d3ab8SThomas Gleixner }
10512a1d3ab8SThomas Gleixner 
10523aa551c9SThomas Gleixner static int irq_wait_for_interrupt(struct irqaction *action)
10533aa551c9SThomas Gleixner {
1054519cc865SLukas Wunner 	for (;;) {
10553aa551c9SThomas Gleixner 		set_current_state(TASK_INTERRUPTIBLE);
1056f48fe81eSThomas Gleixner 
1057519cc865SLukas Wunner 		if (kthread_should_stop()) {
1058519cc865SLukas Wunner 			/* may need to run one last time */
1059519cc865SLukas Wunner 			if (test_and_clear_bit(IRQTF_RUNTHREAD,
1060519cc865SLukas Wunner 					       &action->thread_flags)) {
1061519cc865SLukas Wunner 				__set_current_state(TASK_RUNNING);
1062519cc865SLukas Wunner 				return 0;
1063519cc865SLukas Wunner 			}
1064519cc865SLukas Wunner 			__set_current_state(TASK_RUNNING);
1065519cc865SLukas Wunner 			return -1;
1066519cc865SLukas Wunner 		}
1067550acb19SIdo Yariv 
1068f48fe81eSThomas Gleixner 		if (test_and_clear_bit(IRQTF_RUNTHREAD,
1069f48fe81eSThomas Gleixner 				       &action->thread_flags)) {
10703aa551c9SThomas Gleixner 			__set_current_state(TASK_RUNNING);
10713aa551c9SThomas Gleixner 			return 0;
1072f48fe81eSThomas Gleixner 		}
10733aa551c9SThomas Gleixner 		schedule();
10743aa551c9SThomas Gleixner 	}
10753aa551c9SThomas Gleixner }
10763aa551c9SThomas Gleixner 
1077b25c340cSThomas Gleixner /*
1078b25c340cSThomas Gleixner  * Oneshot interrupts keep the irq line masked until the threaded
1079b25c340cSThomas Gleixner  * handler has finished. Unmask if the interrupt has not been disabled and
1080b25c340cSThomas Gleixner  * is marked MASKED.
1081b25c340cSThomas Gleixner  */
1082b5faba21SThomas Gleixner static void irq_finalize_oneshot(struct irq_desc *desc,
1083f3f79e38SAlexander Gordeev 				 struct irqaction *action)
1084b25c340cSThomas Gleixner {
10852a1d3ab8SThomas Gleixner 	if (!(desc->istate & IRQS_ONESHOT) ||
10862a1d3ab8SThomas Gleixner 	    action->handler == irq_forced_secondary_handler)
1087b5faba21SThomas Gleixner 		return;
10880b1adaa0SThomas Gleixner again:
10893876ec9eSThomas Gleixner 	chip_bus_lock(desc);
1090239007b8SThomas Gleixner 	raw_spin_lock_irq(&desc->lock);
10910b1adaa0SThomas Gleixner 
10920b1adaa0SThomas Gleixner 	/*
10930b1adaa0SThomas Gleixner 	 * Implausible though it may be, we need to protect ourselves
10940b1adaa0SThomas Gleixner 	 * against the following scenario:
10950b1adaa0SThomas Gleixner 	 *
10960b1adaa0SThomas Gleixner 	 * The thread finishes before the hard interrupt handler on the
10970b1adaa0SThomas Gleixner 	 * other CPU does. If we unmask the irq line then the interrupt
10980b1adaa0SThomas Gleixner 	 * can come in again, mask the line and leave due to
1099009b4c3bSThomas Gleixner 	 * IRQS_INPROGRESS, and the irq line stays masked forever.
1100b5faba21SThomas Gleixner 	 *
1101b5faba21SThomas Gleixner 	 * This also serializes the state of shared oneshot handlers
1102a359f757SIngo Molnar 	 * versus "desc->threads_oneshot |= action->thread_mask;" in
1103b5faba21SThomas Gleixner 	 * irq_wake_thread(). See the comment there which explains the
1104b5faba21SThomas Gleixner 	 * serialization.
11050b1adaa0SThomas Gleixner 	 */
110632f4125eSThomas Gleixner 	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
11070b1adaa0SThomas Gleixner 		raw_spin_unlock_irq(&desc->lock);
11083876ec9eSThomas Gleixner 		chip_bus_sync_unlock(desc);
11090b1adaa0SThomas Gleixner 		cpu_relax();
11100b1adaa0SThomas Gleixner 		goto again;
11110b1adaa0SThomas Gleixner 	}
11120b1adaa0SThomas Gleixner 
1113b5faba21SThomas Gleixner 	/*
1114b5faba21SThomas Gleixner 	 * Now check again whether the thread should run. Otherwise
1115b5faba21SThomas Gleixner 	 * we would clear the threads_oneshot bit of this thread which
1116b5faba21SThomas Gleixner 	 * was just set.
1117b5faba21SThomas Gleixner 	 */
1118f3f79e38SAlexander Gordeev 	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1119b5faba21SThomas Gleixner 		goto out_unlock;
1120b5faba21SThomas Gleixner 
1121b5faba21SThomas Gleixner 	desc->threads_oneshot &= ~action->thread_mask;
1122b5faba21SThomas Gleixner 
112332f4125eSThomas Gleixner 	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
112432f4125eSThomas Gleixner 	    irqd_irq_masked(&desc->irq_data))
1125328a4978SThomas Gleixner 		unmask_threaded_irq(desc);
112632f4125eSThomas Gleixner 
1127b5faba21SThomas Gleixner out_unlock:
1128239007b8SThomas Gleixner 	raw_spin_unlock_irq(&desc->lock);
11293876ec9eSThomas Gleixner 	chip_bus_sync_unlock(desc);
1130b25c340cSThomas Gleixner }
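
/*
 * Rough timeline of the scenario handled above (illustrative only,
 * CPU numbers are arbitrary):
 *
 *	hard irq handler (CPU1)		irq thread (CPU0)
 *	mask line, set INPROGRESS
 *	wake thread			thread_fn() runs and returns
 *					irq_finalize_oneshot() unmasks
 *	still INPROGRESS		new interrupt: line masked again,
 *					not handled due to INPROGRESS
 *
 * The busy wait on irqd_irq_inprogress() above closes this window.
 */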
1131b25c340cSThomas Gleixner 
113261f38261SBruno Premont #ifdef CONFIG_SMP
11333aa551c9SThomas Gleixner /*
1134b04c644eSChuansheng Liu  * Check whether we need to change the affinity of the interrupt thread.
1135591d2fb0SThomas Gleixner  */
1136591d2fb0SThomas Gleixner static void
1137591d2fb0SThomas Gleixner irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
1138591d2fb0SThomas Gleixner {
1139591d2fb0SThomas Gleixner 	cpumask_var_t mask;
114004aa530eSThomas Gleixner 	bool valid = true;
1141591d2fb0SThomas Gleixner 
1142591d2fb0SThomas Gleixner 	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
1143591d2fb0SThomas Gleixner 		return;
1144591d2fb0SThomas Gleixner 
1145591d2fb0SThomas Gleixner 	/*
1146591d2fb0SThomas Gleixner 	 * In case we are out of memory, we set IRQTF_AFFINITY again and
1147591d2fb0SThomas Gleixner 	 * retry the next time.
1148591d2fb0SThomas Gleixner 	 */
1149591d2fb0SThomas Gleixner 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1150591d2fb0SThomas Gleixner 		set_bit(IRQTF_AFFINITY, &action->thread_flags);
1151591d2fb0SThomas Gleixner 		return;
1152591d2fb0SThomas Gleixner 	}
1153591d2fb0SThomas Gleixner 
1154239007b8SThomas Gleixner 	raw_spin_lock_irq(&desc->lock);
115504aa530eSThomas Gleixner 	/*
115604aa530eSThomas Gleixner 	 * This code is triggered unconditionally. Check the affinity
115704aa530eSThomas Gleixner 	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
115804aa530eSThomas Gleixner 	 */
1159cbf86999SThomas Gleixner 	if (cpumask_available(desc->irq_common_data.affinity)) {
1160cbf86999SThomas Gleixner 		const struct cpumask *m;
1161cbf86999SThomas Gleixner 
1162cbf86999SThomas Gleixner 		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1163cbf86999SThomas Gleixner 		cpumask_copy(mask, m);
1164cbf86999SThomas Gleixner 	} else {
116504aa530eSThomas Gleixner 		valid = false;
1166cbf86999SThomas Gleixner 	}
1167239007b8SThomas Gleixner 	raw_spin_unlock_irq(&desc->lock);
1168591d2fb0SThomas Gleixner 
116904aa530eSThomas Gleixner 	if (valid)
1170591d2fb0SThomas Gleixner 		set_cpus_allowed_ptr(current, mask);
1171591d2fb0SThomas Gleixner 	free_cpumask_var(mask);
1172591d2fb0SThomas Gleixner }
117361f38261SBruno Premont #else
117461f38261SBruno Premont static inline void
117561f38261SBruno Premont irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
117661f38261SBruno Premont #endif
1177591d2fb0SThomas Gleixner 
1178591d2fb0SThomas Gleixner /*
1179c5f48c0aSIngo Molnar  * Interrupts which are not explicitly requested as threaded
11808d32a307SThomas Gleixner  * interrupts rely on the implicit bh/preempt disable of the hard irq
11818d32a307SThomas Gleixner  * context. So we need to disable bh here to avoid deadlocks and other
11828d32a307SThomas Gleixner  * side effects.
11838d32a307SThomas Gleixner  */
11843a43e05fSSebastian Andrzej Siewior static irqreturn_t
11858d32a307SThomas Gleixner irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
11868d32a307SThomas Gleixner {
11873a43e05fSSebastian Andrzej Siewior 	irqreturn_t ret;
11883a43e05fSSebastian Andrzej Siewior 
11898d32a307SThomas Gleixner 	local_bh_disable();
119081e2073cSThomas Gleixner 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
119181e2073cSThomas Gleixner 		local_irq_disable();
11923a43e05fSSebastian Andrzej Siewior 	ret = action->thread_fn(action->irq, action->dev_id);
1193746a923bSLukas Wunner 	if (ret == IRQ_HANDLED)
1194746a923bSLukas Wunner 		atomic_inc(&desc->threads_handled);
1195746a923bSLukas Wunner 
1196f3f79e38SAlexander Gordeev 	irq_finalize_oneshot(desc, action);
119781e2073cSThomas Gleixner 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
119881e2073cSThomas Gleixner 		local_irq_enable();
11998d32a307SThomas Gleixner 	local_bh_enable();
12003a43e05fSSebastian Andrzej Siewior 	return ret;
12018d32a307SThomas Gleixner }
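
/*
 * Hedged note, not code from this file: this path is only taken when
 * forced interrupt threading is active, i.e. the kernel was booted
 * with the "threadirqs" command line option, e.g.
 *
 *	linux ... threadirqs
 *
 * and the action was not marked IRQF_NO_THREAD. The bh/irq disabling
 * above preserves the implicit serialization a primary handler would
 * otherwise have provided.
 */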
12028d32a307SThomas Gleixner 
12038d32a307SThomas Gleixner /*
1204f788e7bfSXie XiuQi  * Interrupts explicitly requested as threaded interrupts want to be
12055c982c58SKrzysztof Kozlowski  * preemptible - many of them need to sleep and wait for slow buses to
12068d32a307SThomas Gleixner  * complete.
12078d32a307SThomas Gleixner  */
12083a43e05fSSebastian Andrzej Siewior static irqreturn_t irq_thread_fn(struct irq_desc *desc,
12093a43e05fSSebastian Andrzej Siewior 		struct irqaction *action)
12108d32a307SThomas Gleixner {
12113a43e05fSSebastian Andrzej Siewior 	irqreturn_t ret;
12123a43e05fSSebastian Andrzej Siewior 
12133a43e05fSSebastian Andrzej Siewior 	ret = action->thread_fn(action->irq, action->dev_id);
1214746a923bSLukas Wunner 	if (ret == IRQ_HANDLED)
1215746a923bSLukas Wunner 		atomic_inc(&desc->threads_handled);
1216746a923bSLukas Wunner 
1217f3f79e38SAlexander Gordeev 	irq_finalize_oneshot(desc, action);
12183a43e05fSSebastian Andrzej Siewior 	return ret;
12198d32a307SThomas Gleixner }
12208d32a307SThomas Gleixner 
12217140ea19SIdo Yariv static void wake_threads_waitq(struct irq_desc *desc)
12227140ea19SIdo Yariv {
1223c685689fSChuansheng Liu 	if (atomic_dec_and_test(&desc->threads_active))
12247140ea19SIdo Yariv 		wake_up(&desc->wait_for_threads);
12257140ea19SIdo Yariv }
12267140ea19SIdo Yariv 
122767d12145SAl Viro static void irq_thread_dtor(struct callback_head *unused)
12284d1d61a6SOleg Nesterov {
12294d1d61a6SOleg Nesterov 	struct task_struct *tsk = current;
12304d1d61a6SOleg Nesterov 	struct irq_desc *desc;
12314d1d61a6SOleg Nesterov 	struct irqaction *action;
12324d1d61a6SOleg Nesterov 
12334d1d61a6SOleg Nesterov 	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
12344d1d61a6SOleg Nesterov 		return;
12354d1d61a6SOleg Nesterov 
12364d1d61a6SOleg Nesterov 	action = kthread_data(tsk);
12374d1d61a6SOleg Nesterov 
1238fb21affaSLinus Torvalds 	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
123919af395dSAlan Cox 	       tsk->comm, tsk->pid, action->irq);
12404d1d61a6SOleg Nesterov 
12414d1d61a6SOleg Nesterov 
12424d1d61a6SOleg Nesterov 	desc = irq_to_desc(action->irq);
12434d1d61a6SOleg Nesterov 	/*
12444d1d61a6SOleg Nesterov 	 * If IRQTF_RUNTHREAD is set, we need to decrement
12454d1d61a6SOleg Nesterov 	 * desc->threads_active and wake possible waiters.
12464d1d61a6SOleg Nesterov 	 */
12474d1d61a6SOleg Nesterov 	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
12484d1d61a6SOleg Nesterov 		wake_threads_waitq(desc);
12494d1d61a6SOleg Nesterov 
12504d1d61a6SOleg Nesterov 	/* Prevent a stale desc->threads_oneshot */
12514d1d61a6SOleg Nesterov 	irq_finalize_oneshot(desc, action);
12524d1d61a6SOleg Nesterov }
12534d1d61a6SOleg Nesterov 
12542a1d3ab8SThomas Gleixner static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
12552a1d3ab8SThomas Gleixner {
12562a1d3ab8SThomas Gleixner 	struct irqaction *secondary = action->secondary;
12572a1d3ab8SThomas Gleixner 
12582a1d3ab8SThomas Gleixner 	if (WARN_ON_ONCE(!secondary))
12592a1d3ab8SThomas Gleixner 		return;
12602a1d3ab8SThomas Gleixner 
12612a1d3ab8SThomas Gleixner 	raw_spin_lock_irq(&desc->lock);
12622a1d3ab8SThomas Gleixner 	__irq_wake_thread(desc, secondary);
12632a1d3ab8SThomas Gleixner 	raw_spin_unlock_irq(&desc->lock);
12642a1d3ab8SThomas Gleixner }
12652a1d3ab8SThomas Gleixner 
12668d32a307SThomas Gleixner /*
12673aa551c9SThomas Gleixner  * Interrupt handler thread
12683aa551c9SThomas Gleixner  */
12693aa551c9SThomas Gleixner static int irq_thread(void *data)
12703aa551c9SThomas Gleixner {
127167d12145SAl Viro 	struct callback_head on_exit_work;
12723aa551c9SThomas Gleixner 	struct irqaction *action = data;
12733aa551c9SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(action->irq);
12743a43e05fSSebastian Andrzej Siewior 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
12753a43e05fSSebastian Andrzej Siewior 			struct irqaction *action);
12763aa551c9SThomas Gleixner 
1277e739f98bSThomas Gleixner 	sched_set_fifo(current);
1278e739f98bSThomas Gleixner 
127991cc470eSTanner Love 	if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
12808d32a307SThomas Gleixner 					   &action->thread_flags))
12818d32a307SThomas Gleixner 		handler_fn = irq_forced_thread_fn;
12828d32a307SThomas Gleixner 	else
12838d32a307SThomas Gleixner 		handler_fn = irq_thread_fn;
12848d32a307SThomas Gleixner 
128541f9d29fSAl Viro 	init_task_work(&on_exit_work, irq_thread_dtor);
128691989c70SJens Axboe 	task_work_add(current, &on_exit_work, TWA_NONE);
12873aa551c9SThomas Gleixner 
1288f3de44edSSankara Muthukrishnan 	irq_thread_check_affinity(desc, action);
1289f3de44edSSankara Muthukrishnan 
12903aa551c9SThomas Gleixner 	while (!irq_wait_for_interrupt(action)) {
12917140ea19SIdo Yariv 		irqreturn_t action_ret;
12923aa551c9SThomas Gleixner 
1293591d2fb0SThomas Gleixner 		irq_thread_check_affinity(desc, action);
1294591d2fb0SThomas Gleixner 
12953a43e05fSSebastian Andrzej Siewior 		action_ret = handler_fn(desc, action);
12962a1d3ab8SThomas Gleixner 		if (action_ret == IRQ_WAKE_THREAD)
12972a1d3ab8SThomas Gleixner 			irq_wake_secondary(desc, action);
12987140ea19SIdo Yariv 
12997140ea19SIdo Yariv 		wake_threads_waitq(desc);
13003aa551c9SThomas Gleixner 	}
13013aa551c9SThomas Gleixner 
13027140ea19SIdo Yariv 	/*
13037140ea19SIdo Yariv 	 * This is the regular exit path. __free_irq() is stopping the
13047140ea19SIdo Yariv 	 * thread via kthread_stop() after calling
1305519cc865SLukas Wunner 	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1306836557bdSLukas Wunner 	 * oneshot mask bit can be set.
13073aa551c9SThomas Gleixner 	 */
13084d1d61a6SOleg Nesterov 	task_work_cancel(current, irq_thread_dtor);
13093aa551c9SThomas Gleixner 	return 0;
13103aa551c9SThomas Gleixner }
13113aa551c9SThomas Gleixner 
1312a92444c6SThomas Gleixner /**
1313a92444c6SThomas Gleixner  *	irq_wake_thread - wake the irq thread for the action identified by dev_id
1314a92444c6SThomas Gleixner  *	@irq:		Interrupt line
1315a92444c6SThomas Gleixner  *	@dev_id:	Device identity for which the thread should be woken
1316a92444c6SThomas Gleixner  *
1317a92444c6SThomas Gleixner  */
1318a92444c6SThomas Gleixner void irq_wake_thread(unsigned int irq, void *dev_id)
1319a92444c6SThomas Gleixner {
1320a92444c6SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
1321a92444c6SThomas Gleixner 	struct irqaction *action;
1322a92444c6SThomas Gleixner 	unsigned long flags;
1323a92444c6SThomas Gleixner 
1324a92444c6SThomas Gleixner 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1325a92444c6SThomas Gleixner 		return;
1326a92444c6SThomas Gleixner 
1327a92444c6SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
1328f944b5a7SDaniel Lezcano 	for_each_action_of_desc(desc, action) {
1329a92444c6SThomas Gleixner 		if (action->dev_id == dev_id) {
1330a92444c6SThomas Gleixner 			if (action->thread)
1331a92444c6SThomas Gleixner 				__irq_wake_thread(desc, action);
1332a92444c6SThomas Gleixner 			break;
1333a92444c6SThomas Gleixner 		}
1334a92444c6SThomas Gleixner 	}
1335a92444c6SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1336a92444c6SThomas Gleixner }
1337a92444c6SThomas Gleixner EXPORT_SYMBOL_GPL(irq_wake_thread);
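
/*
 * Illustrative sketch (hypothetical "foo" driver): a driver which
 * detects pending device work outside of its hard irq handler can
 * kick the registered thread handler directly:
 *
 *	irq_wake_thread(foo->irq, foo);
 *
 * where foo is the same dev_id that was passed to
 * request_threaded_irq().
 */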
1338a92444c6SThomas Gleixner 
13392a1d3ab8SThomas Gleixner static int irq_setup_forced_threading(struct irqaction *new)
13408d32a307SThomas Gleixner {
134191cc470eSTanner Love 	if (!force_irqthreads())
13422a1d3ab8SThomas Gleixner 		return 0;
13438d32a307SThomas Gleixner 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
13442a1d3ab8SThomas Gleixner 		return 0;
13458d32a307SThomas Gleixner 
1346d1f0301bSThomas Gleixner 	/*
1347d1f0301bSThomas Gleixner 	 * No further action required for interrupts which are requested as
1348d1f0301bSThomas Gleixner 	 * threaded interrupts already
1349d1f0301bSThomas Gleixner 	 */
1350d1f0301bSThomas Gleixner 	if (new->handler == irq_default_primary_handler)
1351d1f0301bSThomas Gleixner 		return 0;
1352d1f0301bSThomas Gleixner 
13538d32a307SThomas Gleixner 	new->flags |= IRQF_ONESHOT;
13548d32a307SThomas Gleixner 
13552a1d3ab8SThomas Gleixner 	/*
13562a1d3ab8SThomas Gleixner 	 * Handle the case where we have a real primary handler and a
13572a1d3ab8SThomas Gleixner 	 * thread handler. We force-thread them as well by creating a
13582a1d3ab8SThomas Gleixner 	 * secondary action.
13592a1d3ab8SThomas Gleixner 	 */
1360d1f0301bSThomas Gleixner 	if (new->handler && new->thread_fn) {
13612a1d3ab8SThomas Gleixner 		/* Allocate the secondary action */
13622a1d3ab8SThomas Gleixner 		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
13632a1d3ab8SThomas Gleixner 		if (!new->secondary)
13642a1d3ab8SThomas Gleixner 			return -ENOMEM;
13652a1d3ab8SThomas Gleixner 		new->secondary->handler = irq_forced_secondary_handler;
13662a1d3ab8SThomas Gleixner 		new->secondary->thread_fn = new->thread_fn;
13672a1d3ab8SThomas Gleixner 		new->secondary->dev_id = new->dev_id;
13682a1d3ab8SThomas Gleixner 		new->secondary->irq = new->irq;
13692a1d3ab8SThomas Gleixner 		new->secondary->name = new->name;
13702a1d3ab8SThomas Gleixner 	}
13712a1d3ab8SThomas Gleixner 	/* Deal with the primary handler */
13728d32a307SThomas Gleixner 	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
13738d32a307SThomas Gleixner 	new->thread_fn = new->handler;
13748d32a307SThomas Gleixner 	new->handler = irq_default_primary_handler;
13752a1d3ab8SThomas Gleixner 	return 0;
13768d32a307SThomas Gleixner }
13778d32a307SThomas Gleixner 
1378c1bacbaeSThomas Gleixner static int irq_request_resources(struct irq_desc *desc)
1379c1bacbaeSThomas Gleixner {
1380c1bacbaeSThomas Gleixner 	struct irq_data *d = &desc->irq_data;
1381c1bacbaeSThomas Gleixner 	struct irq_chip *c = d->chip;
1382c1bacbaeSThomas Gleixner 
1383c1bacbaeSThomas Gleixner 	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1384c1bacbaeSThomas Gleixner }
1385c1bacbaeSThomas Gleixner 
1386c1bacbaeSThomas Gleixner static void irq_release_resources(struct irq_desc *desc)
1387c1bacbaeSThomas Gleixner {
1388c1bacbaeSThomas Gleixner 	struct irq_data *d = &desc->irq_data;
1389c1bacbaeSThomas Gleixner 	struct irq_chip *c = d->chip;
1390c1bacbaeSThomas Gleixner 
1391c1bacbaeSThomas Gleixner 	if (c->irq_release_resources)
1392c1bacbaeSThomas Gleixner 		c->irq_release_resources(d);
1393c1bacbaeSThomas Gleixner }
1394c1bacbaeSThomas Gleixner 
1395b525903cSJulien Thierry static bool irq_supports_nmi(struct irq_desc *desc)
1396b525903cSJulien Thierry {
1397b525903cSJulien Thierry 	struct irq_data *d = irq_desc_get_irq_data(desc);
1398b525903cSJulien Thierry 
1399b525903cSJulien Thierry #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1400b525903cSJulien Thierry 	/* Only IRQs directly managed by the root irqchip can be set as NMI */
1401b525903cSJulien Thierry 	if (d->parent_data)
1402b525903cSJulien Thierry 		return false;
1403b525903cSJulien Thierry #endif
1404b525903cSJulien Thierry 	/* Don't support NMIs for chips behind a slow bus */
1405b525903cSJulien Thierry 	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1406b525903cSJulien Thierry 		return false;
1407b525903cSJulien Thierry 
1408b525903cSJulien Thierry 	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1409b525903cSJulien Thierry }
1410b525903cSJulien Thierry 
1411b525903cSJulien Thierry static int irq_nmi_setup(struct irq_desc *desc)
1412b525903cSJulien Thierry {
1413b525903cSJulien Thierry 	struct irq_data *d = irq_desc_get_irq_data(desc);
1414b525903cSJulien Thierry 	struct irq_chip *c = d->chip;
1415b525903cSJulien Thierry 
1416b525903cSJulien Thierry 	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1417b525903cSJulien Thierry }
1418b525903cSJulien Thierry 
1419b525903cSJulien Thierry static void irq_nmi_teardown(struct irq_desc *desc)
1420b525903cSJulien Thierry {
1421b525903cSJulien Thierry 	struct irq_data *d = irq_desc_get_irq_data(desc);
1422b525903cSJulien Thierry 	struct irq_chip *c = d->chip;
1423b525903cSJulien Thierry 
1424b525903cSJulien Thierry 	if (c->irq_nmi_teardown)
1425b525903cSJulien Thierry 		c->irq_nmi_teardown(d);
1426b525903cSJulien Thierry }
1427b525903cSJulien Thierry 
14282a1d3ab8SThomas Gleixner static int
14292a1d3ab8SThomas Gleixner setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
14302a1d3ab8SThomas Gleixner {
14312a1d3ab8SThomas Gleixner 	struct task_struct *t;
14322a1d3ab8SThomas Gleixner 
14332a1d3ab8SThomas Gleixner 	if (!secondary) {
14342a1d3ab8SThomas Gleixner 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
14352a1d3ab8SThomas Gleixner 				   new->name);
14362a1d3ab8SThomas Gleixner 	} else {
14372a1d3ab8SThomas Gleixner 		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
14382a1d3ab8SThomas Gleixner 				   new->name);
14392a1d3ab8SThomas Gleixner 	}
14402a1d3ab8SThomas Gleixner 
14412a1d3ab8SThomas Gleixner 	if (IS_ERR(t))
14422a1d3ab8SThomas Gleixner 		return PTR_ERR(t);
14432a1d3ab8SThomas Gleixner 
14442a1d3ab8SThomas Gleixner 	/*
14452a1d3ab8SThomas Gleixner 	 * We keep the reference to the task struct even if
14462a1d3ab8SThomas Gleixner 	 * the thread dies, so that the interrupt code cannot
14472a1d3ab8SThomas Gleixner 	 * reference an already freed task_struct.
14482a1d3ab8SThomas Gleixner 	 */
14497b3c92b8SMatthew Wilcox (Oracle) 	new->thread = get_task_struct(t);
14502a1d3ab8SThomas Gleixner 	/*
14512a1d3ab8SThomas Gleixner 	 * Tell the thread to set its affinity. This is
14522a1d3ab8SThomas Gleixner 	 * important for shared interrupt handlers, as we do
14532a1d3ab8SThomas Gleixner 	 * not invoke setup_affinity() for the secondary
14542a1d3ab8SThomas Gleixner 	 * handlers; everything is already set up. Even for
14552a1d3ab8SThomas Gleixner 	 * interrupts marked with IRQF_NOBALANCING this is
14562a1d3ab8SThomas Gleixner 	 * correct, as we want the thread to move to the cpu(s)
14572a1d3ab8SThomas Gleixner 	 * on which the requesting code placed the interrupt.
14582a1d3ab8SThomas Gleixner 	 */
14592a1d3ab8SThomas Gleixner 	set_bit(IRQTF_AFFINITY, &new->thread_flags);
14602a1d3ab8SThomas Gleixner 	return 0;
14612a1d3ab8SThomas Gleixner }
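
/*
 * For illustration (irq number and device name are examples): with the
 * kthread_create() format strings above, the primary thread of irq 23
 * requested as "eth0" is named "irq/23-eth0" and a forced-threading
 * secondary thread "irq/23-s-eth0".
 */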
14622a1d3ab8SThomas Gleixner 
14631da177e4SLinus Torvalds /*
14641da177e4SLinus Torvalds  * Internal function to register an irqaction - typically used to
14651da177e4SLinus Torvalds  * allocate special interrupts that are part of the architecture.
146619d39a38SThomas Gleixner  *
146719d39a38SThomas Gleixner  * Locking rules:
146819d39a38SThomas Gleixner  *
146919d39a38SThomas Gleixner  * desc->request_mutex	Provides serialization against a concurrent free_irq()
147019d39a38SThomas Gleixner  *   chip_bus_lock	Provides serialization for slow bus operations
147119d39a38SThomas Gleixner  *     desc->lock	Provides serialization against hard interrupts
147219d39a38SThomas Gleixner  *
147319d39a38SThomas Gleixner  * chip_bus_lock and desc->lock are sufficient for all other management and
147419d39a38SThomas Gleixner  * interrupt related functions. desc->request_mutex solely serializes
147519d39a38SThomas Gleixner  * request/free_irq().
14761da177e4SLinus Torvalds  */
1477d3c60047SThomas Gleixner static int
1478d3c60047SThomas Gleixner __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
14791da177e4SLinus Torvalds {
1480f17c7545SIngo Molnar 	struct irqaction *old, **old_ptr;
1481b5faba21SThomas Gleixner 	unsigned long flags, thread_mask = 0;
14823b8249e7SThomas Gleixner 	int ret, nested, shared = 0;
14831da177e4SLinus Torvalds 
14847d94f7caSYinghai Lu 	if (!desc)
1485c2b5a251SMatthew Wilcox 		return -EINVAL;
1486c2b5a251SMatthew Wilcox 
14876b8ff312SThomas Gleixner 	if (desc->irq_data.chip == &no_irq_chip)
14881da177e4SLinus Torvalds 		return -ENOSYS;
1489b6873807SSebastian Andrzej Siewior 	if (!try_module_get(desc->owner))
1490b6873807SSebastian Andrzej Siewior 		return -ENODEV;
14911da177e4SLinus Torvalds 
14922a1d3ab8SThomas Gleixner 	new->irq = irq;
14932a1d3ab8SThomas Gleixner 
14941da177e4SLinus Torvalds 	/*
14954b357daeSJon Hunter 	 * If the trigger type is not specified by the caller,
14964b357daeSJon Hunter 	 * then use the default for this interrupt.
14974b357daeSJon Hunter 	 */
14984b357daeSJon Hunter 	if (!(new->flags & IRQF_TRIGGER_MASK))
14994b357daeSJon Hunter 		new->flags |= irqd_get_trigger_type(&desc->irq_data);
15004b357daeSJon Hunter 
15014b357daeSJon Hunter 	/*
1502399b5da2SThomas Gleixner 	 * Check whether the interrupt nests into another interrupt
1503399b5da2SThomas Gleixner 	 * thread.
15043aa551c9SThomas Gleixner 	 */
15051ccb4e61SThomas Gleixner 	nested = irq_settings_is_nested_thread(desc);
1506399b5da2SThomas Gleixner 	if (nested) {
1507b6873807SSebastian Andrzej Siewior 		if (!new->thread_fn) {
1508b6873807SSebastian Andrzej Siewior 			ret = -EINVAL;
1509b6873807SSebastian Andrzej Siewior 			goto out_mput;
1510b6873807SSebastian Andrzej Siewior 		}
1511399b5da2SThomas Gleixner 		/*
1512399b5da2SThomas Gleixner 		 * Replace the primary handler which was provided by
1513399b5da2SThomas Gleixner 		 * the driver for non-nested interrupt handling with the
1514399b5da2SThomas Gleixner 		 * dummy function which warns when called.
1515399b5da2SThomas Gleixner 		 */
1516399b5da2SThomas Gleixner 		new->handler = irq_nested_primary_handler;
15178d32a307SThomas Gleixner 	} else {
15182a1d3ab8SThomas Gleixner 		if (irq_settings_can_thread(desc)) {
15192a1d3ab8SThomas Gleixner 			ret = irq_setup_forced_threading(new);
15202a1d3ab8SThomas Gleixner 			if (ret)
15212a1d3ab8SThomas Gleixner 				goto out_mput;
15222a1d3ab8SThomas Gleixner 		}
1523399b5da2SThomas Gleixner 	}
1524399b5da2SThomas Gleixner 
1525399b5da2SThomas Gleixner 	/*
1526399b5da2SThomas Gleixner 	 * Create a handler thread when a thread function is supplied
1527399b5da2SThomas Gleixner 	 * and the interrupt does not nest into another interrupt
1528399b5da2SThomas Gleixner 	 * thread.
1529399b5da2SThomas Gleixner 	 */
1530399b5da2SThomas Gleixner 	if (new->thread_fn && !nested) {
15312a1d3ab8SThomas Gleixner 		ret = setup_irq_thread(new, irq, false);
15322a1d3ab8SThomas Gleixner 		if (ret)
1533b6873807SSebastian Andrzej Siewior 			goto out_mput;
15342a1d3ab8SThomas Gleixner 		if (new->secondary) {
15352a1d3ab8SThomas Gleixner 			ret = setup_irq_thread(new->secondary, irq, true);
15362a1d3ab8SThomas Gleixner 			if (ret)
15372a1d3ab8SThomas Gleixner 				goto out_thread;
1538b6873807SSebastian Andrzej Siewior 		}
15393aa551c9SThomas Gleixner 	}
15403aa551c9SThomas Gleixner 
15413aa551c9SThomas Gleixner 	/*
1542dc9b229aSThomas Gleixner 	 * Drivers are often written to work w/o knowledge about the
1543dc9b229aSThomas Gleixner 	 * underlying irq chip implementation, so a request for a
1544dc9b229aSThomas Gleixner 	 * threaded irq without a primary hard irq context handler
1545dc9b229aSThomas Gleixner 	 * requires the ONESHOT flag to be set. Some irq chips like
1546dc9b229aSThomas Gleixner 	 * MSI based interrupts are per se one shot safe. Check the
1547dc9b229aSThomas Gleixner 	 * chip flags, so we can avoid the unmask dance at the end of
1548dc9b229aSThomas Gleixner 	 * the threaded handler for those.
1549dc9b229aSThomas Gleixner 	 */
1550dc9b229aSThomas Gleixner 	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1551dc9b229aSThomas Gleixner 		new->flags &= ~IRQF_ONESHOT;
1552dc9b229aSThomas Gleixner 
155319d39a38SThomas Gleixner 	/*
155419d39a38SThomas Gleixner 	 * Protects against a concurrent __free_irq() call which might wait
1555519cc865SLukas Wunner 	 * for synchronize_hardirq() to complete without holding the optional
1556836557bdSLukas Wunner 	 * chip bus lock and desc->lock. Also protects against handing out
1557836557bdSLukas Wunner 	 * a recycled oneshot thread_mask bit while it's still in use by
1558836557bdSLukas Wunner 	 * its previous owner.
155919d39a38SThomas Gleixner 	 */
15609114014cSThomas Gleixner 	mutex_lock(&desc->request_mutex);
156119d39a38SThomas Gleixner 
156219d39a38SThomas Gleixner 	/*
156319d39a38SThomas Gleixner 	 * Acquire bus lock as the irq_request_resources() callback below
156419d39a38SThomas Gleixner 	 * might rely on the serialization or the magic power management
156519d39a38SThomas Gleixner 	 * functions which are abusing the irq_bus_lock() callback,
156619d39a38SThomas Gleixner 	 * functions which are abusing the irq_bus_lock() callback.
156719d39a38SThomas Gleixner 	chip_bus_lock(desc);
156819d39a38SThomas Gleixner 
156919d39a38SThomas Gleixner 	/* First installed action requests resources. */
157046e48e25SThomas Gleixner 	if (!desc->action) {
157146e48e25SThomas Gleixner 		ret = irq_request_resources(desc);
157246e48e25SThomas Gleixner 		if (ret) {
157346e48e25SThomas Gleixner 			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
157446e48e25SThomas Gleixner 			       new->name, irq, desc->irq_data.chip->name);
157519d39a38SThomas Gleixner 			goto out_bus_unlock;
157646e48e25SThomas Gleixner 		}
157746e48e25SThomas Gleixner 	}
15789114014cSThomas Gleixner 
1579dc9b229aSThomas Gleixner 	/*
15801da177e4SLinus Torvalds 	 * The following block of code has to be executed atomically
158119d39a38SThomas Gleixner 	 * protected against a concurrent interrupt and any of the other
158219d39a38SThomas Gleixner 	 * management calls which are not serialized via
158319d39a38SThomas Gleixner 	 * desc->request_mutex or the optional bus lock.
15841da177e4SLinus Torvalds 	 */
1585239007b8SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
1586f17c7545SIngo Molnar 	old_ptr = &desc->action;
1587f17c7545SIngo Molnar 	old = *old_ptr;
158806fcb0c6SIngo Molnar 	if (old) {
1589e76de9f8SThomas Gleixner 		/*
1590e76de9f8SThomas Gleixner 		 * Can't share interrupts unless both agree to and are
1591e76de9f8SThomas Gleixner 		 * the same type (level, edge, polarity). So both flag
15923cca53b0SThomas Gleixner 		 * fields must have IRQF_SHARED set and the bits which
15939d591eddSThomas Gleixner 		 * set the trigger type must match. Also all must
15949d591eddSThomas Gleixner 		 * agree on ONESHOT.
1595b525903cSJulien Thierry 		 * Interrupt lines used for NMIs cannot be shared.
1596e76de9f8SThomas Gleixner 		 */
15974f8413a3SMarc Zyngier 		unsigned int oldtype;
15984f8413a3SMarc Zyngier 
1599b525903cSJulien Thierry 		if (desc->istate & IRQS_NMI) {
1600b525903cSJulien Thierry 			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1601b525903cSJulien Thierry 				new->name, irq, desc->irq_data.chip->name);
1602b525903cSJulien Thierry 			ret = -EINVAL;
1603b525903cSJulien Thierry 			goto out_unlock;
1604b525903cSJulien Thierry 		}
1605b525903cSJulien Thierry 
16064f8413a3SMarc Zyngier 		/*
16074f8413a3SMarc Zyngier 		 * If nobody did set the configuration before, inherit
16084f8413a3SMarc Zyngier 		 * the one provided by the requester.
16094f8413a3SMarc Zyngier 		 */
16104f8413a3SMarc Zyngier 		if (irqd_trigger_type_was_set(&desc->irq_data)) {
16114f8413a3SMarc Zyngier 			oldtype = irqd_get_trigger_type(&desc->irq_data);
16124f8413a3SMarc Zyngier 		} else {
16134f8413a3SMarc Zyngier 			oldtype = new->flags & IRQF_TRIGGER_MASK;
16144f8413a3SMarc Zyngier 			irqd_set_trigger_type(&desc->irq_data, oldtype);
16154f8413a3SMarc Zyngier 		}
1616382bd4deSHans de Goede 
16173cca53b0SThomas Gleixner 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
1618382bd4deSHans de Goede 		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1619f5d89470SThomas Gleixner 		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
1620f5163427SDimitri Sivanich 			goto mismatch;
1621f5163427SDimitri Sivanich 
1622f5163427SDimitri Sivanich 		/* All handlers must agree on per-cpuness */
16233cca53b0SThomas Gleixner 		if ((old->flags & IRQF_PERCPU) !=
16243cca53b0SThomas Gleixner 		    (new->flags & IRQF_PERCPU))
1625f5163427SDimitri Sivanich 			goto mismatch;
16261da177e4SLinus Torvalds 
16271da177e4SLinus Torvalds 		/* add new interrupt at end of irq queue */
16281da177e4SLinus Torvalds 		do {
162952abb700SThomas Gleixner 			/*
163052abb700SThomas Gleixner 			 * Or all existing action->thread_mask bits,
163152abb700SThomas Gleixner 			 * so we can find the next zero bit for this
163252abb700SThomas Gleixner 			 * new action.
163352abb700SThomas Gleixner 			 */
1634b5faba21SThomas Gleixner 			thread_mask |= old->thread_mask;
1635f17c7545SIngo Molnar 			old_ptr = &old->next;
1636f17c7545SIngo Molnar 			old = *old_ptr;
16371da177e4SLinus Torvalds 		} while (old);
16381da177e4SLinus Torvalds 		shared = 1;
16391da177e4SLinus Torvalds 	}
16401da177e4SLinus Torvalds 
1641b5faba21SThomas Gleixner 	/*
164252abb700SThomas Gleixner 	 * Setup the thread mask for this irqaction for ONESHOT. For
164352abb700SThomas Gleixner 	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
164452abb700SThomas Gleixner 	 * conditional in irq_wake_thread().
1645b5faba21SThomas Gleixner 	 */
164652abb700SThomas Gleixner 	if (new->flags & IRQF_ONESHOT) {
164752abb700SThomas Gleixner 		/*
164852abb700SThomas Gleixner 		 * Unlikely to have 32 or 64 irqs sharing one line,
164952abb700SThomas Gleixner 		 * but who knows.
165052abb700SThomas Gleixner 		 */
165152abb700SThomas Gleixner 		if (thread_mask == ~0UL) {
1652b5faba21SThomas Gleixner 			ret = -EBUSY;
1653cba4235eSThomas Gleixner 			goto out_unlock;
1654b5faba21SThomas Gleixner 		}
165552abb700SThomas Gleixner 		/*
165652abb700SThomas Gleixner 		 * The thread_mask for the action is or'ed into
165752abb700SThomas Gleixner 		 * desc->threads_oneshot to indicate that the
165852abb700SThomas Gleixner 		 * IRQF_ONESHOT thread handler has been woken, but not
165952abb700SThomas Gleixner 		 * yet finished. The bit is cleared when a thread
166052abb700SThomas Gleixner 		 * completes. When all threads of a shared interrupt
166152abb700SThomas Gleixner 		 * line have completed, desc->threads_oneshot becomes
166252abb700SThomas Gleixner 		 * zero and the interrupt line is unmasked. See
166352abb700SThomas Gleixner 		 * handle.c:irq_wake_thread() for further information.
166452abb700SThomas Gleixner 		 *
166552abb700SThomas Gleixner 		 * If no thread is woken by primary (hard irq context)
166652abb700SThomas Gleixner 		 * interrupt handlers, then desc->threads_oneshot is
166752abb700SThomas Gleixner 		 * also checked for zero to unmask the irq line in the
166852abb700SThomas Gleixner 		 * affected hard irq flow handlers
166952abb700SThomas Gleixner 		 * (handle_[fasteoi|level]_irq).
167052abb700SThomas Gleixner 		 *
167152abb700SThomas Gleixner 		 * The new action gets the first zero bit of
167252abb700SThomas Gleixner 		 * thread_mask assigned. See the loop above which or's
167352abb700SThomas Gleixner 		 * all existing action->thread_mask bits.
167452abb700SThomas Gleixner 		 */
1675ffc661c9SRasmus Villemoes 		new->thread_mask = 1UL << ffz(thread_mask);
16761c6c6952SThomas Gleixner 
1677dc9b229aSThomas Gleixner 	} else if (new->handler == irq_default_primary_handler &&
1678dc9b229aSThomas Gleixner 		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
16791c6c6952SThomas Gleixner 		/*
16801c6c6952SThomas Gleixner 		 * The interrupt was requested with handler = NULL, so
16811c6c6952SThomas Gleixner 		 * we use the default primary handler for it. But it
16821c6c6952SThomas Gleixner 		 * does not have the oneshot flag set. In combination
16831c6c6952SThomas Gleixner 		 * with level interrupts this is deadly, because the
16841c6c6952SThomas Gleixner 		 * default primary handler just wakes the thread, then
16851c6c6952SThomas Gleixner 		 * the irq line is reenabled, but the device still
16861c6c6952SThomas Gleixner 		 * has the level irq asserted. Rinse and repeat....
16871c6c6952SThomas Gleixner 		 *
16881c6c6952SThomas Gleixner 		 * While this works for edge type interrupts, we play
16891c6c6952SThomas Gleixner 		 * it safe and reject unconditionally because we can't
16901c6c6952SThomas Gleixner 		 * say for sure which type this interrupt really
16911c6c6952SThomas Gleixner 		 * has. The type flags are unreliable as the
16921c6c6952SThomas Gleixner 		 * underlying chip implementation can override them.
16931c6c6952SThomas Gleixner 		 */
1694025af39bSLuca Ceresoli 		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1695025af39bSLuca Ceresoli 		       new->name, irq);
16961c6c6952SThomas Gleixner 		ret = -EINVAL;
1697cba4235eSThomas Gleixner 		goto out_unlock;
169852abb700SThomas Gleixner 	}
1699b5faba21SThomas Gleixner 
17001da177e4SLinus Torvalds 	if (!shared) {
17013aa551c9SThomas Gleixner 		init_waitqueue_head(&desc->wait_for_threads);
17023aa551c9SThomas Gleixner 
170382736f4dSUwe Kleine-König 		/* Setup the type (level, edge polarity) if configured: */
170482736f4dSUwe Kleine-König 		if (new->flags & IRQF_TRIGGER_MASK) {
1705a1ff541aSJiang Liu 			ret = __irq_set_trigger(desc,
1706f2b662daSDavid Brownell 						new->flags & IRQF_TRIGGER_MASK);
170782736f4dSUwe Kleine-König 
170819d39a38SThomas Gleixner 			if (ret)
1709cba4235eSThomas Gleixner 				goto out_unlock;
1710091738a2SThomas Gleixner 		}
1711f75d222bSAhmed S. Darwish 
1712c942cee4SThomas Gleixner 		/*
1713c942cee4SThomas Gleixner 		 * Activate the interrupt. That activation must happen
1714c942cee4SThomas Gleixner 		 * independently of IRQ_NOAUTOEN. request_irq() can fail
1715c942cee4SThomas Gleixner 		 * and the callers are supposed to handle
1716c942cee4SThomas Gleixner 		 * that. enable_irq() of an interrupt requested with
1717c942cee4SThomas Gleixner 		 * IRQ_NOAUTOEN is not supposed to fail. The activation
1718c942cee4SThomas Gleixner 		 * keeps it in shutdown mode, it merely associates
1719c942cee4SThomas Gleixner 		 * resources if necessary and if that's not possible it
1720c942cee4SThomas Gleixner 		 * fails. Interrupts which are in managed shutdown mode
1721c942cee4SThomas Gleixner 		 * will simply ignore that activation request.
1722c942cee4SThomas Gleixner 		 */
1723c942cee4SThomas Gleixner 		ret = irq_activate(desc);
1724c942cee4SThomas Gleixner 		if (ret)
1725c942cee4SThomas Gleixner 			goto out_unlock;
1726c942cee4SThomas Gleixner 
1727009b4c3bSThomas Gleixner 		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
172832f4125eSThomas Gleixner 				  IRQS_ONESHOT | IRQS_WAITING);
172932f4125eSThomas Gleixner 		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
173094d39e1fSThomas Gleixner 
1731a005677bSThomas Gleixner 		if (new->flags & IRQF_PERCPU) {
1732a005677bSThomas Gleixner 			irqd_set(&desc->irq_data, IRQD_PER_CPU);
1733a005677bSThomas Gleixner 			irq_settings_set_per_cpu(desc);
1734c2b1063eSThomas Gleixner 			if (new->flags & IRQF_NO_DEBUG)
1735c2b1063eSThomas Gleixner 				irq_settings_set_no_debug(desc);
1736a005677bSThomas Gleixner 		}
17376a58fb3bSThomas Gleixner 
1738c2b1063eSThomas Gleixner 		if (noirqdebug)
1739c2b1063eSThomas Gleixner 			irq_settings_set_no_debug(desc);
1740c2b1063eSThomas Gleixner 
1741b25c340cSThomas Gleixner 		if (new->flags & IRQF_ONESHOT)
17423d67baecSThomas Gleixner 			desc->istate |= IRQS_ONESHOT;
1743b25c340cSThomas Gleixner 
17442e051552SThomas Gleixner 		/* Exclude IRQ from balancing if requested */
17452e051552SThomas Gleixner 		if (new->flags & IRQF_NOBALANCING) {
17462e051552SThomas Gleixner 			irq_settings_set_no_balancing(desc);
17472e051552SThomas Gleixner 			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
17482e051552SThomas Gleixner 		}
17492e051552SThomas Gleixner 
1750cbe16f35SBarry Song 		if (!(new->flags & IRQF_NO_AUTOEN) &&
1751cbe16f35SBarry Song 		    irq_settings_can_autoenable(desc)) {
17524cde9c6bSThomas Gleixner 			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
175304c848d3SThomas Gleixner 		} else {
175404c848d3SThomas Gleixner 			/*
175504c848d3SThomas Gleixner 			 * Shared interrupts do not go well with disabling
175604c848d3SThomas Gleixner 			 * auto enable. A sharing driver might request
175704c848d3SThomas Gleixner 			 * the irq while it's still disabled and then wait for
175804c848d3SThomas Gleixner 			 * interrupts forever.
175904c848d3SThomas Gleixner 			 */
176004c848d3SThomas Gleixner 			WARN_ON_ONCE(new->flags & IRQF_SHARED);
1761e76de9f8SThomas Gleixner 			/* Undo nested disables: */
1762e76de9f8SThomas Gleixner 			desc->depth = 1;
176304c848d3SThomas Gleixner 		}
176418404756SMax Krasnyansky 
1765876dbd4cSThomas Gleixner 	} else if (new->flags & IRQF_TRIGGER_MASK) {
1766876dbd4cSThomas Gleixner 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
17677ee7e87dSThomas Gleixner 		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1768876dbd4cSThomas Gleixner 
1769876dbd4cSThomas Gleixner 		if (nmsk != omsk)
1770876dbd4cSThomas Gleixner 			/* hope the handler works with current trigger mode */
1771a395d6a7SJoe Perches 			pr_warn("irq %d uses trigger mode %u; requested %u\n",
17727ee7e87dSThomas Gleixner 				irq, omsk, nmsk);
177394d39e1fSThomas Gleixner 	}
177482736f4dSUwe Kleine-König 
1775f17c7545SIngo Molnar 	*old_ptr = new;
177682736f4dSUwe Kleine-König 
1777cab303beSThomas Gleixner 	irq_pm_install_action(desc, new);
1778cab303beSThomas Gleixner 
17798528b0f1SLinus Torvalds 	/* Reset broken irq detection when installing new handler */
17808528b0f1SLinus Torvalds 	desc->irq_count = 0;
17818528b0f1SLinus Torvalds 	desc->irqs_unhandled = 0;
17821adb0850SThomas Gleixner 
17831adb0850SThomas Gleixner 	/*
17841adb0850SThomas Gleixner 	 * Check whether we disabled the irq via the spurious handler
17851adb0850SThomas Gleixner 	 * before. Reenable it and give it another chance.
17861adb0850SThomas Gleixner 	 */
17877acdd53eSThomas Gleixner 	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
17887acdd53eSThomas Gleixner 		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
178979ff1cdaSJiang Liu 		__enable_irq(desc);
17901adb0850SThomas Gleixner 	}
17911adb0850SThomas Gleixner 
1792239007b8SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
17933a90795eSThomas Gleixner 	chip_bus_sync_unlock(desc);
17949114014cSThomas Gleixner 	mutex_unlock(&desc->request_mutex);
17951da177e4SLinus Torvalds 
1796b2d3d61aSDaniel Lezcano 	irq_setup_timings(desc, new);
1797b2d3d61aSDaniel Lezcano 
179869ab8494SThomas Gleixner 	/*
179969ab8494SThomas Gleixner 	 * Strictly no need to wake it up, but hung_task complains
180069ab8494SThomas Gleixner 	 * when no hard interrupt wakes the thread up.
180169ab8494SThomas Gleixner 	 */
180269ab8494SThomas Gleixner 	if (new->thread)
180369ab8494SThomas Gleixner 		wake_up_process(new->thread);
18042a1d3ab8SThomas Gleixner 	if (new->secondary)
18052a1d3ab8SThomas Gleixner 		wake_up_process(new->secondary->thread);
180669ab8494SThomas Gleixner 
18072c6927a3SYinghai Lu 	register_irq_proc(irq, desc);
18081da177e4SLinus Torvalds 	new->dir = NULL;
18091da177e4SLinus Torvalds 	register_handler_proc(irq, new);
18101da177e4SLinus Torvalds 	return 0;
1811f5163427SDimitri Sivanich 
1812f5163427SDimitri Sivanich mismatch:
18133cca53b0SThomas Gleixner 	if (!(new->flags & IRQF_PROBE_SHARED)) {
181497fd75b7SAndrew Morton 		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1815f5d89470SThomas Gleixner 		       irq, new->flags, new->name, old->flags, old->name);
1816f5d89470SThomas Gleixner #ifdef CONFIG_DEBUG_SHIRQ
1817f5163427SDimitri Sivanich 		dump_stack();
18183f050447SAlan Cox #endif
1819f5d89470SThomas Gleixner 	}
18203aa551c9SThomas Gleixner 	ret = -EBUSY;
18213aa551c9SThomas Gleixner 
1822cba4235eSThomas Gleixner out_unlock:
18231c389795SDan Carpenter 	raw_spin_unlock_irqrestore(&desc->lock, flags);
18243b8249e7SThomas Gleixner 
182546e48e25SThomas Gleixner 	if (!desc->action)
182646e48e25SThomas Gleixner 		irq_release_resources(desc);
182719d39a38SThomas Gleixner out_bus_unlock:
182819d39a38SThomas Gleixner 	chip_bus_sync_unlock(desc);
18299114014cSThomas Gleixner 	mutex_unlock(&desc->request_mutex);
18309114014cSThomas Gleixner 
18313aa551c9SThomas Gleixner out_thread:
18323aa551c9SThomas Gleixner 	if (new->thread) {
18333aa551c9SThomas Gleixner 		struct task_struct *t = new->thread;
18343aa551c9SThomas Gleixner 
18353aa551c9SThomas Gleixner 		new->thread = NULL;
18363aa551c9SThomas Gleixner 		kthread_stop(t);
18373aa551c9SThomas Gleixner 		put_task_struct(t);
18383aa551c9SThomas Gleixner 	}
18392a1d3ab8SThomas Gleixner 	if (new->secondary && new->secondary->thread) {
18402a1d3ab8SThomas Gleixner 		struct task_struct *t = new->secondary->thread;
18412a1d3ab8SThomas Gleixner 
18422a1d3ab8SThomas Gleixner 		new->secondary->thread = NULL;
18432a1d3ab8SThomas Gleixner 		kthread_stop(t);
18442a1d3ab8SThomas Gleixner 		put_task_struct(t);
18452a1d3ab8SThomas Gleixner 	}
1846b6873807SSebastian Andrzej Siewior out_mput:
1847b6873807SSebastian Andrzej Siewior 	module_put(desc->owner);
18483aa551c9SThomas Gleixner 	return ret;
18491da177e4SLinus Torvalds }
18501da177e4SLinus Torvalds 
1851cbf94f06SMagnus Damm /*
1852cbf94f06SMagnus Damm  * Internal function to unregister an irqaction - used to free
1853cbf94f06SMagnus Damm  * regular and special interrupts that are part of the architecture.
18541da177e4SLinus Torvalds  */
185583ac4ca9SUwe Kleine König static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
18561da177e4SLinus Torvalds {
185783ac4ca9SUwe Kleine König 	unsigned irq = desc->irq_data.irq;
1858f17c7545SIngo Molnar 	struct irqaction *action, **action_ptr;
18591da177e4SLinus Torvalds 	unsigned long flags;
18601da177e4SLinus Torvalds 
1861ae88a23bSIngo Molnar 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
18627d94f7caSYinghai Lu 
18639114014cSThomas Gleixner 	mutex_lock(&desc->request_mutex);
1864abc7e40cSThomas Gleixner 	chip_bus_lock(desc);
1865239007b8SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
1866ae88a23bSIngo Molnar 
1867ae88a23bSIngo Molnar 	/*
1868ae88a23bSIngo Molnar 	 * There can be multiple actions per IRQ descriptor, find the right
1869ae88a23bSIngo Molnar 	 * one based on the dev_id:
1870ae88a23bSIngo Molnar 	 */
1871f17c7545SIngo Molnar 	action_ptr = &desc->action;
18721da177e4SLinus Torvalds 	for (;;) {
1873f17c7545SIngo Molnar 		action = *action_ptr;
18741da177e4SLinus Torvalds 
1875ae88a23bSIngo Molnar 		if (!action) {
1876ae88a23bSIngo Molnar 			WARN(1, "Trying to free already-free IRQ %d\n", irq);
1877239007b8SThomas Gleixner 			raw_spin_unlock_irqrestore(&desc->lock, flags);
1878abc7e40cSThomas Gleixner 			chip_bus_sync_unlock(desc);
187919d39a38SThomas Gleixner 			mutex_unlock(&desc->request_mutex);
1880f21cfb25SMagnus Damm 			return NULL;
1881ae88a23bSIngo Molnar 		}
18821da177e4SLinus Torvalds 
18838316e381SIngo Molnar 		if (action->dev_id == dev_id)
1884ae88a23bSIngo Molnar 			break;
1885f17c7545SIngo Molnar 		action_ptr = &action->next;
1886ae88a23bSIngo Molnar 	}
1887ae88a23bSIngo Molnar 
1888ae88a23bSIngo Molnar 	/* Found it - now remove it from the list of entries: */
1889f17c7545SIngo Molnar 	*action_ptr = action->next;
1890dbce706eSPaolo 'Blaisorblade' Giarrusso 
1891cab303beSThomas Gleixner 	irq_pm_remove_action(desc, action);
1892cab303beSThomas Gleixner 
1893ae88a23bSIngo Molnar 	/* If this was the last handler, shut down the IRQ line: */
1894c1bacbaeSThomas Gleixner 	if (!desc->action) {
1895e9849777SThomas Gleixner 		irq_settings_clr_disable_unlazy(desc);
18964001d8e8SThomas Gleixner 		/* Only shutdown. Deactivate after synchronize_hardirq() */
189746999238SThomas Gleixner 		irq_shutdown(desc);
1898c1bacbaeSThomas Gleixner 	}
18993aa551c9SThomas Gleixner 
1900e7a297b0SPeter P Waskiewicz Jr #ifdef CONFIG_SMP
1901e7a297b0SPeter P Waskiewicz Jr 	/* make sure affinity_hint is cleaned up */
1902e7a297b0SPeter P Waskiewicz Jr 	if (WARN_ON_ONCE(desc->affinity_hint))
1903e7a297b0SPeter P Waskiewicz Jr 		desc->affinity_hint = NULL;
1904e7a297b0SPeter P Waskiewicz Jr #endif
1905e7a297b0SPeter P Waskiewicz Jr 
1906239007b8SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
190719d39a38SThomas Gleixner 	/*
190819d39a38SThomas Gleixner 	 * Drop bus_lock here so the changes which were done in the chip
190919d39a38SThomas Gleixner 	 * callbacks above are synced out to the irq chips which hang
1910519cc865SLukas Wunner 	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
191119d39a38SThomas Gleixner 	 *
191219d39a38SThomas Gleixner 	 * Aside of that the bus_lock can also be taken from the threaded
191319d39a38SThomas Gleixner 	 * handler in irq_finalize_oneshot() which results in a deadlock
1914519cc865SLukas Wunner 	 * because kthread_stop() would wait forever for the thread to
191519d39a38SThomas Gleixner 	 * complete, which is blocked on the bus lock.
191619d39a38SThomas Gleixner 	 *
191719d39a38SThomas Gleixner 	 * The still held desc->request_mutex protects against a
191819d39a38SThomas Gleixner 	 * concurrent request_irq() of this irq so the release of resources
191919d39a38SThomas Gleixner 	 * and timing data is properly serialized.
192019d39a38SThomas Gleixner 	 */
1921abc7e40cSThomas Gleixner 	chip_bus_sync_unlock(desc);
1922ae88a23bSIngo Molnar 
19231da177e4SLinus Torvalds 	unregister_handler_proc(irq, action);
19241da177e4SLinus Torvalds 
192562e04686SThomas Gleixner 	/*
192662e04686SThomas Gleixner 	 * Make sure it's not being used on another CPU and if the chip
192762e04686SThomas Gleixner 	 * supports it also make sure that there is no (not yet serviced)
192862e04686SThomas Gleixner 	 * interrupt in flight at the hardware level.
192962e04686SThomas Gleixner 	 */
193062e04686SThomas Gleixner 	__synchronize_hardirq(desc, true);
1931ae88a23bSIngo Molnar 
19321d99493bSDavid Woodhouse #ifdef CONFIG_DEBUG_SHIRQ
19331d99493bSDavid Woodhouse 	/*
1934ae88a23bSIngo Molnar 	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1935ae88a23bSIngo Molnar 	 * event to happen even now that it's being freed, so let's make sure that
1936ae88a23bSIngo Molnar 	 * is so by doing an extra call to the handler ....
1937ae88a23bSIngo Molnar 	 *
1938ae88a23bSIngo Molnar 	 * ( We do this after actually deregistering it, to make sure that a
19390a13ec0bSJonathan Neuschäfer 	 *   'real' IRQ doesn't run in parallel with our fake. )
19401d99493bSDavid Woodhouse 	 */
19411d99493bSDavid Woodhouse 	if (action->flags & IRQF_SHARED) {
19421d99493bSDavid Woodhouse 		local_irq_save(flags);
19431d99493bSDavid Woodhouse 		action->handler(irq, dev_id);
19441d99493bSDavid Woodhouse 		local_irq_restore(flags);
19451d99493bSDavid Woodhouse 	}
19461d99493bSDavid Woodhouse #endif
19472d860ad7SLinus Torvalds 
1948519cc865SLukas Wunner 	/*
1949519cc865SLukas Wunner 	 * The action has already been removed above, but the thread writes
1950519cc865SLukas Wunner 	 * its oneshot mask bit when it completes. However, request_mutex is
1951519cc865SLukas Wunner 	 * held across this, which prevents __setup_irq() from handing out
1952519cc865SLukas Wunner 	 * the same bit to a newly requested action.
1953519cc865SLukas Wunner 	 */
19542d860ad7SLinus Torvalds 	if (action->thread) {
19552d860ad7SLinus Torvalds 		kthread_stop(action->thread);
19562d860ad7SLinus Torvalds 		put_task_struct(action->thread);
19572a1d3ab8SThomas Gleixner 		if (action->secondary && action->secondary->thread) {
19582a1d3ab8SThomas Gleixner 			kthread_stop(action->secondary->thread);
19592a1d3ab8SThomas Gleixner 			put_task_struct(action->secondary->thread);
19602a1d3ab8SThomas Gleixner 		}
19612d860ad7SLinus Torvalds 	}
19622d860ad7SLinus Torvalds 
196319d39a38SThomas Gleixner 	/* Last action releases resources */
19642343877fSThomas Gleixner 	if (!desc->action) {
196519d39a38SThomas Gleixner 		/*
1966a359f757SIngo Molnar 		 * Reacquire bus lock as irq_release_resources() might
196719d39a38SThomas Gleixner 		 * require it to deallocate resources over the slow bus.
196819d39a38SThomas Gleixner 		 */
196919d39a38SThomas Gleixner 		chip_bus_lock(desc);
19704001d8e8SThomas Gleixner 		/*
19714001d8e8SThomas Gleixner 		 * There is no interrupt on the fly anymore. Deactivate it
19724001d8e8SThomas Gleixner 		 * completely.
19734001d8e8SThomas Gleixner 		 */
19744001d8e8SThomas Gleixner 		raw_spin_lock_irqsave(&desc->lock, flags);
19754001d8e8SThomas Gleixner 		irq_domain_deactivate_irq(&desc->irq_data);
19764001d8e8SThomas Gleixner 		raw_spin_unlock_irqrestore(&desc->lock, flags);
19774001d8e8SThomas Gleixner 
197846e48e25SThomas Gleixner 		irq_release_resources(desc);
197919d39a38SThomas Gleixner 		chip_bus_sync_unlock(desc);
19802343877fSThomas Gleixner 		irq_remove_timings(desc);
19812343877fSThomas Gleixner 	}
198246e48e25SThomas Gleixner 
19839114014cSThomas Gleixner 	mutex_unlock(&desc->request_mutex);
19849114014cSThomas Gleixner 
1985be45beb2SJon Hunter 	irq_chip_pm_put(&desc->irq_data);
1986b6873807SSebastian Andrzej Siewior 	module_put(desc->owner);
19872a1d3ab8SThomas Gleixner 	kfree(action->secondary);
1988f21cfb25SMagnus Damm 	return action;
1989f21cfb25SMagnus Damm }
19901da177e4SLinus Torvalds 
19911da177e4SLinus Torvalds /**
1992f21cfb25SMagnus Damm  *	free_irq - free an interrupt allocated with request_irq
19931da177e4SLinus Torvalds  *	@irq: Interrupt line to free
19941da177e4SLinus Torvalds  *	@dev_id: Device identity to free
19951da177e4SLinus Torvalds  *
19961da177e4SLinus Torvalds  *	Remove an interrupt handler. The handler is removed and if the
19971da177e4SLinus Torvalds  *	interrupt line is no longer in use by any driver it is disabled.
19981da177e4SLinus Torvalds  *	On a shared IRQ the caller must ensure the interrupt is disabled
19991da177e4SLinus Torvalds  *	on the card it drives before calling this function. The function
20001da177e4SLinus Torvalds  *	does not return until any executing interrupts for this IRQ
20011da177e4SLinus Torvalds  *	have completed.
20021da177e4SLinus Torvalds  *
20031da177e4SLinus Torvalds  *	This function must not be called from interrupt context.
200425ce4be7SChristoph Hellwig  *
200525ce4be7SChristoph Hellwig  *	Returns the devname argument passed to request_irq.
20061da177e4SLinus Torvalds  */
200725ce4be7SChristoph Hellwig const void *free_irq(unsigned int irq, void *dev_id)
20081da177e4SLinus Torvalds {
200970aedd24SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
201025ce4be7SChristoph Hellwig 	struct irqaction *action;
201125ce4be7SChristoph Hellwig 	const char *devname;
201270aedd24SThomas Gleixner 
201331d9d9b6SMarc Zyngier 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
201425ce4be7SChristoph Hellwig 		return NULL;
201570aedd24SThomas Gleixner 
2016cd7eab44SBen Hutchings #ifdef CONFIG_SMP
2017cd7eab44SBen Hutchings 	if (WARN_ON(desc->affinity_notify))
2018cd7eab44SBen Hutchings 		desc->affinity_notify = NULL;
2019cd7eab44SBen Hutchings #endif
2020cd7eab44SBen Hutchings 
202183ac4ca9SUwe Kleine König 	action = __free_irq(desc, dev_id);
20222827a418SAlexandru Moise 
20232827a418SAlexandru Moise 	if (!action)
20242827a418SAlexandru Moise 		return NULL;
20252827a418SAlexandru Moise 
202625ce4be7SChristoph Hellwig 	devname = action->name;
202725ce4be7SChristoph Hellwig 	kfree(action);
202825ce4be7SChristoph Hellwig 	return devname;
20291da177e4SLinus Torvalds }
20301da177e4SLinus Torvalds EXPORT_SYMBOL(free_irq);
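
/*
 * Illustrative sketch (irq and dev are hypothetical): the returned
 * devname matters for callers which allocated the name dynamically
 * and need to free it once the interrupt is gone:
 *
 *	const void *name = free_irq(irq, dev);
 *
 *	kfree(name);
 */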
20311da177e4SLinus Torvalds 
2032b525903cSJulien Thierry /* This function must be called with desc->lock held */
2033b525903cSJulien Thierry static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
2034b525903cSJulien Thierry {
2035b525903cSJulien Thierry 	const char *devname = NULL;
2036b525903cSJulien Thierry 
2037b525903cSJulien Thierry 	desc->istate &= ~IRQS_NMI;
2038b525903cSJulien Thierry 
2039b525903cSJulien Thierry 	if (!WARN_ON(desc->action == NULL)) {
2040b525903cSJulien Thierry 		irq_pm_remove_action(desc, desc->action);
2041b525903cSJulien Thierry 		devname = desc->action->name;
2042b525903cSJulien Thierry 		unregister_handler_proc(irq, desc->action);
2043b525903cSJulien Thierry 
2044b525903cSJulien Thierry 		kfree(desc->action);
2045b525903cSJulien Thierry 		desc->action = NULL;
2046b525903cSJulien Thierry 	}
2047b525903cSJulien Thierry 
2048b525903cSJulien Thierry 	irq_settings_clr_disable_unlazy(desc);
20494001d8e8SThomas Gleixner 	irq_shutdown_and_deactivate(desc);
2050b525903cSJulien Thierry 
2051b525903cSJulien Thierry 	irq_release_resources(desc);
2052b525903cSJulien Thierry 
2053b525903cSJulien Thierry 	irq_chip_pm_put(&desc->irq_data);
2054b525903cSJulien Thierry 	module_put(desc->owner);
2055b525903cSJulien Thierry 
2056b525903cSJulien Thierry 	return devname;
2057b525903cSJulien Thierry }
2058b525903cSJulien Thierry 
2059b525903cSJulien Thierry const void *free_nmi(unsigned int irq, void *dev_id)
2060b525903cSJulien Thierry {
2061b525903cSJulien Thierry 	struct irq_desc *desc = irq_to_desc(irq);
2062b525903cSJulien Thierry 	unsigned long flags;
2063b525903cSJulien Thierry 	const void *devname;
2064b525903cSJulien Thierry 
2065b525903cSJulien Thierry 	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
2066b525903cSJulien Thierry 		return NULL;
2067b525903cSJulien Thierry 
2068b525903cSJulien Thierry 	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2069b525903cSJulien Thierry 		return NULL;
2070b525903cSJulien Thierry 
2071b525903cSJulien Thierry 	/* NMI still enabled */
2072b525903cSJulien Thierry 	if (WARN_ON(desc->depth == 0))
2073b525903cSJulien Thierry 		disable_nmi_nosync(irq);
2074b525903cSJulien Thierry 
2075b525903cSJulien Thierry 	raw_spin_lock_irqsave(&desc->lock, flags);
2076b525903cSJulien Thierry 
2077b525903cSJulien Thierry 	irq_nmi_teardown(desc);
2078b525903cSJulien Thierry 	devname = __cleanup_nmi(irq, desc);
2079b525903cSJulien Thierry 
2080b525903cSJulien Thierry 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2081b525903cSJulien Thierry 
2082b525903cSJulien Thierry 	return devname;
2083b525903cSJulien Thierry }
2084b525903cSJulien Thierry 
20851da177e4SLinus Torvalds /**
20863aa551c9SThomas Gleixner  *	request_threaded_irq - allocate an interrupt line
20871da177e4SLinus Torvalds  *	@irq: Interrupt line to allocate
20883aa551c9SThomas Gleixner  *	@handler: Function to be called when the IRQ occurs.
208961377ec1SJoel Savitz  *		  Primary handler for threaded interrupts.
209061377ec1SJoel Savitz  *		  If handler is NULL and thread_fn != NULL
209161377ec1SJoel Savitz  *		  the default primary handler is installed.
20923aa551c9SThomas Gleixner  *	@thread_fn: Function called from the irq handler thread
20933aa551c9SThomas Gleixner  *		    If NULL, no irq thread is created
20941da177e4SLinus Torvalds  *	@irqflags: Interrupt type flags
20951da177e4SLinus Torvalds  *	@devname: An ascii name for the claiming device
20961da177e4SLinus Torvalds  *	@dev_id: A cookie passed back to the handler function
20971da177e4SLinus Torvalds  *
20981da177e4SLinus Torvalds  *	This call allocates interrupt resources and enables the
20991da177e4SLinus Torvalds  *	interrupt line and IRQ handling. From the point this
21001da177e4SLinus Torvalds  *	call is made your handler function may be invoked. Since
21011da177e4SLinus Torvalds  *	your handler function must clear any interrupt the board
21021da177e4SLinus Torvalds  *	raises, you must take care both to initialise your hardware
21031da177e4SLinus Torvalds  *	and to set up the interrupt handler in the right order.
21041da177e4SLinus Torvalds  *
21053aa551c9SThomas Gleixner  *	If you want to set up a threaded irq handler for your device
21066d21af4fSJavi Merino  *	then you need to supply @handler and @thread_fn. @handler is
21073aa551c9SThomas Gleixner  *	still called in hard interrupt context and has to check
21083aa551c9SThomas Gleixner  *	whether the interrupt originates from the device. If yes it
21093aa551c9SThomas Gleixner  *	needs to disable the interrupt on the device and return
211039a2eddbSSteven Rostedt  *	IRQ_WAKE_THREAD which will wake up the handler thread and run
21113aa551c9SThomas Gleixner  *	@thread_fn. This split handler design is necessary to support
21123aa551c9SThomas Gleixner  *	shared interrupts.
21133aa551c9SThomas Gleixner  *
21141da177e4SLinus Torvalds  *	Dev_id must be globally unique. Normally the address of the
21151da177e4SLinus Torvalds  *	device data structure is used as the cookie. Since the handler
21161da177e4SLinus Torvalds  *	receives this value it makes sense to use it.
21171da177e4SLinus Torvalds  *
21181da177e4SLinus Torvalds  *	If your interrupt is shared you must pass a non-NULL dev_id
21191da177e4SLinus Torvalds  *	as this is required when freeing the interrupt.
21201da177e4SLinus Torvalds  *
21211da177e4SLinus Torvalds  *	Flags:
21221da177e4SLinus Torvalds  *
21233cca53b0SThomas Gleixner  *	IRQF_SHARED		Interrupt is shared
21240c5d1eb7SDavid Brownell  *	IRQF_TRIGGER_*		Specify active edge(s) or level
212504c2721dSThomas Gleixner  *	IRQF_ONESHOT		Run thread_fn with interrupt line masked
21261da177e4SLinus Torvalds  */
21273aa551c9SThomas Gleixner int request_threaded_irq(unsigned int irq, irq_handler_t handler,
21283aa551c9SThomas Gleixner 			 irq_handler_t thread_fn, unsigned long irqflags,
21293aa551c9SThomas Gleixner 			 const char *devname, void *dev_id)
21301da177e4SLinus Torvalds {
21311da177e4SLinus Torvalds 	struct irqaction *action;
213208678b08SYinghai Lu 	struct irq_desc *desc;
2133d3c60047SThomas Gleixner 	int retval;
21341da177e4SLinus Torvalds 
2135e237a551SChen Fan 	if (irq == IRQ_NOTCONNECTED)
2136e237a551SChen Fan 		return -ENOTCONN;
2137e237a551SChen Fan 
2138470c6623SDavid Brownell 	/*
21391da177e4SLinus Torvalds 	 * Sanity-check: shared interrupts must pass in a real dev-ID,
21401da177e4SLinus Torvalds 	 * otherwise we'll have trouble later trying to figure out
21411da177e4SLinus Torvalds 	 * which interrupt is which (messes up the interrupt freeing
21421da177e4SLinus Torvalds 	 * logic etc).
214317f48034SRafael J. Wysocki 	 *
2144cbe16f35SBarry Song 	 * Also, shared interrupts do not go well with disabling auto enable:
2145cbe16f35SBarry Song 	 * a later sharer might request the line while it is still disabled
2146cbe16f35SBarry Song 	 * and then wait for interrupts forever.
2147cbe16f35SBarry Song 	 *
214817f48034SRafael J. Wysocki 	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
214917f48034SRafael J. Wysocki 	 * it cannot be set along with IRQF_NO_SUSPEND.
21501da177e4SLinus Torvalds 	 */
215117f48034SRafael J. Wysocki 	if (((irqflags & IRQF_SHARED) && !dev_id) ||
2152cbe16f35SBarry Song 	    ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
215317f48034SRafael J. Wysocki 	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
215417f48034SRafael J. Wysocki 	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
21551da177e4SLinus Torvalds 		return -EINVAL;
21567d94f7caSYinghai Lu 
2157cb5bc832SYinghai Lu 	desc = irq_to_desc(irq);
21587d94f7caSYinghai Lu 	if (!desc)
21591da177e4SLinus Torvalds 		return -EINVAL;
21607d94f7caSYinghai Lu 
216131d9d9b6SMarc Zyngier 	if (!irq_settings_can_request(desc) ||
216231d9d9b6SMarc Zyngier 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
21636550c775SThomas Gleixner 		return -EINVAL;
2164b25c340cSThomas Gleixner 
2165b25c340cSThomas Gleixner 	if (!handler) {
2166b25c340cSThomas Gleixner 		if (!thread_fn)
21671da177e4SLinus Torvalds 			return -EINVAL;
2168b25c340cSThomas Gleixner 		handler = irq_default_primary_handler;
2169b25c340cSThomas Gleixner 	}
21701da177e4SLinus Torvalds 
217145535732SThomas Gleixner 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
21721da177e4SLinus Torvalds 	if (!action)
21731da177e4SLinus Torvalds 		return -ENOMEM;
21741da177e4SLinus Torvalds 
21751da177e4SLinus Torvalds 	action->handler = handler;
21763aa551c9SThomas Gleixner 	action->thread_fn = thread_fn;
21771da177e4SLinus Torvalds 	action->flags = irqflags;
21781da177e4SLinus Torvalds 	action->name = devname;
21791da177e4SLinus Torvalds 	action->dev_id = dev_id;
21801da177e4SLinus Torvalds 
2181be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
21824396f46cSShawn Lin 	if (retval < 0) {
21834396f46cSShawn Lin 		kfree(action);
2184be45beb2SJon Hunter 		return retval;
21854396f46cSShawn Lin 	}
2186be45beb2SJon Hunter 
2187d3c60047SThomas Gleixner 	retval = __setup_irq(irq, desc, action);
218870aedd24SThomas Gleixner 
21892a1d3ab8SThomas Gleixner 	if (retval) {
2190be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
21912a1d3ab8SThomas Gleixner 		kfree(action->secondary);
2192377bf1e4SAnton Vorontsov 		kfree(action);
21932a1d3ab8SThomas Gleixner 	}
2194377bf1e4SAnton Vorontsov 
21956d83f94dSThomas Gleixner #ifdef CONFIG_DEBUG_SHIRQ_FIXME
21966ce51c43SLuis Henriques 	if (!retval && (irqflags & IRQF_SHARED)) {
2197a304e1b8SDavid Woodhouse 		/*
2198a304e1b8SDavid Woodhouse 		 * It's a shared IRQ -- the driver ought to be prepared for it
2199a304e1b8SDavid Woodhouse 		 * to happen immediately, so let's make sure....
2200377bf1e4SAnton Vorontsov 		 * We disable the irq to make sure that a 'real' IRQ doesn't
2201377bf1e4SAnton Vorontsov 		 * run in parallel with our fake.
2202a304e1b8SDavid Woodhouse 		 */
2203a304e1b8SDavid Woodhouse 		unsigned long flags;
2204a304e1b8SDavid Woodhouse 
2205377bf1e4SAnton Vorontsov 		disable_irq(irq);
2206a304e1b8SDavid Woodhouse 		local_irq_save(flags);
2207377bf1e4SAnton Vorontsov 
2208a304e1b8SDavid Woodhouse 		handler(irq, dev_id);
2209377bf1e4SAnton Vorontsov 
2210a304e1b8SDavid Woodhouse 		local_irq_restore(flags);
2211377bf1e4SAnton Vorontsov 		enable_irq(irq);
2212a304e1b8SDavid Woodhouse 	}
2213a304e1b8SDavid Woodhouse #endif
22141da177e4SLinus Torvalds 	return retval;
22151da177e4SLinus Torvalds }
22163aa551c9SThomas Gleixner EXPORT_SYMBOL(request_threaded_irq);
2217ae731f8dSMarc Zyngier 
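/*
 * Usage sketch (illustrative only; the "foo" driver pieces are
 * hypothetical): the primary handler runs in hard interrupt context,
 * checks that the device raised the interrupt, quiesces it and kicks
 * the thread with IRQ_WAKE_THREAD; the threaded handler may sleep.
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;
 *		foo_mask_device_irq(foo);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_process_events(foo);
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_hardirq, foo_thread,
 *				   IRQF_SHARED, "foo", foo);
 */
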
2218ae731f8dSMarc Zyngier /**
2219ae731f8dSMarc Zyngier  *	request_any_context_irq - allocate an interrupt line
2220ae731f8dSMarc Zyngier  *	@irq: Interrupt line to allocate
2221ae731f8dSMarc Zyngier  *	@handler: Function to be called when the IRQ occurs.
2222ae731f8dSMarc Zyngier  *		  Threaded handler for threaded interrupts.
2223ae731f8dSMarc Zyngier  *	@flags: Interrupt type flags
2224ae731f8dSMarc Zyngier  *	@name: An ascii name for the claiming device
2225ae731f8dSMarc Zyngier  *	@dev_id: A cookie passed back to the handler function
2226ae731f8dSMarc Zyngier  *
2227ae731f8dSMarc Zyngier  *	This call allocates interrupt resources and enables the
2228ae731f8dSMarc Zyngier  *	interrupt line and IRQ handling. It selects either a
2229ae731f8dSMarc Zyngier  *	hardirq or threaded handling method depending on the
2230ae731f8dSMarc Zyngier  *	context.
2231ae731f8dSMarc Zyngier  *
2232ae731f8dSMarc Zyngier  *	On failure, it returns a negative value. On success,
2233ae731f8dSMarc Zyngier  *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
2234ae731f8dSMarc Zyngier  */
2235ae731f8dSMarc Zyngier int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2236ae731f8dSMarc Zyngier 			    unsigned long flags, const char *name, void *dev_id)
2237ae731f8dSMarc Zyngier {
2238e237a551SChen Fan 	struct irq_desc *desc;
2239ae731f8dSMarc Zyngier 	int ret;
2240ae731f8dSMarc Zyngier 
2241e237a551SChen Fan 	if (irq == IRQ_NOTCONNECTED)
2242e237a551SChen Fan 		return -ENOTCONN;
2243e237a551SChen Fan 
2244e237a551SChen Fan 	desc = irq_to_desc(irq);
2245ae731f8dSMarc Zyngier 	if (!desc)
2246ae731f8dSMarc Zyngier 		return -EINVAL;
2247ae731f8dSMarc Zyngier 
22481ccb4e61SThomas Gleixner 	if (irq_settings_is_nested_thread(desc)) {
2249ae731f8dSMarc Zyngier 		ret = request_threaded_irq(irq, NULL, handler,
2250ae731f8dSMarc Zyngier 					   flags, name, dev_id);
2251ae731f8dSMarc Zyngier 		return !ret ? IRQC_IS_NESTED : ret;
2252ae731f8dSMarc Zyngier 	}
2253ae731f8dSMarc Zyngier 
2254ae731f8dSMarc Zyngier 	ret = request_irq(irq, handler, flags, name, dev_id);
2255ae731f8dSMarc Zyngier 	return !ret ? IRQC_IS_HARDIRQ : ret;
2256ae731f8dSMarc Zyngier }
2257ae731f8dSMarc Zyngier EXPORT_SYMBOL_GPL(request_any_context_irq);
225831d9d9b6SMarc Zyngier 
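/*
 * Usage sketch (illustrative only, hypothetical "foo" names): useful
 * for drivers whose interrupt may arrive via a nested irqchip such as
 * an i2c GPIO expander; the positive return value tells the caller
 * which handling method was selected.
 *
 *	ret = request_any_context_irq(irq, foo_handler,
 *				      IRQF_TRIGGER_RISING, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	foo->nested_irq = (ret == IRQC_IS_NESTED);
 */
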
2259b525903cSJulien Thierry /**
2260b525903cSJulien Thierry  *	request_nmi - allocate an interrupt line for NMI delivery
2261b525903cSJulien Thierry  *	@irq: Interrupt line to allocate
2262b525903cSJulien Thierry  *	@handler: Function to be called when the IRQ occurs.
2263b525903cSJulien Thierry  *		  Threaded handler for threaded interrupts.
2264b525903cSJulien Thierry  *	@irqflags: Interrupt type flags
2265b525903cSJulien Thierry  *	@name: An ascii name for the claiming device
2266b525903cSJulien Thierry  *	@dev_id: A cookie passed back to the handler function
2267b525903cSJulien Thierry  *
2268b525903cSJulien Thierry  *	This call allocates interrupt resources and enables the
2269b525903cSJulien Thierry  *	interrupt line and IRQ handling. It sets up the IRQ line
2270b525903cSJulien Thierry  *	to be handled as an NMI.
2271b525903cSJulien Thierry  *
2272b525903cSJulien Thierry  *	An interrupt line delivering NMIs cannot be shared and IRQ handling
2273b525903cSJulien Thierry  *	cannot be threaded.
2274b525903cSJulien Thierry  *
2275b525903cSJulien Thierry  *	Interrupt lines requested for NMI delivery must produce per-CPU
2276b525903cSJulien Thierry  *	interrupts and have the auto-enable setting disabled.
2277b525903cSJulien Thierry  *
2278b525903cSJulien Thierry  *	Dev_id must be globally unique. Normally the address of the
2279b525903cSJulien Thierry  *	device data structure is used as the cookie. Since the handler
2280b525903cSJulien Thierry  *	receives this value it makes sense to use it.
2281b525903cSJulien Thierry  *
2282b525903cSJulien Thierry  *	If the interrupt line cannot be used to deliver NMIs, the function
2283b525903cSJulien Thierry  *	will fail and return a negative value.
2284b525903cSJulien Thierry  */
2285b525903cSJulien Thierry int request_nmi(unsigned int irq, irq_handler_t handler,
2286b525903cSJulien Thierry 		unsigned long irqflags, const char *name, void *dev_id)
2287b525903cSJulien Thierry {
2288b525903cSJulien Thierry 	struct irqaction *action;
2289b525903cSJulien Thierry 	struct irq_desc *desc;
2290b525903cSJulien Thierry 	unsigned long flags;
2291b525903cSJulien Thierry 	int retval;
2292b525903cSJulien Thierry 
2293b525903cSJulien Thierry 	if (irq == IRQ_NOTCONNECTED)
2294b525903cSJulien Thierry 		return -ENOTCONN;
2295b525903cSJulien Thierry 
2296b525903cSJulien Thierry 	/* NMI cannot be shared, nor used for polling */
2297b525903cSJulien Thierry 	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2298b525903cSJulien Thierry 		return -EINVAL;
2299b525903cSJulien Thierry 
2300b525903cSJulien Thierry 	if (!(irqflags & IRQF_PERCPU))
2301b525903cSJulien Thierry 		return -EINVAL;
2302b525903cSJulien Thierry 
2303b525903cSJulien Thierry 	if (!handler)
2304b525903cSJulien Thierry 		return -EINVAL;
2305b525903cSJulien Thierry 
2306b525903cSJulien Thierry 	desc = irq_to_desc(irq);
2307b525903cSJulien Thierry 
2308cbe16f35SBarry Song 	if (!desc || (irq_settings_can_autoenable(desc) &&
2309cbe16f35SBarry Song 	    !(irqflags & IRQF_NO_AUTOEN)) ||
2310b525903cSJulien Thierry 	    !irq_settings_can_request(desc) ||
2311b525903cSJulien Thierry 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2312b525903cSJulien Thierry 	    !irq_supports_nmi(desc))
2313b525903cSJulien Thierry 		return -EINVAL;
2314b525903cSJulien Thierry 
2315b525903cSJulien Thierry 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2316b525903cSJulien Thierry 	if (!action)
2317b525903cSJulien Thierry 		return -ENOMEM;
2318b525903cSJulien Thierry 
2319b525903cSJulien Thierry 	action->handler = handler;
2320b525903cSJulien Thierry 	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2321b525903cSJulien Thierry 	action->name = name;
2322b525903cSJulien Thierry 	action->dev_id = dev_id;
2323b525903cSJulien Thierry 
2324b525903cSJulien Thierry 	retval = irq_chip_pm_get(&desc->irq_data);
2325b525903cSJulien Thierry 	if (retval < 0)
2326b525903cSJulien Thierry 		goto err_out;
2327b525903cSJulien Thierry 
2328b525903cSJulien Thierry 	retval = __setup_irq(irq, desc, action);
2329b525903cSJulien Thierry 	if (retval)
2330b525903cSJulien Thierry 		goto err_irq_setup;
2331b525903cSJulien Thierry 
2332b525903cSJulien Thierry 	raw_spin_lock_irqsave(&desc->lock, flags);
2333b525903cSJulien Thierry 
2334b525903cSJulien Thierry 	/* Setup NMI state */
2335b525903cSJulien Thierry 	/* Set up NMI state */
2336b525903cSJulien Thierry 	retval = irq_nmi_setup(desc);
2337b525903cSJulien Thierry 	if (retval) {
2338b525903cSJulien Thierry 		__cleanup_nmi(irq, desc);
2339b525903cSJulien Thierry 		raw_spin_unlock_irqrestore(&desc->lock, flags);
2340b525903cSJulien Thierry 		return -EINVAL;
2341b525903cSJulien Thierry 	}
2342b525903cSJulien Thierry 
2343b525903cSJulien Thierry 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2344b525903cSJulien Thierry 
2345b525903cSJulien Thierry 	return 0;
2346b525903cSJulien Thierry 
2347b525903cSJulien Thierry err_irq_setup:
2348b525903cSJulien Thierry 	irq_chip_pm_put(&desc->irq_data);
2349b525903cSJulien Thierry err_out:
2350b525903cSJulien Thierry 	kfree(action);
2351b525903cSJulien Thierry 
2352b525903cSJulien Thierry 	return retval;
2353b525903cSJulien Thierry }
2354b525903cSJulien Thierry 
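/*
 * Usage sketch (illustrative only, hypothetical "foo" names): the
 * flags must mark the line as per-CPU and keep it from being
 * auto-enabled; the line is switched on afterwards with enable_nmi().
 *
 *	ret = request_nmi(irq, foo_nmi_handler,
 *			  IRQF_PERCPU | IRQF_NO_AUTOEN, "foo-nmi", foo);
 *	if (!ret)
 *		enable_nmi(irq);
 */
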
23551e7c5fd2SMarc Zyngier void enable_percpu_irq(unsigned int irq, unsigned int type)
235631d9d9b6SMarc Zyngier {
235731d9d9b6SMarc Zyngier 	unsigned int cpu = smp_processor_id();
235831d9d9b6SMarc Zyngier 	unsigned long flags;
235931d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
236031d9d9b6SMarc Zyngier 
236131d9d9b6SMarc Zyngier 	if (!desc)
236231d9d9b6SMarc Zyngier 		return;
236331d9d9b6SMarc Zyngier 
2364f35ad083SMarc Zyngier 	/*
2365f35ad083SMarc Zyngier 	 * If the trigger type is not specified by the caller, then
2366f35ad083SMarc Zyngier 	 * use the default for this interrupt.
2367f35ad083SMarc Zyngier 	 */
23681e7c5fd2SMarc Zyngier 	type &= IRQ_TYPE_SENSE_MASK;
2369f35ad083SMarc Zyngier 	if (type == IRQ_TYPE_NONE)
2370f35ad083SMarc Zyngier 		type = irqd_get_trigger_type(&desc->irq_data);
2371f35ad083SMarc Zyngier 
23721e7c5fd2SMarc Zyngier 	if (type != IRQ_TYPE_NONE) {
23731e7c5fd2SMarc Zyngier 		int ret;
23741e7c5fd2SMarc Zyngier 
2375a1ff541aSJiang Liu 		ret = __irq_set_trigger(desc, type);
23761e7c5fd2SMarc Zyngier 
23771e7c5fd2SMarc Zyngier 		if (ret) {
237832cffddeSThomas Gleixner 			WARN(1, "failed to set type for IRQ%d\n", irq);
23791e7c5fd2SMarc Zyngier 			goto out;
23801e7c5fd2SMarc Zyngier 		}
23811e7c5fd2SMarc Zyngier 	}
23821e7c5fd2SMarc Zyngier 
238331d9d9b6SMarc Zyngier 	irq_percpu_enable(desc, cpu);
23841e7c5fd2SMarc Zyngier out:
238531d9d9b6SMarc Zyngier 	irq_put_desc_unlock(desc, flags);
238631d9d9b6SMarc Zyngier }
238736a5df85SChris Metcalf EXPORT_SYMBOL_GPL(enable_percpu_irq);
238831d9d9b6SMarc Zyngier 
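/*
 * Usage sketch (illustrative only, hypothetical "foo" names): since
 * enable_percpu_irq() only acts on the calling CPU, drivers commonly
 * run it from a CPU hotplug callback so that every CPU that comes
 * online enables its own copy of the line.
 *
 *	static int foo_starting_cpu(unsigned int cpu)
 *	{
 *		enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);
 *		return 0;
 *	}
 *
 *	static int foo_dying_cpu(unsigned int cpu)
 *	{
 *		disable_percpu_irq(foo_irq);
 *		return 0;
 *	}
 *
 *	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo/irq:starting",
 *			  foo_starting_cpu, foo_dying_cpu);
 */
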
23894b078c3fSJulien Thierry void enable_percpu_nmi(unsigned int irq, unsigned int type)
23904b078c3fSJulien Thierry {
23914b078c3fSJulien Thierry 	enable_percpu_irq(irq, type);
23924b078c3fSJulien Thierry }
23934b078c3fSJulien Thierry 
2394f0cb3220SThomas Petazzoni /**
2395f0cb3220SThomas Petazzoni  * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
2396f0cb3220SThomas Petazzoni  * @irq:	Linux irq number to check for
2397f0cb3220SThomas Petazzoni  *
2398f0cb3220SThomas Petazzoni  * Must be called from a non-migratable context. Returns the enable
2399f0cb3220SThomas Petazzoni  * state of a per-CPU interrupt on the current CPU.
2400f0cb3220SThomas Petazzoni  */
2401f0cb3220SThomas Petazzoni bool irq_percpu_is_enabled(unsigned int irq)
2402f0cb3220SThomas Petazzoni {
2403f0cb3220SThomas Petazzoni 	unsigned int cpu = smp_processor_id();
2404f0cb3220SThomas Petazzoni 	struct irq_desc *desc;
2405f0cb3220SThomas Petazzoni 	unsigned long flags;
2406f0cb3220SThomas Petazzoni 	bool is_enabled;
2407f0cb3220SThomas Petazzoni 
2408f0cb3220SThomas Petazzoni 	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2409f0cb3220SThomas Petazzoni 	if (!desc)
2410f0cb3220SThomas Petazzoni 		return false;
2411f0cb3220SThomas Petazzoni 
2412f0cb3220SThomas Petazzoni 	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2413f0cb3220SThomas Petazzoni 	irq_put_desc_unlock(desc, flags);
2414f0cb3220SThomas Petazzoni 
2415f0cb3220SThomas Petazzoni 	return is_enabled;
2416f0cb3220SThomas Petazzoni }
2417f0cb3220SThomas Petazzoni EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
2418f0cb3220SThomas Petazzoni 
241931d9d9b6SMarc Zyngier void disable_percpu_irq(unsigned int irq)
242031d9d9b6SMarc Zyngier {
242131d9d9b6SMarc Zyngier 	unsigned int cpu = smp_processor_id();
242231d9d9b6SMarc Zyngier 	unsigned long flags;
242331d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
242431d9d9b6SMarc Zyngier 
242531d9d9b6SMarc Zyngier 	if (!desc)
242631d9d9b6SMarc Zyngier 		return;
242731d9d9b6SMarc Zyngier 
242831d9d9b6SMarc Zyngier 	irq_percpu_disable(desc, cpu);
242931d9d9b6SMarc Zyngier 	irq_put_desc_unlock(desc, flags);
243031d9d9b6SMarc Zyngier }
243136a5df85SChris Metcalf EXPORT_SYMBOL_GPL(disable_percpu_irq);
243231d9d9b6SMarc Zyngier 
24334b078c3fSJulien Thierry void disable_percpu_nmi(unsigned int irq)
24344b078c3fSJulien Thierry {
24354b078c3fSJulien Thierry 	disable_percpu_irq(irq);
24364b078c3fSJulien Thierry }
24374b078c3fSJulien Thierry 
243831d9d9b6SMarc Zyngier /*
243931d9d9b6SMarc Zyngier  * Internal function to unregister a percpu irqaction.
244031d9d9b6SMarc Zyngier  */
244131d9d9b6SMarc Zyngier static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
244231d9d9b6SMarc Zyngier {
244331d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
244431d9d9b6SMarc Zyngier 	struct irqaction *action;
244531d9d9b6SMarc Zyngier 	unsigned long flags;
244631d9d9b6SMarc Zyngier 
244731d9d9b6SMarc Zyngier 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
244831d9d9b6SMarc Zyngier 
244931d9d9b6SMarc Zyngier 	if (!desc)
245031d9d9b6SMarc Zyngier 		return NULL;
245131d9d9b6SMarc Zyngier 
245231d9d9b6SMarc Zyngier 	raw_spin_lock_irqsave(&desc->lock, flags);
245331d9d9b6SMarc Zyngier 
245431d9d9b6SMarc Zyngier 	action = desc->action;
245531d9d9b6SMarc Zyngier 	if (!action || action->percpu_dev_id != dev_id) {
245631d9d9b6SMarc Zyngier 		WARN(1, "Trying to free already-free IRQ %d\n", irq);
245731d9d9b6SMarc Zyngier 		goto bad;
245831d9d9b6SMarc Zyngier 	}
245931d9d9b6SMarc Zyngier 
246031d9d9b6SMarc Zyngier 	if (!cpumask_empty(desc->percpu_enabled)) {
246131d9d9b6SMarc Zyngier 		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
246231d9d9b6SMarc Zyngier 		     irq, cpumask_first(desc->percpu_enabled));
246331d9d9b6SMarc Zyngier 		goto bad;
246431d9d9b6SMarc Zyngier 	}
246531d9d9b6SMarc Zyngier 
246631d9d9b6SMarc Zyngier 	/* Found it - now remove it from the list of entries: */
246731d9d9b6SMarc Zyngier 	desc->action = NULL;
246831d9d9b6SMarc Zyngier 
24694b078c3fSJulien Thierry 	desc->istate &= ~IRQS_NMI;
24704b078c3fSJulien Thierry 
247131d9d9b6SMarc Zyngier 	raw_spin_unlock_irqrestore(&desc->lock, flags);
247231d9d9b6SMarc Zyngier 
247331d9d9b6SMarc Zyngier 	unregister_handler_proc(irq, action);
247431d9d9b6SMarc Zyngier 
2475be45beb2SJon Hunter 	irq_chip_pm_put(&desc->irq_data);
247631d9d9b6SMarc Zyngier 	module_put(desc->owner);
247731d9d9b6SMarc Zyngier 	return action;
247831d9d9b6SMarc Zyngier 
247931d9d9b6SMarc Zyngier bad:
248031d9d9b6SMarc Zyngier 	raw_spin_unlock_irqrestore(&desc->lock, flags);
248131d9d9b6SMarc Zyngier 	return NULL;
248231d9d9b6SMarc Zyngier }
248331d9d9b6SMarc Zyngier 
248431d9d9b6SMarc Zyngier /**
248531d9d9b6SMarc Zyngier  *	remove_percpu_irq - free a per-cpu interrupt
248631d9d9b6SMarc Zyngier  *	@irq: Interrupt line to free
248731d9d9b6SMarc Zyngier  *	@act: irqaction for the interrupt
248831d9d9b6SMarc Zyngier  *
248931d9d9b6SMarc Zyngier  * Used to remove interrupts statically set up by the early boot process.
249031d9d9b6SMarc Zyngier  */
249131d9d9b6SMarc Zyngier void remove_percpu_irq(unsigned int irq, struct irqaction *act)
249231d9d9b6SMarc Zyngier {
249331d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
249431d9d9b6SMarc Zyngier 
249531d9d9b6SMarc Zyngier 	if (desc && irq_settings_is_per_cpu_devid(desc))
249631d9d9b6SMarc Zyngier 		__free_percpu_irq(irq, act->percpu_dev_id);
249731d9d9b6SMarc Zyngier }
249831d9d9b6SMarc Zyngier 
249931d9d9b6SMarc Zyngier /**
250031d9d9b6SMarc Zyngier  *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
250131d9d9b6SMarc Zyngier  *	@irq: Interrupt line to free
250231d9d9b6SMarc Zyngier  *	@dev_id: Device identity to free
250331d9d9b6SMarc Zyngier  *
250431d9d9b6SMarc Zyngier  *	Remove a percpu interrupt handler. The handler is removed, but
250531d9d9b6SMarc Zyngier  *	the interrupt line is not disabled; disabling it must be done on
250631d9d9b6SMarc Zyngier  *	each CPU before calling this function. The function does not return
250731d9d9b6SMarc Zyngier  *	until any executing interrupts for this IRQ have completed.
250831d9d9b6SMarc Zyngier  *
250931d9d9b6SMarc Zyngier  *	This function must not be called from interrupt context.
251031d9d9b6SMarc Zyngier  */
251131d9d9b6SMarc Zyngier void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
251231d9d9b6SMarc Zyngier {
251331d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
251431d9d9b6SMarc Zyngier 
251531d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
251631d9d9b6SMarc Zyngier 		return;
251731d9d9b6SMarc Zyngier 
251831d9d9b6SMarc Zyngier 	chip_bus_lock(desc);
251931d9d9b6SMarc Zyngier 	kfree(__free_percpu_irq(irq, dev_id));
252031d9d9b6SMarc Zyngier 	chip_bus_sync_unlock(desc);
252131d9d9b6SMarc Zyngier }
2522aec2e2adSMaxime Ripard EXPORT_SYMBOL_GPL(free_percpu_irq);
252331d9d9b6SMarc Zyngier 
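/*
 * Usage sketch (illustrative only, hypothetical "foo" names): disable
 * the line on every CPU first, e.g. by removing the hotplug state that
 * enabled it, then free the handler.
 *
 *	cpuhp_remove_state(foo_hp_state);
 *	free_percpu_irq(foo_irq, foo_pcpu);
 */
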
25244b078c3fSJulien Thierry void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
25254b078c3fSJulien Thierry {
25264b078c3fSJulien Thierry 	struct irq_desc *desc = irq_to_desc(irq);
25274b078c3fSJulien Thierry 
25284b078c3fSJulien Thierry 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
25294b078c3fSJulien Thierry 		return;
25304b078c3fSJulien Thierry 
25314b078c3fSJulien Thierry 	if (WARN_ON(!(desc->istate & IRQS_NMI)))
25324b078c3fSJulien Thierry 		return;
25334b078c3fSJulien Thierry 
25344b078c3fSJulien Thierry 	kfree(__free_percpu_irq(irq, dev_id));
25354b078c3fSJulien Thierry }
25364b078c3fSJulien Thierry 
253731d9d9b6SMarc Zyngier /**
253831d9d9b6SMarc Zyngier  *	setup_percpu_irq - setup a per-cpu interrupt
253931d9d9b6SMarc Zyngier  *	@irq: Interrupt line to setup
254031d9d9b6SMarc Zyngier  *	@act: irqaction for the interrupt
254131d9d9b6SMarc Zyngier  *
254231d9d9b6SMarc Zyngier  * Used to statically set up per-cpu interrupts in the early boot process.
254331d9d9b6SMarc Zyngier  */
254431d9d9b6SMarc Zyngier int setup_percpu_irq(unsigned int irq, struct irqaction *act)
254531d9d9b6SMarc Zyngier {
254631d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
254731d9d9b6SMarc Zyngier 	int retval;
254831d9d9b6SMarc Zyngier 
254931d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
255031d9d9b6SMarc Zyngier 		return -EINVAL;
2551be45beb2SJon Hunter 
2552be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
2553be45beb2SJon Hunter 	if (retval < 0)
2554be45beb2SJon Hunter 		return retval;
2555be45beb2SJon Hunter 
255631d9d9b6SMarc Zyngier 	retval = __setup_irq(irq, desc, act);
255731d9d9b6SMarc Zyngier 
2558be45beb2SJon Hunter 	if (retval)
2559be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
2560be45beb2SJon Hunter 
256131d9d9b6SMarc Zyngier 	return retval;
256231d9d9b6SMarc Zyngier }
256331d9d9b6SMarc Zyngier 
256431d9d9b6SMarc Zyngier /**
2565c80081b9SDaniel Lezcano  *	__request_percpu_irq - allocate a percpu interrupt line
256631d9d9b6SMarc Zyngier  *	@irq: Interrupt line to allocate
256731d9d9b6SMarc Zyngier  *	@handler: Function to be called when the IRQ occurs.
2568c80081b9SDaniel Lezcano  *	@flags: Interrupt type flags (IRQF_TIMER only)
256931d9d9b6SMarc Zyngier  *	@devname: An ascii name for the claiming device
257031d9d9b6SMarc Zyngier  *	@dev_id: A percpu cookie passed back to the handler function
257131d9d9b6SMarc Zyngier  *
2572a1b7febdSMaxime Ripard  *	This call allocates interrupt resources and enables the
2573a1b7febdSMaxime Ripard  *	interrupt on the local CPU. If the interrupt is supposed to be
2574a1b7febdSMaxime Ripard  *	enabled on other CPUs, it has to be done on each CPU using
2575a1b7febdSMaxime Ripard  *	enable_percpu_irq().
257631d9d9b6SMarc Zyngier  *
257731d9d9b6SMarc Zyngier  *	Dev_id must be globally unique. It is a per-cpu variable, and
257831d9d9b6SMarc Zyngier  *	the handler gets called with the interrupted CPU's instance of
257931d9d9b6SMarc Zyngier  *	that variable.
258031d9d9b6SMarc Zyngier  */
2581c80081b9SDaniel Lezcano int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2582c80081b9SDaniel Lezcano 			 unsigned long flags, const char *devname,
2583c80081b9SDaniel Lezcano 			 void __percpu *dev_id)
258431d9d9b6SMarc Zyngier {
258531d9d9b6SMarc Zyngier 	struct irqaction *action;
258631d9d9b6SMarc Zyngier 	struct irq_desc *desc;
258731d9d9b6SMarc Zyngier 	int retval;
258831d9d9b6SMarc Zyngier 
258931d9d9b6SMarc Zyngier 	if (!dev_id)
259031d9d9b6SMarc Zyngier 		return -EINVAL;
259131d9d9b6SMarc Zyngier 
259231d9d9b6SMarc Zyngier 	desc = irq_to_desc(irq);
259331d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_can_request(desc) ||
259431d9d9b6SMarc Zyngier 	    !irq_settings_is_per_cpu_devid(desc))
259531d9d9b6SMarc Zyngier 		return -EINVAL;
259631d9d9b6SMarc Zyngier 
2597c80081b9SDaniel Lezcano 	if (flags && flags != IRQF_TIMER)
2598c80081b9SDaniel Lezcano 		return -EINVAL;
2599c80081b9SDaniel Lezcano 
260031d9d9b6SMarc Zyngier 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
260131d9d9b6SMarc Zyngier 	if (!action)
260231d9d9b6SMarc Zyngier 		return -ENOMEM;
260331d9d9b6SMarc Zyngier 
260431d9d9b6SMarc Zyngier 	action->handler = handler;
2605c80081b9SDaniel Lezcano 	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
260631d9d9b6SMarc Zyngier 	action->name = devname;
260731d9d9b6SMarc Zyngier 	action->percpu_dev_id = dev_id;
260831d9d9b6SMarc Zyngier 
2609be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
26104396f46cSShawn Lin 	if (retval < 0) {
26114396f46cSShawn Lin 		kfree(action);
2612be45beb2SJon Hunter 		return retval;
26134396f46cSShawn Lin 	}
2614be45beb2SJon Hunter 
261531d9d9b6SMarc Zyngier 	retval = __setup_irq(irq, desc, action);
261631d9d9b6SMarc Zyngier 
2617be45beb2SJon Hunter 	if (retval) {
2618be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
261931d9d9b6SMarc Zyngier 		kfree(action);
2620be45beb2SJon Hunter 	}
262131d9d9b6SMarc Zyngier 
262231d9d9b6SMarc Zyngier 	return retval;
262331d9d9b6SMarc Zyngier }
2624c80081b9SDaniel Lezcano EXPORT_SYMBOL_GPL(__request_percpu_irq);
26251b7047edSMarc Zyngier 
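/*
 * Usage sketch (illustrative only, hypothetical "foo" names): the
 * cookie is a per-cpu variable, so each CPU's handler invocation
 * receives that CPU's instance. request_percpu_irq() is the usual
 * wrapper, passing zero flags to this function.
 *
 *	struct foo_pcpu __percpu *foo_pcpu = alloc_percpu(struct foo_pcpu);
 *
 *	ret = request_percpu_irq(irq, foo_percpu_handler, "foo", foo_pcpu);
 *	if (ret)
 *		return ret;
 *
 *	(then enable_percpu_irq() on each CPU, as shown above)
 */
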
26261b7047edSMarc Zyngier /**
26274b078c3fSJulien Thierry  *	request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
26284b078c3fSJulien Thierry  *	@irq: Interrupt line to allocate
26294b078c3fSJulien Thierry  *	@handler: Function to be called when the IRQ occurs.
26304b078c3fSJulien Thierry  *	@name: An ascii name for the claiming device
26314b078c3fSJulien Thierry  *	@dev_id: A percpu cookie passed back to the handler function
26324b078c3fSJulien Thierry  *
26334b078c3fSJulien Thierry  *	This call allocates interrupt resources for a per-CPU NMI. Per-CPU NMIs
2634a5186694SJulien Thierry  *	have to be set up on each CPU by calling prepare_percpu_nmi() before
2635a5186694SJulien Thierry  *	being enabled on the same CPU by using enable_percpu_nmi().
26364b078c3fSJulien Thierry  *
26374b078c3fSJulien Thierry  *	Dev_id must be globally unique. It is a per-cpu variable, and
26384b078c3fSJulien Thierry  *	the handler gets called with the interrupted CPU's instance of
26394b078c3fSJulien Thierry  *	that variable.
26404b078c3fSJulien Thierry  *
26414b078c3fSJulien Thierry  *	Interrupt lines requested for NMI delivery should have the auto-enable
26424b078c3fSJulien Thierry  *	setting disabled.
26434b078c3fSJulien Thierry  *
26444b078c3fSJulien Thierry  *	If the interrupt line cannot be used to deliver NMIs, the function
26454b078c3fSJulien Thierry  *	will fail, returning a negative value.
26464b078c3fSJulien Thierry  */
26474b078c3fSJulien Thierry int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
26484b078c3fSJulien Thierry 		       const char *name, void __percpu *dev_id)
26494b078c3fSJulien Thierry {
26504b078c3fSJulien Thierry 	struct irqaction *action;
26514b078c3fSJulien Thierry 	struct irq_desc *desc;
26524b078c3fSJulien Thierry 	unsigned long flags;
26534b078c3fSJulien Thierry 	int retval;
26544b078c3fSJulien Thierry 
26554b078c3fSJulien Thierry 	if (!handler)
26564b078c3fSJulien Thierry 		return -EINVAL;
26574b078c3fSJulien Thierry 
26584b078c3fSJulien Thierry 	desc = irq_to_desc(irq);
26594b078c3fSJulien Thierry 
26604b078c3fSJulien Thierry 	if (!desc || !irq_settings_can_request(desc) ||
26614b078c3fSJulien Thierry 	    !irq_settings_is_per_cpu_devid(desc) ||
26624b078c3fSJulien Thierry 	    irq_settings_can_autoenable(desc) ||
26634b078c3fSJulien Thierry 	    !irq_supports_nmi(desc))
26644b078c3fSJulien Thierry 		return -EINVAL;
26654b078c3fSJulien Thierry 
26664b078c3fSJulien Thierry 	/* The line cannot already be NMI */
26674b078c3fSJulien Thierry 	if (desc->istate & IRQS_NMI)
26684b078c3fSJulien Thierry 		return -EINVAL;
26694b078c3fSJulien Thierry 
26704b078c3fSJulien Thierry 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
26714b078c3fSJulien Thierry 	if (!action)
26724b078c3fSJulien Thierry 		return -ENOMEM;
26734b078c3fSJulien Thierry 
26744b078c3fSJulien Thierry 	action->handler = handler;
26754b078c3fSJulien Thierry 	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
26764b078c3fSJulien Thierry 		| IRQF_NOBALANCING;
26774b078c3fSJulien Thierry 	action->name = name;
26784b078c3fSJulien Thierry 	action->percpu_dev_id = dev_id;
26794b078c3fSJulien Thierry 
26804b078c3fSJulien Thierry 	retval = irq_chip_pm_get(&desc->irq_data);
26814b078c3fSJulien Thierry 	if (retval < 0)
26824b078c3fSJulien Thierry 		goto err_out;
26834b078c3fSJulien Thierry 
26844b078c3fSJulien Thierry 	retval = __setup_irq(irq, desc, action);
26854b078c3fSJulien Thierry 	if (retval)
26864b078c3fSJulien Thierry 		goto err_irq_setup;
26874b078c3fSJulien Thierry 
26884b078c3fSJulien Thierry 	raw_spin_lock_irqsave(&desc->lock, flags);
26894b078c3fSJulien Thierry 	desc->istate |= IRQS_NMI;
26904b078c3fSJulien Thierry 	raw_spin_unlock_irqrestore(&desc->lock, flags);
26914b078c3fSJulien Thierry 
26924b078c3fSJulien Thierry 	return 0;
26934b078c3fSJulien Thierry 
26944b078c3fSJulien Thierry err_irq_setup:
26954b078c3fSJulien Thierry 	irq_chip_pm_put(&desc->irq_data);
26964b078c3fSJulien Thierry err_out:
26974b078c3fSJulien Thierry 	kfree(action);
26984b078c3fSJulien Thierry 
26994b078c3fSJulien Thierry 	return retval;
27004b078c3fSJulien Thierry }
27014b078c3fSJulien Thierry 
27024b078c3fSJulien Thierry /**
27034b078c3fSJulien Thierry  *	prepare_percpu_nmi - performs CPU local setup for NMI delivery
27044b078c3fSJulien Thierry  *	@irq: Interrupt line to prepare for NMI delivery
27054b078c3fSJulien Thierry  *
27064b078c3fSJulien Thierry  *	This call prepares an interrupt line to deliver NMI on the current CPU,
27074b078c3fSJulien Thierry  *	before that interrupt line gets enabled with enable_percpu_nmi().
27084b078c3fSJulien Thierry  *
27094b078c3fSJulien Thierry  *	As a CPU-local operation, this should be called from non-preemptible
27104b078c3fSJulien Thierry  *	context.
27114b078c3fSJulien Thierry  *
27124b078c3fSJulien Thierry  *	If the interrupt line cannot be used to deliver NMIs, the function
27134b078c3fSJulien Thierry  *	will fail, returning a negative value.
27144b078c3fSJulien Thierry  */
27154b078c3fSJulien Thierry int prepare_percpu_nmi(unsigned int irq)
27164b078c3fSJulien Thierry {
27174b078c3fSJulien Thierry 	unsigned long flags;
27184b078c3fSJulien Thierry 	struct irq_desc *desc;
27194b078c3fSJulien Thierry 	int ret = 0;
27204b078c3fSJulien Thierry 
27214b078c3fSJulien Thierry 	WARN_ON(preemptible());
27224b078c3fSJulien Thierry 
27234b078c3fSJulien Thierry 	desc = irq_get_desc_lock(irq, &flags,
27244b078c3fSJulien Thierry 				 IRQ_GET_DESC_CHECK_PERCPU);
27254b078c3fSJulien Thierry 	if (!desc)
27264b078c3fSJulien Thierry 		return -EINVAL;
27274b078c3fSJulien Thierry 
27284b078c3fSJulien Thierry 	if (WARN(!(desc->istate & IRQS_NMI),
27294b078c3fSJulien Thierry 		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
27304b078c3fSJulien Thierry 		 irq)) {
27314b078c3fSJulien Thierry 		ret = -EINVAL;
27324b078c3fSJulien Thierry 		goto out;
27334b078c3fSJulien Thierry 	}
27344b078c3fSJulien Thierry 
27354b078c3fSJulien Thierry 	ret = irq_nmi_setup(desc);
27364b078c3fSJulien Thierry 	if (ret) {
27374b078c3fSJulien Thierry 		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
27384b078c3fSJulien Thierry 		goto out;
27394b078c3fSJulien Thierry 	}
27404b078c3fSJulien Thierry 
27414b078c3fSJulien Thierry out:
27424b078c3fSJulien Thierry 	irq_put_desc_unlock(desc, flags);
27434b078c3fSJulien Thierry 	return ret;
27444b078c3fSJulien Thierry }
27454b078c3fSJulien Thierry 
27464b078c3fSJulien Thierry /**
27474b078c3fSJulien Thierry  *	teardown_percpu_nmi - undoes NMI setup of IRQ line
27484b078c3fSJulien Thierry  *	@irq: Interrupt line from which CPU local NMI configuration should be
27494b078c3fSJulien Thierry  *	      removed
27504b078c3fSJulien Thierry  *
27514b078c3fSJulien Thierry  *	This call undoes the setup done by prepare_percpu_nmi().
27524b078c3fSJulien Thierry  *
27534b078c3fSJulien Thierry  *	IRQ line should not be enabled for the current CPU.
27544b078c3fSJulien Thierry  *
27554b078c3fSJulien Thierry  *	As a CPU-local operation, this should be called from non-preemptible
27564b078c3fSJulien Thierry  *	context.
27574b078c3fSJulien Thierry  */
27584b078c3fSJulien Thierry void teardown_percpu_nmi(unsigned int irq)
27594b078c3fSJulien Thierry {
27604b078c3fSJulien Thierry 	unsigned long flags;
27614b078c3fSJulien Thierry 	struct irq_desc *desc;
27624b078c3fSJulien Thierry 
27634b078c3fSJulien Thierry 	WARN_ON(preemptible());
27644b078c3fSJulien Thierry 
27654b078c3fSJulien Thierry 	desc = irq_get_desc_lock(irq, &flags,
27664b078c3fSJulien Thierry 				 IRQ_GET_DESC_CHECK_PERCPU);
27674b078c3fSJulien Thierry 	if (!desc)
27684b078c3fSJulien Thierry 		return;
27694b078c3fSJulien Thierry 
27704b078c3fSJulien Thierry 	if (WARN_ON(!(desc->istate & IRQS_NMI)))
27714b078c3fSJulien Thierry 		goto out;
27724b078c3fSJulien Thierry 
27734b078c3fSJulien Thierry 	irq_nmi_teardown(desc);
27744b078c3fSJulien Thierry out:
27754b078c3fSJulien Thierry 	irq_put_desc_unlock(desc, flags);
27764b078c3fSJulien Thierry }
27774b078c3fSJulien Thierry 
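/*
 * Usage sketch (illustrative only, hypothetical "foo" names): the full
 * per-CPU NMI life cycle. The CPU-local steps must run on each CPU in
 * non-preemptible context, e.g. from a cpuhp "starting" callback.
 *
 *	ret = request_percpu_nmi(irq, foo_nmi_handler, "foo-nmi", foo_pcpu);
 *
 *	(on each CPU, bring-up:)
 *	ret = prepare_percpu_nmi(irq);
 *	if (!ret)
 *		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 *
 *	(on each CPU, teardown:)
 *	disable_percpu_nmi(irq);
 *	teardown_percpu_nmi(irq);
 *
 *	(finally, once all CPUs have torn down:)
 *	free_percpu_nmi(irq, foo_pcpu);
 */
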
277862e04686SThomas Gleixner int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
277962e04686SThomas Gleixner 			    bool *state)
278062e04686SThomas Gleixner {
278162e04686SThomas Gleixner 	struct irq_chip *chip;
278262e04686SThomas Gleixner 	int err = -EINVAL;
278362e04686SThomas Gleixner 
278462e04686SThomas Gleixner 	do {
278562e04686SThomas Gleixner 		chip = irq_data_get_irq_chip(data);
27861d0326f3SMarek Vasut 		if (WARN_ON_ONCE(!chip))
27871d0326f3SMarek Vasut 			return -ENODEV;
278862e04686SThomas Gleixner 		if (chip->irq_get_irqchip_state)
278962e04686SThomas Gleixner 			break;
279062e04686SThomas Gleixner #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
279162e04686SThomas Gleixner 		data = data->parent_data;
279262e04686SThomas Gleixner #else
279362e04686SThomas Gleixner 		data = NULL;
279462e04686SThomas Gleixner #endif
279562e04686SThomas Gleixner 	} while (data);
279662e04686SThomas Gleixner 
279762e04686SThomas Gleixner 	if (data)
279862e04686SThomas Gleixner 		err = chip->irq_get_irqchip_state(data, which, state);
279962e04686SThomas Gleixner 	return err;
280062e04686SThomas Gleixner }
280162e04686SThomas Gleixner 
28024b078c3fSJulien Thierry /**
28031b7047edSMarc Zyngier  *	irq_get_irqchip_state - returns the irqchip state of an interrupt.
28041b7047edSMarc Zyngier  *	@irq: Interrupt line that is forwarded to a VM
28051b7047edSMarc Zyngier  *	@which: One of IRQCHIP_STATE_* the caller wants to know about
28065c982c58SKrzysztof Kozlowski  *	@state: a pointer to a boolean where the state is to be stored
28071b7047edSMarc Zyngier  *
28081b7047edSMarc Zyngier  *	This call snapshots the internal irqchip state of an
28091b7047edSMarc Zyngier  *	interrupt, returning into @state the bit corresponding to
28101b7047edSMarc Zyngier  *	state @which.
28111b7047edSMarc Zyngier  *
28121b7047edSMarc Zyngier  *	This function should be called with preemption disabled if the
28131b7047edSMarc Zyngier  *	interrupt controller has per-cpu registers.
28141b7047edSMarc Zyngier  */
28151b7047edSMarc Zyngier int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
28161b7047edSMarc Zyngier 			  bool *state)
28171b7047edSMarc Zyngier {
28181b7047edSMarc Zyngier 	struct irq_desc *desc;
28191b7047edSMarc Zyngier 	struct irq_data *data;
28201b7047edSMarc Zyngier 	unsigned long flags;
28211b7047edSMarc Zyngier 	int err = -EINVAL;
28221b7047edSMarc Zyngier 
28231b7047edSMarc Zyngier 	desc = irq_get_desc_buslock(irq, &flags, 0);
28241b7047edSMarc Zyngier 	if (!desc)
28251b7047edSMarc Zyngier 		return err;
28261b7047edSMarc Zyngier 
28271b7047edSMarc Zyngier 	data = irq_desc_get_irq_data(desc);
28281b7047edSMarc Zyngier 
282962e04686SThomas Gleixner 	err = __irq_get_irqchip_state(data, which, state);
28301b7047edSMarc Zyngier 
28311b7047edSMarc Zyngier 	irq_put_desc_busunlock(desc, flags);
28321b7047edSMarc Zyngier 	return err;
28331b7047edSMarc Zyngier }
28341ee4fb3eSBjorn Andersson EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
28351b7047edSMarc Zyngier 
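/*
 * Usage sketch (illustrative only; foo_forward_to_guest() is
 * hypothetical): snapshot the pending bit of a line forwarded to a VM.
 *
 *	bool pending = false;
 *	int err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING,
 *					&pending);
 *
 *	if (!err && pending)
 *		foo_forward_to_guest(irq);
 */
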
28361b7047edSMarc Zyngier /**
28371b7047edSMarc Zyngier  *	irq_set_irqchip_state - set the state of a forwarded interrupt.
28381b7047edSMarc Zyngier  *	@irq: Interrupt line that is forwarded to a VM
28391b7047edSMarc Zyngier  *	@which: State to be restored (one of IRQCHIP_STATE_*)
28401b7047edSMarc Zyngier  *	@val: Value corresponding to @which
28411b7047edSMarc Zyngier  *
28421b7047edSMarc Zyngier  *	This call sets the internal irqchip state of an interrupt,
28431b7047edSMarc Zyngier  *	depending on the value of @which.
28441b7047edSMarc Zyngier  *
2845e1a6af4bSJosh Cartwright  *	This function should be called with migration disabled if the
28461b7047edSMarc Zyngier  *	interrupt controller has per-cpu registers.
28471b7047edSMarc Zyngier  */
28481b7047edSMarc Zyngier int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
28491b7047edSMarc Zyngier 			  bool val)
28501b7047edSMarc Zyngier {
28511b7047edSMarc Zyngier 	struct irq_desc *desc;
28521b7047edSMarc Zyngier 	struct irq_data *data;
28531b7047edSMarc Zyngier 	struct irq_chip *chip;
28541b7047edSMarc Zyngier 	unsigned long flags;
28551b7047edSMarc Zyngier 	int err = -EINVAL;
28561b7047edSMarc Zyngier 
28571b7047edSMarc Zyngier 	desc = irq_get_desc_buslock(irq, &flags, 0);
28581b7047edSMarc Zyngier 	if (!desc)
28591b7047edSMarc Zyngier 		return err;
28601b7047edSMarc Zyngier 
28611b7047edSMarc Zyngier 	data = irq_desc_get_irq_data(desc);
28621b7047edSMarc Zyngier 
28631b7047edSMarc Zyngier 	do {
28641b7047edSMarc Zyngier 		chip = irq_data_get_irq_chip(data);
2865f107cee9SGuenter Roeck 		if (WARN_ON_ONCE(!chip)) {
2866f107cee9SGuenter Roeck 			err = -ENODEV;
2867f107cee9SGuenter Roeck 			goto out_unlock;
2868f107cee9SGuenter Roeck 		}
28691b7047edSMarc Zyngier 		if (chip->irq_set_irqchip_state)
28701b7047edSMarc Zyngier 			break;
28711b7047edSMarc Zyngier #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
28721b7047edSMarc Zyngier 		data = data->parent_data;
28731b7047edSMarc Zyngier #else
28741b7047edSMarc Zyngier 		data = NULL;
28751b7047edSMarc Zyngier #endif
28761b7047edSMarc Zyngier 	} while (data);
28771b7047edSMarc Zyngier 
28781b7047edSMarc Zyngier 	if (data)
28791b7047edSMarc Zyngier 		err = chip->irq_set_irqchip_state(data, which, val);
28801b7047edSMarc Zyngier 
2881f107cee9SGuenter Roeck out_unlock:
28821b7047edSMarc Zyngier 	irq_put_desc_busunlock(desc, flags);
28831b7047edSMarc Zyngier 	return err;
28841b7047edSMarc Zyngier }
28851ee4fb3eSBjorn Andersson EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
2886a313357eSThomas Gleixner 
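/*
 * Usage sketch (illustrative only): restore a pending bit that was
 * snapshotted with irq_get_irqchip_state() above, e.g. when handing a
 * forwarded interrupt back to the host.
 *
 *	err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, pending);
 */
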
2887a313357eSThomas Gleixner /**
2888a313357eSThomas Gleixner  * irq_has_action - Check whether an interrupt is requested
2889a313357eSThomas Gleixner  * @irq:	The linux irq number
2890a313357eSThomas Gleixner  *
2891a313357eSThomas Gleixner  * Returns: A snapshot of the current state
2892a313357eSThomas Gleixner  */
2893a313357eSThomas Gleixner bool irq_has_action(unsigned int irq)
2894a313357eSThomas Gleixner {
2895a313357eSThomas Gleixner 	bool res;
2896a313357eSThomas Gleixner 
2897a313357eSThomas Gleixner 	rcu_read_lock();
2898a313357eSThomas Gleixner 	res = irq_desc_has_action(irq_to_desc(irq));
2899a313357eSThomas Gleixner 	rcu_read_unlock();
2900a313357eSThomas Gleixner 	return res;
2901a313357eSThomas Gleixner }
2902a313357eSThomas Gleixner EXPORT_SYMBOL_GPL(irq_has_action);
2903fdd02963SThomas Gleixner 
2904fdd02963SThomas Gleixner /**
2905fdd02963SThomas Gleixner  * irq_check_status_bit - Check whether bits in the irq descriptor status are set
2906fdd02963SThomas Gleixner  * @irq:	The linux irq number
2907fdd02963SThomas Gleixner  * @bitmask:	The bitmask to evaluate
2908fdd02963SThomas Gleixner  *
2909fdd02963SThomas Gleixner  * Returns: True if one of the bits in @bitmask is set
2910fdd02963SThomas Gleixner  */
2911fdd02963SThomas Gleixner bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
2912fdd02963SThomas Gleixner {
2913fdd02963SThomas Gleixner 	struct irq_desc *desc;
2914fdd02963SThomas Gleixner 	bool res = false;
2915fdd02963SThomas Gleixner 
2916fdd02963SThomas Gleixner 	rcu_read_lock();
2917fdd02963SThomas Gleixner 	desc = irq_to_desc(irq);
2918fdd02963SThomas Gleixner 	if (desc)
2919fdd02963SThomas Gleixner 		res = !!(desc->status_use_accessors & bitmask);
2920fdd02963SThomas Gleixner 	rcu_read_unlock();
2921fdd02963SThomas Gleixner 	return res;
2922fdd02963SThomas Gleixner }
2923ce09ccc5SThomas Gleixner EXPORT_SYMBOL_GPL(irq_check_status_bit);
2924