// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
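
/*
 * Editorial note (not part of the original file): forced threading is
 * requested from the kernel command line, e.g. by booting with:
 *
 *	threadirqs
 *
 * With CONFIG_PREEMPT_RT the block above is compiled out because forced
 * threading is unconditional there.
 */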

static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
{
	struct irq_data *irqd = irq_desc_get_irq_data(desc);
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);

		/*
		 * If requested and supported, check at the chip whether it
		 * is in flight at the hardware level, i.e. already pending
		 * in a CPU and waiting for service and acknowledge.
		 */
		if (!inprogress && sync_chip) {
			/*
			 * Ignore the return code. inprogress is only updated
			 * when the chip supports it.
			 */
			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
						&inprogress);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this
 *	function while holding a resource the IRQ handler may need, you
 *	will deadlock. It does not take associated threaded handlers
 *	into account.
 *
 *	Do not use this for shutdown scenarios where you must be sure
 *	that all parts (hardirq and threaded handler) have completed.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 *
 *	It does not check whether an interrupt is in flight at the
 *	hardware level but not yet serviced, as such a check might
 *	deadlock when called with interrupts disabled and the target CPU
 *	of the interrupt is the current CPU.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, false);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);
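
/*
 * Usage sketch (editorial illustration; the device structure, field
 * names and helper are hypothetical): a driver can wait out the hard
 * IRQ handler from atomic context before touching state the handler
 * reads:
 *
 *	disable_irq_nosync(dev->irq);
 *	if (synchronize_hardirq(dev->irq)) {
 *		// No threaded handler is active either, safe to proceed.
 *		mydrv_reset_rings(dev);
 *	}
 */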

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need, you will deadlock.
 *
 *	Can only be called from preemptible code as it might sleep when
 *	an interrupt thread is associated with @irq.
 *
 *	It optionally makes sure (when the irq chip supports that method)
 *	that the interrupt is not pending in any CPU and waiting for
 *	service.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, true);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
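
/*
 * Usage sketch (editorial, hypothetical driver code): the typical
 * teardown pattern quiesces the device first so no new interrupts are
 * raised, then waits for both hardirq and threaded handlers before
 * freeing resources. Must run in preemptible context:
 *
 *	mydrv_mask_hw_irq(dev);
 *	synchronize_irq(dev->irq);
 *	kfree(dev->ring);
 */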

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We cannot call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static void irq_validate_effective_affinity(struct irq_data *data)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
}

static inline void irq_init_effective_affinity(struct irq_data *data,
					       const struct cpumask *mask)
{
	cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
}
#else
static inline void irq_validate_effective_affinity(struct irq_data *data) { }
static inline void irq_init_effective_affinity(struct irq_data *data,
					       const struct cpumask *mask) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	/*
	 * If this is a managed interrupt and housekeeping is enabled on
	 * it, check whether the requested affinity mask intersects with
	 * a housekeeping CPU. If so, then remove the isolated CPUs from
	 * the mask and just keep the housekeeping CPU(s). This prevents
	 * the affinity setter from routing the interrupt to an isolated
	 * CPU, so that I/O submitted from a housekeeping CPU cannot cause
	 * interrupts on an isolated one.
	 *
	 * If the masks do not intersect or include online CPU(s) then
	 * keep the requested mask. The isolated target CPUs are only
	 * receiving interrupts when the I/O operation was submitted
	 * directly from them.
	 *
	 * If all housekeeping CPUs in the affinity mask are offline, the
	 * interrupt will be migrated by the CPU hotplug code once a
	 * housekeeping CPU which belongs to the affinity mask comes
	 * online.
	 */
	if (irqd_affinity_is_managed(data) &&
	    housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
		const struct cpumask *hk_mask, *prog_mask;

		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
		static struct cpumask tmp_mask;

		hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);

		raw_spin_lock(&tmp_mask_lock);
		cpumask_and(&tmp_mask, mask, hk_mask);
		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
			prog_mask = mask;
		else
			prog_mask = &tmp_mask;
		ret = chip->irq_set_affinity(data, prog_mask, force);
		raw_spin_unlock(&tmp_mask_lock);
	} else {
		ret = chip->irq_set_affinity(data, mask, force);
	}
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		fallthrough;
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}
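
/*
 * Configuration note (editorial): the housekeeping clause above only
 * triggers when managed interrupt isolation was requested on the kernel
 * command line, for example:
 *
 *	isolcpus=managed_irq,2-7
 *
 * A managed interrupt whose mask spans both isolated and housekeeping
 * CPUs is then programmed to the housekeeping subset only.
 */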

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism then utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}

static bool irq_set_affinity_deactivated(struct irq_data *data,
					 const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	/*
	 * Handle irq chips which can handle affinity only in activated
	 * state correctly
	 *
	 * If the interrupt is not yet activated, just store the affinity
	 * mask and do not call the chip driver at all. On activation the
	 * driver has to make sure anyway that the interrupt is in a
	 * usable state so startup works.
	 */
	if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
	    irqd_is_activated(data) || !irqd_affinity_on_activate(data))
		return false;

	cpumask_copy(desc->irq_common_data.affinity, mask);
	irq_init_effective_affinity(data, mask);
	irqd_set(data, IRQD_AFFINITY_SET);
	return true;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_set_affinity_deactivated(data, mask, force))
		return 0;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		if (!schedule_work(&desc->affinity_notify->work)) {
			/* Work was already scheduled, drop our extra ref */
			kref_put(&desc->affinity_notify->kref,
				 desc->affinity_notify->release);
		}
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

/**
 * irq_update_affinity_desc - Update affinity management for an interrupt
 * @irq:	The interrupt number to update
 * @affinity:	Pointer to the affinity descriptor
 *
 * This interface can be used to configure the affinity management of
 * interrupts which have been allocated already.
 *
 * There are certain limitations on when it may be used - attempts to use it
 * when the kernel is configured for generic IRQ reservation mode (in
 * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
 * managed/non-managed interrupt accounting. In addition, attempts to use it on
 * an interrupt which is already started or which has already been configured
 * as managed will also fail, as these mean invalid init state or double init.
 */
int irq_update_affinity_desc(unsigned int irq,
			     struct irq_affinity_desc *affinity)
{
	struct irq_desc *desc;
	unsigned long flags;
	bool activated;
	int ret = 0;

	/*
	 * Supporting this with the reservation scheme used by x86 needs
	 * some more thought. Fail it for now.
	 */
	if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
		return -EOPNOTSUPP;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return -EINVAL;

	/* Requires the interrupt to be shut down */
	if (irqd_is_started(&desc->irq_data)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/* Interrupts which are already managed cannot be modified */
	if (irqd_affinity_is_managed(&desc->irq_data)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * Deactivate the interrupt. That's required to undo
	 * anything an earlier activation has established.
	 */
	activated = irqd_is_activated(&desc->irq_data);
	if (activated)
		irq_domain_deactivate_irq(&desc->irq_data);

	if (affinity->is_managed) {
		irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
		irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
	}

	cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);

	/* Restore the activation state */
	if (activated)
		irq_domain_activate_irq(&desc->irq_data, false);

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
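
/*
 * Usage sketch (editorial, hypothetical values): converting an already
 * allocated but not yet started interrupt to managed mode pinned to
 * CPU 2 might look like:
 *
 *	struct irq_affinity_desc affd = { .is_managed = 1 };
 *
 *	cpumask_copy(&affd.mask, cpumask_of(2));
 *	ret = irq_update_affinity_desc(virq, &affd);
 */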

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
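
/*
 * Usage sketch (editorial; the queue array is hypothetical and nvec is
 * assumed not to exceed the number of CPUs): multi-queue drivers
 * commonly spread their vectors with a hint per queue and clear it
 * again before free_irq():
 *
 *	for (i = 0; i < nvec; i++)
 *		irq_set_affinity_hint(dev->queue[i].irq, cpumask_of(i));
 *	...
 *	irq_set_affinity_hint(dev->queue[i].irq, NULL);
 */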

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc || desc->istate & IRQS_NMI)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify) {
		if (cancel_work_sync(&old_notify->work)) {
			/* Pending work had a ref, put that one too */
			kref_put(&old_notify->kref, old_notify->release);
		}
		kref_put(&old_notify->kref, old_notify->release);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
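
/*
 * Usage sketch (editorial, hypothetical callbacks): a consumer embeds
 * struct irq_affinity_notify, fills in the function pointers and
 * registers it. notify() then runs from a workqueue after each affinity
 * change and release() drops the reference on the embedding object:
 *
 *	dev->notify.notify = mydrv_affinity_changed;
 *	dev->notify.release = mydrv_notify_release;
 *	ret = irq_set_affinity_notifier(dev->irq, &dev->notify);
 *	...
 *	irq_set_affinity_notifier(dev->irq, NULL);	// before free_irq()
 */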

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpu_online_mask);

	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif /* CONFIG_AUTO_IRQ_AFFINITY */
#endif /* CONFIG_SMP */

/**
 *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 *	@irq: interrupt number to set affinity
 *	@vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 *	            specific data for percpu_devid interrupts
 *
 *	This function uses the vCPU specific data to set the vCPU
 *	affinity for an irq. The vCPU specific data is passed from
 *	outside, such as KVM. One example code path is as below:
 *	KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
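
/*
 * Usage sketch (editorial): the caller is virtualization code rather
 * than a regular driver; a hypothetical posted-interrupt update path
 * hands chip specific data down the hierarchy:
 *
 *	ret = irq_set_vcpu_affinity(host_irq, &pi_data);
 *	if (ret == -ENOSYS)
 *		;	// no chip in the hierarchy implements the callback
 */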

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
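
/*
 * Usage note (editorial, hypothetical lock): the deadlock warning above
 * means in practice "never call disable_irq() while holding a lock the
 * handler takes":
 *
 *	spin_lock(&dev->lock);
 *	disable_irq(dev->irq);	// BAD: the handler may be spinning on
 *				// dev->lock, so the wait never ends
 *	spin_unlock(&dev->lock);
 *
 * The safe ordering is disable_irq() first, then take the lock.
 */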

/**
 *	disable_hardirq - disables an irq and waits for hardirq completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this function while
 *	holding a resource the hard IRQ handler may need, you will deadlock.
 *
 *	When used to optimistically disable an interrupt from atomic context
 *	the return value must be checked.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
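
/*
 * Usage sketch (editorial, modeled on the netpoll style of use):
 * optimistic disable from atomic context, polling only when no
 * threaded handler can interfere. The polling helper is hypothetical:
 *
 *	if (disable_hardirq(dev->irq))
 *		mydrv_poll_controller(dev);
 *	enable_irq(dev->irq);
 */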

/**
 *	disable_nmi_nosync - disable an nmi without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables and enables are
 *	nested.
 *	The interrupt to disable must have been requested through request_nmi.
 *	Unlike disable_nmi(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 */
void disable_nmi_nosync(unsigned int irq)
{
	disable_irq_nosync(irq);
}

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
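
/*
 * Usage note (editorial): disables nest, so every disable_irq*() call
 * needs a matching enable_irq(); only the call balancing the first
 * disable unmasks the line again:
 *
 *	disable_irq(dev->irq);		// depth 0 -> 1, line disabled
 *	disable_irq_nosync(dev->irq);	// depth 1 -> 2
 *	enable_irq(dev->irq);		// depth 2 -> 1, still disabled
 *	enable_irq(dev->irq);		// depth 1 -> 0, line enabled again
 */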

/**
 *	enable_nmi - enable handling of an nmi
 *	@irq: Interrupt to enable
 *
 *	The interrupt to enable must have been requested through request_nmi.
 *	Undoes the effect of one call to disable_nmi(). If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 */
void enable_nmi(unsigned int irq)
{
	enable_irq(irq);
}

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 *
 *	Note: irq enable/disable state is completely orthogonal
 *	to the enable/disable state of irq wake. An irq can be
 *	disabled with disable_irq() and still wake the system as
 *	long as the irq has wake enabled. If this does not hold,
 *	then the underlying irq chip and the related driver need
 *	to be investigated.
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Don't use NMIs as wake up interrupts please */
	if (desc->istate & IRQS_NMI) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
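
/*
 * Usage sketch (editorial, hypothetical driver): most callers go
 * through the enable_irq_wake()/disable_irq_wake() wrappers from their
 * PM callbacks:
 *
 *	static int mydrv_suspend(struct device *d)
 *	{
 *		struct mydrv *dev = dev_get_drvdata(d);
 *
 *		if (device_may_wakeup(d))
 *			enable_irq_wake(dev->irq);
 *		return 0;
 *	}
 */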

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		fallthrough;

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}
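
/*
 * Usage sketch (editorial, hypothetical handler): requesting a purely
 * threaded interrupt installs the default primary handler above; the
 * core requires IRQF_ONESHOT in this case so the line stays masked
 * until the thread completes:
 *
 *	ret = request_threaded_irq(client->irq, NULL, mydrv_thread_fn,
 *				   IRQF_ONESHOT, "mydrv", dev);
 */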

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			/* may need to run one last time */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			__set_current_state(TASK_RUNNING);
			return -1;
		}

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask it if the interrupt has not been
 * disabled and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect ourselves
	 * against the following scenario:
	 *
	 * The thread finishes faster than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again, mask the line and leave due
	 * to IRQS_INPROGRESS, and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
106132f4125eSThomas Gleixner 	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
10620b1adaa0SThomas Gleixner 		raw_spin_unlock_irq(&desc->lock);
10633876ec9eSThomas Gleixner 		chip_bus_sync_unlock(desc);
10640b1adaa0SThomas Gleixner 		cpu_relax();
10650b1adaa0SThomas Gleixner 		goto again;
10660b1adaa0SThomas Gleixner 	}
10670b1adaa0SThomas Gleixner 
1068b5faba21SThomas Gleixner 	/*
1069b5faba21SThomas Gleixner 	 * Now check again whether the thread should run. Otherwise
1070b5faba21SThomas Gleixner 	 * we would clear the threads_oneshot bit of this thread which
1071b5faba21SThomas Gleixner 	 * was just set.
1072b5faba21SThomas Gleixner 	 */
1073f3f79e38SAlexander Gordeev 	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1074b5faba21SThomas Gleixner 		goto out_unlock;
1075b5faba21SThomas Gleixner 
1076b5faba21SThomas Gleixner 	desc->threads_oneshot &= ~action->thread_mask;
1077b5faba21SThomas Gleixner 
107832f4125eSThomas Gleixner 	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
107932f4125eSThomas Gleixner 	    irqd_irq_masked(&desc->irq_data))
1080328a4978SThomas Gleixner 		unmask_threaded_irq(desc);
108132f4125eSThomas Gleixner 
1082b5faba21SThomas Gleixner out_unlock:
1083239007b8SThomas Gleixner 	raw_spin_unlock_irq(&desc->lock);
10843876ec9eSThomas Gleixner 	chip_bus_sync_unlock(desc);
1085b25c340cSThomas Gleixner }
1086b25c340cSThomas Gleixner 
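/*
 * A minimal driver-side sketch of the oneshot machinery above, assuming a
 * hypothetical "foo" device: with a NULL primary handler and IRQF_ONESHOT,
 * the line stays masked from hard irq entry until irq_finalize_oneshot()
 * unmasks it once the thread has run. Guarded out because it is an
 * illustration, not part of this file.
 */
#if 0
#include <linux/interrupt.h>
#include <linux/io.h>

struct foo_device {			/* hypothetical device state */
	int irq;
	void __iomem *regs;
};

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	/*
	 * May sleep: for a level-triggered line this is only safe because
	 * IRQF_ONESHOT keeps the line masked until we return.
	 */
	(void)readl(foo->regs);		/* ack/drain the device */
	return IRQ_HANDLED;
}

static int foo_request(struct foo_device *foo)
{
	/* A NULL primary handler requires IRQF_ONESHOT, see __setup_irq() */
	return request_threaded_irq(foo->irq, NULL, foo_thread_fn,
				    IRQF_ONESHOT, "foo", foo);
}
#endif
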
108761f38261SBruno Premont #ifdef CONFIG_SMP
10883aa551c9SThomas Gleixner /*
1089b04c644eSChuansheng Liu  * Check whether we need to change the affinity of the interrupt thread.
1090591d2fb0SThomas Gleixner  */
1091591d2fb0SThomas Gleixner static void
1092591d2fb0SThomas Gleixner irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
1093591d2fb0SThomas Gleixner {
1094591d2fb0SThomas Gleixner 	cpumask_var_t mask;
109504aa530eSThomas Gleixner 	bool valid = true;
1096591d2fb0SThomas Gleixner 
1097591d2fb0SThomas Gleixner 	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
1098591d2fb0SThomas Gleixner 		return;
1099591d2fb0SThomas Gleixner 
1100591d2fb0SThomas Gleixner 	/*
1101591d2fb0SThomas Gleixner 	 * In case we are out of memory, set IRQTF_AFFINITY again and
1102591d2fb0SThomas Gleixner 	 * retry next time.
1103591d2fb0SThomas Gleixner 	 */
1104591d2fb0SThomas Gleixner 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1105591d2fb0SThomas Gleixner 		set_bit(IRQTF_AFFINITY, &action->thread_flags);
1106591d2fb0SThomas Gleixner 		return;
1107591d2fb0SThomas Gleixner 	}
1108591d2fb0SThomas Gleixner 
1109239007b8SThomas Gleixner 	raw_spin_lock_irq(&desc->lock);
111004aa530eSThomas Gleixner 	/*
111104aa530eSThomas Gleixner 	 * This code is triggered unconditionally. Check the affinity
111204aa530eSThomas Gleixner 	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
111304aa530eSThomas Gleixner 	 */
1114cbf86999SThomas Gleixner 	if (cpumask_available(desc->irq_common_data.affinity)) {
1115cbf86999SThomas Gleixner 		const struct cpumask *m;
1116cbf86999SThomas Gleixner 
1117cbf86999SThomas Gleixner 		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1118cbf86999SThomas Gleixner 		cpumask_copy(mask, m);
1119cbf86999SThomas Gleixner 	} else {
112004aa530eSThomas Gleixner 		valid = false;
1121cbf86999SThomas Gleixner 	}
1122239007b8SThomas Gleixner 	raw_spin_unlock_irq(&desc->lock);
1123591d2fb0SThomas Gleixner 
112404aa530eSThomas Gleixner 	if (valid)
1125591d2fb0SThomas Gleixner 		set_cpus_allowed_ptr(current, mask);
1126591d2fb0SThomas Gleixner 	free_cpumask_var(mask);
1127591d2fb0SThomas Gleixner }
112861f38261SBruno Premont #else
112961f38261SBruno Premont static inline void
113061f38261SBruno Premont irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
113161f38261SBruno Premont #endif
1132591d2fb0SThomas Gleixner 
1133591d2fb0SThomas Gleixner /*
1134c5f48c0aSIngo Molnar  * Interrupts which are not explicitly requested as threaded
11358d32a307SThomas Gleixner  * interrupts rely on the implicit bh/preempt disable of the hard irq
11368d32a307SThomas Gleixner  * context. So we need to disable bh here to avoid deadlocks and other
11378d32a307SThomas Gleixner  * side effects.
11388d32a307SThomas Gleixner  */
11393a43e05fSSebastian Andrzej Siewior static irqreturn_t
11408d32a307SThomas Gleixner irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
11418d32a307SThomas Gleixner {
11423a43e05fSSebastian Andrzej Siewior 	irqreturn_t ret;
11433a43e05fSSebastian Andrzej Siewior 
11448d32a307SThomas Gleixner 	local_bh_disable();
11453a43e05fSSebastian Andrzej Siewior 	ret = action->thread_fn(action->irq, action->dev_id);
1146746a923bSLukas Wunner 	if (ret == IRQ_HANDLED)
1147746a923bSLukas Wunner 		atomic_inc(&desc->threads_handled);
1148746a923bSLukas Wunner 
1149f3f79e38SAlexander Gordeev 	irq_finalize_oneshot(desc, action);
11508d32a307SThomas Gleixner 	local_bh_enable();
11513a43e05fSSebastian Andrzej Siewior 	return ret;
11528d32a307SThomas Gleixner }
11538d32a307SThomas Gleixner 
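/*
 * Under the "threadirqs" command line option, irq_setup_forced_threading()
 * further below effectively rewrites a plain request_irq(irq, handler, ...)
 * into the threaded equivalent, roughly:
 *
 *	new->thread_fn = new->handler;
 *	new->handler   = irq_default_primary_handler;
 *	new->flags    |= IRQF_ONESHOT;
 *
 * which is why the forced thread function above has to provide the bh
 * disable that the handler would otherwise get for free in hard irq
 * context.
 */
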
11548d32a307SThomas Gleixner /*
1155f788e7bfSXie XiuQi  * Interrupts explicitly requested as threaded interrupts want to be
1156*5c982c58SKrzysztof Kozlowski  * preemptible - many of them need to sleep and wait for transfers on
11578d32a307SThomas Gleixner  * slow buses to complete.
11588d32a307SThomas Gleixner  */
11593a43e05fSSebastian Andrzej Siewior static irqreturn_t irq_thread_fn(struct irq_desc *desc,
11603a43e05fSSebastian Andrzej Siewior 		struct irqaction *action)
11618d32a307SThomas Gleixner {
11623a43e05fSSebastian Andrzej Siewior 	irqreturn_t ret;
11633a43e05fSSebastian Andrzej Siewior 
11643a43e05fSSebastian Andrzej Siewior 	ret = action->thread_fn(action->irq, action->dev_id);
1165746a923bSLukas Wunner 	if (ret == IRQ_HANDLED)
1166746a923bSLukas Wunner 		atomic_inc(&desc->threads_handled);
1167746a923bSLukas Wunner 
1168f3f79e38SAlexander Gordeev 	irq_finalize_oneshot(desc, action);
11693a43e05fSSebastian Andrzej Siewior 	return ret;
11708d32a307SThomas Gleixner }
11718d32a307SThomas Gleixner 
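/*
 * A sketch of what the "preemptible" guarantee buys a threaded handler,
 * assuming a hypothetical "bar" device: taking a mutex would be illegal in
 * hard irq context, but is fine in a thread_fn invoked via irq_thread_fn()
 * above.
 */
#if 0
#include <linux/interrupt.h>
#include <linux/mutex.h>

struct bar_device {			/* hypothetical device state */
	struct mutex lock;
	unsigned long events;
};

static irqreturn_t bar_thread_fn(int irq, void *dev_id)
{
	struct bar_device *bar = dev_id;

	mutex_lock(&bar->lock);		/* sleeping is fine here */
	bar->events++;
	mutex_unlock(&bar->lock);
	return IRQ_HANDLED;
}
#endif
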
11727140ea19SIdo Yariv static void wake_threads_waitq(struct irq_desc *desc)
11737140ea19SIdo Yariv {
1174c685689fSChuansheng Liu 	if (atomic_dec_and_test(&desc->threads_active))
11757140ea19SIdo Yariv 		wake_up(&desc->wait_for_threads);
11767140ea19SIdo Yariv }
11777140ea19SIdo Yariv 
117867d12145SAl Viro static void irq_thread_dtor(struct callback_head *unused)
11794d1d61a6SOleg Nesterov {
11804d1d61a6SOleg Nesterov 	struct task_struct *tsk = current;
11814d1d61a6SOleg Nesterov 	struct irq_desc *desc;
11824d1d61a6SOleg Nesterov 	struct irqaction *action;
11834d1d61a6SOleg Nesterov 
11844d1d61a6SOleg Nesterov 	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
11854d1d61a6SOleg Nesterov 		return;
11864d1d61a6SOleg Nesterov 
11874d1d61a6SOleg Nesterov 	action = kthread_data(tsk);
11884d1d61a6SOleg Nesterov 
1189fb21affaSLinus Torvalds 	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
119019af395dSAlan Cox 	       tsk->comm, tsk->pid, action->irq);
11914d1d61a6SOleg Nesterov 
11934d1d61a6SOleg Nesterov 	desc = irq_to_desc(action->irq);
11944d1d61a6SOleg Nesterov 	/*
11954d1d61a6SOleg Nesterov 	 * If IRQTF_RUNTHREAD is set, we need to decrement
11964d1d61a6SOleg Nesterov 	 * desc->threads_active and wake possible waiters.
11974d1d61a6SOleg Nesterov 	 */
11984d1d61a6SOleg Nesterov 	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
11994d1d61a6SOleg Nesterov 		wake_threads_waitq(desc);
12004d1d61a6SOleg Nesterov 
12014d1d61a6SOleg Nesterov 	/* Prevent a stale desc->threads_oneshot */
12024d1d61a6SOleg Nesterov 	irq_finalize_oneshot(desc, action);
12034d1d61a6SOleg Nesterov }
12044d1d61a6SOleg Nesterov 
12052a1d3ab8SThomas Gleixner static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
12062a1d3ab8SThomas Gleixner {
12072a1d3ab8SThomas Gleixner 	struct irqaction *secondary = action->secondary;
12082a1d3ab8SThomas Gleixner 
12092a1d3ab8SThomas Gleixner 	if (WARN_ON_ONCE(!secondary))
12102a1d3ab8SThomas Gleixner 		return;
12112a1d3ab8SThomas Gleixner 
12122a1d3ab8SThomas Gleixner 	raw_spin_lock_irq(&desc->lock);
12132a1d3ab8SThomas Gleixner 	__irq_wake_thread(desc, secondary);
12142a1d3ab8SThomas Gleixner 	raw_spin_unlock_irq(&desc->lock);
12152a1d3ab8SThomas Gleixner }
12162a1d3ab8SThomas Gleixner 
12178d32a307SThomas Gleixner /*
12183aa551c9SThomas Gleixner  * Interrupt handler thread
12193aa551c9SThomas Gleixner  */
12203aa551c9SThomas Gleixner static int irq_thread(void *data)
12213aa551c9SThomas Gleixner {
122267d12145SAl Viro 	struct callback_head on_exit_work;
12233aa551c9SThomas Gleixner 	struct irqaction *action = data;
12243aa551c9SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(action->irq);
12253a43e05fSSebastian Andrzej Siewior 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
12263a43e05fSSebastian Andrzej Siewior 			struct irqaction *action);
12273aa551c9SThomas Gleixner 
1228540b60e2SAlexander Gordeev 	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
12298d32a307SThomas Gleixner 					&action->thread_flags))
12308d32a307SThomas Gleixner 		handler_fn = irq_forced_thread_fn;
12318d32a307SThomas Gleixner 	else
12328d32a307SThomas Gleixner 		handler_fn = irq_thread_fn;
12338d32a307SThomas Gleixner 
123441f9d29fSAl Viro 	init_task_work(&on_exit_work, irq_thread_dtor);
123591989c70SJens Axboe 	task_work_add(current, &on_exit_work, TWA_NONE);
12363aa551c9SThomas Gleixner 
1237f3de44edSSankara Muthukrishnan 	irq_thread_check_affinity(desc, action);
1238f3de44edSSankara Muthukrishnan 
12393aa551c9SThomas Gleixner 	while (!irq_wait_for_interrupt(action)) {
12407140ea19SIdo Yariv 		irqreturn_t action_ret;
12413aa551c9SThomas Gleixner 
1242591d2fb0SThomas Gleixner 		irq_thread_check_affinity(desc, action);
1243591d2fb0SThomas Gleixner 
12443a43e05fSSebastian Andrzej Siewior 		action_ret = handler_fn(desc, action);
12452a1d3ab8SThomas Gleixner 		if (action_ret == IRQ_WAKE_THREAD)
12462a1d3ab8SThomas Gleixner 			irq_wake_secondary(desc, action);
12477140ea19SIdo Yariv 
12487140ea19SIdo Yariv 		wake_threads_waitq(desc);
12493aa551c9SThomas Gleixner 	}
12503aa551c9SThomas Gleixner 
12517140ea19SIdo Yariv 	/*
12527140ea19SIdo Yariv 	 * This is the regular exit path. __free_irq() is stopping the
12537140ea19SIdo Yariv 	 * thread via kthread_stop() after calling
1254519cc865SLukas Wunner 	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1255836557bdSLukas Wunner 	 * oneshot mask bit can be set.
12563aa551c9SThomas Gleixner 	 */
12574d1d61a6SOleg Nesterov 	task_work_cancel(current, irq_thread_dtor);
12583aa551c9SThomas Gleixner 	return 0;
12593aa551c9SThomas Gleixner }
12603aa551c9SThomas Gleixner 
1261a92444c6SThomas Gleixner /**
1262a92444c6SThomas Gleixner  *	irq_wake_thread - wake the irq thread for the action identified by dev_id
1263a92444c6SThomas Gleixner  *	@irq:		Interrupt line
1264a92444c6SThomas Gleixner  *	@dev_id:	Device identity for which the thread should be woken
1265a92444c6SThomas Gleixner  *
1266a92444c6SThomas Gleixner  */
1267a92444c6SThomas Gleixner void irq_wake_thread(unsigned int irq, void *dev_id)
1268a92444c6SThomas Gleixner {
1269a92444c6SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
1270a92444c6SThomas Gleixner 	struct irqaction *action;
1271a92444c6SThomas Gleixner 	unsigned long flags;
1272a92444c6SThomas Gleixner 
1273a92444c6SThomas Gleixner 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1274a92444c6SThomas Gleixner 		return;
1275a92444c6SThomas Gleixner 
1276a92444c6SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
1277f944b5a7SDaniel Lezcano 	for_each_action_of_desc(desc, action) {
1278a92444c6SThomas Gleixner 		if (action->dev_id == dev_id) {
1279a92444c6SThomas Gleixner 			if (action->thread)
1280a92444c6SThomas Gleixner 				__irq_wake_thread(desc, action);
1281a92444c6SThomas Gleixner 			break;
1282a92444c6SThomas Gleixner 		}
1283a92444c6SThomas Gleixner 	}
1284a92444c6SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1285a92444c6SThomas Gleixner }
1286a92444c6SThomas Gleixner EXPORT_SYMBOL_GPL(irq_wake_thread);
1287a92444c6SThomas Gleixner 
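/*
 * A usage sketch for irq_wake_thread(), assuming a hypothetical "baz"
 * device with a polling fallback path. The dev_id cookie must match the
 * one passed to request_threaded_irq(), since the loop above only wakes
 * the action whose dev_id matches.
 */
#if 0
#include <linux/interrupt.h>

struct baz_device {			/* hypothetical device state */
	int irq;
};

static void baz_poll_timeout(struct baz_device *baz)
{
	/* Run the threaded handler as if the hard irq had fired. */
	irq_wake_thread(baz->irq, baz);
}
#endif
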
12882a1d3ab8SThomas Gleixner static int irq_setup_forced_threading(struct irqaction *new)
12898d32a307SThomas Gleixner {
12908d32a307SThomas Gleixner 	if (!force_irqthreads)
12912a1d3ab8SThomas Gleixner 		return 0;
12928d32a307SThomas Gleixner 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
12932a1d3ab8SThomas Gleixner 		return 0;
12948d32a307SThomas Gleixner 
1295d1f0301bSThomas Gleixner 	/*
1296d1f0301bSThomas Gleixner 	 * No further action required for interrupts which are requested as
1297d1f0301bSThomas Gleixner 	 * threaded interrupts already
1298d1f0301bSThomas Gleixner 	 */
1299d1f0301bSThomas Gleixner 	if (new->handler == irq_default_primary_handler)
1300d1f0301bSThomas Gleixner 		return 0;
1301d1f0301bSThomas Gleixner 
13028d32a307SThomas Gleixner 	new->flags |= IRQF_ONESHOT;
13038d32a307SThomas Gleixner 
13042a1d3ab8SThomas Gleixner 	/*
13052a1d3ab8SThomas Gleixner 	 * Handle the case where we have a real primary handler and a
13062a1d3ab8SThomas Gleixner 	 * thread handler. We force-thread them as well by creating a
13072a1d3ab8SThomas Gleixner 	 * secondary action.
13082a1d3ab8SThomas Gleixner 	 */
1309d1f0301bSThomas Gleixner 	if (new->handler && new->thread_fn) {
13102a1d3ab8SThomas Gleixner 		/* Allocate the secondary action */
13112a1d3ab8SThomas Gleixner 		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
13122a1d3ab8SThomas Gleixner 		if (!new->secondary)
13132a1d3ab8SThomas Gleixner 			return -ENOMEM;
13142a1d3ab8SThomas Gleixner 		new->secondary->handler = irq_forced_secondary_handler;
13152a1d3ab8SThomas Gleixner 		new->secondary->thread_fn = new->thread_fn;
13162a1d3ab8SThomas Gleixner 		new->secondary->dev_id = new->dev_id;
13172a1d3ab8SThomas Gleixner 		new->secondary->irq = new->irq;
13182a1d3ab8SThomas Gleixner 		new->secondary->name = new->name;
13192a1d3ab8SThomas Gleixner 	}
13202a1d3ab8SThomas Gleixner 	/* Deal with the primary handler */
13218d32a307SThomas Gleixner 	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
13228d32a307SThomas Gleixner 	new->thread_fn = new->handler;
13238d32a307SThomas Gleixner 	new->handler = irq_default_primary_handler;
13242a1d3ab8SThomas Gleixner 	return 0;
13258d32a307SThomas Gleixner }
13268d32a307SThomas Gleixner 
1327c1bacbaeSThomas Gleixner static int irq_request_resources(struct irq_desc *desc)
1328c1bacbaeSThomas Gleixner {
1329c1bacbaeSThomas Gleixner 	struct irq_data *d = &desc->irq_data;
1330c1bacbaeSThomas Gleixner 	struct irq_chip *c = d->chip;
1331c1bacbaeSThomas Gleixner 
1332c1bacbaeSThomas Gleixner 	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1333c1bacbaeSThomas Gleixner }
1334c1bacbaeSThomas Gleixner 
1335c1bacbaeSThomas Gleixner static void irq_release_resources(struct irq_desc *desc)
1336c1bacbaeSThomas Gleixner {
1337c1bacbaeSThomas Gleixner 	struct irq_data *d = &desc->irq_data;
1338c1bacbaeSThomas Gleixner 	struct irq_chip *c = d->chip;
1339c1bacbaeSThomas Gleixner 
1340c1bacbaeSThomas Gleixner 	if (c->irq_release_resources)
1341c1bacbaeSThomas Gleixner 		c->irq_release_resources(d);
1342c1bacbaeSThomas Gleixner }
1343c1bacbaeSThomas Gleixner 
1344b525903cSJulien Thierry static bool irq_supports_nmi(struct irq_desc *desc)
1345b525903cSJulien Thierry {
1346b525903cSJulien Thierry 	struct irq_data *d = irq_desc_get_irq_data(desc);
1347b525903cSJulien Thierry 
1348b525903cSJulien Thierry #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1349b525903cSJulien Thierry 	/* Only IRQs directly managed by the root irqchip can be set as NMI */
1350b525903cSJulien Thierry 	if (d->parent_data)
1351b525903cSJulien Thierry 		return false;
1352b525903cSJulien Thierry #endif
1353b525903cSJulien Thierry 	/* Don't support NMIs for chips behind a slow bus */
1354b525903cSJulien Thierry 	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1355b525903cSJulien Thierry 		return false;
1356b525903cSJulien Thierry 
1357b525903cSJulien Thierry 	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1358b525903cSJulien Thierry }
1359b525903cSJulien Thierry 
1360b525903cSJulien Thierry static int irq_nmi_setup(struct irq_desc *desc)
1361b525903cSJulien Thierry {
1362b525903cSJulien Thierry 	struct irq_data *d = irq_desc_get_irq_data(desc);
1363b525903cSJulien Thierry 	struct irq_chip *c = d->chip;
1364b525903cSJulien Thierry 
1365b525903cSJulien Thierry 	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1366b525903cSJulien Thierry }
1367b525903cSJulien Thierry 
1368b525903cSJulien Thierry static void irq_nmi_teardown(struct irq_desc *desc)
1369b525903cSJulien Thierry {
1370b525903cSJulien Thierry 	struct irq_data *d = irq_desc_get_irq_data(desc);
1371b525903cSJulien Thierry 	struct irq_chip *c = d->chip;
1372b525903cSJulien Thierry 
1373b525903cSJulien Thierry 	if (c->irq_nmi_teardown)
1374b525903cSJulien Thierry 		c->irq_nmi_teardown(d);
1375b525903cSJulien Thierry }
1376b525903cSJulien Thierry 
13772a1d3ab8SThomas Gleixner static int
13782a1d3ab8SThomas Gleixner setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
13792a1d3ab8SThomas Gleixner {
13802a1d3ab8SThomas Gleixner 	struct task_struct *t;
13812a1d3ab8SThomas Gleixner 
13822a1d3ab8SThomas Gleixner 	if (!secondary) {
13832a1d3ab8SThomas Gleixner 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
13842a1d3ab8SThomas Gleixner 				   new->name);
13852a1d3ab8SThomas Gleixner 	} else {
13862a1d3ab8SThomas Gleixner 		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
13872a1d3ab8SThomas Gleixner 				   new->name);
13882a1d3ab8SThomas Gleixner 	}
13892a1d3ab8SThomas Gleixner 
13902a1d3ab8SThomas Gleixner 	if (IS_ERR(t))
13912a1d3ab8SThomas Gleixner 		return PTR_ERR(t);
13922a1d3ab8SThomas Gleixner 
13937a40798cSPeter Zijlstra 	sched_set_fifo(t);
13942a1d3ab8SThomas Gleixner 
13952a1d3ab8SThomas Gleixner 	/*
13962a1d3ab8SThomas Gleixner 	 * We keep the reference to the task struct even if
13972a1d3ab8SThomas Gleixner 	 * the thread dies so that the interrupt code never
13982a1d3ab8SThomas Gleixner 	 * references an already freed task_struct.
13992a1d3ab8SThomas Gleixner 	 */
14007b3c92b8SMatthew Wilcox (Oracle) 	new->thread = get_task_struct(t);
14012a1d3ab8SThomas Gleixner 	/*
14022a1d3ab8SThomas Gleixner 	 * Tell the thread to set its affinity. This is
14032a1d3ab8SThomas Gleixner 	 * important for shared interrupt handlers as we do
14042a1d3ab8SThomas Gleixner 	 * not invoke setup_affinity() for the secondary
14052a1d3ab8SThomas Gleixner 	 * handlers as everything is already set up. Even for
14062a1d3ab8SThomas Gleixner 	 * interrupts marked with IRQF_NOBALANCING this is
14072a1d3ab8SThomas Gleixner 	 * correct as we want the thread to move to the cpu(s)
14082a1d3ab8SThomas Gleixner 	 * on which the requesting code placed the interrupt.
14092a1d3ab8SThomas Gleixner 	 */
14102a1d3ab8SThomas Gleixner 	set_bit(IRQTF_AFFINITY, &new->thread_flags);
14112a1d3ab8SThomas Gleixner 	return 0;
14122a1d3ab8SThomas Gleixner }
14132a1d3ab8SThomas Gleixner 
14141da177e4SLinus Torvalds /*
14151da177e4SLinus Torvalds  * Internal function to register an irqaction - typically used to
14161da177e4SLinus Torvalds  * allocate special interrupts that are part of the architecture.
141719d39a38SThomas Gleixner  *
141819d39a38SThomas Gleixner  * Locking rules:
141919d39a38SThomas Gleixner  *
142019d39a38SThomas Gleixner  * desc->request_mutex	Provides serialization against a concurrent free_irq()
142119d39a38SThomas Gleixner  *   chip_bus_lock	Provides serialization for slow bus operations
142219d39a38SThomas Gleixner  *     desc->lock	Provides serialization against hard interrupts
142319d39a38SThomas Gleixner  *
142419d39a38SThomas Gleixner  * chip_bus_lock and desc->lock are sufficient for all other management and
142519d39a38SThomas Gleixner  * interrupt related functions. desc->request_mutex solely serializes
142619d39a38SThomas Gleixner  * request/free_irq().
14271da177e4SLinus Torvalds  */
1428d3c60047SThomas Gleixner static int
1429d3c60047SThomas Gleixner __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
14301da177e4SLinus Torvalds {
1431f17c7545SIngo Molnar 	struct irqaction *old, **old_ptr;
1432b5faba21SThomas Gleixner 	unsigned long flags, thread_mask = 0;
14333b8249e7SThomas Gleixner 	int ret, nested, shared = 0;
14341da177e4SLinus Torvalds 
14357d94f7caSYinghai Lu 	if (!desc)
1436c2b5a251SMatthew Wilcox 		return -EINVAL;
1437c2b5a251SMatthew Wilcox 
14386b8ff312SThomas Gleixner 	if (desc->irq_data.chip == &no_irq_chip)
14391da177e4SLinus Torvalds 		return -ENOSYS;
1440b6873807SSebastian Andrzej Siewior 	if (!try_module_get(desc->owner))
1441b6873807SSebastian Andrzej Siewior 		return -ENODEV;
14421da177e4SLinus Torvalds 
14432a1d3ab8SThomas Gleixner 	new->irq = irq;
14442a1d3ab8SThomas Gleixner 
14451da177e4SLinus Torvalds 	/*
14464b357daeSJon Hunter 	 * If the trigger type is not specified by the caller,
14474b357daeSJon Hunter 	 * then use the default for this interrupt.
14484b357daeSJon Hunter 	 */
14494b357daeSJon Hunter 	if (!(new->flags & IRQF_TRIGGER_MASK))
14504b357daeSJon Hunter 		new->flags |= irqd_get_trigger_type(&desc->irq_data);
14514b357daeSJon Hunter 
14524b357daeSJon Hunter 	/*
1453399b5da2SThomas Gleixner 	 * Check whether the interrupt nests into another interrupt
1454399b5da2SThomas Gleixner 	 * thread.
14553aa551c9SThomas Gleixner 	 */
14561ccb4e61SThomas Gleixner 	nested = irq_settings_is_nested_thread(desc);
1457399b5da2SThomas Gleixner 	if (nested) {
1458b6873807SSebastian Andrzej Siewior 		if (!new->thread_fn) {
1459b6873807SSebastian Andrzej Siewior 			ret = -EINVAL;
1460b6873807SSebastian Andrzej Siewior 			goto out_mput;
1461b6873807SSebastian Andrzej Siewior 		}
1462399b5da2SThomas Gleixner 		/*
1463399b5da2SThomas Gleixner 		 * Replace the primary handler, which was provided by
1464399b5da2SThomas Gleixner 		 * the driver for non-nested interrupt handling, with the
1465399b5da2SThomas Gleixner 		 * dummy function which warns when called.
1466399b5da2SThomas Gleixner 		 */
1467399b5da2SThomas Gleixner 		new->handler = irq_nested_primary_handler;
14688d32a307SThomas Gleixner 	} else {
14692a1d3ab8SThomas Gleixner 		if (irq_settings_can_thread(desc)) {
14702a1d3ab8SThomas Gleixner 			ret = irq_setup_forced_threading(new);
14712a1d3ab8SThomas Gleixner 			if (ret)
14722a1d3ab8SThomas Gleixner 				goto out_mput;
14732a1d3ab8SThomas Gleixner 		}
1474399b5da2SThomas Gleixner 	}
1475399b5da2SThomas Gleixner 
1476399b5da2SThomas Gleixner 	/*
1477399b5da2SThomas Gleixner 	 * Create a handler thread when a thread function is supplied
1478399b5da2SThomas Gleixner 	 * and the interrupt does not nest into another interrupt
1479399b5da2SThomas Gleixner 	 * thread.
1480399b5da2SThomas Gleixner 	 */
1481399b5da2SThomas Gleixner 	if (new->thread_fn && !nested) {
14822a1d3ab8SThomas Gleixner 		ret = setup_irq_thread(new, irq, false);
14832a1d3ab8SThomas Gleixner 		if (ret)
1484b6873807SSebastian Andrzej Siewior 			goto out_mput;
14852a1d3ab8SThomas Gleixner 		if (new->secondary) {
14862a1d3ab8SThomas Gleixner 			ret = setup_irq_thread(new->secondary, irq, true);
14872a1d3ab8SThomas Gleixner 			if (ret)
14882a1d3ab8SThomas Gleixner 				goto out_thread;
1489b6873807SSebastian Andrzej Siewior 		}
14903aa551c9SThomas Gleixner 	}
14913aa551c9SThomas Gleixner 
14923aa551c9SThomas Gleixner 	/*
1493dc9b229aSThomas Gleixner 	 * Drivers are often written to work w/o knowledge about the
1494dc9b229aSThomas Gleixner 	 * underlying irq chip implementation, so a request for a
1495dc9b229aSThomas Gleixner 	 * threaded irq without a primary hard irq context handler
1496dc9b229aSThomas Gleixner 	 * requires the ONESHOT flag to be set. Some irq chips like
1497dc9b229aSThomas Gleixner 	 * MSI based interrupts are per se one shot safe. Check the
1498dc9b229aSThomas Gleixner 	 * chip flags, so we can avoid the unmask dance at the end of
1499dc9b229aSThomas Gleixner 	 * the threaded handler for those.
1500dc9b229aSThomas Gleixner 	 */
1501dc9b229aSThomas Gleixner 	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1502dc9b229aSThomas Gleixner 		new->flags &= ~IRQF_ONESHOT;
1503dc9b229aSThomas Gleixner 
150419d39a38SThomas Gleixner 	/*
150519d39a38SThomas Gleixner 	 * Protects against a concurrent __free_irq() call which might wait
1506519cc865SLukas Wunner 	 * for synchronize_hardirq() to complete without holding the optional
1507836557bdSLukas Wunner 	 * chip bus lock and desc->lock. Also protects against handing out
1508836557bdSLukas Wunner 	 * a recycled oneshot thread_mask bit while it's still in use by
1509836557bdSLukas Wunner 	 * its previous owner.
151019d39a38SThomas Gleixner 	 */
15119114014cSThomas Gleixner 	mutex_lock(&desc->request_mutex);
151219d39a38SThomas Gleixner 
151319d39a38SThomas Gleixner 	/*
151419d39a38SThomas Gleixner 	 * Acquire bus lock as the irq_request_resources() callback below
151519d39a38SThomas Gleixner 	 * might rely on the serialization or the magic power management
151619d39a38SThomas Gleixner 	 * functions which are abusing the irq_bus_lock() callback,
151719d39a38SThomas Gleixner 	 * functions which are abusing the irq_bus_lock() callback.
151819d39a38SThomas Gleixner 	chip_bus_lock(desc);
151919d39a38SThomas Gleixner 
152019d39a38SThomas Gleixner 	/* First installed action requests resources. */
152146e48e25SThomas Gleixner 	if (!desc->action) {
152246e48e25SThomas Gleixner 		ret = irq_request_resources(desc);
152346e48e25SThomas Gleixner 		if (ret) {
152446e48e25SThomas Gleixner 			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
152546e48e25SThomas Gleixner 			       new->name, irq, desc->irq_data.chip->name);
152619d39a38SThomas Gleixner 			goto out_bus_unlock;
152746e48e25SThomas Gleixner 		}
152846e48e25SThomas Gleixner 	}
15299114014cSThomas Gleixner 
1530dc9b229aSThomas Gleixner 	/*
15311da177e4SLinus Torvalds 	 * The following block of code has to be executed atomically
153219d39a38SThomas Gleixner 	 * protected against a concurrent interrupt and any of the other
153319d39a38SThomas Gleixner 	 * management calls which are not serialized via
153419d39a38SThomas Gleixner 	 * desc->request_mutex or the optional bus lock.
15351da177e4SLinus Torvalds 	 */
1536239007b8SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
1537f17c7545SIngo Molnar 	old_ptr = &desc->action;
1538f17c7545SIngo Molnar 	old = *old_ptr;
153906fcb0c6SIngo Molnar 	if (old) {
1540e76de9f8SThomas Gleixner 		/*
1541e76de9f8SThomas Gleixner 		 * Can't share interrupts unless both agree to and are
1542e76de9f8SThomas Gleixner 		 * the same type (level, edge, polarity). So both flag
15433cca53b0SThomas Gleixner 		 * fields must have IRQF_SHARED set and the bits which
15449d591eddSThomas Gleixner 		 * set the trigger type must match. Also all must
15459d591eddSThomas Gleixner 		 * agree on ONESHOT.
1546b525903cSJulien Thierry 		 * Interrupt lines used for NMIs cannot be shared.
1547e76de9f8SThomas Gleixner 		 */
15484f8413a3SMarc Zyngier 		unsigned int oldtype;
15494f8413a3SMarc Zyngier 
1550b525903cSJulien Thierry 		if (desc->istate & IRQS_NMI) {
1551b525903cSJulien Thierry 			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1552b525903cSJulien Thierry 				new->name, irq, desc->irq_data.chip->name);
1553b525903cSJulien Thierry 			ret = -EINVAL;
1554b525903cSJulien Thierry 			goto out_unlock;
1555b525903cSJulien Thierry 		}
1556b525903cSJulien Thierry 
15574f8413a3SMarc Zyngier 		/*
15584f8413a3SMarc Zyngier 		 * If nobody set the configuration before, inherit
15594f8413a3SMarc Zyngier 		 * the one provided by the requester.
15604f8413a3SMarc Zyngier 		 */
15614f8413a3SMarc Zyngier 		if (irqd_trigger_type_was_set(&desc->irq_data)) {
15624f8413a3SMarc Zyngier 			oldtype = irqd_get_trigger_type(&desc->irq_data);
15634f8413a3SMarc Zyngier 		} else {
15644f8413a3SMarc Zyngier 			oldtype = new->flags & IRQF_TRIGGER_MASK;
15654f8413a3SMarc Zyngier 			irqd_set_trigger_type(&desc->irq_data, oldtype);
15664f8413a3SMarc Zyngier 		}
1567382bd4deSHans de Goede 
15683cca53b0SThomas Gleixner 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
1569382bd4deSHans de Goede 		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1570f5d89470SThomas Gleixner 		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
1571f5163427SDimitri Sivanich 			goto mismatch;
1572f5163427SDimitri Sivanich 
1573f5163427SDimitri Sivanich 		/* All handlers must agree on per-cpuness */
15743cca53b0SThomas Gleixner 		if ((old->flags & IRQF_PERCPU) !=
15753cca53b0SThomas Gleixner 		    (new->flags & IRQF_PERCPU))
1576f5163427SDimitri Sivanich 			goto mismatch;
15771da177e4SLinus Torvalds 
15781da177e4SLinus Torvalds 		/* add new interrupt at end of irq queue */
15791da177e4SLinus Torvalds 		do {
158052abb700SThomas Gleixner 			/*
158152abb700SThomas Gleixner 			 * Or all existing action->thread_mask bits,
158252abb700SThomas Gleixner 			 * so we can find the next zero bit for this
158352abb700SThomas Gleixner 			 * new action.
158452abb700SThomas Gleixner 			 */
1585b5faba21SThomas Gleixner 			thread_mask |= old->thread_mask;
1586f17c7545SIngo Molnar 			old_ptr = &old->next;
1587f17c7545SIngo Molnar 			old = *old_ptr;
15881da177e4SLinus Torvalds 		} while (old);
15891da177e4SLinus Torvalds 		shared = 1;
15901da177e4SLinus Torvalds 	}
15911da177e4SLinus Torvalds 
1592b5faba21SThomas Gleixner 	/*
159352abb700SThomas Gleixner 	 * Setup the thread mask for this irqaction for ONESHOT. For
159452abb700SThomas Gleixner 	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
159552abb700SThomas Gleixner 	 * conditional in irq_wake_thread().
1596b5faba21SThomas Gleixner 	 */
159752abb700SThomas Gleixner 	if (new->flags & IRQF_ONESHOT) {
159852abb700SThomas Gleixner 		/*
159952abb700SThomas Gleixner 		 * Unlikely to have 32 (or 64 on 64-bit) irqs sharing one line,
160052abb700SThomas Gleixner 		 * but who knows.
160152abb700SThomas Gleixner 		 */
160252abb700SThomas Gleixner 		if (thread_mask == ~0UL) {
1603b5faba21SThomas Gleixner 			ret = -EBUSY;
1604cba4235eSThomas Gleixner 			goto out_unlock;
1605b5faba21SThomas Gleixner 		}
160652abb700SThomas Gleixner 		/*
160752abb700SThomas Gleixner 		 * The thread_mask for the action is or'ed to
160852abb700SThomas Gleixner 		 * desc->threads_oneshot to indicate that the
160952abb700SThomas Gleixner 		 * IRQF_ONESHOT thread handler has been woken, but not
161052abb700SThomas Gleixner 		 * yet finished. The bit is cleared when a thread
161152abb700SThomas Gleixner 		 * completes. When all threads of a shared interrupt
161252abb700SThomas Gleixner 		 * line have completed desc->threads_active becomes
161352abb700SThomas Gleixner 		 * zero and the interrupt line is unmasked. See
161452abb700SThomas Gleixner 		 * handle.c:irq_wake_thread() for further information.
161552abb700SThomas Gleixner 		 *
161652abb700SThomas Gleixner 		 * If no thread is woken by primary (hard irq context)
161752abb700SThomas Gleixner 		 * interrupt handlers, then desc->threads_active is
161852abb700SThomas Gleixner 		 * also checked for zero to unmask the irq line in the
161952abb700SThomas Gleixner 		 * affected hard irq flow handlers
162052abb700SThomas Gleixner 		 * (handle_[fasteoi|level]_irq).
162152abb700SThomas Gleixner 		 *
162252abb700SThomas Gleixner 		 * The new action gets the first zero bit of
162352abb700SThomas Gleixner 		 * thread_mask assigned. See the loop above which or's
162452abb700SThomas Gleixner 		 * all existing action->thread_mask bits.
162552abb700SThomas Gleixner 		 */
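		/*
		 * Example: with two oneshot actions already installed,
		 * thread_mask accumulated above is 0b011; ffz() returns 2,
		 * so the new action gets thread_mask 0b100 (1UL << 2).
		 */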
1626ffc661c9SRasmus Villemoes 		new->thread_mask = 1UL << ffz(thread_mask);
16271c6c6952SThomas Gleixner 
1628dc9b229aSThomas Gleixner 	} else if (new->handler == irq_default_primary_handler &&
1629dc9b229aSThomas Gleixner 		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
16301c6c6952SThomas Gleixner 		/*
16311c6c6952SThomas Gleixner 		 * The interrupt was requested with handler = NULL, so
16321c6c6952SThomas Gleixner 		 * we use the default primary handler for it. But it
16331c6c6952SThomas Gleixner 		 * does not have the oneshot flag set. In combination
16341c6c6952SThomas Gleixner 		 * with level interrupts this is deadly, because the
16351c6c6952SThomas Gleixner 		 * default primary handler just wakes the thread, then
16361c6c6952SThomas Gleixner 		 * the irq line is reenabled, but the device still
16371c6c6952SThomas Gleixner 		 * has the level irq asserted. Rinse and repeat....
16381c6c6952SThomas Gleixner 		 *
16391c6c6952SThomas Gleixner 		 * While this works for edge type interrupts, we play
16401c6c6952SThomas Gleixner 		 * it safe and reject unconditionally because we can't
16411c6c6952SThomas Gleixner 		 * say for sure which type this interrupt really
16421c6c6952SThomas Gleixner 		 * has. The type flags are unreliable as the
16431c6c6952SThomas Gleixner 		 * underlying chip implementation can override them.
16441c6c6952SThomas Gleixner 		 */
1645025af39bSLuca Ceresoli 		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1646025af39bSLuca Ceresoli 		       new->name, irq);
16471c6c6952SThomas Gleixner 		ret = -EINVAL;
1648cba4235eSThomas Gleixner 		goto out_unlock;
164952abb700SThomas Gleixner 	}
1650b5faba21SThomas Gleixner 
16511da177e4SLinus Torvalds 	if (!shared) {
16523aa551c9SThomas Gleixner 		init_waitqueue_head(&desc->wait_for_threads);
16533aa551c9SThomas Gleixner 
165482736f4dSUwe Kleine-König 		/* Setup the type (level, edge, polarity) if configured: */
165582736f4dSUwe Kleine-König 		if (new->flags & IRQF_TRIGGER_MASK) {
1656a1ff541aSJiang Liu 			ret = __irq_set_trigger(desc,
1657f2b662daSDavid Brownell 						new->flags & IRQF_TRIGGER_MASK);
165882736f4dSUwe Kleine-König 
165919d39a38SThomas Gleixner 			if (ret)
1660cba4235eSThomas Gleixner 				goto out_unlock;
1661091738a2SThomas Gleixner 		}
1662f75d222bSAhmed S. Darwish 
1663c942cee4SThomas Gleixner 		/*
1664c942cee4SThomas Gleixner 		 * Activate the interrupt. That activation must happen
1665c942cee4SThomas Gleixner 		 * independently of IRQ_NOAUTOEN. request_irq() can fail
1666c942cee4SThomas Gleixner 		 * and the callers are supposed to handle
1667c942cee4SThomas Gleixner 		 * that. enable_irq() of an interrupt requested with
1668c942cee4SThomas Gleixner 		 * IRQ_NOAUTOEN is not supposed to fail. The activation
1669c942cee4SThomas Gleixner 		 * keeps it in shutdown mode; it merely associates
1670c942cee4SThomas Gleixner 		 * resources if necessary and if that's not possible it
1671c942cee4SThomas Gleixner 		 * fails. Interrupts which are in managed shutdown mode
1672c942cee4SThomas Gleixner 		 * will simply ignore that activation request.
1673c942cee4SThomas Gleixner 		 */
1674c942cee4SThomas Gleixner 		ret = irq_activate(desc);
1675c942cee4SThomas Gleixner 		if (ret)
1676c942cee4SThomas Gleixner 			goto out_unlock;
1677c942cee4SThomas Gleixner 
1678009b4c3bSThomas Gleixner 		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
167932f4125eSThomas Gleixner 				  IRQS_ONESHOT | IRQS_WAITING);
168032f4125eSThomas Gleixner 		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
168194d39e1fSThomas Gleixner 
1682a005677bSThomas Gleixner 		if (new->flags & IRQF_PERCPU) {
1683a005677bSThomas Gleixner 			irqd_set(&desc->irq_data, IRQD_PER_CPU);
1684a005677bSThomas Gleixner 			irq_settings_set_per_cpu(desc);
1685a005677bSThomas Gleixner 		}
16866a58fb3bSThomas Gleixner 
1687b25c340cSThomas Gleixner 		if (new->flags & IRQF_ONESHOT)
16883d67baecSThomas Gleixner 			desc->istate |= IRQS_ONESHOT;
1689b25c340cSThomas Gleixner 
16902e051552SThomas Gleixner 		/* Exclude IRQ from balancing if requested */
16912e051552SThomas Gleixner 		if (new->flags & IRQF_NOBALANCING) {
16922e051552SThomas Gleixner 			irq_settings_set_no_balancing(desc);
16932e051552SThomas Gleixner 			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
16942e051552SThomas Gleixner 		}
16952e051552SThomas Gleixner 
1696cbe16f35SBarry Song 		if (!(new->flags & IRQF_NO_AUTOEN) &&
1697cbe16f35SBarry Song 		    irq_settings_can_autoenable(desc)) {
16984cde9c6bSThomas Gleixner 			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
169904c848d3SThomas Gleixner 		} else {
170004c848d3SThomas Gleixner 			/*
170104c848d3SThomas Gleixner 			 * Shared interrupts do not go well with disabling
170204c848d3SThomas Gleixner 			 * auto-enable. The sharing partner might request
170304c848d3SThomas Gleixner 			 * the irq while it's still disabled and then wait for
170404c848d3SThomas Gleixner 			 * interrupts forever.
170504c848d3SThomas Gleixner 			 */
170604c848d3SThomas Gleixner 			WARN_ON_ONCE(new->flags & IRQF_SHARED);
1707e76de9f8SThomas Gleixner 			/* Undo nested disables: */
1708e76de9f8SThomas Gleixner 			desc->depth = 1;
170904c848d3SThomas Gleixner 		}
171018404756SMax Krasnyansky 
1711876dbd4cSThomas Gleixner 	} else if (new->flags & IRQF_TRIGGER_MASK) {
1712876dbd4cSThomas Gleixner 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
17137ee7e87dSThomas Gleixner 		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1714876dbd4cSThomas Gleixner 
1715876dbd4cSThomas Gleixner 		if (nmsk != omsk)
1716876dbd4cSThomas Gleixner 			/* hope the handler works with the current trigger mode */
1717a395d6a7SJoe Perches 			pr_warn("irq %d uses trigger mode %u; requested %u\n",
17187ee7e87dSThomas Gleixner 				irq, omsk, nmsk);
171994d39e1fSThomas Gleixner 	}
172082736f4dSUwe Kleine-König 
1721f17c7545SIngo Molnar 	*old_ptr = new;
172282736f4dSUwe Kleine-König 
1723cab303beSThomas Gleixner 	irq_pm_install_action(desc, new);
1724cab303beSThomas Gleixner 
17258528b0f1SLinus Torvalds 	/* Reset broken irq detection when installing new handler */
17268528b0f1SLinus Torvalds 	desc->irq_count = 0;
17278528b0f1SLinus Torvalds 	desc->irqs_unhandled = 0;
17281adb0850SThomas Gleixner 
17291adb0850SThomas Gleixner 	/*
17301adb0850SThomas Gleixner 	 * Check whether we disabled the irq via the spurious handler
17311adb0850SThomas Gleixner 	 * before. Reenable it and give it another chance.
17321adb0850SThomas Gleixner 	 */
17337acdd53eSThomas Gleixner 	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
17347acdd53eSThomas Gleixner 		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
173579ff1cdaSJiang Liu 		__enable_irq(desc);
17361adb0850SThomas Gleixner 	}
17371adb0850SThomas Gleixner 
1738239007b8SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
17393a90795eSThomas Gleixner 	chip_bus_sync_unlock(desc);
17409114014cSThomas Gleixner 	mutex_unlock(&desc->request_mutex);
17411da177e4SLinus Torvalds 
1742b2d3d61aSDaniel Lezcano 	irq_setup_timings(desc, new);
1743b2d3d61aSDaniel Lezcano 
174469ab8494SThomas Gleixner 	/*
174569ab8494SThomas Gleixner 	 * Strictly no need to wake it up, but hung_task complains
174669ab8494SThomas Gleixner 	 * when no hard interrupt wakes the thread up.
174769ab8494SThomas Gleixner 	 */
174869ab8494SThomas Gleixner 	if (new->thread)
174969ab8494SThomas Gleixner 		wake_up_process(new->thread);
17502a1d3ab8SThomas Gleixner 	if (new->secondary)
17512a1d3ab8SThomas Gleixner 		wake_up_process(new->secondary->thread);
175269ab8494SThomas Gleixner 
17532c6927a3SYinghai Lu 	register_irq_proc(irq, desc);
17541da177e4SLinus Torvalds 	new->dir = NULL;
17551da177e4SLinus Torvalds 	register_handler_proc(irq, new);
17561da177e4SLinus Torvalds 	return 0;
1757f5163427SDimitri Sivanich 
1758f5163427SDimitri Sivanich mismatch:
17593cca53b0SThomas Gleixner 	if (!(new->flags & IRQF_PROBE_SHARED)) {
176097fd75b7SAndrew Morton 		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1761f5d89470SThomas Gleixner 		       irq, new->flags, new->name, old->flags, old->name);
1762f5d89470SThomas Gleixner #ifdef CONFIG_DEBUG_SHIRQ
1763f5163427SDimitri Sivanich 		dump_stack();
17643f050447SAlan Cox #endif
1765f5d89470SThomas Gleixner 	}
17663aa551c9SThomas Gleixner 	ret = -EBUSY;
17673aa551c9SThomas Gleixner 
1768cba4235eSThomas Gleixner out_unlock:
17691c389795SDan Carpenter 	raw_spin_unlock_irqrestore(&desc->lock, flags);
17703b8249e7SThomas Gleixner 
177146e48e25SThomas Gleixner 	if (!desc->action)
177246e48e25SThomas Gleixner 		irq_release_resources(desc);
177319d39a38SThomas Gleixner out_bus_unlock:
177419d39a38SThomas Gleixner 	chip_bus_sync_unlock(desc);
17759114014cSThomas Gleixner 	mutex_unlock(&desc->request_mutex);
17769114014cSThomas Gleixner 
17773aa551c9SThomas Gleixner out_thread:
17783aa551c9SThomas Gleixner 	if (new->thread) {
17793aa551c9SThomas Gleixner 		struct task_struct *t = new->thread;
17803aa551c9SThomas Gleixner 
17813aa551c9SThomas Gleixner 		new->thread = NULL;
17823aa551c9SThomas Gleixner 		kthread_stop(t);
17833aa551c9SThomas Gleixner 		put_task_struct(t);
17843aa551c9SThomas Gleixner 	}
17852a1d3ab8SThomas Gleixner 	if (new->secondary && new->secondary->thread) {
17862a1d3ab8SThomas Gleixner 		struct task_struct *t = new->secondary->thread;
17872a1d3ab8SThomas Gleixner 
17882a1d3ab8SThomas Gleixner 		new->secondary->thread = NULL;
17892a1d3ab8SThomas Gleixner 		kthread_stop(t);
17902a1d3ab8SThomas Gleixner 		put_task_struct(t);
17912a1d3ab8SThomas Gleixner 	}
1792b6873807SSebastian Andrzej Siewior out_mput:
1793b6873807SSebastian Andrzej Siewior 	module_put(desc->owner);
17943aa551c9SThomas Gleixner 	return ret;
17951da177e4SLinus Torvalds }
17961da177e4SLinus Torvalds 
1797cbf94f06SMagnus Damm /*
1798cbf94f06SMagnus Damm  * Internal function to unregister an irqaction - used to free
1799cbf94f06SMagnus Damm  * regular and special interrupts that are part of the architecture.
18001da177e4SLinus Torvalds  */
180183ac4ca9SUwe Kleine König static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
18021da177e4SLinus Torvalds {
180383ac4ca9SUwe Kleine König 	unsigned irq = desc->irq_data.irq;
1804f17c7545SIngo Molnar 	struct irqaction *action, **action_ptr;
18051da177e4SLinus Torvalds 	unsigned long flags;
18061da177e4SLinus Torvalds 
1807ae88a23bSIngo Molnar 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
18087d94f7caSYinghai Lu 
18099114014cSThomas Gleixner 	mutex_lock(&desc->request_mutex);
1810abc7e40cSThomas Gleixner 	chip_bus_lock(desc);
1811239007b8SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
1812ae88a23bSIngo Molnar 
1813ae88a23bSIngo Molnar 	/*
1814ae88a23bSIngo Molnar 	 * There can be multiple actions per IRQ descriptor, find the right
1815ae88a23bSIngo Molnar 	 * one based on the dev_id:
1816ae88a23bSIngo Molnar 	 */
1817f17c7545SIngo Molnar 	action_ptr = &desc->action;
18181da177e4SLinus Torvalds 	for (;;) {
1819f17c7545SIngo Molnar 		action = *action_ptr;
18201da177e4SLinus Torvalds 
1821ae88a23bSIngo Molnar 		if (!action) {
1822ae88a23bSIngo Molnar 			WARN(1, "Trying to free already-free IRQ %d\n", irq);
1823239007b8SThomas Gleixner 			raw_spin_unlock_irqrestore(&desc->lock, flags);
1824abc7e40cSThomas Gleixner 			chip_bus_sync_unlock(desc);
182519d39a38SThomas Gleixner 			mutex_unlock(&desc->request_mutex);
1826f21cfb25SMagnus Damm 			return NULL;
1827ae88a23bSIngo Molnar 		}
18281da177e4SLinus Torvalds 
18298316e381SIngo Molnar 		if (action->dev_id == dev_id)
1830ae88a23bSIngo Molnar 			break;
1831f17c7545SIngo Molnar 		action_ptr = &action->next;
1832ae88a23bSIngo Molnar 	}
1833ae88a23bSIngo Molnar 
1834ae88a23bSIngo Molnar 	/* Found it - now remove it from the list of entries: */
1835f17c7545SIngo Molnar 	*action_ptr = action->next;
1836dbce706eSPaolo 'Blaisorblade' Giarrusso 
1837cab303beSThomas Gleixner 	irq_pm_remove_action(desc, action);
1838cab303beSThomas Gleixner 
1839ae88a23bSIngo Molnar 	/* If this was the last handler, shut down the IRQ line: */
1840c1bacbaeSThomas Gleixner 	if (!desc->action) {
1841e9849777SThomas Gleixner 		irq_settings_clr_disable_unlazy(desc);
18424001d8e8SThomas Gleixner 		/* Only shutdown. Deactivate after synchronize_hardirq() */
184346999238SThomas Gleixner 		irq_shutdown(desc);
1844c1bacbaeSThomas Gleixner 	}
18453aa551c9SThomas Gleixner 
1846e7a297b0SPeter P Waskiewicz Jr #ifdef CONFIG_SMP
1847e7a297b0SPeter P Waskiewicz Jr 	/* make sure affinity_hint is cleaned up */
1848e7a297b0SPeter P Waskiewicz Jr 	if (WARN_ON_ONCE(desc->affinity_hint))
1849e7a297b0SPeter P Waskiewicz Jr 		desc->affinity_hint = NULL;
1850e7a297b0SPeter P Waskiewicz Jr #endif
1851e7a297b0SPeter P Waskiewicz Jr 
1852239007b8SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
185319d39a38SThomas Gleixner 	/*
185419d39a38SThomas Gleixner 	 * Drop bus_lock here so the changes which were done in the chip
185519d39a38SThomas Gleixner 	 * callbacks above are synced out to the irq chips which hang
1856519cc865SLukas Wunner 	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
185719d39a38SThomas Gleixner 	 *
185819d39a38SThomas Gleixner 	 * Aside from that, the bus_lock can also be taken from the threaded
185919d39a38SThomas Gleixner 	 * handler in irq_finalize_oneshot() which results in a deadlock
1860519cc865SLukas Wunner 	 * because kthread_stop() would wait forever for the thread to
186119d39a38SThomas Gleixner 	 * complete, which is blocked on the bus lock.
186219d39a38SThomas Gleixner 	 *
186319d39a38SThomas Gleixner 	 * The still held desc->request_mutex protects against a
186419d39a38SThomas Gleixner 	 * concurrent request_irq() of this irq so the release of resources
186519d39a38SThomas Gleixner 	 * and timing data is properly serialized.
186619d39a38SThomas Gleixner 	 */
1867abc7e40cSThomas Gleixner 	chip_bus_sync_unlock(desc);
1868ae88a23bSIngo Molnar 
18691da177e4SLinus Torvalds 	unregister_handler_proc(irq, action);
18701da177e4SLinus Torvalds 
187162e04686SThomas Gleixner 	/*
187262e04686SThomas Gleixner 	 * Make sure it's not being used on another CPU and if the chip
187362e04686SThomas Gleixner 	 * supports it also make sure that there is no (not yet serviced)
187462e04686SThomas Gleixner 	 * interrupt in flight at the hardware level.
187562e04686SThomas Gleixner 	 */
187662e04686SThomas Gleixner 	__synchronize_hardirq(desc, true);
1877ae88a23bSIngo Molnar 
18781d99493bSDavid Woodhouse #ifdef CONFIG_DEBUG_SHIRQ
18791d99493bSDavid Woodhouse 	/*
1880ae88a23bSIngo Molnar 	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1881ae88a23bSIngo Molnar 	 * event to happen even now that it's being freed, so let's make
1882ae88a23bSIngo Molnar 	 * sure that is so by doing an extra call to the handler ....
1883ae88a23bSIngo Molnar 	 *
1884ae88a23bSIngo Molnar 	 * ( We do this after actually deregistering it, to make sure that a
18850a13ec0bSJonathan Neuschäfer 	 *   'real' IRQ doesn't run in parallel with our fake. )
18861d99493bSDavid Woodhouse 	 */
18871d99493bSDavid Woodhouse 	if (action->flags & IRQF_SHARED) {
18881d99493bSDavid Woodhouse 		local_irq_save(flags);
18891d99493bSDavid Woodhouse 		action->handler(irq, dev_id);
18901d99493bSDavid Woodhouse 		local_irq_restore(flags);
18911d99493bSDavid Woodhouse 	}
18921d99493bSDavid Woodhouse #endif
18932d860ad7SLinus Torvalds 
1894519cc865SLukas Wunner 	/*
1895519cc865SLukas Wunner 	 * The action has already been removed above, but the thread writes
1896519cc865SLukas Wunner 	 * its oneshot mask bit when it completes. However, request_mutex is
1897519cc865SLukas Wunner 	 * held across this, which prevents __setup_irq() from handing out
1898519cc865SLukas Wunner 	 * the same bit to a newly requested action.
1899519cc865SLukas Wunner 	 */
19002d860ad7SLinus Torvalds 	if (action->thread) {
19012d860ad7SLinus Torvalds 		kthread_stop(action->thread);
19022d860ad7SLinus Torvalds 		put_task_struct(action->thread);
19032a1d3ab8SThomas Gleixner 		if (action->secondary && action->secondary->thread) {
19042a1d3ab8SThomas Gleixner 			kthread_stop(action->secondary->thread);
19052a1d3ab8SThomas Gleixner 			put_task_struct(action->secondary->thread);
19062a1d3ab8SThomas Gleixner 		}
19072d860ad7SLinus Torvalds 	}
19082d860ad7SLinus Torvalds 
190919d39a38SThomas Gleixner 	/* Last action releases resources */
19102343877fSThomas Gleixner 	if (!desc->action) {
191119d39a38SThomas Gleixner 		/*
191219d39a38SThomas Gleixner 		 * Reacquire bus lock as irq_release_resources() might
191319d39a38SThomas Gleixner 		 * require it to deallocate resources over the slow bus.
191419d39a38SThomas Gleixner 		 */
191519d39a38SThomas Gleixner 		chip_bus_lock(desc);
19164001d8e8SThomas Gleixner 		/*
19174001d8e8SThomas Gleixner 		 * There is no interrupt on the fly anymore. Deactivate it
19184001d8e8SThomas Gleixner 		 * completely.
19194001d8e8SThomas Gleixner 		 */
19204001d8e8SThomas Gleixner 		raw_spin_lock_irqsave(&desc->lock, flags);
19214001d8e8SThomas Gleixner 		irq_domain_deactivate_irq(&desc->irq_data);
19224001d8e8SThomas Gleixner 		raw_spin_unlock_irqrestore(&desc->lock, flags);
19234001d8e8SThomas Gleixner 
192446e48e25SThomas Gleixner 		irq_release_resources(desc);
192519d39a38SThomas Gleixner 		chip_bus_sync_unlock(desc);
19262343877fSThomas Gleixner 		irq_remove_timings(desc);
19272343877fSThomas Gleixner 	}
192846e48e25SThomas Gleixner 
19299114014cSThomas Gleixner 	mutex_unlock(&desc->request_mutex);
19309114014cSThomas Gleixner 
1931be45beb2SJon Hunter 	irq_chip_pm_put(&desc->irq_data);
1932b6873807SSebastian Andrzej Siewior 	module_put(desc->owner);
19332a1d3ab8SThomas Gleixner 	kfree(action->secondary);
1934f21cfb25SMagnus Damm 	return action;
1935f21cfb25SMagnus Damm }
19361da177e4SLinus Torvalds 
19371da177e4SLinus Torvalds /**
1938f21cfb25SMagnus Damm  *	free_irq - free an interrupt allocated with request_irq
19391da177e4SLinus Torvalds  *	@irq: Interrupt line to free
19401da177e4SLinus Torvalds  *	@dev_id: Device identity to free
19411da177e4SLinus Torvalds  *
19421da177e4SLinus Torvalds  *	Remove an interrupt handler. The handler is removed and if the
19431da177e4SLinus Torvalds  *	interrupt line is no longer in use by any driver it is disabled.
19441da177e4SLinus Torvalds  *	On a shared IRQ the caller must ensure the interrupt is disabled
19451da177e4SLinus Torvalds  *	on the card it drives before calling this function. The function
19461da177e4SLinus Torvalds  *	does not return until any executing interrupts for this IRQ
19471da177e4SLinus Torvalds  *	have completed.
19481da177e4SLinus Torvalds  *
19491da177e4SLinus Torvalds  *	This function must not be called from interrupt context.
195025ce4be7SChristoph Hellwig  *
195125ce4be7SChristoph Hellwig  *	Returns the devname argument passed to request_irq.
19521da177e4SLinus Torvalds  */
195325ce4be7SChristoph Hellwig const void *free_irq(unsigned int irq, void *dev_id)
19541da177e4SLinus Torvalds {
195570aedd24SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
195625ce4be7SChristoph Hellwig 	struct irqaction *action;
195725ce4be7SChristoph Hellwig 	const char *devname;
195870aedd24SThomas Gleixner 
195931d9d9b6SMarc Zyngier 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
196025ce4be7SChristoph Hellwig 		return NULL;
196170aedd24SThomas Gleixner 
1962cd7eab44SBen Hutchings #ifdef CONFIG_SMP
1963cd7eab44SBen Hutchings 	if (WARN_ON(desc->affinity_notify))
1964cd7eab44SBen Hutchings 		desc->affinity_notify = NULL;
1965cd7eab44SBen Hutchings #endif
1966cd7eab44SBen Hutchings 
196783ac4ca9SUwe Kleine König 	action = __free_irq(desc, dev_id);
19682827a418SAlexandru Moise 
19692827a418SAlexandru Moise 	if (!action)
19702827a418SAlexandru Moise 		return NULL;
19712827a418SAlexandru Moise 
197225ce4be7SChristoph Hellwig 	devname = action->name;
197325ce4be7SChristoph Hellwig 	kfree(action);
197425ce4be7SChristoph Hellwig 	return devname;
19751da177e4SLinus Torvalds }
19761da177e4SLinus Torvalds EXPORT_SYMBOL(free_irq);
19771da177e4SLinus Torvalds 
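/*
 * A teardown sketch using the devname return value documented above,
 * reusing the hypothetical "foo" device from the earlier sketch.
 */
#if 0
#include <linux/printk.h>

static void foo_release(struct foo_device *foo)
{
	const void *name = free_irq(foo->irq, foo);

	/* "name" is the devname string passed to request_threaded_irq() */
	pr_debug("released irq %d (%s)\n", foo->irq, (const char *)name);
}
#endif
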
1978b525903cSJulien Thierry /* This function must be called with desc->lock held */
1979b525903cSJulien Thierry static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
1980b525903cSJulien Thierry {
1981b525903cSJulien Thierry 	const char *devname = NULL;
1982b525903cSJulien Thierry 
1983b525903cSJulien Thierry 	desc->istate &= ~IRQS_NMI;
1984b525903cSJulien Thierry 
1985b525903cSJulien Thierry 	if (!WARN_ON(desc->action == NULL)) {
1986b525903cSJulien Thierry 		irq_pm_remove_action(desc, desc->action);
1987b525903cSJulien Thierry 		devname = desc->action->name;
1988b525903cSJulien Thierry 		unregister_handler_proc(irq, desc->action);
1989b525903cSJulien Thierry 
1990b525903cSJulien Thierry 		kfree(desc->action);
1991b525903cSJulien Thierry 		desc->action = NULL;
1992b525903cSJulien Thierry 	}
1993b525903cSJulien Thierry 
1994b525903cSJulien Thierry 	irq_settings_clr_disable_unlazy(desc);
19954001d8e8SThomas Gleixner 	irq_shutdown_and_deactivate(desc);
1996b525903cSJulien Thierry 
1997b525903cSJulien Thierry 	irq_release_resources(desc);
1998b525903cSJulien Thierry 
1999b525903cSJulien Thierry 	irq_chip_pm_put(&desc->irq_data);
2000b525903cSJulien Thierry 	module_put(desc->owner);
2001b525903cSJulien Thierry 
2002b525903cSJulien Thierry 	return devname;
2003b525903cSJulien Thierry }
2004b525903cSJulien Thierry 
2005b525903cSJulien Thierry const void *free_nmi(unsigned int irq, void *dev_id)
2006b525903cSJulien Thierry {
2007b525903cSJulien Thierry 	struct irq_desc *desc = irq_to_desc(irq);
2008b525903cSJulien Thierry 	unsigned long flags;
2009b525903cSJulien Thierry 	const void *devname;
2010b525903cSJulien Thierry 
2011b525903cSJulien Thierry 	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
2012b525903cSJulien Thierry 		return NULL;
2013b525903cSJulien Thierry 
2014b525903cSJulien Thierry 	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2015b525903cSJulien Thierry 		return NULL;
2016b525903cSJulien Thierry 
2017b525903cSJulien Thierry 	/* NMI still enabled */
2018b525903cSJulien Thierry 	if (WARN_ON(desc->depth == 0))
2019b525903cSJulien Thierry 		disable_nmi_nosync(irq);
2020b525903cSJulien Thierry 
2021b525903cSJulien Thierry 	raw_spin_lock_irqsave(&desc->lock, flags);
2022b525903cSJulien Thierry 
2023b525903cSJulien Thierry 	irq_nmi_teardown(desc);
2024b525903cSJulien Thierry 	devname = __cleanup_nmi(irq, desc);
2025b525903cSJulien Thierry 
2026b525903cSJulien Thierry 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2027b525903cSJulien Thierry 
2028b525903cSJulien Thierry 	return devname;
2029b525903cSJulien Thierry }
2030b525903cSJulien Thierry 
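/*
 * A sketch of the split primary/threaded handler pattern described in the
 * kernel-doc below, assuming a hypothetical shared "qux" device; register
 * names and offsets are made up for illustration.
 */
#if 0
#include <linux/interrupt.h>
#include <linux/io.h>

#define QUX_STATUS	0x00		/* hypothetical register layout */
#define QUX_IRQ_EN	0x04
#define QUX_PENDING	0x01

struct qux_device {
	int irq;
	void __iomem *regs;
};

static irqreturn_t qux_hardirq(int irq, void *dev_id)
{
	struct qux_device *qux = dev_id;

	/* Shared line: report IRQ_NONE if our device did not raise it. */
	if (!(readl(qux->regs + QUX_STATUS) & QUX_PENDING))
		return IRQ_NONE;

	writel(0, qux->regs + QUX_IRQ_EN);	/* quiesce the device */
	return IRQ_WAKE_THREAD;			/* run qux_thread_fn */
}

static irqreturn_t qux_thread_fn(int irq, void *dev_id)
{
	struct qux_device *qux = dev_id;

	/* Slow, possibly sleeping work would go here. */
	writel(1, qux->regs + QUX_IRQ_EN);	/* re-arm the device */
	return IRQ_HANDLED;
}

static int qux_request(struct qux_device *qux)
{
	return request_threaded_irq(qux->irq, qux_hardirq, qux_thread_fn,
				    IRQF_SHARED, "qux", qux);
}
#endif
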
20311da177e4SLinus Torvalds /**
20323aa551c9SThomas Gleixner  *	request_threaded_irq - allocate an interrupt line
20331da177e4SLinus Torvalds  *	@irq: Interrupt line to allocate
20343aa551c9SThomas Gleixner  *	@handler: Function to be called when the IRQ occurs.
20353aa551c9SThomas Gleixner  *		  Primary handler for threaded interrupts
2036b25c340cSThomas Gleixner  *		  If NULL and thread_fn != NULL the default
2037b25c340cSThomas Gleixner  *		  primary handler is installed
20383aa551c9SThomas Gleixner  *	@thread_fn: Function called from the irq handler thread
20393aa551c9SThomas Gleixner  *		    If NULL, no irq thread is created
20401da177e4SLinus Torvalds  *	@irqflags: Interrupt type flags
20411da177e4SLinus Torvalds  *	@devname: An ASCII name for the claiming device
20421da177e4SLinus Torvalds  *	@dev_id: A cookie passed back to the handler function
20431da177e4SLinus Torvalds  *
20441da177e4SLinus Torvalds  *	This call allocates interrupt resources and enables the
20451da177e4SLinus Torvalds  *	interrupt line and IRQ handling. From the point this
20461da177e4SLinus Torvalds  *	call is made your handler function may be invoked. Since
20471da177e4SLinus Torvalds  *	your handler function must clear any interrupt the board
20481da177e4SLinus Torvalds  *	raises, you must take care both to initialise your hardware
20491da177e4SLinus Torvalds  *	and to set up the interrupt handler in the right order.
20501da177e4SLinus Torvalds  *
20513aa551c9SThomas Gleixner  *	If you want to set up a threaded irq handler for your device
20526d21af4fSJavi Merino  *	then you need to supply @handler and @thread_fn. @handler is
20533aa551c9SThomas Gleixner  *	still called in hard interrupt context and has to check
20543aa551c9SThomas Gleixner  *	whether the interrupt originates from the device. If yes it
20553aa551c9SThomas Gleixner  *	needs to disable the interrupt on the device and return
205639a2eddbSSteven Rostedt  *	IRQ_WAKE_THREAD which will wake up the handler thread and run
20573aa551c9SThomas Gleixner  *	@thread_fn. This split handler design is necessary to support
20583aa551c9SThomas Gleixner  *	shared interrupts.
20593aa551c9SThomas Gleixner  *
20601da177e4SLinus Torvalds  *	Dev_id must be globally unique. Normally the address of the
20611da177e4SLinus Torvalds  *	device data structure is used as the cookie. Since the handler
20621da177e4SLinus Torvalds  *	receives this value it makes sense to use it.
20631da177e4SLinus Torvalds  *
20641da177e4SLinus Torvalds  *	If your interrupt is shared you must pass a non NULL dev_id
20651da177e4SLinus Torvalds  *	as this is required when freeing the interrupt.
20661da177e4SLinus Torvalds  *
20671da177e4SLinus Torvalds  *	Flags:
20681da177e4SLinus Torvalds  *
20693cca53b0SThomas Gleixner  *	IRQF_SHARED		Interrupt is shared
20700c5d1eb7SDavid Brownell  *	IRQF_TRIGGER_*		Specify active edge(s) or level
20711da177e4SLinus Torvalds  *
20721da177e4SLinus Torvalds  */
20733aa551c9SThomas Gleixner int request_threaded_irq(unsigned int irq, irq_handler_t handler,
20743aa551c9SThomas Gleixner 			 irq_handler_t thread_fn, unsigned long irqflags,
20753aa551c9SThomas Gleixner 			 const char *devname, void *dev_id)
20761da177e4SLinus Torvalds {
20771da177e4SLinus Torvalds 	struct irqaction *action;
207808678b08SYinghai Lu 	struct irq_desc *desc;
2079d3c60047SThomas Gleixner 	int retval;
20801da177e4SLinus Torvalds 
2081e237a551SChen Fan 	if (irq == IRQ_NOTCONNECTED)
2082e237a551SChen Fan 		return -ENOTCONN;
2083e237a551SChen Fan 
2084470c6623SDavid Brownell 	/*
20851da177e4SLinus Torvalds 	 * Sanity-check: shared interrupts must pass in a real dev-ID,
20861da177e4SLinus Torvalds 	 * otherwise we'll have trouble later trying to figure out
20871da177e4SLinus Torvalds 	 * which interrupt is which (messes up the interrupt freeing
20881da177e4SLinus Torvalds 	 * logic etc).
208917f48034SRafael J. Wysocki 	 *
2090cbe16f35SBarry Song 	 * Also, shared interrupts do not go well with disabling auto enable:
2091cbe16f35SBarry Song 	 * a second sharer might request the line while it is still disabled
2092cbe16f35SBarry Song 	 * and then wait for interrupts forever.
2093cbe16f35SBarry Song 	 *
209417f48034SRafael J. Wysocki 	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
209517f48034SRafael J. Wysocki 	 * it cannot be set along with IRQF_NO_SUSPEND.
20961da177e4SLinus Torvalds 	 */
209717f48034SRafael J. Wysocki 	if (((irqflags & IRQF_SHARED) && !dev_id) ||
2098cbe16f35SBarry Song 	    ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
209917f48034SRafael J. Wysocki 	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
210017f48034SRafael J. Wysocki 	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
21011da177e4SLinus Torvalds 		return -EINVAL;
21027d94f7caSYinghai Lu 
2103cb5bc832SYinghai Lu 	desc = irq_to_desc(irq);
21047d94f7caSYinghai Lu 	if (!desc)
21051da177e4SLinus Torvalds 		return -EINVAL;
21067d94f7caSYinghai Lu 
210731d9d9b6SMarc Zyngier 	if (!irq_settings_can_request(desc) ||
210831d9d9b6SMarc Zyngier 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
21096550c775SThomas Gleixner 		return -EINVAL;
2110b25c340cSThomas Gleixner 
2111b25c340cSThomas Gleixner 	if (!handler) {
2112b25c340cSThomas Gleixner 		if (!thread_fn)
21131da177e4SLinus Torvalds 			return -EINVAL;
2114b25c340cSThomas Gleixner 		handler = irq_default_primary_handler;
2115b25c340cSThomas Gleixner 	}
21161da177e4SLinus Torvalds 
211745535732SThomas Gleixner 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
21181da177e4SLinus Torvalds 	if (!action)
21191da177e4SLinus Torvalds 		return -ENOMEM;
21201da177e4SLinus Torvalds 
21211da177e4SLinus Torvalds 	action->handler = handler;
21223aa551c9SThomas Gleixner 	action->thread_fn = thread_fn;
21231da177e4SLinus Torvalds 	action->flags = irqflags;
21241da177e4SLinus Torvalds 	action->name = devname;
21251da177e4SLinus Torvalds 	action->dev_id = dev_id;
21261da177e4SLinus Torvalds 
2127be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
21284396f46cSShawn Lin 	if (retval < 0) {
21294396f46cSShawn Lin 		kfree(action);
2130be45beb2SJon Hunter 		return retval;
21314396f46cSShawn Lin 	}
2132be45beb2SJon Hunter 
2133d3c60047SThomas Gleixner 	retval = __setup_irq(irq, desc, action);
213470aedd24SThomas Gleixner 
21352a1d3ab8SThomas Gleixner 	if (retval) {
2136be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
21372a1d3ab8SThomas Gleixner 		kfree(action->secondary);
2138377bf1e4SAnton Vorontsov 		kfree(action);
21392a1d3ab8SThomas Gleixner 	}
2140377bf1e4SAnton Vorontsov 
21416d83f94dSThomas Gleixner #ifdef CONFIG_DEBUG_SHIRQ_FIXME
21426ce51c43SLuis Henriques 	if (!retval && (irqflags & IRQF_SHARED)) {
2143a304e1b8SDavid Woodhouse 		/*
2144a304e1b8SDavid Woodhouse 		 * It's a shared IRQ -- the driver ought to be prepared for it
2145a304e1b8SDavid Woodhouse 		 * to happen immediately, so let's make sure....
2146377bf1e4SAnton Vorontsov 		 * We disable the irq to make sure that a 'real' IRQ doesn't
2147377bf1e4SAnton Vorontsov 		 * run in parallel with our fake.
2148a304e1b8SDavid Woodhouse 		 */
2149a304e1b8SDavid Woodhouse 		unsigned long flags;
2150a304e1b8SDavid Woodhouse 
2151377bf1e4SAnton Vorontsov 		disable_irq(irq);
2152a304e1b8SDavid Woodhouse 		local_irq_save(flags);
2153377bf1e4SAnton Vorontsov 
2154a304e1b8SDavid Woodhouse 		handler(irq, dev_id);
2155377bf1e4SAnton Vorontsov 
2156a304e1b8SDavid Woodhouse 		local_irq_restore(flags);
2157377bf1e4SAnton Vorontsov 		enable_irq(irq);
2158a304e1b8SDavid Woodhouse 	}
2159a304e1b8SDavid Woodhouse #endif
21601da177e4SLinus Torvalds 	return retval;
21611da177e4SLinus Torvalds }
21623aa551c9SThomas Gleixner EXPORT_SYMBOL(request_threaded_irq);
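
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * requesting a threaded interrupt on a shared line, following the split
 * handler design described above. The device struct, register offsets
 * and names are hypothetical.
 */
struct my_example_dev {
	void __iomem *regs;
	unsigned int irq;
};

#define MY_EXAMPLE_STATUS	0x00	/* hypothetical status register */
#define MY_EXAMPLE_MASK		0x04	/* hypothetical mask register */

static irqreturn_t my_example_primary(int irq, void *dev_id)
{
	struct my_example_dev *dev = dev_id;

	/* Hard interrupt context: check whether our device raised it */
	if (!(readl(dev->regs + MY_EXAMPLE_STATUS) & 0x1))
		return IRQ_NONE;		/* shared line, not ours */

	/* Disable the interrupt at the device, then wake the thread */
	writel(0, dev->regs + MY_EXAMPLE_MASK);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_example_thread(int irq, void *dev_id)
{
	struct my_example_dev *dev = dev_id;

	/* Thread context: sleeping APIs may be used for the heavy work */
	writel(1, dev->regs + MY_EXAMPLE_MASK);	/* re-enable at device */
	return IRQ_HANDLED;
}

static int my_example_probe(struct my_example_dev *dev)
{
	/* IRQF_SHARED requires the non-NULL dev_id checked above */
	return request_threaded_irq(dev->irq, my_example_primary,
				    my_example_thread, IRQF_SHARED,
				    "my_example", dev);
}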
2163ae731f8dSMarc Zyngier 
2164ae731f8dSMarc Zyngier /**
2165ae731f8dSMarc Zyngier  *	request_any_context_irq - allocate an interrupt line
2166ae731f8dSMarc Zyngier  *	@irq: Interrupt line to allocate
2167ae731f8dSMarc Zyngier  *	@handler: Function to be called when the IRQ occurs.
2168ae731f8dSMarc Zyngier  *		  Threaded handler for threaded interrupts.
2169ae731f8dSMarc Zyngier  *	@flags: Interrupt type flags
2170ae731f8dSMarc Zyngier  *	@name: An ascii name for the claiming device
2171ae731f8dSMarc Zyngier  *	@dev_id: A cookie passed back to the handler function
2172ae731f8dSMarc Zyngier  *
2173ae731f8dSMarc Zyngier  *	This call allocates interrupt resources and enables the
2174ae731f8dSMarc Zyngier  *	interrupt line and IRQ handling. It selects either a
2175ae731f8dSMarc Zyngier  *	hardirq or threaded handling method depending on the
2176ae731f8dSMarc Zyngier  *	context.
2177ae731f8dSMarc Zyngier  *
2178ae731f8dSMarc Zyngier  *	On failure, it returns a negative value. On success,
2179ae731f8dSMarc Zyngier  *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
2180ae731f8dSMarc Zyngier  */
2181ae731f8dSMarc Zyngier int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2182ae731f8dSMarc Zyngier 			    unsigned long flags, const char *name, void *dev_id)
2183ae731f8dSMarc Zyngier {
2184e237a551SChen Fan 	struct irq_desc *desc;
2185ae731f8dSMarc Zyngier 	int ret;
2186ae731f8dSMarc Zyngier 
2187e237a551SChen Fan 	if (irq == IRQ_NOTCONNECTED)
2188e237a551SChen Fan 		return -ENOTCONN;
2189e237a551SChen Fan 
2190e237a551SChen Fan 	desc = irq_to_desc(irq);
2191ae731f8dSMarc Zyngier 	if (!desc)
2192ae731f8dSMarc Zyngier 		return -EINVAL;
2193ae731f8dSMarc Zyngier 
21941ccb4e61SThomas Gleixner 	if (irq_settings_is_nested_thread(desc)) {
2195ae731f8dSMarc Zyngier 		ret = request_threaded_irq(irq, NULL, handler,
2196ae731f8dSMarc Zyngier 					   flags, name, dev_id);
2197ae731f8dSMarc Zyngier 		return !ret ? IRQC_IS_NESTED : ret;
2198ae731f8dSMarc Zyngier 	}
2199ae731f8dSMarc Zyngier 
2200ae731f8dSMarc Zyngier 	ret = request_irq(irq, handler, flags, name, dev_id);
2201ae731f8dSMarc Zyngier 	return !ret ? IRQC_IS_HARDIRQ : ret;
2202ae731f8dSMarc Zyngier }
2203ae731f8dSMarc Zyngier EXPORT_SYMBOL_GPL(request_any_context_irq);
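
/*
 * Usage sketch (illustrative, not part of the original file): callers of
 * request_any_context_irq() must treat the positive return codes as
 * success. Handler and names are hypothetical.
 */
static irqreturn_t my_anyctx_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_anyctx_request(unsigned int irq, void *dev_id)
{
	int ret = request_any_context_irq(irq, my_anyctx_handler, 0,
					  "my_anyctx", dev_id);

	if (ret < 0)
		return ret;	/* real failure */

	/* IRQC_IS_HARDIRQ or IRQC_IS_NESTED: both mean success */
	return 0;
}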
220431d9d9b6SMarc Zyngier 
2205b525903cSJulien Thierry /**
2206b525903cSJulien Thierry  *	request_nmi - allocate an interrupt line for NMI delivery
2207b525903cSJulien Thierry  *	@irq: Interrupt line to allocate
2208b525903cSJulien Thierry  *	@handler: Function to be called when the IRQ occurs.
2209b525903cSJulien Thierry  *		  Threaded handler for threaded interrupts.
2210b525903cSJulien Thierry  *	@irqflags: Interrupt type flags
2211b525903cSJulien Thierry  *	@name: An ascii name for the claiming device
2212b525903cSJulien Thierry  *	@dev_id: A cookie passed back to the handler function
2213b525903cSJulien Thierry  *
2214b525903cSJulien Thierry  *	This call allocates interrupt resources and enables the
2215b525903cSJulien Thierry  *	interrupt line and IRQ handling. It sets up the IRQ line
2216b525903cSJulien Thierry  *	to be handled as an NMI.
2217b525903cSJulien Thierry  *
2218b525903cSJulien Thierry  *	An interrupt line delivering NMIs cannot be shared and IRQ handling
2219b525903cSJulien Thierry  *	cannot be threaded.
2220b525903cSJulien Thierry  *
2221b525903cSJulien Thierry  *	Interrupt lines requested for NMI delivery must produce per-CPU
2222b525903cSJulien Thierry  *	interrupts and must have the auto enable setting disabled.
2223b525903cSJulien Thierry  *
2224b525903cSJulien Thierry  *	Dev_id must be globally unique. Normally the address of the
2225b525903cSJulien Thierry  *	device data structure is used as the cookie. Since the handler
2226b525903cSJulien Thierry  *	receives this value it makes sense to use it.
2227b525903cSJulien Thierry  *
2228b525903cSJulien Thierry  *	If the interrupt line cannot be used to deliver NMIs, the function
2229b525903cSJulien Thierry  *	will fail and return a negative value.
2230b525903cSJulien Thierry  */
2231b525903cSJulien Thierry int request_nmi(unsigned int irq, irq_handler_t handler,
2232b525903cSJulien Thierry 		unsigned long irqflags, const char *name, void *dev_id)
2233b525903cSJulien Thierry {
2234b525903cSJulien Thierry 	struct irqaction *action;
2235b525903cSJulien Thierry 	struct irq_desc *desc;
2236b525903cSJulien Thierry 	unsigned long flags;
2237b525903cSJulien Thierry 	int retval;
2238b525903cSJulien Thierry 
2239b525903cSJulien Thierry 	if (irq == IRQ_NOTCONNECTED)
2240b525903cSJulien Thierry 		return -ENOTCONN;
2241b525903cSJulien Thierry 
2242b525903cSJulien Thierry 	/* NMI cannot be shared or used for polling */
2243b525903cSJulien Thierry 	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2244b525903cSJulien Thierry 		return -EINVAL;
2245b525903cSJulien Thierry 
2246b525903cSJulien Thierry 	if (!(irqflags & IRQF_PERCPU))
2247b525903cSJulien Thierry 		return -EINVAL;
2248b525903cSJulien Thierry 
2249b525903cSJulien Thierry 	if (!handler)
2250b525903cSJulien Thierry 		return -EINVAL;
2251b525903cSJulien Thierry 
2252b525903cSJulien Thierry 	desc = irq_to_desc(irq);
2253b525903cSJulien Thierry 
2254cbe16f35SBarry Song 	if (!desc || (irq_settings_can_autoenable(desc) &&
2255cbe16f35SBarry Song 	    !(irqflags & IRQF_NO_AUTOEN)) ||
2256b525903cSJulien Thierry 	    !irq_settings_can_request(desc) ||
2257b525903cSJulien Thierry 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2258b525903cSJulien Thierry 	    !irq_supports_nmi(desc))
2259b525903cSJulien Thierry 		return -EINVAL;
2260b525903cSJulien Thierry 
2261b525903cSJulien Thierry 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2262b525903cSJulien Thierry 	if (!action)
2263b525903cSJulien Thierry 		return -ENOMEM;
2264b525903cSJulien Thierry 
2265b525903cSJulien Thierry 	action->handler = handler;
2266b525903cSJulien Thierry 	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2267b525903cSJulien Thierry 	action->name = name;
2268b525903cSJulien Thierry 	action->dev_id = dev_id;
2269b525903cSJulien Thierry 
2270b525903cSJulien Thierry 	retval = irq_chip_pm_get(&desc->irq_data);
2271b525903cSJulien Thierry 	if (retval < 0)
2272b525903cSJulien Thierry 		goto err_out;
2273b525903cSJulien Thierry 
2274b525903cSJulien Thierry 	retval = __setup_irq(irq, desc, action);
2275b525903cSJulien Thierry 	if (retval)
2276b525903cSJulien Thierry 		goto err_irq_setup;
2277b525903cSJulien Thierry 
2278b525903cSJulien Thierry 	raw_spin_lock_irqsave(&desc->lock, flags);
2279b525903cSJulien Thierry 
2280b525903cSJulien Thierry 	/* Setup NMI state */
2281b525903cSJulien Thierry 	desc->istate |= IRQS_NMI;
2282b525903cSJulien Thierry 	retval = irq_nmi_setup(desc);
2283b525903cSJulien Thierry 	if (retval) {
2284b525903cSJulien Thierry 		__cleanup_nmi(irq, desc);
2285b525903cSJulien Thierry 		raw_spin_unlock_irqrestore(&desc->lock, flags);
2286b525903cSJulien Thierry 		return -EINVAL;
2287b525903cSJulien Thierry 	}
2288b525903cSJulien Thierry 
2289b525903cSJulien Thierry 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2290b525903cSJulien Thierry 
2291b525903cSJulien Thierry 	return 0;
2292b525903cSJulien Thierry 
2293b525903cSJulien Thierry err_irq_setup:
2294b525903cSJulien Thierry 	irq_chip_pm_put(&desc->irq_data);
2295b525903cSJulien Thierry err_out:
2296b525903cSJulien Thierry 	kfree(action);
2297b525903cSJulien Thierry 
2298b525903cSJulien Thierry 	return retval;
2299b525903cSJulien Thierry }
2300b525903cSJulien Thierry 
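/*
 * Usage sketch (illustrative, not part of the original file): requesting
 * a line-based NMI. Per the checks above, the line must be requested
 * with IRQF_PERCPU and with auto enable disabled; enable_nmi() then
 * switches delivery on. Names are hypothetical.
 */
static irqreturn_t my_nmi_handler(int irq, void *dev_id)
{
	/* NMI context: no locking, no sleeping, keep this minimal */
	return IRQ_HANDLED;
}

static int my_nmi_request(unsigned int irq, void *dev_id)
{
	int ret = request_nmi(irq, my_nmi_handler,
			      IRQF_PERCPU | IRQF_NO_AUTOEN, "my_nmi", dev_id);

	if (ret)
		return ret;

	enable_nmi(irq);	/* delivery starts here */
	return 0;
}
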
23011e7c5fd2SMarc Zyngier void enable_percpu_irq(unsigned int irq, unsigned int type)
230231d9d9b6SMarc Zyngier {
230331d9d9b6SMarc Zyngier 	unsigned int cpu = smp_processor_id();
230431d9d9b6SMarc Zyngier 	unsigned long flags;
230531d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
230631d9d9b6SMarc Zyngier 
230731d9d9b6SMarc Zyngier 	if (!desc)
230831d9d9b6SMarc Zyngier 		return;
230931d9d9b6SMarc Zyngier 
2310f35ad083SMarc Zyngier 	/*
2311f35ad083SMarc Zyngier 	 * If the trigger type is not specified by the caller, then
2312f35ad083SMarc Zyngier 	 * use the default for this interrupt.
2313f35ad083SMarc Zyngier 	 */
23141e7c5fd2SMarc Zyngier 	type &= IRQ_TYPE_SENSE_MASK;
2315f35ad083SMarc Zyngier 	if (type == IRQ_TYPE_NONE)
2316f35ad083SMarc Zyngier 		type = irqd_get_trigger_type(&desc->irq_data);
2317f35ad083SMarc Zyngier 
23181e7c5fd2SMarc Zyngier 	if (type != IRQ_TYPE_NONE) {
23191e7c5fd2SMarc Zyngier 		int ret;
23201e7c5fd2SMarc Zyngier 
2321a1ff541aSJiang Liu 		ret = __irq_set_trigger(desc, type);
23221e7c5fd2SMarc Zyngier 
23231e7c5fd2SMarc Zyngier 		if (ret) {
232432cffddeSThomas Gleixner 			WARN(1, "failed to set type for IRQ%d\n", irq);
23251e7c5fd2SMarc Zyngier 			goto out;
23261e7c5fd2SMarc Zyngier 		}
23271e7c5fd2SMarc Zyngier 	}
23281e7c5fd2SMarc Zyngier 
232931d9d9b6SMarc Zyngier 	irq_percpu_enable(desc, cpu);
23301e7c5fd2SMarc Zyngier out:
233131d9d9b6SMarc Zyngier 	irq_put_desc_unlock(desc, flags);
233231d9d9b6SMarc Zyngier }
233336a5df85SChris Metcalf EXPORT_SYMBOL_GPL(enable_percpu_irq);
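
/*
 * Usage sketch (illustrative, not part of the original file):
 * enable_percpu_irq() only acts on the calling CPU, so enabling a
 * per-CPU interrupt everywhere is typically done from a function run
 * on each CPU, e.g. via on_each_cpu().
 */
static void my_enable_percpu(void *info)
{
	/* IRQ_TYPE_NONE keeps the trigger type already configured */
	enable_percpu_irq(*(unsigned int *)info, IRQ_TYPE_NONE);
}

static void my_enable_on_all_cpus(unsigned int irq)
{
	on_each_cpu(my_enable_percpu, &irq, 1);
}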
233431d9d9b6SMarc Zyngier 
23354b078c3fSJulien Thierry void enable_percpu_nmi(unsigned int irq, unsigned int type)
23364b078c3fSJulien Thierry {
23374b078c3fSJulien Thierry 	enable_percpu_irq(irq, type);
23384b078c3fSJulien Thierry }
23394b078c3fSJulien Thierry 
2340f0cb3220SThomas Petazzoni /**
2341f0cb3220SThomas Petazzoni  * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
2342f0cb3220SThomas Petazzoni  * @irq:	Linux irq number to check for
2343f0cb3220SThomas Petazzoni  *
2344f0cb3220SThomas Petazzoni  * Must be called from a non-migratable context. Returns the enable
2345f0cb3220SThomas Petazzoni  * state of a per-CPU interrupt on the current CPU.
2346f0cb3220SThomas Petazzoni  */
2347f0cb3220SThomas Petazzoni bool irq_percpu_is_enabled(unsigned int irq)
2348f0cb3220SThomas Petazzoni {
2349f0cb3220SThomas Petazzoni 	unsigned int cpu = smp_processor_id();
2350f0cb3220SThomas Petazzoni 	struct irq_desc *desc;
2351f0cb3220SThomas Petazzoni 	unsigned long flags;
2352f0cb3220SThomas Petazzoni 	bool is_enabled;
2353f0cb3220SThomas Petazzoni 
2354f0cb3220SThomas Petazzoni 	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2355f0cb3220SThomas Petazzoni 	if (!desc)
2356f0cb3220SThomas Petazzoni 		return false;
2357f0cb3220SThomas Petazzoni 
2358f0cb3220SThomas Petazzoni 	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2359f0cb3220SThomas Petazzoni 	irq_put_desc_unlock(desc, flags);
2360f0cb3220SThomas Petazzoni 
2361f0cb3220SThomas Petazzoni 	return is_enabled;
2362f0cb3220SThomas Petazzoni }
2363f0cb3220SThomas Petazzoni EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
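
/*
 * Usage sketch (illustrative, not part of the original file): satisfying
 * the non-migratable requirement around irq_percpu_is_enabled(). The
 * answer only describes this CPU and may be stale once migration is
 * possible again.
 */
static bool my_percpu_irq_enabled_here(unsigned int irq)
{
	bool enabled;

	preempt_disable();
	enabled = irq_percpu_is_enabled(irq);
	preempt_enable();

	return enabled;
}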
2364f0cb3220SThomas Petazzoni 
236531d9d9b6SMarc Zyngier void disable_percpu_irq(unsigned int irq)
236631d9d9b6SMarc Zyngier {
236731d9d9b6SMarc Zyngier 	unsigned int cpu = smp_processor_id();
236831d9d9b6SMarc Zyngier 	unsigned long flags;
236931d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
237031d9d9b6SMarc Zyngier 
237131d9d9b6SMarc Zyngier 	if (!desc)
237231d9d9b6SMarc Zyngier 		return;
237331d9d9b6SMarc Zyngier 
237431d9d9b6SMarc Zyngier 	irq_percpu_disable(desc, cpu);
237531d9d9b6SMarc Zyngier 	irq_put_desc_unlock(desc, flags);
237631d9d9b6SMarc Zyngier }
237736a5df85SChris Metcalf EXPORT_SYMBOL_GPL(disable_percpu_irq);
237831d9d9b6SMarc Zyngier 
23794b078c3fSJulien Thierry void disable_percpu_nmi(unsigned int irq)
23804b078c3fSJulien Thierry {
23814b078c3fSJulien Thierry 	disable_percpu_irq(irq);
23824b078c3fSJulien Thierry }
23834b078c3fSJulien Thierry 
238431d9d9b6SMarc Zyngier /*
238531d9d9b6SMarc Zyngier  * Internal function to unregister a percpu irqaction.
238631d9d9b6SMarc Zyngier  */
238731d9d9b6SMarc Zyngier static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
238831d9d9b6SMarc Zyngier {
238931d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
239031d9d9b6SMarc Zyngier 	struct irqaction *action;
239131d9d9b6SMarc Zyngier 	unsigned long flags;
239231d9d9b6SMarc Zyngier 
239331d9d9b6SMarc Zyngier 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
239431d9d9b6SMarc Zyngier 
239531d9d9b6SMarc Zyngier 	if (!desc)
239631d9d9b6SMarc Zyngier 		return NULL;
239731d9d9b6SMarc Zyngier 
239831d9d9b6SMarc Zyngier 	raw_spin_lock_irqsave(&desc->lock, flags);
239931d9d9b6SMarc Zyngier 
240031d9d9b6SMarc Zyngier 	action = desc->action;
240131d9d9b6SMarc Zyngier 	if (!action || action->percpu_dev_id != dev_id) {
240231d9d9b6SMarc Zyngier 		WARN(1, "Trying to free already-free IRQ %d\n", irq);
240331d9d9b6SMarc Zyngier 		goto bad;
240431d9d9b6SMarc Zyngier 	}
240531d9d9b6SMarc Zyngier 
240631d9d9b6SMarc Zyngier 	if (!cpumask_empty(desc->percpu_enabled)) {
240731d9d9b6SMarc Zyngier 		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
240831d9d9b6SMarc Zyngier 		     irq, cpumask_first(desc->percpu_enabled));
240931d9d9b6SMarc Zyngier 		goto bad;
241031d9d9b6SMarc Zyngier 	}
241131d9d9b6SMarc Zyngier 
241231d9d9b6SMarc Zyngier 	/* Found it - now remove it from the list of entries: */
241331d9d9b6SMarc Zyngier 	desc->action = NULL;
241431d9d9b6SMarc Zyngier 
24154b078c3fSJulien Thierry 	desc->istate &= ~IRQS_NMI;
24164b078c3fSJulien Thierry 
241731d9d9b6SMarc Zyngier 	raw_spin_unlock_irqrestore(&desc->lock, flags);
241831d9d9b6SMarc Zyngier 
241931d9d9b6SMarc Zyngier 	unregister_handler_proc(irq, action);
242031d9d9b6SMarc Zyngier 
2421be45beb2SJon Hunter 	irq_chip_pm_put(&desc->irq_data);
242231d9d9b6SMarc Zyngier 	module_put(desc->owner);
242331d9d9b6SMarc Zyngier 	return action;
242431d9d9b6SMarc Zyngier 
242531d9d9b6SMarc Zyngier bad:
242631d9d9b6SMarc Zyngier 	raw_spin_unlock_irqrestore(&desc->lock, flags);
242731d9d9b6SMarc Zyngier 	return NULL;
242831d9d9b6SMarc Zyngier }
242931d9d9b6SMarc Zyngier 
243031d9d9b6SMarc Zyngier /**
243131d9d9b6SMarc Zyngier  *	remove_percpu_irq - free a per-cpu interrupt
243231d9d9b6SMarc Zyngier  *	@irq: Interrupt line to free
243331d9d9b6SMarc Zyngier  *	@act: irqaction for the interrupt
243431d9d9b6SMarc Zyngier  *
243531d9d9b6SMarc Zyngier  * Used to remove interrupts statically set up by the early boot process.
243631d9d9b6SMarc Zyngier  */
243731d9d9b6SMarc Zyngier void remove_percpu_irq(unsigned int irq, struct irqaction *act)
243831d9d9b6SMarc Zyngier {
243931d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
244031d9d9b6SMarc Zyngier 
244131d9d9b6SMarc Zyngier 	if (desc && irq_settings_is_per_cpu_devid(desc))
244231d9d9b6SMarc Zyngier 	    __free_percpu_irq(irq, act->percpu_dev_id);
244331d9d9b6SMarc Zyngier }
244431d9d9b6SMarc Zyngier 
244531d9d9b6SMarc Zyngier /**
244631d9d9b6SMarc Zyngier  *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
244731d9d9b6SMarc Zyngier  *	@irq: Interrupt line to free
244831d9d9b6SMarc Zyngier  *	@dev_id: Device identity to free
244931d9d9b6SMarc Zyngier  *
245031d9d9b6SMarc Zyngier  *	Remove a percpu interrupt handler. The handler is removed, but
245131d9d9b6SMarc Zyngier  *	the interrupt line is not disabled. This must be done on each
245231d9d9b6SMarc Zyngier  *	CPU before calling this function. The function does not return
245331d9d9b6SMarc Zyngier  *	until any executing interrupts for this IRQ have completed.
245431d9d9b6SMarc Zyngier  *
245531d9d9b6SMarc Zyngier  *	This function must not be called from interrupt context.
245631d9d9b6SMarc Zyngier  */
245731d9d9b6SMarc Zyngier void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
245831d9d9b6SMarc Zyngier {
245931d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
246031d9d9b6SMarc Zyngier 
246131d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
246231d9d9b6SMarc Zyngier 		return;
246331d9d9b6SMarc Zyngier 
246431d9d9b6SMarc Zyngier 	chip_bus_lock(desc);
246531d9d9b6SMarc Zyngier 	kfree(__free_percpu_irq(irq, dev_id));
246631d9d9b6SMarc Zyngier 	chip_bus_sync_unlock(desc);
246731d9d9b6SMarc Zyngier }
2468aec2e2adSMaxime Ripard EXPORT_SYMBOL_GPL(free_percpu_irq);
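
/*
 * Usage sketch (illustrative, not part of the original file): the
 * interrupt has to be disabled on every CPU before free_percpu_irq()
 * runs, as the comment above requires.
 */
static void my_disable_percpu(void *info)
{
	disable_percpu_irq(*(unsigned int *)info);
}

static void my_percpu_teardown(unsigned int irq, void __percpu *dev_id)
{
	on_each_cpu(my_disable_percpu, &irq, 1);
	free_percpu_irq(irq, dev_id);
}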
246931d9d9b6SMarc Zyngier 
24704b078c3fSJulien Thierry void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
24714b078c3fSJulien Thierry {
24724b078c3fSJulien Thierry 	struct irq_desc *desc = irq_to_desc(irq);
24734b078c3fSJulien Thierry 
24744b078c3fSJulien Thierry 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
24754b078c3fSJulien Thierry 		return;
24764b078c3fSJulien Thierry 
24774b078c3fSJulien Thierry 	if (WARN_ON(!(desc->istate & IRQS_NMI)))
24784b078c3fSJulien Thierry 		return;
24794b078c3fSJulien Thierry 
24804b078c3fSJulien Thierry 	kfree(__free_percpu_irq(irq, dev_id));
24814b078c3fSJulien Thierry }
24824b078c3fSJulien Thierry 
248331d9d9b6SMarc Zyngier /**
248431d9d9b6SMarc Zyngier  *	setup_percpu_irq - setup a per-cpu interrupt
248531d9d9b6SMarc Zyngier  *	@irq: Interrupt line to setup
248631d9d9b6SMarc Zyngier  *	@act: irqaction for the interrupt
248731d9d9b6SMarc Zyngier  *
248831d9d9b6SMarc Zyngier  * Used to statically set up per-cpu interrupts in the early boot process.
248931d9d9b6SMarc Zyngier  */
249031d9d9b6SMarc Zyngier int setup_percpu_irq(unsigned int irq, struct irqaction *act)
249131d9d9b6SMarc Zyngier {
249231d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
249331d9d9b6SMarc Zyngier 	int retval;
249431d9d9b6SMarc Zyngier 
249531d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
249631d9d9b6SMarc Zyngier 		return -EINVAL;
2497be45beb2SJon Hunter 
2498be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
2499be45beb2SJon Hunter 	if (retval < 0)
2500be45beb2SJon Hunter 		return retval;
2501be45beb2SJon Hunter 
250231d9d9b6SMarc Zyngier 	retval = __setup_irq(irq, desc, act);
250331d9d9b6SMarc Zyngier 
2504be45beb2SJon Hunter 	if (retval)
2505be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
2506be45beb2SJon Hunter 
250731d9d9b6SMarc Zyngier 	return retval;
250831d9d9b6SMarc Zyngier }
250931d9d9b6SMarc Zyngier 
251031d9d9b6SMarc Zyngier /**
2511c80081b9SDaniel Lezcano  *	__request_percpu_irq - allocate a percpu interrupt line
251231d9d9b6SMarc Zyngier  *	@irq: Interrupt line to allocate
251331d9d9b6SMarc Zyngier  *	@handler: Function to be called when the IRQ occurs.
2514c80081b9SDaniel Lezcano  *	@flags: Interrupt type flags (IRQF_TIMER only)
251531d9d9b6SMarc Zyngier  *	@devname: An ascii name for the claiming device
251631d9d9b6SMarc Zyngier  *	@dev_id: A percpu cookie passed back to the handler function
251731d9d9b6SMarc Zyngier  *
2518a1b7febdSMaxime Ripard  *	This call allocates interrupt resources and enables the
2519a1b7febdSMaxime Ripard  *	interrupt on the local CPU. If the interrupt is supposed to be
2520a1b7febdSMaxime Ripard  *	enabled on other CPUs, it has to be done on each CPU using
2521a1b7febdSMaxime Ripard  *	enable_percpu_irq().
252231d9d9b6SMarc Zyngier  *
252331d9d9b6SMarc Zyngier  *	Dev_id must be globally unique. It is a per-cpu variable, and
252431d9d9b6SMarc Zyngier  *	the handler gets called with the interrupted CPU's instance of
252531d9d9b6SMarc Zyngier  *	that variable.
252631d9d9b6SMarc Zyngier  */
2527c80081b9SDaniel Lezcano int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2528c80081b9SDaniel Lezcano 			 unsigned long flags, const char *devname,
2529c80081b9SDaniel Lezcano 			 void __percpu *dev_id)
253031d9d9b6SMarc Zyngier {
253131d9d9b6SMarc Zyngier 	struct irqaction *action;
253231d9d9b6SMarc Zyngier 	struct irq_desc *desc;
253331d9d9b6SMarc Zyngier 	int retval;
253431d9d9b6SMarc Zyngier 
253531d9d9b6SMarc Zyngier 	if (!dev_id)
253631d9d9b6SMarc Zyngier 		return -EINVAL;
253731d9d9b6SMarc Zyngier 
253831d9d9b6SMarc Zyngier 	desc = irq_to_desc(irq);
253931d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_can_request(desc) ||
254031d9d9b6SMarc Zyngier 	    !irq_settings_is_per_cpu_devid(desc))
254131d9d9b6SMarc Zyngier 		return -EINVAL;
254231d9d9b6SMarc Zyngier 
2543c80081b9SDaniel Lezcano 	if (flags && flags != IRQF_TIMER)
2544c80081b9SDaniel Lezcano 		return -EINVAL;
2545c80081b9SDaniel Lezcano 
254631d9d9b6SMarc Zyngier 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
254731d9d9b6SMarc Zyngier 	if (!action)
254831d9d9b6SMarc Zyngier 		return -ENOMEM;
254931d9d9b6SMarc Zyngier 
255031d9d9b6SMarc Zyngier 	action->handler = handler;
2551c80081b9SDaniel Lezcano 	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
255231d9d9b6SMarc Zyngier 	action->name = devname;
255331d9d9b6SMarc Zyngier 	action->percpu_dev_id = dev_id;
255431d9d9b6SMarc Zyngier 
2555be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
25564396f46cSShawn Lin 	if (retval < 0) {
25574396f46cSShawn Lin 		kfree(action);
2558be45beb2SJon Hunter 		return retval;
25594396f46cSShawn Lin 	}
2560be45beb2SJon Hunter 
256131d9d9b6SMarc Zyngier 	retval = __setup_irq(irq, desc, action);
256231d9d9b6SMarc Zyngier 
2563be45beb2SJon Hunter 	if (retval) {
2564be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
256531d9d9b6SMarc Zyngier 		kfree(action);
2566be45beb2SJon Hunter 	}
256731d9d9b6SMarc Zyngier 
256831d9d9b6SMarc Zyngier 	return retval;
256931d9d9b6SMarc Zyngier }
2570c80081b9SDaniel Lezcano EXPORT_SYMBOL_GPL(__request_percpu_irq);
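
/*
 * Usage sketch (illustrative, not part of the original file): the usual
 * pattern around the request_percpu_irq() wrapper (flags == 0). The
 * handler receives the interrupted CPU's instance of the per-cpu cookie.
 * Names are hypothetical.
 */
struct my_pcpu_state {
	unsigned long count;
};

static irqreturn_t my_pcpu_handler(int irq, void *dev_id)
{
	struct my_pcpu_state *state = dev_id;	/* this CPU's instance */

	state->count++;
	return IRQ_HANDLED;
}

static int my_pcpu_init(unsigned int irq)
{
	struct my_pcpu_state __percpu *state;
	int ret;

	state = alloc_percpu(struct my_pcpu_state);
	if (!state)
		return -ENOMEM;

	ret = request_percpu_irq(irq, my_pcpu_handler, "my_pcpu", state);
	if (ret) {
		free_percpu(state);
		return ret;
	}

	/* Still disabled everywhere; see enable_percpu_irq() above */
	return 0;
}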
25711b7047edSMarc Zyngier 
25721b7047edSMarc Zyngier /**
25734b078c3fSJulien Thierry  *	request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
25744b078c3fSJulien Thierry  *	@irq: Interrupt line to allocate
25754b078c3fSJulien Thierry  *	@handler: Function to be called when the IRQ occurs.
25764b078c3fSJulien Thierry  *	@name: An ascii name for the claiming device
25774b078c3fSJulien Thierry  *	@dev_id: A percpu cookie passed back to the handler function
25784b078c3fSJulien Thierry  *
25794b078c3fSJulien Thierry  *	This call allocates interrupt resources for a per-CPU NMI. Per-CPU NMIs
2580a5186694SJulien Thierry  *	have to be set up on each CPU by calling prepare_percpu_nmi() before
2581a5186694SJulien Thierry  *	being enabled on the same CPU by using enable_percpu_nmi().
25824b078c3fSJulien Thierry  *
25834b078c3fSJulien Thierry  *	Dev_id must be globally unique. It is a per-cpu variable, and
25844b078c3fSJulien Thierry  *	the handler gets called with the interrupted CPU's instance of
25854b078c3fSJulien Thierry  *	that variable.
25864b078c3fSJulien Thierry  *
25874b078c3fSJulien Thierry  *	Interrupt lines requested for NMI delivery must have the auto
25884b078c3fSJulien Thierry  *	enable setting disabled.
25894b078c3fSJulien Thierry  *
25904b078c3fSJulien Thierry  *	If the interrupt line cannot be used to deliver NMIs, the function
25914b078c3fSJulien Thierry  *	will fail, returning a negative value.
25924b078c3fSJulien Thierry  */
25934b078c3fSJulien Thierry int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
25944b078c3fSJulien Thierry 		       const char *name, void __percpu *dev_id)
25954b078c3fSJulien Thierry {
25964b078c3fSJulien Thierry 	struct irqaction *action;
25974b078c3fSJulien Thierry 	struct irq_desc *desc;
25984b078c3fSJulien Thierry 	unsigned long flags;
25994b078c3fSJulien Thierry 	int retval;
26004b078c3fSJulien Thierry 
26014b078c3fSJulien Thierry 	if (!handler)
26024b078c3fSJulien Thierry 		return -EINVAL;
26034b078c3fSJulien Thierry 
26044b078c3fSJulien Thierry 	desc = irq_to_desc(irq);
26054b078c3fSJulien Thierry 
26064b078c3fSJulien Thierry 	if (!desc || !irq_settings_can_request(desc) ||
26074b078c3fSJulien Thierry 	    !irq_settings_is_per_cpu_devid(desc) ||
26084b078c3fSJulien Thierry 	    irq_settings_can_autoenable(desc) ||
26094b078c3fSJulien Thierry 	    !irq_supports_nmi(desc))
26104b078c3fSJulien Thierry 		return -EINVAL;
26114b078c3fSJulien Thierry 
26124b078c3fSJulien Thierry 	/* The line cannot already be NMI */
26134b078c3fSJulien Thierry 	if (desc->istate & IRQS_NMI)
26144b078c3fSJulien Thierry 		return -EINVAL;
26154b078c3fSJulien Thierry 
26164b078c3fSJulien Thierry 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
26174b078c3fSJulien Thierry 	if (!action)
26184b078c3fSJulien Thierry 		return -ENOMEM;
26194b078c3fSJulien Thierry 
26204b078c3fSJulien Thierry 	action->handler = handler;
26214b078c3fSJulien Thierry 	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
26224b078c3fSJulien Thierry 		| IRQF_NOBALANCING;
26234b078c3fSJulien Thierry 	action->name = name;
26244b078c3fSJulien Thierry 	action->percpu_dev_id = dev_id;
26254b078c3fSJulien Thierry 
26264b078c3fSJulien Thierry 	retval = irq_chip_pm_get(&desc->irq_data);
26274b078c3fSJulien Thierry 	if (retval < 0)
26284b078c3fSJulien Thierry 		goto err_out;
26294b078c3fSJulien Thierry 
26304b078c3fSJulien Thierry 	retval = __setup_irq(irq, desc, action);
26314b078c3fSJulien Thierry 	if (retval)
26324b078c3fSJulien Thierry 		goto err_irq_setup;
26334b078c3fSJulien Thierry 
26344b078c3fSJulien Thierry 	raw_spin_lock_irqsave(&desc->lock, flags);
26354b078c3fSJulien Thierry 	desc->istate |= IRQS_NMI;
26364b078c3fSJulien Thierry 	raw_spin_unlock_irqrestore(&desc->lock, flags);
26374b078c3fSJulien Thierry 
26384b078c3fSJulien Thierry 	return 0;
26394b078c3fSJulien Thierry 
26404b078c3fSJulien Thierry err_irq_setup:
26414b078c3fSJulien Thierry 	irq_chip_pm_put(&desc->irq_data);
26424b078c3fSJulien Thierry err_out:
26434b078c3fSJulien Thierry 	kfree(action);
26444b078c3fSJulien Thierry 
26454b078c3fSJulien Thierry 	return retval;
26464b078c3fSJulien Thierry }
26474b078c3fSJulien Thierry 
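/*
 * Usage sketch (illustrative, not part of the original file): bringing up
 * a per-CPU NMI. request_percpu_nmi() is called once; each CPU then
 * prepares and enables the line locally from non-preemptible context,
 * here via on_each_cpu(). Names are hypothetical.
 */
static void my_pcpu_nmi_cpu_up(void *info)
{
	unsigned int irq = *(unsigned int *)info;

	if (!prepare_percpu_nmi(irq))
		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
}

static int my_pcpu_nmi_init(unsigned int irq, irq_handler_t handler,
			    void __percpu *dev_id)
{
	int ret = request_percpu_nmi(irq, handler, "my_pcpu_nmi", dev_id);

	if (ret)
		return ret;

	on_each_cpu(my_pcpu_nmi_cpu_up, &irq, 1);
	return 0;
}
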
26484b078c3fSJulien Thierry /**
26494b078c3fSJulien Thierry  *	prepare_percpu_nmi - performs CPU local setup for NMI delivery
26504b078c3fSJulien Thierry  *	@irq: Interrupt line to prepare for NMI delivery
26514b078c3fSJulien Thierry  *
26524b078c3fSJulien Thierry  *	This call prepares an interrupt line to deliver NMI on the current CPU,
26534b078c3fSJulien Thierry  *	before that interrupt line gets enabled with enable_percpu_nmi().
26544b078c3fSJulien Thierry  *
26554b078c3fSJulien Thierry  *	As a CPU local operation, this should be called from non-preemptible
26564b078c3fSJulien Thierry  *	context.
26574b078c3fSJulien Thierry  *
26584b078c3fSJulien Thierry  *	If the interrupt line cannot be used to deliver NMIs, the function
26594b078c3fSJulien Thierry  *	will fail, returning a negative value.
26604b078c3fSJulien Thierry  */
26614b078c3fSJulien Thierry int prepare_percpu_nmi(unsigned int irq)
26624b078c3fSJulien Thierry {
26634b078c3fSJulien Thierry 	unsigned long flags;
26644b078c3fSJulien Thierry 	struct irq_desc *desc;
26654b078c3fSJulien Thierry 	int ret = 0;
26664b078c3fSJulien Thierry 
26674b078c3fSJulien Thierry 	WARN_ON(preemptible());
26684b078c3fSJulien Thierry 
26694b078c3fSJulien Thierry 	desc = irq_get_desc_lock(irq, &flags,
26704b078c3fSJulien Thierry 				 IRQ_GET_DESC_CHECK_PERCPU);
26714b078c3fSJulien Thierry 	if (!desc)
26724b078c3fSJulien Thierry 		return -EINVAL;
26734b078c3fSJulien Thierry 
26744b078c3fSJulien Thierry 	if (WARN(!(desc->istate & IRQS_NMI),
26754b078c3fSJulien Thierry 		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
26764b078c3fSJulien Thierry 		 irq)) {
26774b078c3fSJulien Thierry 		ret = -EINVAL;
26784b078c3fSJulien Thierry 		goto out;
26794b078c3fSJulien Thierry 	}
26804b078c3fSJulien Thierry 
26814b078c3fSJulien Thierry 	ret = irq_nmi_setup(desc);
26824b078c3fSJulien Thierry 	if (ret) {
26834b078c3fSJulien Thierry 		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
26844b078c3fSJulien Thierry 		goto out;
26854b078c3fSJulien Thierry 	}
26864b078c3fSJulien Thierry 
26874b078c3fSJulien Thierry out:
26884b078c3fSJulien Thierry 	irq_put_desc_unlock(desc, flags);
26894b078c3fSJulien Thierry 	return ret;
26904b078c3fSJulien Thierry }
26914b078c3fSJulien Thierry 
26924b078c3fSJulien Thierry /**
26934b078c3fSJulien Thierry  *	teardown_percpu_nmi - undoes NMI setup of IRQ line
26944b078c3fSJulien Thierry  *	@irq: Interrupt line from which CPU local NMI configuration should be
26954b078c3fSJulien Thierry  *	      removed
26964b078c3fSJulien Thierry  *
26974b078c3fSJulien Thierry  *	This call undoes the setup done by prepare_percpu_nmi().
26984b078c3fSJulien Thierry  *
26994b078c3fSJulien Thierry  *	The IRQ line should not be enabled for the current CPU.
27004b078c3fSJulien Thierry  *
27014b078c3fSJulien Thierry  *	As a CPU local operation, this should be called from non-preemptible
27024b078c3fSJulien Thierry  *	context.
27034b078c3fSJulien Thierry  */
27044b078c3fSJulien Thierry void teardown_percpu_nmi(unsigned int irq)
27054b078c3fSJulien Thierry {
27064b078c3fSJulien Thierry 	unsigned long flags;
27074b078c3fSJulien Thierry 	struct irq_desc *desc;
27084b078c3fSJulien Thierry 
27094b078c3fSJulien Thierry 	WARN_ON(preemptible());
27104b078c3fSJulien Thierry 
27114b078c3fSJulien Thierry 	desc = irq_get_desc_lock(irq, &flags,
27124b078c3fSJulien Thierry 				 IRQ_GET_DESC_CHECK_PERCPU);
27134b078c3fSJulien Thierry 	if (!desc)
27144b078c3fSJulien Thierry 		return;
27154b078c3fSJulien Thierry 
27164b078c3fSJulien Thierry 	if (WARN_ON(!(desc->istate & IRQS_NMI)))
27174b078c3fSJulien Thierry 		goto out;
27184b078c3fSJulien Thierry 
27194b078c3fSJulien Thierry 	irq_nmi_teardown(desc);
27204b078c3fSJulien Thierry out:
27214b078c3fSJulien Thierry 	irq_put_desc_unlock(desc, flags);
27224b078c3fSJulien Thierry }
27234b078c3fSJulien Thierry 
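/*
 * Usage sketch (illustrative, not part of the original file): the mirror
 * image of the bring-up above, disabling and tearing down the per-CPU NMI
 * on each CPU before freeing it.
 */
static void my_pcpu_nmi_cpu_down(void *info)
{
	unsigned int irq = *(unsigned int *)info;

	disable_percpu_nmi(irq);
	teardown_percpu_nmi(irq);
}

static void my_pcpu_nmi_exit(unsigned int irq, void __percpu *dev_id)
{
	on_each_cpu(my_pcpu_nmi_cpu_down, &irq, 1);
	free_percpu_nmi(irq, dev_id);
}
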
272462e04686SThomas Gleixner int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
272562e04686SThomas Gleixner 			    bool *state)
272662e04686SThomas Gleixner {
272762e04686SThomas Gleixner 	struct irq_chip *chip;
272862e04686SThomas Gleixner 	int err = -EINVAL;
272962e04686SThomas Gleixner 
273062e04686SThomas Gleixner 	do {
273162e04686SThomas Gleixner 		chip = irq_data_get_irq_chip(data);
27321d0326f3SMarek Vasut 		if (WARN_ON_ONCE(!chip))
27331d0326f3SMarek Vasut 			return -ENODEV;
273462e04686SThomas Gleixner 		if (chip->irq_get_irqchip_state)
273562e04686SThomas Gleixner 			break;
273662e04686SThomas Gleixner #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
273762e04686SThomas Gleixner 		data = data->parent_data;
273862e04686SThomas Gleixner #else
273962e04686SThomas Gleixner 		data = NULL;
274062e04686SThomas Gleixner #endif
274162e04686SThomas Gleixner 	} while (data);
274262e04686SThomas Gleixner 
274362e04686SThomas Gleixner 	if (data)
274462e04686SThomas Gleixner 		err = chip->irq_get_irqchip_state(data, which, state);
274562e04686SThomas Gleixner 	return err;
274662e04686SThomas Gleixner }
274762e04686SThomas Gleixner 
27484b078c3fSJulien Thierry /**
27491b7047edSMarc Zyngier  *	irq_get_irqchip_state - returns the irqchip state of an interrupt.
27501b7047edSMarc Zyngier  *	@irq: Interrupt line that is forwarded to a VM
27511b7047edSMarc Zyngier  *	@which: One of IRQCHIP_STATE_* the caller wants to know about
2752*5c982c58SKrzysztof Kozlowski  *	@state: a pointer to a boolean where the state is to be stored
27531b7047edSMarc Zyngier  *
27541b7047edSMarc Zyngier  *	This call snapshots the internal irqchip state of an
27551b7047edSMarc Zyngier  *	interrupt, returning into @state the bit corresponding to
27561b7047edSMarc Zyngier  *	state @which.
27571b7047edSMarc Zyngier  *
27581b7047edSMarc Zyngier  *	This function should be called with preemption disabled if the
27591b7047edSMarc Zyngier  *	interrupt controller has per-cpu registers.
27601b7047edSMarc Zyngier  */
27611b7047edSMarc Zyngier int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
27621b7047edSMarc Zyngier 			  bool *state)
27631b7047edSMarc Zyngier {
27641b7047edSMarc Zyngier 	struct irq_desc *desc;
27651b7047edSMarc Zyngier 	struct irq_data *data;
27661b7047edSMarc Zyngier 	unsigned long flags;
27671b7047edSMarc Zyngier 	int err = -EINVAL;
27681b7047edSMarc Zyngier 
27691b7047edSMarc Zyngier 	desc = irq_get_desc_buslock(irq, &flags, 0);
27701b7047edSMarc Zyngier 	if (!desc)
27711b7047edSMarc Zyngier 		return err;
27721b7047edSMarc Zyngier 
27731b7047edSMarc Zyngier 	data = irq_desc_get_irq_data(desc);
27741b7047edSMarc Zyngier 
277562e04686SThomas Gleixner 	err = __irq_get_irqchip_state(data, which, state);
27761b7047edSMarc Zyngier 
27771b7047edSMarc Zyngier 	irq_put_desc_busunlock(desc, flags);
27781b7047edSMarc Zyngier 	return err;
27791b7047edSMarc Zyngier }
27801ee4fb3eSBjorn Andersson EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
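
/*
 * Usage sketch (illustrative, not part of the original file): sampling
 * whether a forwarded line is pending at the irqchip.
 */
static bool my_irq_is_pending(unsigned int irq)
{
	bool pending = false;

	/* On error the line is simply reported as not pending */
	irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
	return pending;
}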
27811b7047edSMarc Zyngier 
27821b7047edSMarc Zyngier /**
27831b7047edSMarc Zyngier  *	irq_set_irqchip_state - set the state of a forwarded interrupt.
27841b7047edSMarc Zyngier  *	@irq: Interrupt line that is forwarded to a VM
27851b7047edSMarc Zyngier  *	@which: State to be restored (one of IRQCHIP_STATE_*)
27861b7047edSMarc Zyngier  *	@val: Value corresponding to @which
27871b7047edSMarc Zyngier  *
27881b7047edSMarc Zyngier  *	This call sets the internal irqchip state of an interrupt,
27891b7047edSMarc Zyngier  *	depending on the value of @which.
27901b7047edSMarc Zyngier  *
27911b7047edSMarc Zyngier  *	This function should be called with preemption disabled if the
27921b7047edSMarc Zyngier  *	interrupt controller has per-cpu registers.
27931b7047edSMarc Zyngier  */
27941b7047edSMarc Zyngier int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
27951b7047edSMarc Zyngier 			  bool val)
27961b7047edSMarc Zyngier {
27971b7047edSMarc Zyngier 	struct irq_desc *desc;
27981b7047edSMarc Zyngier 	struct irq_data *data;
27991b7047edSMarc Zyngier 	struct irq_chip *chip;
28001b7047edSMarc Zyngier 	unsigned long flags;
28011b7047edSMarc Zyngier 	int err = -EINVAL;
28021b7047edSMarc Zyngier 
28031b7047edSMarc Zyngier 	desc = irq_get_desc_buslock(irq, &flags, 0);
28041b7047edSMarc Zyngier 	if (!desc)
28051b7047edSMarc Zyngier 		return err;
28061b7047edSMarc Zyngier 
28071b7047edSMarc Zyngier 	data = irq_desc_get_irq_data(desc);
28081b7047edSMarc Zyngier 
28091b7047edSMarc Zyngier 	do {
28101b7047edSMarc Zyngier 		chip = irq_data_get_irq_chip(data);
2811f107cee9SGuenter Roeck 		if (WARN_ON_ONCE(!chip)) {
2812f107cee9SGuenter Roeck 			err = -ENODEV;
2813f107cee9SGuenter Roeck 			goto out_unlock;
2814f107cee9SGuenter Roeck 		}
28151b7047edSMarc Zyngier 		if (chip->irq_set_irqchip_state)
28161b7047edSMarc Zyngier 			break;
28171b7047edSMarc Zyngier #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
28181b7047edSMarc Zyngier 		data = data->parent_data;
28191b7047edSMarc Zyngier #else
28201b7047edSMarc Zyngier 		data = NULL;
28211b7047edSMarc Zyngier #endif
28221b7047edSMarc Zyngier 	} while (data);
28231b7047edSMarc Zyngier 
28241b7047edSMarc Zyngier 	if (data)
28251b7047edSMarc Zyngier 		err = chip->irq_set_irqchip_state(data, which, val);
28261b7047edSMarc Zyngier 
2827f107cee9SGuenter Roeck out_unlock:
28281b7047edSMarc Zyngier 	irq_put_desc_busunlock(desc, flags);
28291b7047edSMarc Zyngier 	return err;
28301b7047edSMarc Zyngier }
28311ee4fb3eSBjorn Andersson EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
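
/*
 * Usage sketch (illustrative, not part of the original file): the
 * counterpart to the snapshot above, re-injecting a saved pending state
 * when an interrupt is handed back to a VM.
 */
static int my_irq_restore_pending(unsigned int irq, bool was_pending)
{
	return irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING,
				     was_pending);
}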
2832a313357eSThomas Gleixner 
2833a313357eSThomas Gleixner /**
2834a313357eSThomas Gleixner  * irq_has_action - Check whether an interrupt is requested
2835a313357eSThomas Gleixner  * @irq:	The linux irq number
2836a313357eSThomas Gleixner  *
2837a313357eSThomas Gleixner  * Returns: A snapshot of the current state
2838a313357eSThomas Gleixner  */
2839a313357eSThomas Gleixner bool irq_has_action(unsigned int irq)
2840a313357eSThomas Gleixner {
2841a313357eSThomas Gleixner 	bool res;
2842a313357eSThomas Gleixner 
2843a313357eSThomas Gleixner 	rcu_read_lock();
2844a313357eSThomas Gleixner 	res = irq_desc_has_action(irq_to_desc(irq));
2845a313357eSThomas Gleixner 	rcu_read_unlock();
2846a313357eSThomas Gleixner 	return res;
2847a313357eSThomas Gleixner }
2848a313357eSThomas Gleixner EXPORT_SYMBOL_GPL(irq_has_action);
2849fdd02963SThomas Gleixner 
2850fdd02963SThomas Gleixner /**
2851fdd02963SThomas Gleixner  * irq_check_status_bit - Check whether bits in the irq descriptor status are set
2852fdd02963SThomas Gleixner  * @irq:	The linux irq number
2853fdd02963SThomas Gleixner  * @bitmask:	The bitmask to evaluate
2854fdd02963SThomas Gleixner  *
2855fdd02963SThomas Gleixner  * Returns: True if one of the bits in @bitmask is set
2856fdd02963SThomas Gleixner  */
2857fdd02963SThomas Gleixner bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
2858fdd02963SThomas Gleixner {
2859fdd02963SThomas Gleixner 	struct irq_desc *desc;
2860fdd02963SThomas Gleixner 	bool res = false;
2861fdd02963SThomas Gleixner 
2862fdd02963SThomas Gleixner 	rcu_read_lock();
2863fdd02963SThomas Gleixner 	desc = irq_to_desc(irq);
2864fdd02963SThomas Gleixner 	if (desc)
2865fdd02963SThomas Gleixner 		res = !!(desc->status_use_accessors & bitmask);
2866fdd02963SThomas Gleixner 	rcu_read_unlock();
2867fdd02963SThomas Gleixner 	return res;
2868fdd02963SThomas Gleixner }
2869ce09ccc5SThomas Gleixner EXPORT_SYMBOL_GPL(irq_check_status_bit);
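
/*
 * Usage sketch (illustrative, not part of the original file): e.g.
 * checking lock-free whether an interrupt is marked per-CPU.
 */
static bool my_irq_is_per_cpu(unsigned int irq)
{
	return irq_check_status_bit(irq, IRQ_PER_CPU);
}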
2870