xref: /openbmc/linux/kernel/irq/manage.c (revision 7a40798c714ff462863352d490b382515daba49e)
152a65ff5SThomas Gleixner // SPDX-License-Identifier: GPL-2.0
21da177e4SLinus Torvalds /*
3a34db9b2SIngo Molnar  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4a34db9b2SIngo Molnar  * Copyright (C) 2005-2006 Thomas Gleixner
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  * This file contains driver APIs to the irq subsystem.
71da177e4SLinus Torvalds  */
81da177e4SLinus Torvalds 
997fd75b7SAndrew Morton #define pr_fmt(fmt) "genirq: " fmt
1097fd75b7SAndrew Morton 
111da177e4SLinus Torvalds #include <linux/irq.h>
123aa551c9SThomas Gleixner #include <linux/kthread.h>
131da177e4SLinus Torvalds #include <linux/module.h>
141da177e4SLinus Torvalds #include <linux/random.h>
151da177e4SLinus Torvalds #include <linux/interrupt.h>
164001d8e8SThomas Gleixner #include <linux/irqdomain.h>
171aeb272cSRobert P. J. Day #include <linux/slab.h>
183aa551c9SThomas Gleixner #include <linux/sched.h>
198bd75c77SClark Williams #include <linux/sched/rt.h>
200881e7bdSIngo Molnar #include <linux/sched/task.h>
2111ea68f5SMing Lei #include <linux/sched/isolation.h>
22ae7e81c0SIngo Molnar #include <uapi/linux/sched/types.h>
234d1d61a6SOleg Nesterov #include <linux/task_work.h>
241da177e4SLinus Torvalds 
251da177e4SLinus Torvalds #include "internals.h"
261da177e4SLinus Torvalds 
27b6a32bbdSThomas Gleixner #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
288d32a307SThomas Gleixner __read_mostly bool force_irqthreads;
2947b82e88SSebastian Andrzej Siewior EXPORT_SYMBOL_GPL(force_irqthreads);
308d32a307SThomas Gleixner 
318d32a307SThomas Gleixner static int __init setup_forced_irqthreads(char *arg)
328d32a307SThomas Gleixner {
338d32a307SThomas Gleixner 	force_irqthreads = true;
348d32a307SThomas Gleixner 	return 0;
358d32a307SThomas Gleixner }
368d32a307SThomas Gleixner early_param("threadirqs", setup_forced_irqthreads);
378d32a307SThomas Gleixner #endif
388d32a307SThomas Gleixner 
3962e04686SThomas Gleixner static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
401da177e4SLinus Torvalds {
4162e04686SThomas Gleixner 	struct irq_data *irqd = irq_desc_get_irq_data(desc);
4232f4125eSThomas Gleixner 	bool inprogress;
431da177e4SLinus Torvalds 
44a98ce5c6SHerbert Xu 	do {
45a98ce5c6SHerbert Xu 		unsigned long flags;
46a98ce5c6SHerbert Xu 
47a98ce5c6SHerbert Xu 		/*
48a98ce5c6SHerbert Xu 		 * Wait until we're out of the critical section.  This might
49a98ce5c6SHerbert Xu 		 * give the wrong answer due to the lack of memory barriers.
50a98ce5c6SHerbert Xu 		 */
5132f4125eSThomas Gleixner 		while (irqd_irq_inprogress(&desc->irq_data))
521da177e4SLinus Torvalds 			cpu_relax();
53a98ce5c6SHerbert Xu 
54a98ce5c6SHerbert Xu 		/* Ok, that indicated we're done: double-check carefully. */
55239007b8SThomas Gleixner 		raw_spin_lock_irqsave(&desc->lock, flags);
5632f4125eSThomas Gleixner 		inprogress = irqd_irq_inprogress(&desc->irq_data);
5762e04686SThomas Gleixner 
5862e04686SThomas Gleixner 		/*
5962e04686SThomas Gleixner 		 * If requested and supported, check at the chip whether it
6062e04686SThomas Gleixner 		 * is in flight at the hardware level, i.e. already pending
6162e04686SThomas Gleixner 		 * in a CPU and waiting for service and acknowledge.
6262e04686SThomas Gleixner 		 */
6362e04686SThomas Gleixner 		if (!inprogress && sync_chip) {
6462e04686SThomas Gleixner 			/*
6562e04686SThomas Gleixner 			 * Ignore the return code. inprogress is only updated
6662e04686SThomas Gleixner 			 * when the chip supports it.
6762e04686SThomas Gleixner 			 */
6862e04686SThomas Gleixner 			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
6962e04686SThomas Gleixner 						&inprogress);
7062e04686SThomas Gleixner 		}
71239007b8SThomas Gleixner 		raw_spin_unlock_irqrestore(&desc->lock, flags);
72a98ce5c6SHerbert Xu 
73a98ce5c6SHerbert Xu 		/* Oops, that failed? */
7432f4125eSThomas Gleixner 	} while (inprogress);
7518258f72SThomas Gleixner }
763aa551c9SThomas Gleixner 
7718258f72SThomas Gleixner /**
7818258f72SThomas Gleixner  *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
7918258f72SThomas Gleixner  *	@irq: interrupt number to wait for
8018258f72SThomas Gleixner  *
8118258f72SThomas Gleixner  *	This function waits for any pending hard IRQ handlers for this
8218258f72SThomas Gleixner  *	interrupt to complete before returning. If you use this
8318258f72SThomas Gleixner  *	function while holding a resource the IRQ handler may need, you
8418258f72SThomas Gleixner  *	will deadlock. It does not take associated threaded handlers
8518258f72SThomas Gleixner  *	into account.
8618258f72SThomas Gleixner  *
8718258f72SThomas Gleixner  *	Do not use this for shutdown scenarios where you must be sure
8818258f72SThomas Gleixner  *	that all parts (hardirq and threaded handler) have completed.
8918258f72SThomas Gleixner  *
9002cea395SPeter Zijlstra  *	Returns: false if a threaded handler is active.
9102cea395SPeter Zijlstra  *
9218258f72SThomas Gleixner  *	This function may be called - with care - from IRQ context.
9362e04686SThomas Gleixner  *
9462e04686SThomas Gleixner  *	It does not check whether there is an interrupt in flight at the
9562e04686SThomas Gleixner  *	hardware level, but not serviced yet, as this might deadlock when
9662e04686SThomas Gleixner  *	called with interrupts disabled and the target CPU of the interrupt
9762e04686SThomas Gleixner  *	is the current CPU.
983aa551c9SThomas Gleixner  */
9902cea395SPeter Zijlstra bool synchronize_hardirq(unsigned int irq)
10018258f72SThomas Gleixner {
10118258f72SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
10218258f72SThomas Gleixner 
10302cea395SPeter Zijlstra 	if (desc) {
10462e04686SThomas Gleixner 		__synchronize_hardirq(desc, false);
10502cea395SPeter Zijlstra 		return !atomic_read(&desc->threads_active);
10602cea395SPeter Zijlstra 	}
10702cea395SPeter Zijlstra 
10802cea395SPeter Zijlstra 	return true;
10918258f72SThomas Gleixner }
11018258f72SThomas Gleixner EXPORT_SYMBOL(synchronize_hardirq);
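
/*
 * Illustrative sketch (not part of the upstream file): a driver that only
 * shares state with its hard IRQ handler can quiesce it from atomic
 * context with synchronize_hardirq().  The structure and its members are
 * hypothetical.
 */
struct example_hardirq_dev {
	unsigned int irq;
	bool stopping;			/* checked by the hard IRQ handler */
};

static void __maybe_unused example_quiesce_hardirq(struct example_hardirq_dev *ed)
{
	ed->stopping = true;
	/*
	 * Waits only for hard IRQ handlers on other CPUs; a threaded
	 * handler may still be running, so this is not a full shutdown
	 * synchronization (use synchronize_irq() for that).
	 */
	synchronize_hardirq(ed->irq);
}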
11118258f72SThomas Gleixner 
11218258f72SThomas Gleixner /**
11318258f72SThomas Gleixner  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
11418258f72SThomas Gleixner  *	@irq: interrupt number to wait for
11518258f72SThomas Gleixner  *
11618258f72SThomas Gleixner  *	This function waits for any pending IRQ handlers for this interrupt
11718258f72SThomas Gleixner  *	to complete before returning. If you use this function while
11818258f72SThomas Gleixner  *	holding a resource the IRQ handler may need, you will deadlock.
11918258f72SThomas Gleixner  *
1201d21f2afSThomas Gleixner  *	Can only be called from preemptible code as it might sleep when
1211d21f2afSThomas Gleixner  *	an interrupt thread is associated with @irq.
12262e04686SThomas Gleixner  *
12362e04686SThomas Gleixner  *	It optionally makes sure (when the irq chip supports that method)
12462e04686SThomas Gleixner  *	that the interrupt is not pending in any CPU and waiting for
12562e04686SThomas Gleixner  *	service.
12618258f72SThomas Gleixner  */
12718258f72SThomas Gleixner void synchronize_irq(unsigned int irq)
12818258f72SThomas Gleixner {
12918258f72SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
13018258f72SThomas Gleixner 
13118258f72SThomas Gleixner 	if (desc) {
13262e04686SThomas Gleixner 		__synchronize_hardirq(desc, true);
13318258f72SThomas Gleixner 		/*
13418258f72SThomas Gleixner 		 * We made sure that no hardirq handler is
13518258f72SThomas Gleixner 		 * running. Now verify that no threaded handlers are
13618258f72SThomas Gleixner 		 * active.
13718258f72SThomas Gleixner 		 */
13818258f72SThomas Gleixner 		wait_event(desc->wait_for_threads,
13918258f72SThomas Gleixner 			   !atomic_read(&desc->threads_active));
14018258f72SThomas Gleixner 	}
1411da177e4SLinus Torvalds }
1421da177e4SLinus Torvalds EXPORT_SYMBOL(synchronize_irq);
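
/*
 * Illustrative sketch (not part of the upstream file): typical teardown
 * ordering around synchronize_irq().  The flag and function names are
 * hypothetical.
 */
static bool example_stopping;

static void __maybe_unused example_teardown(unsigned int irq, void *dev_id)
{
	example_stopping = true;	/* observed by the handlers */
	synchronize_irq(irq);		/* may sleep: waits for hardirq and irq thread */
	/* no handler is running anymore; safe to release shared resources */
	free_irq(irq, dev_id);
}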
1431da177e4SLinus Torvalds 
1443aa551c9SThomas Gleixner #ifdef CONFIG_SMP
1453aa551c9SThomas Gleixner cpumask_var_t irq_default_affinity;
1463aa551c9SThomas Gleixner 
1479c255583SThomas Gleixner static bool __irq_can_set_affinity(struct irq_desc *desc)
148e019c249SJiang Liu {
149e019c249SJiang Liu 	if (!desc || !irqd_can_balance(&desc->irq_data) ||
150e019c249SJiang Liu 	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
1519c255583SThomas Gleixner 		return false;
1529c255583SThomas Gleixner 	return true;
153e019c249SJiang Liu }
154e019c249SJiang Liu 
155771ee3b0SThomas Gleixner /**
156771ee3b0SThomas Gleixner  *	irq_can_set_affinity - Check if the affinity of a given irq can be set
157771ee3b0SThomas Gleixner  *	@irq:		Interrupt to check
158771ee3b0SThomas Gleixner  *
159771ee3b0SThomas Gleixner  */
160771ee3b0SThomas Gleixner int irq_can_set_affinity(unsigned int irq)
161771ee3b0SThomas Gleixner {
162e019c249SJiang Liu 	return __irq_can_set_affinity(irq_to_desc(irq));
163771ee3b0SThomas Gleixner }
164771ee3b0SThomas Gleixner 
165591d2fb0SThomas Gleixner /**
1669c255583SThomas Gleixner  * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
1679c255583SThomas Gleixner  * @irq:	Interrupt to check
1689c255583SThomas Gleixner  *
1699c255583SThomas Gleixner  * Like irq_can_set_affinity() above, but additionally checks for the
1709c255583SThomas Gleixner  * AFFINITY_MANAGED flag.
1719c255583SThomas Gleixner  */
1729c255583SThomas Gleixner bool irq_can_set_affinity_usr(unsigned int irq)
1739c255583SThomas Gleixner {
1749c255583SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
1759c255583SThomas Gleixner 
1769c255583SThomas Gleixner 	return __irq_can_set_affinity(desc) &&
1779c255583SThomas Gleixner 		!irqd_affinity_is_managed(&desc->irq_data);
1789c255583SThomas Gleixner }
1799c255583SThomas Gleixner 
1809c255583SThomas Gleixner /**
181591d2fb0SThomas Gleixner  *	irq_set_thread_affinity - Notify irq threads to adjust affinity
182591d2fb0SThomas Gleixner  *	@desc:		irq descriptor whose affinity has changed
183591d2fb0SThomas Gleixner  *
184591d2fb0SThomas Gleixner  *	We just set IRQTF_AFFINITY and delegate the affinity setting
185591d2fb0SThomas Gleixner  *	to the interrupt thread itself. We cannot call
186591d2fb0SThomas Gleixner  *	set_cpus_allowed_ptr() here as we hold desc->lock and this
187591d2fb0SThomas Gleixner  *	code can be called from hard interrupt context.
188591d2fb0SThomas Gleixner  */
189591d2fb0SThomas Gleixner void irq_set_thread_affinity(struct irq_desc *desc)
1903aa551c9SThomas Gleixner {
191f944b5a7SDaniel Lezcano 	struct irqaction *action;
1923aa551c9SThomas Gleixner 
193f944b5a7SDaniel Lezcano 	for_each_action_of_desc(desc, action)
1943aa551c9SThomas Gleixner 		if (action->thread)
195591d2fb0SThomas Gleixner 			set_bit(IRQTF_AFFINITY, &action->thread_flags);
1963aa551c9SThomas Gleixner }
1973aa551c9SThomas Gleixner 
19819e1d4e9SThomas Gleixner static void irq_validate_effective_affinity(struct irq_data *data)
19919e1d4e9SThomas Gleixner {
20019e1d4e9SThomas Gleixner #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
20119e1d4e9SThomas Gleixner 	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
20219e1d4e9SThomas Gleixner 	struct irq_chip *chip = irq_data_get_irq_chip(data);
20319e1d4e9SThomas Gleixner 
20419e1d4e9SThomas Gleixner 	if (!cpumask_empty(m))
20519e1d4e9SThomas Gleixner 		return;
20619e1d4e9SThomas Gleixner 	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
20719e1d4e9SThomas Gleixner 		     chip->name, data->irq);
20819e1d4e9SThomas Gleixner #endif
20919e1d4e9SThomas Gleixner }
21019e1d4e9SThomas Gleixner 
211818b0f3bSJiang Liu int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
212818b0f3bSJiang Liu 			bool force)
213818b0f3bSJiang Liu {
214818b0f3bSJiang Liu 	struct irq_desc *desc = irq_data_to_desc(data);
215818b0f3bSJiang Liu 	struct irq_chip *chip = irq_data_get_irq_chip(data);
216818b0f3bSJiang Liu 	int ret;
217818b0f3bSJiang Liu 
218e43b3b58SThomas Gleixner 	if (!chip || !chip->irq_set_affinity)
219e43b3b58SThomas Gleixner 		return -EINVAL;
220e43b3b58SThomas Gleixner 
22111ea68f5SMing Lei 	/*
22211ea68f5SMing Lei 	 * If this is a managed interrupt and housekeeping is enabled on
22311ea68f5SMing Lei 	 * it, check whether the requested affinity mask intersects with
22411ea68f5SMing Lei 	 * a housekeeping CPU. If so, then remove the isolated CPUs from
22511ea68f5SMing Lei 	 * the mask and just keep the housekeeping CPU(s). This prevents
22611ea68f5SMing Lei 	 * the affinity setter from routing the interrupt to an isolated
22711ea68f5SMing Lei 	 * CPU to avoid that I/O submitted from a housekeeping CPU causes
22811ea68f5SMing Lei 	 * interrupts on an isolated one.
22911ea68f5SMing Lei 	 *
23011ea68f5SMing Lei 	 * If the masks do not intersect or include online CPU(s) then
23111ea68f5SMing Lei 	 * keep the requested mask. The isolated target CPUs are only
23211ea68f5SMing Lei 	 * receiving interrupts when the I/O operation was submitted
23311ea68f5SMing Lei 	 * directly from them.
23411ea68f5SMing Lei 	 *
23511ea68f5SMing Lei 	 * If all housekeeping CPUs in the affinity mask are offline, the
23611ea68f5SMing Lei 	 * interrupt will be migrated by the CPU hotplug code once a
23711ea68f5SMing Lei 	 * housekeeping CPU which belongs to the affinity mask comes
23811ea68f5SMing Lei 	 * online.
23911ea68f5SMing Lei 	 */
24011ea68f5SMing Lei 	if (irqd_affinity_is_managed(data) &&
24111ea68f5SMing Lei 	    housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
24211ea68f5SMing Lei 		const struct cpumask *hk_mask, *prog_mask;
24311ea68f5SMing Lei 
24411ea68f5SMing Lei 		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
24511ea68f5SMing Lei 		static struct cpumask tmp_mask;
24611ea68f5SMing Lei 
24711ea68f5SMing Lei 		hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
24811ea68f5SMing Lei 
24911ea68f5SMing Lei 		raw_spin_lock(&tmp_mask_lock);
25011ea68f5SMing Lei 		cpumask_and(&tmp_mask, mask, hk_mask);
25111ea68f5SMing Lei 		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
25211ea68f5SMing Lei 			prog_mask = mask;
25311ea68f5SMing Lei 		else
25411ea68f5SMing Lei 			prog_mask = &tmp_mask;
25511ea68f5SMing Lei 		ret = chip->irq_set_affinity(data, prog_mask, force);
25611ea68f5SMing Lei 		raw_spin_unlock(&tmp_mask_lock);
25711ea68f5SMing Lei 	} else {
25801f8fa4fSThomas Gleixner 		ret = chip->irq_set_affinity(data, mask, force);
25911ea68f5SMing Lei 	}
260818b0f3bSJiang Liu 	switch (ret) {
261818b0f3bSJiang Liu 	case IRQ_SET_MASK_OK:
2622cb62547SJiang Liu 	case IRQ_SET_MASK_OK_DONE:
2639df872faSJiang Liu 		cpumask_copy(desc->irq_common_data.affinity, mask);
26493417a3fSGustavo A. R. Silva 		/* fall through */
265818b0f3bSJiang Liu 	case IRQ_SET_MASK_OK_NOCOPY:
26619e1d4e9SThomas Gleixner 		irq_validate_effective_affinity(data);
267818b0f3bSJiang Liu 		irq_set_thread_affinity(desc);
268818b0f3bSJiang Liu 		ret = 0;
269818b0f3bSJiang Liu 	}
270818b0f3bSJiang Liu 
271818b0f3bSJiang Liu 	return ret;
272818b0f3bSJiang Liu }
273818b0f3bSJiang Liu 
27412f47073SThomas Gleixner #ifdef CONFIG_GENERIC_PENDING_IRQ
27512f47073SThomas Gleixner static inline int irq_set_affinity_pending(struct irq_data *data,
27612f47073SThomas Gleixner 					   const struct cpumask *dest)
27712f47073SThomas Gleixner {
27812f47073SThomas Gleixner 	struct irq_desc *desc = irq_data_to_desc(data);
27912f47073SThomas Gleixner 
28012f47073SThomas Gleixner 	irqd_set_move_pending(data);
28112f47073SThomas Gleixner 	irq_copy_pending(desc, dest);
28212f47073SThomas Gleixner 	return 0;
28312f47073SThomas Gleixner }
28412f47073SThomas Gleixner #else
28512f47073SThomas Gleixner static inline int irq_set_affinity_pending(struct irq_data *data,
28612f47073SThomas Gleixner 					   const struct cpumask *dest)
28712f47073SThomas Gleixner {
28812f47073SThomas Gleixner 	return -EBUSY;
28912f47073SThomas Gleixner }
29012f47073SThomas Gleixner #endif
29112f47073SThomas Gleixner 
29212f47073SThomas Gleixner static int irq_try_set_affinity(struct irq_data *data,
29312f47073SThomas Gleixner 				const struct cpumask *dest, bool force)
29412f47073SThomas Gleixner {
29512f47073SThomas Gleixner 	int ret = irq_do_set_affinity(data, dest, force);
29612f47073SThomas Gleixner 
29712f47073SThomas Gleixner 	/*
29812f47073SThomas Gleixner 	 * In case that the underlying vector management is busy and the
29912f47073SThomas Gleixner 	 * architecture supports the generic pending mechanism then utilize
30012f47073SThomas Gleixner 	 * this to avoid returning an error to user space.
30112f47073SThomas Gleixner 	 */
30212f47073SThomas Gleixner 	if (ret == -EBUSY && !force)
30312f47073SThomas Gleixner 		ret = irq_set_affinity_pending(data, dest);
30412f47073SThomas Gleixner 	return ret;
30512f47073SThomas Gleixner }
30612f47073SThomas Gleixner 
30701f8fa4fSThomas Gleixner int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
30801f8fa4fSThomas Gleixner 			    bool force)
309c2d0c555SDavid Daney {
310c2d0c555SDavid Daney 	struct irq_chip *chip = irq_data_get_irq_chip(data);
311c2d0c555SDavid Daney 	struct irq_desc *desc = irq_data_to_desc(data);
312c2d0c555SDavid Daney 	int ret = 0;
313c2d0c555SDavid Daney 
314c2d0c555SDavid Daney 	if (!chip || !chip->irq_set_affinity)
315c2d0c555SDavid Daney 		return -EINVAL;
316c2d0c555SDavid Daney 
31712f47073SThomas Gleixner 	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
31812f47073SThomas Gleixner 		ret = irq_try_set_affinity(data, mask, force);
319c2d0c555SDavid Daney 	} else {
320c2d0c555SDavid Daney 		irqd_set_move_pending(data);
321c2d0c555SDavid Daney 		irq_copy_pending(desc, mask);
322c2d0c555SDavid Daney 	}
323c2d0c555SDavid Daney 
324c2d0c555SDavid Daney 	if (desc->affinity_notify) {
325c2d0c555SDavid Daney 		kref_get(&desc->affinity_notify->kref);
326df81dfcfSEdward Cree 		if (!schedule_work(&desc->affinity_notify->work)) {
327df81dfcfSEdward Cree 			/* Work was already scheduled, drop our extra ref */
328df81dfcfSEdward Cree 			kref_put(&desc->affinity_notify->kref,
329df81dfcfSEdward Cree 				 desc->affinity_notify->release);
330df81dfcfSEdward Cree 		}
331c2d0c555SDavid Daney 	}
332c2d0c555SDavid Daney 	irqd_set(data, IRQD_AFFINITY_SET);
333c2d0c555SDavid Daney 
334c2d0c555SDavid Daney 	return ret;
335c2d0c555SDavid Daney }
336c2d0c555SDavid Daney 
33701f8fa4fSThomas Gleixner int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
338771ee3b0SThomas Gleixner {
33908678b08SYinghai Lu 	struct irq_desc *desc = irq_to_desc(irq);
340f6d87f4bSThomas Gleixner 	unsigned long flags;
341c2d0c555SDavid Daney 	int ret;
342771ee3b0SThomas Gleixner 
343c2d0c555SDavid Daney 	if (!desc)
344771ee3b0SThomas Gleixner 		return -EINVAL;
345771ee3b0SThomas Gleixner 
346239007b8SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
34701f8fa4fSThomas Gleixner 	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
348239007b8SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
3491fa46f1fSThomas Gleixner 	return ret;
350771ee3b0SThomas Gleixner }
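
/*
 * Illustrative sketch (not part of the upstream file): drivers normally go
 * through the irq_set_affinity()/irq_force_affinity() wrappers declared in
 * <linux/interrupt.h>, which end up in __irq_set_affinity().  The helper
 * below is hypothetical.
 */
static int __maybe_unused example_pin_irq_to_cpu(unsigned int irq, int cpu)
{
	return irq_set_affinity(irq, cpumask_of(cpu));
}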
351771ee3b0SThomas Gleixner 
352e7a297b0SPeter P Waskiewicz Jr int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
353e7a297b0SPeter P Waskiewicz Jr {
354e7a297b0SPeter P Waskiewicz Jr 	unsigned long flags;
35531d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
356e7a297b0SPeter P Waskiewicz Jr 
357e7a297b0SPeter P Waskiewicz Jr 	if (!desc)
358e7a297b0SPeter P Waskiewicz Jr 		return -EINVAL;
359e7a297b0SPeter P Waskiewicz Jr 	desc->affinity_hint = m;
36002725e74SThomas Gleixner 	irq_put_desc_unlock(desc, flags);
361e2e64a93SJesse Brandeburg 	/* set the initial affinity to prevent every interrupt being on CPU0 */
3624fe7ffb7SJesse Brandeburg 	if (m)
363e2e64a93SJesse Brandeburg 		__irq_set_affinity(irq, m, false);
364e7a297b0SPeter P Waskiewicz Jr 	return 0;
365e7a297b0SPeter P Waskiewicz Jr }
366e7a297b0SPeter P Waskiewicz Jr EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
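
/*
 * Illustrative sketch (not part of the upstream file): a multi-queue driver
 * spreading affinity hints across CPUs.  The vector array and helper name
 * are hypothetical; the hint should be reset to NULL before the interrupt
 * is freed.
 */
static void __maybe_unused example_spread_hints(unsigned int *irqs, unsigned int nvec)
{
	unsigned int i;

	for (i = 0; i < nvec; i++)
		irq_set_affinity_hint(irqs[i],
				      cpumask_of(cpumask_local_spread(i, NUMA_NO_NODE)));
}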
367e7a297b0SPeter P Waskiewicz Jr 
368cd7eab44SBen Hutchings static void irq_affinity_notify(struct work_struct *work)
369cd7eab44SBen Hutchings {
370cd7eab44SBen Hutchings 	struct irq_affinity_notify *notify =
371cd7eab44SBen Hutchings 		container_of(work, struct irq_affinity_notify, work);
372cd7eab44SBen Hutchings 	struct irq_desc *desc = irq_to_desc(notify->irq);
373cd7eab44SBen Hutchings 	cpumask_var_t cpumask;
374cd7eab44SBen Hutchings 	unsigned long flags;
375cd7eab44SBen Hutchings 
3761fa46f1fSThomas Gleixner 	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
377cd7eab44SBen Hutchings 		goto out;
378cd7eab44SBen Hutchings 
379cd7eab44SBen Hutchings 	raw_spin_lock_irqsave(&desc->lock, flags);
3800ef5ca1eSThomas Gleixner 	if (irq_move_pending(&desc->irq_data))
3811fa46f1fSThomas Gleixner 		irq_get_pending(cpumask, desc);
382cd7eab44SBen Hutchings 	else
3839df872faSJiang Liu 		cpumask_copy(cpumask, desc->irq_common_data.affinity);
384cd7eab44SBen Hutchings 	raw_spin_unlock_irqrestore(&desc->lock, flags);
385cd7eab44SBen Hutchings 
386cd7eab44SBen Hutchings 	notify->notify(notify, cpumask);
387cd7eab44SBen Hutchings 
388cd7eab44SBen Hutchings 	free_cpumask_var(cpumask);
389cd7eab44SBen Hutchings out:
390cd7eab44SBen Hutchings 	kref_put(&notify->kref, notify->release);
391cd7eab44SBen Hutchings }
392cd7eab44SBen Hutchings 
393cd7eab44SBen Hutchings /**
394cd7eab44SBen Hutchings  *	irq_set_affinity_notifier - control notification of IRQ affinity changes
395cd7eab44SBen Hutchings  *	@irq:		Interrupt for which to enable/disable notification
396cd7eab44SBen Hutchings  *	@notify:	Context for notification, or %NULL to disable
397cd7eab44SBen Hutchings  *			notification.  Function pointers must be initialised;
398cd7eab44SBen Hutchings  *			the other fields will be initialised by this function.
399cd7eab44SBen Hutchings  *
400cd7eab44SBen Hutchings  *	Must be called in process context.  Notification may only be enabled
401cd7eab44SBen Hutchings  *	after the IRQ is allocated and must be disabled before the IRQ is
402cd7eab44SBen Hutchings  *	freed using free_irq().
403cd7eab44SBen Hutchings  */
404cd7eab44SBen Hutchings int
405cd7eab44SBen Hutchings irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
406cd7eab44SBen Hutchings {
407cd7eab44SBen Hutchings 	struct irq_desc *desc = irq_to_desc(irq);
408cd7eab44SBen Hutchings 	struct irq_affinity_notify *old_notify;
409cd7eab44SBen Hutchings 	unsigned long flags;
410cd7eab44SBen Hutchings 
411cd7eab44SBen Hutchings 	/* The release function is promised to run in process context */
412cd7eab44SBen Hutchings 	might_sleep();
413cd7eab44SBen Hutchings 
414b525903cSJulien Thierry 	if (!desc || desc->istate & IRQS_NMI)
415cd7eab44SBen Hutchings 		return -EINVAL;
416cd7eab44SBen Hutchings 
417cd7eab44SBen Hutchings 	/* Complete initialisation of *notify */
418cd7eab44SBen Hutchings 	if (notify) {
419cd7eab44SBen Hutchings 		notify->irq = irq;
420cd7eab44SBen Hutchings 		kref_init(&notify->kref);
421cd7eab44SBen Hutchings 		INIT_WORK(&notify->work, irq_affinity_notify);
422cd7eab44SBen Hutchings 	}
423cd7eab44SBen Hutchings 
424cd7eab44SBen Hutchings 	raw_spin_lock_irqsave(&desc->lock, flags);
425cd7eab44SBen Hutchings 	old_notify = desc->affinity_notify;
426cd7eab44SBen Hutchings 	desc->affinity_notify = notify;
427cd7eab44SBen Hutchings 	raw_spin_unlock_irqrestore(&desc->lock, flags);
428cd7eab44SBen Hutchings 
42959c39840SPrasad Sodagudi 	if (old_notify) {
430df81dfcfSEdward Cree 		if (cancel_work_sync(&old_notify->work)) {
431df81dfcfSEdward Cree 			/* Pending work had a ref, put that one too */
432df81dfcfSEdward Cree 			kref_put(&old_notify->kref, old_notify->release);
433df81dfcfSEdward Cree 		}
434cd7eab44SBen Hutchings 		kref_put(&old_notify->kref, old_notify->release);
43559c39840SPrasad Sodagudi 	}
436cd7eab44SBen Hutchings 
437cd7eab44SBen Hutchings 	return 0;
438cd7eab44SBen Hutchings }
439cd7eab44SBen Hutchings EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
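
/*
 * Illustrative sketch (not part of the upstream file): registering an
 * affinity notifier.  The embedding structure and callbacks are
 * hypothetical; only the irq_affinity_notify API usage is the point here.
 */
struct example_queue {
	struct irq_affinity_notify affinity_notify;
	unsigned int irq;
};

static void example_affinity_changed(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	/* Runs from a workqueue; re-target per-queue resources to @mask. */
}

static void example_affinity_release(struct kref *ref)
{
	/* Last reference dropped; nothing to free in this sketch. */
}

static int __maybe_unused example_register_notifier(struct example_queue *q)
{
	q->affinity_notify.notify = example_affinity_changed;
	q->affinity_notify.release = example_affinity_release;
	return irq_set_affinity_notifier(q->irq, &q->affinity_notify);
}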
440cd7eab44SBen Hutchings 
44118404756SMax Krasnyansky #ifndef CONFIG_AUTO_IRQ_AFFINITY
44218404756SMax Krasnyansky /*
44318404756SMax Krasnyansky  * Generic version of the affinity autoselector.
44418404756SMax Krasnyansky  */
44543564bd9SThomas Gleixner int irq_setup_affinity(struct irq_desc *desc)
44618404756SMax Krasnyansky {
447569bda8dSThomas Gleixner 	struct cpumask *set = irq_default_affinity;
448cba4235eSThomas Gleixner 	int ret, node = irq_desc_get_node(desc);
449cba4235eSThomas Gleixner 	static DEFINE_RAW_SPINLOCK(mask_lock);
450cba4235eSThomas Gleixner 	static struct cpumask mask;
451569bda8dSThomas Gleixner 
452b008207cSThomas Gleixner 	/* Excludes PER_CPU and NO_BALANCE interrupts */
453e019c249SJiang Liu 	if (!__irq_can_set_affinity(desc))
45418404756SMax Krasnyansky 		return 0;
45518404756SMax Krasnyansky 
456cba4235eSThomas Gleixner 	raw_spin_lock(&mask_lock);
457f6d87f4bSThomas Gleixner 	/*
4589332ef9dSMasahiro Yamada 	 * Preserve the managed affinity setting and a userspace affinity
45906ee6d57SThomas Gleixner 	 * setup, but make sure that one of the targets is online.
460f6d87f4bSThomas Gleixner 	 */
46106ee6d57SThomas Gleixner 	if (irqd_affinity_is_managed(&desc->irq_data) ||
46206ee6d57SThomas Gleixner 	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
4639df872faSJiang Liu 		if (cpumask_intersects(desc->irq_common_data.affinity,
464569bda8dSThomas Gleixner 				       cpu_online_mask))
4659df872faSJiang Liu 			set = desc->irq_common_data.affinity;
4660c6f8a8bSThomas Gleixner 		else
4672bdd1055SThomas Gleixner 			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
4682bdd1055SThomas Gleixner 	}
46918404756SMax Krasnyansky 
470cba4235eSThomas Gleixner 	cpumask_and(&mask, cpu_online_mask, set);
471bddda606SSrinivas Ramana 	if (cpumask_empty(&mask))
472bddda606SSrinivas Ramana 		cpumask_copy(&mask, cpu_online_mask);
473bddda606SSrinivas Ramana 
474241fc640SPrarit Bhargava 	if (node != NUMA_NO_NODE) {
475241fc640SPrarit Bhargava 		const struct cpumask *nodemask = cpumask_of_node(node);
476241fc640SPrarit Bhargava 
477241fc640SPrarit Bhargava 		/* make sure at least one of the cpus in nodemask is online */
478cba4235eSThomas Gleixner 		if (cpumask_intersects(&mask, nodemask))
479cba4235eSThomas Gleixner 			cpumask_and(&mask, &mask, nodemask);
480241fc640SPrarit Bhargava 	}
481cba4235eSThomas Gleixner 	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
482cba4235eSThomas Gleixner 	raw_spin_unlock(&mask_lock);
483cba4235eSThomas Gleixner 	return ret;
48418404756SMax Krasnyansky }
485f6d87f4bSThomas Gleixner #else
486a8a98eacSJiang Liu /* Wrapper for ALPHA specific affinity selector magic */
487cba4235eSThomas Gleixner int irq_setup_affinity(struct irq_desc *desc)
488f6d87f4bSThomas Gleixner {
489cba4235eSThomas Gleixner 	return irq_select_affinity(irq_desc_get_irq(desc));
490f6d87f4bSThomas Gleixner }
491cba6437aSThomas Gleixner #endif /* CONFIG_AUTO_IRQ_AFFINITY */
492cba6437aSThomas Gleixner #endif /* CONFIG_SMP */
49318404756SMax Krasnyansky 
4941da177e4SLinus Torvalds 
495fcf1ae2fSFeng Wu /**
496fcf1ae2fSFeng Wu  *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
497fcf1ae2fSFeng Wu  *	@irq: interrupt number to set affinity
498250a53d6SChristoffer Dall  *	@vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
499250a53d6SChristoffer Dall  *	            specific data for percpu_devid interrupts
500fcf1ae2fSFeng Wu  *
501fcf1ae2fSFeng Wu  *	This function uses the vCPU specific data to set the vCPU
502fcf1ae2fSFeng Wu  *	affinity for an irq. The vCPU specific data is passed from
503fcf1ae2fSFeng Wu  *	outside, such as KVM. One example code path is as below:
504fcf1ae2fSFeng Wu  *	KVM -> IOMMU -> irq_set_vcpu_affinity().
505fcf1ae2fSFeng Wu  */
506fcf1ae2fSFeng Wu int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
507fcf1ae2fSFeng Wu {
508fcf1ae2fSFeng Wu 	unsigned long flags;
509fcf1ae2fSFeng Wu 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
510fcf1ae2fSFeng Wu 	struct irq_data *data;
511fcf1ae2fSFeng Wu 	struct irq_chip *chip;
512fcf1ae2fSFeng Wu 	int ret = -ENOSYS;
513fcf1ae2fSFeng Wu 
514fcf1ae2fSFeng Wu 	if (!desc)
515fcf1ae2fSFeng Wu 		return -EINVAL;
516fcf1ae2fSFeng Wu 
517fcf1ae2fSFeng Wu 	data = irq_desc_get_irq_data(desc);
5180abce64aSMarc Zyngier 	do {
519fcf1ae2fSFeng Wu 		chip = irq_data_get_irq_chip(data);
520fcf1ae2fSFeng Wu 		if (chip && chip->irq_set_vcpu_affinity)
5210abce64aSMarc Zyngier 			break;
5220abce64aSMarc Zyngier #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
5230abce64aSMarc Zyngier 		data = data->parent_data;
5240abce64aSMarc Zyngier #else
5250abce64aSMarc Zyngier 		data = NULL;
5260abce64aSMarc Zyngier #endif
5270abce64aSMarc Zyngier 	} while (data);
5280abce64aSMarc Zyngier 
5290abce64aSMarc Zyngier 	if (data)
530fcf1ae2fSFeng Wu 		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
531fcf1ae2fSFeng Wu 	irq_put_desc_unlock(desc, flags);
532fcf1ae2fSFeng Wu 
533fcf1ae2fSFeng Wu 	return ret;
534fcf1ae2fSFeng Wu }
535fcf1ae2fSFeng Wu EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
536fcf1ae2fSFeng Wu 
53779ff1cdaSJiang Liu void __disable_irq(struct irq_desc *desc)
5380a0c5168SRafael J. Wysocki {
5393aae994fSThomas Gleixner 	if (!desc->depth++)
54087923470SThomas Gleixner 		irq_disable(desc);
5410a0c5168SRafael J. Wysocki }
5420a0c5168SRafael J. Wysocki 
54302725e74SThomas Gleixner static int __disable_irq_nosync(unsigned int irq)
54402725e74SThomas Gleixner {
54502725e74SThomas Gleixner 	unsigned long flags;
54631d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
54702725e74SThomas Gleixner 
54802725e74SThomas Gleixner 	if (!desc)
54902725e74SThomas Gleixner 		return -EINVAL;
55079ff1cdaSJiang Liu 	__disable_irq(desc);
55102725e74SThomas Gleixner 	irq_put_desc_busunlock(desc, flags);
55202725e74SThomas Gleixner 	return 0;
55302725e74SThomas Gleixner }
55402725e74SThomas Gleixner 
5551da177e4SLinus Torvalds /**
5561da177e4SLinus Torvalds  *	disable_irq_nosync - disable an irq without waiting
5571da177e4SLinus Torvalds  *	@irq: Interrupt to disable
5581da177e4SLinus Torvalds  *
5591da177e4SLinus Torvalds  *	Disable the selected interrupt line.  Disables and Enables are
5601da177e4SLinus Torvalds  *	nested.
5611da177e4SLinus Torvalds  *	Unlike disable_irq(), this function does not ensure existing
5621da177e4SLinus Torvalds  *	instances of the IRQ handler have completed before returning.
5631da177e4SLinus Torvalds  *
5641da177e4SLinus Torvalds  *	This function may be called from IRQ context.
5651da177e4SLinus Torvalds  */
5661da177e4SLinus Torvalds void disable_irq_nosync(unsigned int irq)
5671da177e4SLinus Torvalds {
56802725e74SThomas Gleixner 	__disable_irq_nosync(irq);
5691da177e4SLinus Torvalds }
5701da177e4SLinus Torvalds EXPORT_SYMBOL(disable_irq_nosync);
5711da177e4SLinus Torvalds 
5721da177e4SLinus Torvalds /**
5731da177e4SLinus Torvalds  *	disable_irq - disable an irq and wait for completion
5741da177e4SLinus Torvalds  *	@irq: Interrupt to disable
5751da177e4SLinus Torvalds  *
5761da177e4SLinus Torvalds  *	Disable the selected interrupt line.  Enables and Disables are
5771da177e4SLinus Torvalds  *	nested.
5781da177e4SLinus Torvalds  *	This function waits for any pending IRQ handlers for this interrupt
5791da177e4SLinus Torvalds  *	to complete before returning. If you use this function while
5801da177e4SLinus Torvalds  *	holding a resource the IRQ handler may need, you will deadlock.
5811da177e4SLinus Torvalds  *
5821da177e4SLinus Torvalds  *	This function may be called - with care - from IRQ context.
5831da177e4SLinus Torvalds  */
5841da177e4SLinus Torvalds void disable_irq(unsigned int irq)
5851da177e4SLinus Torvalds {
58602725e74SThomas Gleixner 	if (!__disable_irq_nosync(irq))
5871da177e4SLinus Torvalds 		synchronize_irq(irq);
5881da177e4SLinus Torvalds }
5891da177e4SLinus Torvalds EXPORT_SYMBOL(disable_irq);
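
/*
 * Illustrative sketch (not part of the upstream file): because disable_irq()
 * waits for running handlers, it must be called before taking any resource
 * the handler might need, never after.  Names are hypothetical.
 */
static void __maybe_unused example_reconfigure(unsigned int irq)
{
	disable_irq(irq);	/* may sleep; no handler runs past this point */
	/* ... reconfigure state shared with the handler ... */
	enable_irq(irq);
}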
5901da177e4SLinus Torvalds 
59102cea395SPeter Zijlstra /**
59202cea395SPeter Zijlstra  *	disable_hardirq - disables an irq and waits for hardirq completion
59302cea395SPeter Zijlstra  *	@irq: Interrupt to disable
59402cea395SPeter Zijlstra  *
59502cea395SPeter Zijlstra  *	Disable the selected interrupt line.  Enables and Disables are
59602cea395SPeter Zijlstra  *	nested.
59702cea395SPeter Zijlstra  *	This function waits for any pending hard IRQ handlers for this
59802cea395SPeter Zijlstra  *	interrupt to complete before returning. If you use this function while
59902cea395SPeter Zijlstra  *	holding a resource the hard IRQ handler may need, you will deadlock.
60002cea395SPeter Zijlstra  *
60102cea395SPeter Zijlstra  *	When used to optimistically disable an interrupt from atomic context
60202cea395SPeter Zijlstra  *	the return value must be checked.
60302cea395SPeter Zijlstra  *
60402cea395SPeter Zijlstra  *	Returns: false if a threaded handler is active.
60502cea395SPeter Zijlstra  *
60602cea395SPeter Zijlstra  *	This function may be called - with care - from IRQ context.
60702cea395SPeter Zijlstra  */
60802cea395SPeter Zijlstra bool disable_hardirq(unsigned int irq)
60902cea395SPeter Zijlstra {
61002cea395SPeter Zijlstra 	if (!__disable_irq_nosync(irq))
61102cea395SPeter Zijlstra 		return synchronize_hardirq(irq);
61202cea395SPeter Zijlstra 
61302cea395SPeter Zijlstra 	return false;
61402cea395SPeter Zijlstra }
61502cea395SPeter Zijlstra EXPORT_SYMBOL_GPL(disable_hardirq);
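
/*
 * Illustrative sketch (not part of the upstream file): optimistic disable
 * from atomic context using disable_hardirq().  If the threaded handler is
 * still active the caller backs off.  Names are hypothetical.
 */
static bool __maybe_unused example_try_quiesce(unsigned int irq)
{
	if (!disable_hardirq(irq)) {
		/* irq thread still active: undo and let the caller retry */
		enable_irq(irq);
		return false;
	}
	return true;
}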
61602cea395SPeter Zijlstra 
617b525903cSJulien Thierry /**
618b525903cSJulien Thierry  *	disable_nmi_nosync - disable an nmi without waiting
619b525903cSJulien Thierry  *	@irq: Interrupt to disable
620b525903cSJulien Thierry  *
621b525903cSJulien Thierry  *	Disable the selected interrupt line. Disables and enables are
622b525903cSJulien Thierry  *	nested.
623b525903cSJulien Thierry  *	The interrupt to disable must have been requested through request_nmi.
624b525903cSJulien Thierry  *	Unlike disable_nmi(), this function does not ensure existing
625b525903cSJulien Thierry  *	instances of the IRQ handler have completed before returning.
626b525903cSJulien Thierry  */
627b525903cSJulien Thierry void disable_nmi_nosync(unsigned int irq)
628b525903cSJulien Thierry {
629b525903cSJulien Thierry 	disable_irq_nosync(irq);
630b525903cSJulien Thierry }
631b525903cSJulien Thierry 
63279ff1cdaSJiang Liu void __enable_irq(struct irq_desc *desc)
6331adb0850SThomas Gleixner {
6341adb0850SThomas Gleixner 	switch (desc->depth) {
6351adb0850SThomas Gleixner 	case 0:
6360a0c5168SRafael J. Wysocki  err_out:
63779ff1cdaSJiang Liu 		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
63879ff1cdaSJiang Liu 		     irq_desc_get_irq(desc));
6391adb0850SThomas Gleixner 		break;
6401adb0850SThomas Gleixner 	case 1: {
641c531e836SThomas Gleixner 		if (desc->istate & IRQS_SUSPENDED)
6420a0c5168SRafael J. Wysocki 			goto err_out;
6431adb0850SThomas Gleixner 		/* Prevent probing on this irq: */
6441ccb4e61SThomas Gleixner 		irq_settings_set_noprobe(desc);
645201d7f47SThomas Gleixner 		/*
646201d7f47SThomas Gleixner 		 * Call irq_startup() not irq_enable() here because the
647201d7f47SThomas Gleixner 		 * interrupt might be marked NOAUTOEN. So irq_startup()
648201d7f47SThomas Gleixner 		 * needs to be invoked when it gets enabled the first
649201d7f47SThomas Gleixner 		 * time. If it was already started up, then irq_startup()
650201d7f47SThomas Gleixner 		 * will invoke irq_enable() under the hood.
651201d7f47SThomas Gleixner 		 */
652c942cee4SThomas Gleixner 		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
653201d7f47SThomas Gleixner 		break;
6541adb0850SThomas Gleixner 	}
6551adb0850SThomas Gleixner 	default:
6561adb0850SThomas Gleixner 		desc->depth--;
6571adb0850SThomas Gleixner 	}
6581adb0850SThomas Gleixner }
6591adb0850SThomas Gleixner 
6601da177e4SLinus Torvalds /**
6611da177e4SLinus Torvalds  *	enable_irq - enable handling of an irq
6621da177e4SLinus Torvalds  *	@irq: Interrupt to enable
6631da177e4SLinus Torvalds  *
6641da177e4SLinus Torvalds  *	Undoes the effect of one call to disable_irq().  If this
6651da177e4SLinus Torvalds  *	matches the last disable, processing of interrupts on this
6661da177e4SLinus Torvalds  *	IRQ line is re-enabled.
6671da177e4SLinus Torvalds  *
66870aedd24SThomas Gleixner  *	This function may be called from IRQ context only when
6696b8ff312SThomas Gleixner  *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
6701da177e4SLinus Torvalds  */
6711da177e4SLinus Torvalds void enable_irq(unsigned int irq)
6721da177e4SLinus Torvalds {
6731da177e4SLinus Torvalds 	unsigned long flags;
67431d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
6751da177e4SLinus Torvalds 
6767d94f7caSYinghai Lu 	if (!desc)
677c2b5a251SMatthew Wilcox 		return;
67850f7c032SThomas Gleixner 	if (WARN(!desc->irq_data.chip,
6792656c366SThomas Gleixner 		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
68002725e74SThomas Gleixner 		goto out;
6812656c366SThomas Gleixner 
68279ff1cdaSJiang Liu 	__enable_irq(desc);
68302725e74SThomas Gleixner out:
68402725e74SThomas Gleixner 	irq_put_desc_busunlock(desc, flags);
6851da177e4SLinus Torvalds }
6861da177e4SLinus Torvalds EXPORT_SYMBOL(enable_irq);
6871da177e4SLinus Torvalds 
688b525903cSJulien Thierry /**
689b525903cSJulien Thierry  *	enable_nmi - enable handling of an nmi
690b525903cSJulien Thierry  *	@irq: Interrupt to enable
691b525903cSJulien Thierry  *
692b525903cSJulien Thierry  *	The interrupt to enable must have been requested through request_nmi.
693b525903cSJulien Thierry  *	Undoes the effect of one call to disable_nmi(). If this
694b525903cSJulien Thierry  *	matches the last disable, processing of interrupts on this
695b525903cSJulien Thierry  *	IRQ line is re-enabled.
696b525903cSJulien Thierry  */
697b525903cSJulien Thierry void enable_nmi(unsigned int irq)
698b525903cSJulien Thierry {
699b525903cSJulien Thierry 	enable_irq(irq);
700b525903cSJulien Thierry }
701b525903cSJulien Thierry 
7020c5d1eb7SDavid Brownell static int set_irq_wake_real(unsigned int irq, unsigned int on)
7032db87321SUwe Kleine-König {
70408678b08SYinghai Lu 	struct irq_desc *desc = irq_to_desc(irq);
7052db87321SUwe Kleine-König 	int ret = -ENXIO;
7062db87321SUwe Kleine-König 
70760f96b41SSantosh Shilimkar 	if (irq_desc_get_chip(desc)->flags &  IRQCHIP_SKIP_SET_WAKE)
70860f96b41SSantosh Shilimkar 		return 0;
70960f96b41SSantosh Shilimkar 
7102f7e99bbSThomas Gleixner 	if (desc->irq_data.chip->irq_set_wake)
7112f7e99bbSThomas Gleixner 		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
7122db87321SUwe Kleine-König 
7132db87321SUwe Kleine-König 	return ret;
7142db87321SUwe Kleine-König }
7152db87321SUwe Kleine-König 
716ba9a2331SThomas Gleixner /**
717a0cd9ca2SThomas Gleixner  *	irq_set_irq_wake - control irq power management wakeup
718ba9a2331SThomas Gleixner  *	@irq:	interrupt to control
719ba9a2331SThomas Gleixner  *	@on:	enable/disable power management wakeup
720ba9a2331SThomas Gleixner  *
72115a647ebSDavid Brownell  *	Enable/disable power management wakeup mode, which is
72215a647ebSDavid Brownell  *	disabled by default.  Enables and disables must match,
72315a647ebSDavid Brownell  *	just as they match for non-wakeup mode support.
72415a647ebSDavid Brownell  *
72515a647ebSDavid Brownell  *	Wakeup mode lets this IRQ wake the system from sleep
72615a647ebSDavid Brownell  *	states like "suspend to RAM".
727f9f21ceaSStephen Boyd  *
728f9f21ceaSStephen Boyd  *	Note: irq enable/disable state is completely orthogonal
729f9f21ceaSStephen Boyd  *	to the enable/disable state of irq wake. An irq can be
730f9f21ceaSStephen Boyd  *	disabled with disable_irq() and still wake the system as
731f9f21ceaSStephen Boyd  *	long as the irq has wake enabled. If this does not hold,
732f9f21ceaSStephen Boyd  *	then the underlying irq chip and the related driver need
733f9f21ceaSStephen Boyd  *	to be investigated.
734ba9a2331SThomas Gleixner  */
735a0cd9ca2SThomas Gleixner int irq_set_irq_wake(unsigned int irq, unsigned int on)
736ba9a2331SThomas Gleixner {
737ba9a2331SThomas Gleixner 	unsigned long flags;
73831d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
7392db87321SUwe Kleine-König 	int ret = 0;
740ba9a2331SThomas Gleixner 
74113863a66SJesper Juhl 	if (!desc)
74213863a66SJesper Juhl 		return -EINVAL;
74313863a66SJesper Juhl 
744b525903cSJulien Thierry 	/* Don't use NMIs as wake up interrupts please */
745b525903cSJulien Thierry 	if (desc->istate & IRQS_NMI) {
746b525903cSJulien Thierry 		ret = -EINVAL;
747b525903cSJulien Thierry 		goto out_unlock;
748b525903cSJulien Thierry 	}
749b525903cSJulien Thierry 
75015a647ebSDavid Brownell 	/* wakeup-capable irqs can be shared between drivers that
75115a647ebSDavid Brownell 	 * don't need to have the same sleep mode behaviors.
75215a647ebSDavid Brownell 	 */
75315a647ebSDavid Brownell 	if (on) {
7542db87321SUwe Kleine-König 		if (desc->wake_depth++ == 0) {
7552db87321SUwe Kleine-König 			ret = set_irq_wake_real(irq, on);
7562db87321SUwe Kleine-König 			if (ret)
7572db87321SUwe Kleine-König 				desc->wake_depth = 0;
75815a647ebSDavid Brownell 			else
7597f94226fSThomas Gleixner 				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
7602db87321SUwe Kleine-König 		}
76115a647ebSDavid Brownell 	} else {
76215a647ebSDavid Brownell 		if (desc->wake_depth == 0) {
7637a2c4770SArjan van de Ven 			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
7642db87321SUwe Kleine-König 		} else if (--desc->wake_depth == 0) {
7652db87321SUwe Kleine-König 			ret = set_irq_wake_real(irq, on);
7662db87321SUwe Kleine-König 			if (ret)
7672db87321SUwe Kleine-König 				desc->wake_depth = 1;
76815a647ebSDavid Brownell 			else
7697f94226fSThomas Gleixner 				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
77015a647ebSDavid Brownell 		}
7712db87321SUwe Kleine-König 	}
772b525903cSJulien Thierry 
773b525903cSJulien Thierry out_unlock:
77402725e74SThomas Gleixner 	irq_put_desc_busunlock(desc, flags);
775ba9a2331SThomas Gleixner 	return ret;
776ba9a2331SThomas Gleixner }
777a0cd9ca2SThomas Gleixner EXPORT_SYMBOL(irq_set_irq_wake);
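
/*
 * Illustrative sketch (not part of the upstream file): drivers usually call
 * the enable_irq_wake()/disable_irq_wake() wrappers from <linux/interrupt.h>
 * in their suspend/resume paths rather than irq_set_irq_wake() directly.
 * The helpers below are hypothetical.
 */
static int __maybe_unused example_suspend(unsigned int wake_irq)
{
	return enable_irq_wake(wake_irq);	/* irq_set_irq_wake(irq, 1) */
}

static int __maybe_unused example_resume(unsigned int wake_irq)
{
	return disable_irq_wake(wake_irq);	/* irq_set_irq_wake(irq, 0) */
}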
778ba9a2331SThomas Gleixner 
7791da177e4SLinus Torvalds /*
7801da177e4SLinus Torvalds  * Internal function that tells the architecture code whether a
7811da177e4SLinus Torvalds  * particular irq has been exclusively allocated or is available
7821da177e4SLinus Torvalds  * for driver use.
7831da177e4SLinus Torvalds  */
7841da177e4SLinus Torvalds int can_request_irq(unsigned int irq, unsigned long irqflags)
7851da177e4SLinus Torvalds {
786cc8c3b78SThomas Gleixner 	unsigned long flags;
78731d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
78802725e74SThomas Gleixner 	int canrequest = 0;
7891da177e4SLinus Torvalds 
7907d94f7caSYinghai Lu 	if (!desc)
7917d94f7caSYinghai Lu 		return 0;
7927d94f7caSYinghai Lu 
79302725e74SThomas Gleixner 	if (irq_settings_can_request(desc)) {
7942779db8dSBen Hutchings 		if (!desc->action ||
7952779db8dSBen Hutchings 		    irqflags & desc->action->flags & IRQF_SHARED)
79602725e74SThomas Gleixner 			canrequest = 1;
79702725e74SThomas Gleixner 	}
79802725e74SThomas Gleixner 	irq_put_desc_unlock(desc, flags);
79902725e74SThomas Gleixner 	return canrequest;
8001da177e4SLinus Torvalds }
8011da177e4SLinus Torvalds 
802a1ff541aSJiang Liu int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
80382736f4dSUwe Kleine-König {
8046b8ff312SThomas Gleixner 	struct irq_chip *chip = desc->irq_data.chip;
805d4d5e089SThomas Gleixner 	int ret, unmask = 0;
80682736f4dSUwe Kleine-König 
807b2ba2c30SThomas Gleixner 	if (!chip || !chip->irq_set_type) {
80882736f4dSUwe Kleine-König 		/*
80982736f4dSUwe Kleine-König 		 * IRQF_TRIGGER_* but the PIC does not support multiple
81082736f4dSUwe Kleine-König 		 * flow-types?
81182736f4dSUwe Kleine-König 		 */
812a1ff541aSJiang Liu 		pr_debug("No set_type function for IRQ %d (%s)\n",
813a1ff541aSJiang Liu 			 irq_desc_get_irq(desc),
81482736f4dSUwe Kleine-König 			 chip ? (chip->name ? : "unknown") : "unknown");
81582736f4dSUwe Kleine-König 		return 0;
81682736f4dSUwe Kleine-König 	}
81782736f4dSUwe Kleine-König 
818d4d5e089SThomas Gleixner 	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
81932f4125eSThomas Gleixner 		if (!irqd_irq_masked(&desc->irq_data))
820d4d5e089SThomas Gleixner 			mask_irq(desc);
82132f4125eSThomas Gleixner 		if (!irqd_irq_disabled(&desc->irq_data))
822d4d5e089SThomas Gleixner 			unmask = 1;
823d4d5e089SThomas Gleixner 	}
824d4d5e089SThomas Gleixner 
82500b992deSAlexander Kuleshov 	/* Mask all flags except trigger mode */
82600b992deSAlexander Kuleshov 	flags &= IRQ_TYPE_SENSE_MASK;
827b2ba2c30SThomas Gleixner 	ret = chip->irq_set_type(&desc->irq_data, flags);
82882736f4dSUwe Kleine-König 
829876dbd4cSThomas Gleixner 	switch (ret) {
830876dbd4cSThomas Gleixner 	case IRQ_SET_MASK_OK:
8312cb62547SJiang Liu 	case IRQ_SET_MASK_OK_DONE:
832876dbd4cSThomas Gleixner 		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
833876dbd4cSThomas Gleixner 		irqd_set(&desc->irq_data, flags);
83444133f7eSMathieu Malaterre 		/* fall through */
835876dbd4cSThomas Gleixner 
836876dbd4cSThomas Gleixner 	case IRQ_SET_MASK_OK_NOCOPY:
837876dbd4cSThomas Gleixner 		flags = irqd_get_trigger_type(&desc->irq_data);
838876dbd4cSThomas Gleixner 		irq_settings_set_trigger_mask(desc, flags);
839876dbd4cSThomas Gleixner 		irqd_clear(&desc->irq_data, IRQD_LEVEL);
840876dbd4cSThomas Gleixner 		irq_settings_clr_level(desc);
841876dbd4cSThomas Gleixner 		if (flags & IRQ_TYPE_LEVEL_MASK) {
842876dbd4cSThomas Gleixner 			irq_settings_set_level(desc);
843876dbd4cSThomas Gleixner 			irqd_set(&desc->irq_data, IRQD_LEVEL);
844876dbd4cSThomas Gleixner 		}
84546732475SThomas Gleixner 
846d4d5e089SThomas Gleixner 		ret = 0;
8478fff39e0SThomas Gleixner 		break;
848876dbd4cSThomas Gleixner 	default:
849d75f773cSSakari Ailus 		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
850a1ff541aSJiang Liu 		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
8510c5d1eb7SDavid Brownell 	}
852d4d5e089SThomas Gleixner 	if (unmask)
853d4d5e089SThomas Gleixner 		unmask_irq(desc);
85482736f4dSUwe Kleine-König 	return ret;
85582736f4dSUwe Kleine-König }
85682736f4dSUwe Kleine-König 
857293a7a0aSThomas Gleixner #ifdef CONFIG_HARDIRQS_SW_RESEND
858293a7a0aSThomas Gleixner int irq_set_parent(int irq, int parent_irq)
859293a7a0aSThomas Gleixner {
860293a7a0aSThomas Gleixner 	unsigned long flags;
861293a7a0aSThomas Gleixner 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
862293a7a0aSThomas Gleixner 
863293a7a0aSThomas Gleixner 	if (!desc)
864293a7a0aSThomas Gleixner 		return -EINVAL;
865293a7a0aSThomas Gleixner 
866293a7a0aSThomas Gleixner 	desc->parent_irq = parent_irq;
867293a7a0aSThomas Gleixner 
868293a7a0aSThomas Gleixner 	irq_put_desc_unlock(desc, flags);
869293a7a0aSThomas Gleixner 	return 0;
870293a7a0aSThomas Gleixner }
8713118dac5SSudip Mukherjee EXPORT_SYMBOL_GPL(irq_set_parent);
872293a7a0aSThomas Gleixner #endif
873293a7a0aSThomas Gleixner 
874b25c340cSThomas Gleixner /*
875b25c340cSThomas Gleixner  * Default primary interrupt handler for threaded interrupts. Is
876b25c340cSThomas Gleixner  * assigned as primary handler when request_threaded_irq is called
877b25c340cSThomas Gleixner  * with handler == NULL. Useful for oneshot interrupts.
878b25c340cSThomas Gleixner  */
879b25c340cSThomas Gleixner static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
880b25c340cSThomas Gleixner {
881b25c340cSThomas Gleixner 	return IRQ_WAKE_THREAD;
882b25c340cSThomas Gleixner }
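
/*
 * Illustrative sketch (not part of the upstream file): requesting a purely
 * threaded interrupt.  Passing a NULL primary handler makes the core use
 * irq_default_primary_handler() above; IRQF_ONESHOT keeps the line masked
 * until the thread function has run.  Handler and names are hypothetical.
 */
static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/* May sleep, e.g. talk to a device behind a slow bus. */
	return IRQ_HANDLED;
}

static int __maybe_unused example_request(unsigned int irq, void *dev_id)
{
	return request_threaded_irq(irq, NULL, example_thread_fn,
				    IRQF_ONESHOT, "example", dev_id);
}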
883b25c340cSThomas Gleixner 
884399b5da2SThomas Gleixner /*
885399b5da2SThomas Gleixner  * Primary handler for nested threaded interrupts. Should never be
886399b5da2SThomas Gleixner  * called.
887399b5da2SThomas Gleixner  */
888399b5da2SThomas Gleixner static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
889399b5da2SThomas Gleixner {
890399b5da2SThomas Gleixner 	WARN(1, "Primary handler called for nested irq %d\n", irq);
891399b5da2SThomas Gleixner 	return IRQ_NONE;
892399b5da2SThomas Gleixner }
893399b5da2SThomas Gleixner 
8942a1d3ab8SThomas Gleixner static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
8952a1d3ab8SThomas Gleixner {
8962a1d3ab8SThomas Gleixner 	WARN(1, "Secondary action handler called for irq %d\n", irq);
8972a1d3ab8SThomas Gleixner 	return IRQ_NONE;
8982a1d3ab8SThomas Gleixner }
8992a1d3ab8SThomas Gleixner 
9003aa551c9SThomas Gleixner static int irq_wait_for_interrupt(struct irqaction *action)
9013aa551c9SThomas Gleixner {
902519cc865SLukas Wunner 	for (;;) {
9033aa551c9SThomas Gleixner 		set_current_state(TASK_INTERRUPTIBLE);
904f48fe81eSThomas Gleixner 
905519cc865SLukas Wunner 		if (kthread_should_stop()) {
906519cc865SLukas Wunner 			/* may need to run one last time */
907519cc865SLukas Wunner 			if (test_and_clear_bit(IRQTF_RUNTHREAD,
908519cc865SLukas Wunner 					       &action->thread_flags)) {
909519cc865SLukas Wunner 				__set_current_state(TASK_RUNNING);
910519cc865SLukas Wunner 				return 0;
911519cc865SLukas Wunner 			}
912519cc865SLukas Wunner 			__set_current_state(TASK_RUNNING);
913519cc865SLukas Wunner 			return -1;
914519cc865SLukas Wunner 		}
915550acb19SIdo Yariv 
916f48fe81eSThomas Gleixner 		if (test_and_clear_bit(IRQTF_RUNTHREAD,
917f48fe81eSThomas Gleixner 				       &action->thread_flags)) {
9183aa551c9SThomas Gleixner 			__set_current_state(TASK_RUNNING);
9193aa551c9SThomas Gleixner 			return 0;
920f48fe81eSThomas Gleixner 		}
9213aa551c9SThomas Gleixner 		schedule();
9223aa551c9SThomas Gleixner 	}
9233aa551c9SThomas Gleixner }
9243aa551c9SThomas Gleixner 
925b25c340cSThomas Gleixner /*
926b25c340cSThomas Gleixner  * Oneshot interrupts keep the irq line masked until the threaded
927b25c340cSThomas Gleixner  * handler finished. unmask if the interrupt has not been disabled and
928b25c340cSThomas Gleixner  * is marked MASKED.
929b25c340cSThomas Gleixner  */
930b5faba21SThomas Gleixner static void irq_finalize_oneshot(struct irq_desc *desc,
931f3f79e38SAlexander Gordeev 				 struct irqaction *action)
932b25c340cSThomas Gleixner {
9332a1d3ab8SThomas Gleixner 	if (!(desc->istate & IRQS_ONESHOT) ||
9342a1d3ab8SThomas Gleixner 	    action->handler == irq_forced_secondary_handler)
935b5faba21SThomas Gleixner 		return;
9360b1adaa0SThomas Gleixner again:
9373876ec9eSThomas Gleixner 	chip_bus_lock(desc);
938239007b8SThomas Gleixner 	raw_spin_lock_irq(&desc->lock);
9390b1adaa0SThomas Gleixner 
9400b1adaa0SThomas Gleixner 	/*
9410b1adaa0SThomas Gleixner 	 * Implausible though it may be, we need to protect ourselves against
9420b1adaa0SThomas Gleixner 	 * the following scenario:
9430b1adaa0SThomas Gleixner 	 *
9440b1adaa0SThomas Gleixner 	 * The thread can finish before the hard interrupt handler on the
9450b1adaa0SThomas Gleixner 	 * other CPU does. If we unmask the irq line then the interrupt
9460b1adaa0SThomas Gleixner 	 * can come in again, mask the line and leave due to
947009b4c3bSThomas Gleixner 	 * IRQS_INPROGRESS, so the irq line stays masked forever.
948b5faba21SThomas Gleixner 	 *
949b5faba21SThomas Gleixner 	 * This also serializes the state of shared oneshot handlers
950b5faba21SThomas Gleixner 	 * versus "desc->threads_oneshot |= action->thread_mask;" in
951b5faba21SThomas Gleixner 	 * irq_wake_thread(). See the comment there which explains the
952b5faba21SThomas Gleixner 	 * serialization.
9530b1adaa0SThomas Gleixner 	 */
95432f4125eSThomas Gleixner 	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
9550b1adaa0SThomas Gleixner 		raw_spin_unlock_irq(&desc->lock);
9563876ec9eSThomas Gleixner 		chip_bus_sync_unlock(desc);
9570b1adaa0SThomas Gleixner 		cpu_relax();
9580b1adaa0SThomas Gleixner 		goto again;
9590b1adaa0SThomas Gleixner 	}
9600b1adaa0SThomas Gleixner 
961b5faba21SThomas Gleixner 	/*
962b5faba21SThomas Gleixner 	 * Now check again whether the thread should run. Otherwise
963b5faba21SThomas Gleixner 	 * we would clear the threads_oneshot bit of this thread which
964b5faba21SThomas Gleixner 	 * was just set.
965b5faba21SThomas Gleixner 	 */
966f3f79e38SAlexander Gordeev 	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
967b5faba21SThomas Gleixner 		goto out_unlock;
968b5faba21SThomas Gleixner 
969b5faba21SThomas Gleixner 	desc->threads_oneshot &= ~action->thread_mask;
970b5faba21SThomas Gleixner 
97132f4125eSThomas Gleixner 	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
97232f4125eSThomas Gleixner 	    irqd_irq_masked(&desc->irq_data))
973328a4978SThomas Gleixner 		unmask_threaded_irq(desc);
97432f4125eSThomas Gleixner 
975b5faba21SThomas Gleixner out_unlock:
976239007b8SThomas Gleixner 	raw_spin_unlock_irq(&desc->lock);
9773876ec9eSThomas Gleixner 	chip_bus_sync_unlock(desc);
978b25c340cSThomas Gleixner }
979b25c340cSThomas Gleixner 
98061f38261SBruno Premont #ifdef CONFIG_SMP
9813aa551c9SThomas Gleixner /*
982b04c644eSChuansheng Liu  * Check whether we need to change the affinity of the interrupt thread.
983591d2fb0SThomas Gleixner  */
984591d2fb0SThomas Gleixner static void
985591d2fb0SThomas Gleixner irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
986591d2fb0SThomas Gleixner {
987591d2fb0SThomas Gleixner 	cpumask_var_t mask;
98804aa530eSThomas Gleixner 	bool valid = true;
989591d2fb0SThomas Gleixner 
990591d2fb0SThomas Gleixner 	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
991591d2fb0SThomas Gleixner 		return;
992591d2fb0SThomas Gleixner 
993591d2fb0SThomas Gleixner 	/*
994591d2fb0SThomas Gleixner 	 * In case we are out of memory we set IRQTF_AFFINITY again and
995591d2fb0SThomas Gleixner 	 * try again next time
996591d2fb0SThomas Gleixner 	 */
997591d2fb0SThomas Gleixner 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
998591d2fb0SThomas Gleixner 		set_bit(IRQTF_AFFINITY, &action->thread_flags);
999591d2fb0SThomas Gleixner 		return;
1000591d2fb0SThomas Gleixner 	}
1001591d2fb0SThomas Gleixner 
1002239007b8SThomas Gleixner 	raw_spin_lock_irq(&desc->lock);
100304aa530eSThomas Gleixner 	/*
100404aa530eSThomas Gleixner 	 * This code is triggered unconditionally. Check the affinity
100504aa530eSThomas Gleixner 	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
100604aa530eSThomas Gleixner 	 */
1007cbf86999SThomas Gleixner 	if (cpumask_available(desc->irq_common_data.affinity)) {
1008cbf86999SThomas Gleixner 		const struct cpumask *m;
1009cbf86999SThomas Gleixner 
1010cbf86999SThomas Gleixner 		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1011cbf86999SThomas Gleixner 		cpumask_copy(mask, m);
1012cbf86999SThomas Gleixner 	} else {
101304aa530eSThomas Gleixner 		valid = false;
1014cbf86999SThomas Gleixner 	}
1015239007b8SThomas Gleixner 	raw_spin_unlock_irq(&desc->lock);
1016591d2fb0SThomas Gleixner 
101704aa530eSThomas Gleixner 	if (valid)
1018591d2fb0SThomas Gleixner 		set_cpus_allowed_ptr(current, mask);
1019591d2fb0SThomas Gleixner 	free_cpumask_var(mask);
1020591d2fb0SThomas Gleixner }
102161f38261SBruno Premont #else
102261f38261SBruno Premont static inline void
102361f38261SBruno Premont irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
102461f38261SBruno Premont #endif
1025591d2fb0SThomas Gleixner 
1026591d2fb0SThomas Gleixner /*
1027c5f48c0aSIngo Molnar  * Interrupts which are not explicitly requested as threaded
10288d32a307SThomas Gleixner  * interrupts rely on the implicit bh/preempt disable of the hard irq
10298d32a307SThomas Gleixner  * context. So we need to disable bh here to avoid deadlocks and other
10308d32a307SThomas Gleixner  * side effects.
10318d32a307SThomas Gleixner  */
10323a43e05fSSebastian Andrzej Siewior static irqreturn_t
10338d32a307SThomas Gleixner irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
10348d32a307SThomas Gleixner {
10353a43e05fSSebastian Andrzej Siewior 	irqreturn_t ret;
10363a43e05fSSebastian Andrzej Siewior 
10378d32a307SThomas Gleixner 	local_bh_disable();
10383a43e05fSSebastian Andrzej Siewior 	ret = action->thread_fn(action->irq, action->dev_id);
1039746a923bSLukas Wunner 	if (ret == IRQ_HANDLED)
1040746a923bSLukas Wunner 		atomic_inc(&desc->threads_handled);
1041746a923bSLukas Wunner 
1042f3f79e38SAlexander Gordeev 	irq_finalize_oneshot(desc, action);
10438d32a307SThomas Gleixner 	local_bh_enable();
10443a43e05fSSebastian Andrzej Siewior 	return ret;
10458d32a307SThomas Gleixner }
10468d32a307SThomas Gleixner 
10478d32a307SThomas Gleixner /*
1048f788e7bfSXie XiuQi  * Interrupts explicitly requested as threaded interrupts want to be
10498d32a307SThomas Gleixner  * preemptible - many of them need to sleep and wait for slow buses to
10508d32a307SThomas Gleixner  * complete.
10518d32a307SThomas Gleixner  */
10523a43e05fSSebastian Andrzej Siewior static irqreturn_t irq_thread_fn(struct irq_desc *desc,
10533a43e05fSSebastian Andrzej Siewior 		struct irqaction *action)
10548d32a307SThomas Gleixner {
10553a43e05fSSebastian Andrzej Siewior 	irqreturn_t ret;
10563a43e05fSSebastian Andrzej Siewior 
10573a43e05fSSebastian Andrzej Siewior 	ret = action->thread_fn(action->irq, action->dev_id);
1058746a923bSLukas Wunner 	if (ret == IRQ_HANDLED)
1059746a923bSLukas Wunner 		atomic_inc(&desc->threads_handled);
1060746a923bSLukas Wunner 
1061f3f79e38SAlexander Gordeev 	irq_finalize_oneshot(desc, action);
10623a43e05fSSebastian Andrzej Siewior 	return ret;
10638d32a307SThomas Gleixner }
10648d32a307SThomas Gleixner 
10657140ea19SIdo Yariv static void wake_threads_waitq(struct irq_desc *desc)
10667140ea19SIdo Yariv {
1067c685689fSChuansheng Liu 	if (atomic_dec_and_test(&desc->threads_active))
10687140ea19SIdo Yariv 		wake_up(&desc->wait_for_threads);
10697140ea19SIdo Yariv }
10707140ea19SIdo Yariv 
107167d12145SAl Viro static void irq_thread_dtor(struct callback_head *unused)
10724d1d61a6SOleg Nesterov {
10734d1d61a6SOleg Nesterov 	struct task_struct *tsk = current;
10744d1d61a6SOleg Nesterov 	struct irq_desc *desc;
10754d1d61a6SOleg Nesterov 	struct irqaction *action;
10764d1d61a6SOleg Nesterov 
10774d1d61a6SOleg Nesterov 	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
10784d1d61a6SOleg Nesterov 		return;
10794d1d61a6SOleg Nesterov 
10804d1d61a6SOleg Nesterov 	action = kthread_data(tsk);
10814d1d61a6SOleg Nesterov 
1082fb21affaSLinus Torvalds 	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
108319af395dSAlan Cox 	       tsk->comm, tsk->pid, action->irq);
10844d1d61a6SOleg Nesterov 
10854d1d61a6SOleg Nesterov 
10864d1d61a6SOleg Nesterov 	desc = irq_to_desc(action->irq);
10874d1d61a6SOleg Nesterov 	/*
10884d1d61a6SOleg Nesterov 	 * If IRQTF_RUNTHREAD is set, we need to decrement
10894d1d61a6SOleg Nesterov 	 * desc->threads_active and wake possible waiters.
10904d1d61a6SOleg Nesterov 	 */
10914d1d61a6SOleg Nesterov 	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
10924d1d61a6SOleg Nesterov 		wake_threads_waitq(desc);
10934d1d61a6SOleg Nesterov 
10944d1d61a6SOleg Nesterov 	/* Prevent a stale desc->threads_oneshot */
10954d1d61a6SOleg Nesterov 	irq_finalize_oneshot(desc, action);
10964d1d61a6SOleg Nesterov }
10974d1d61a6SOleg Nesterov 
10982a1d3ab8SThomas Gleixner static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
10992a1d3ab8SThomas Gleixner {
11002a1d3ab8SThomas Gleixner 	struct irqaction *secondary = action->secondary;
11012a1d3ab8SThomas Gleixner 
11022a1d3ab8SThomas Gleixner 	if (WARN_ON_ONCE(!secondary))
11032a1d3ab8SThomas Gleixner 		return;
11042a1d3ab8SThomas Gleixner 
11052a1d3ab8SThomas Gleixner 	raw_spin_lock_irq(&desc->lock);
11062a1d3ab8SThomas Gleixner 	__irq_wake_thread(desc, secondary);
11072a1d3ab8SThomas Gleixner 	raw_spin_unlock_irq(&desc->lock);
11082a1d3ab8SThomas Gleixner }
11092a1d3ab8SThomas Gleixner 
11108d32a307SThomas Gleixner /*
11113aa551c9SThomas Gleixner  * Interrupt handler thread
11123aa551c9SThomas Gleixner  */
11133aa551c9SThomas Gleixner static int irq_thread(void *data)
11143aa551c9SThomas Gleixner {
111567d12145SAl Viro 	struct callback_head on_exit_work;
11163aa551c9SThomas Gleixner 	struct irqaction *action = data;
11173aa551c9SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(action->irq);
11183a43e05fSSebastian Andrzej Siewior 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
11193a43e05fSSebastian Andrzej Siewior 			struct irqaction *action);
11203aa551c9SThomas Gleixner 
1121540b60e2SAlexander Gordeev 	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
11228d32a307SThomas Gleixner 					&action->thread_flags))
11238d32a307SThomas Gleixner 		handler_fn = irq_forced_thread_fn;
11248d32a307SThomas Gleixner 	else
11258d32a307SThomas Gleixner 		handler_fn = irq_thread_fn;
11268d32a307SThomas Gleixner 
112741f9d29fSAl Viro 	init_task_work(&on_exit_work, irq_thread_dtor);
11284d1d61a6SOleg Nesterov 	task_work_add(current, &on_exit_work, false);
11293aa551c9SThomas Gleixner 
1130f3de44edSSankara Muthukrishnan 	irq_thread_check_affinity(desc, action);
1131f3de44edSSankara Muthukrishnan 
11323aa551c9SThomas Gleixner 	while (!irq_wait_for_interrupt(action)) {
11337140ea19SIdo Yariv 		irqreturn_t action_ret;
11343aa551c9SThomas Gleixner 
1135591d2fb0SThomas Gleixner 		irq_thread_check_affinity(desc, action);
1136591d2fb0SThomas Gleixner 
11373a43e05fSSebastian Andrzej Siewior 		action_ret = handler_fn(desc, action);
11382a1d3ab8SThomas Gleixner 		if (action_ret == IRQ_WAKE_THREAD)
11392a1d3ab8SThomas Gleixner 			irq_wake_secondary(desc, action);
11407140ea19SIdo Yariv 
11417140ea19SIdo Yariv 		wake_threads_waitq(desc);
11423aa551c9SThomas Gleixner 	}
11433aa551c9SThomas Gleixner 
11447140ea19SIdo Yariv 	/*
11457140ea19SIdo Yariv 	 * This is the regular exit path. __free_irq() is stopping the
11467140ea19SIdo Yariv 	 * thread via kthread_stop() after calling
1147519cc865SLukas Wunner 	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1148836557bdSLukas Wunner 	 * oneshot mask bit can be set.
11493aa551c9SThomas Gleixner 	 */
11504d1d61a6SOleg Nesterov 	task_work_cancel(current, irq_thread_dtor);
11513aa551c9SThomas Gleixner 	return 0;
11523aa551c9SThomas Gleixner }
11533aa551c9SThomas Gleixner 
1154a92444c6SThomas Gleixner /**
1155a92444c6SThomas Gleixner  *	irq_wake_thread - wake the irq thread for the action identified by dev_id
1156a92444c6SThomas Gleixner  *	@irq:		Interrupt line
1157a92444c6SThomas Gleixner  *	@dev_id:	Device identity for which the thread should be woken
1158a92444c6SThomas Gleixner  *
1159a92444c6SThomas Gleixner  */
1160a92444c6SThomas Gleixner void irq_wake_thread(unsigned int irq, void *dev_id)
1161a92444c6SThomas Gleixner {
1162a92444c6SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
1163a92444c6SThomas Gleixner 	struct irqaction *action;
1164a92444c6SThomas Gleixner 	unsigned long flags;
1165a92444c6SThomas Gleixner 
1166a92444c6SThomas Gleixner 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1167a92444c6SThomas Gleixner 		return;
1168a92444c6SThomas Gleixner 
1169a92444c6SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
1170f944b5a7SDaniel Lezcano 	for_each_action_of_desc(desc, action) {
1171a92444c6SThomas Gleixner 		if (action->dev_id == dev_id) {
1172a92444c6SThomas Gleixner 			if (action->thread)
1173a92444c6SThomas Gleixner 				__irq_wake_thread(desc, action);
1174a92444c6SThomas Gleixner 			break;
1175a92444c6SThomas Gleixner 		}
1176a92444c6SThomas Gleixner 	}
1177a92444c6SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1178a92444c6SThomas Gleixner }
1179a92444c6SThomas Gleixner EXPORT_SYMBOL_GPL(irq_wake_thread);
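/*
 * Illustrative sketch: one way a driver might use irq_wake_thread(), e.g.
 * kicking its own threaded handler from a polling timer when the hardware
 * failed to raise the interrupt. The dev_id passed here has to be the one
 * used in request_threaded_irq(); all names below are hypothetical.
 *
 *	static void my_poll_timer(struct timer_list *t)
 *	{
 *		struct my_dev *md = from_timer(md, t, poll_timer);
 *
 *		if (my_dev_work_pending(md))
 *			irq_wake_thread(md->irq, md);
 *
 *		mod_timer(&md->poll_timer, jiffies + HZ);
 *	}
 */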
1180a92444c6SThomas Gleixner 
11812a1d3ab8SThomas Gleixner static int irq_setup_forced_threading(struct irqaction *new)
11828d32a307SThomas Gleixner {
11838d32a307SThomas Gleixner 	if (!force_irqthreads)
11842a1d3ab8SThomas Gleixner 		return 0;
11858d32a307SThomas Gleixner 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
11862a1d3ab8SThomas Gleixner 		return 0;
11878d32a307SThomas Gleixner 
1188d1f0301bSThomas Gleixner 	/*
1189d1f0301bSThomas Gleixner 	 * No further action required for interrupts which are requested as
1190d1f0301bSThomas Gleixner 	 * threaded interrupts already
1191d1f0301bSThomas Gleixner 	 */
1192d1f0301bSThomas Gleixner 	if (new->handler == irq_default_primary_handler)
1193d1f0301bSThomas Gleixner 		return 0;
1194d1f0301bSThomas Gleixner 
11958d32a307SThomas Gleixner 	new->flags |= IRQF_ONESHOT;
11968d32a307SThomas Gleixner 
11972a1d3ab8SThomas Gleixner 	/*
11982a1d3ab8SThomas Gleixner 	 * Handle the case where we have a real primary handler and a
11992a1d3ab8SThomas Gleixner 	 * thread handler. We force thread them as well by creating a
12002a1d3ab8SThomas Gleixner 	 * secondary action.
12012a1d3ab8SThomas Gleixner 	 */
1202d1f0301bSThomas Gleixner 	if (new->handler && new->thread_fn) {
12032a1d3ab8SThomas Gleixner 		/* Allocate the secondary action */
12042a1d3ab8SThomas Gleixner 		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
12052a1d3ab8SThomas Gleixner 		if (!new->secondary)
12062a1d3ab8SThomas Gleixner 			return -ENOMEM;
12072a1d3ab8SThomas Gleixner 		new->secondary->handler = irq_forced_secondary_handler;
12082a1d3ab8SThomas Gleixner 		new->secondary->thread_fn = new->thread_fn;
12092a1d3ab8SThomas Gleixner 		new->secondary->dev_id = new->dev_id;
12102a1d3ab8SThomas Gleixner 		new->secondary->irq = new->irq;
12112a1d3ab8SThomas Gleixner 		new->secondary->name = new->name;
12122a1d3ab8SThomas Gleixner 	}
12132a1d3ab8SThomas Gleixner 	/* Deal with the primary handler */
12148d32a307SThomas Gleixner 	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
12158d32a307SThomas Gleixner 	new->thread_fn = new->handler;
12168d32a307SThomas Gleixner 	new->handler = irq_default_primary_handler;
12172a1d3ab8SThomas Gleixner 	return 0;
12188d32a307SThomas Gleixner }
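/*
 * Illustrative sketch: irq_setup_forced_threading() above skips interrupts
 * requested with IRQF_NO_THREAD, so a handler which must stay in hard
 * interrupt context even under "threadirqs" would be requested like this
 * (hypothetical names):
 *
 *	ret = request_irq(md->irq, my_clockevent_handler,
 *			  IRQF_NO_THREAD, "my-clockevent", md);
 */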
12198d32a307SThomas Gleixner 
1220c1bacbaeSThomas Gleixner static int irq_request_resources(struct irq_desc *desc)
1221c1bacbaeSThomas Gleixner {
1222c1bacbaeSThomas Gleixner 	struct irq_data *d = &desc->irq_data;
1223c1bacbaeSThomas Gleixner 	struct irq_chip *c = d->chip;
1224c1bacbaeSThomas Gleixner 
1225c1bacbaeSThomas Gleixner 	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1226c1bacbaeSThomas Gleixner }
1227c1bacbaeSThomas Gleixner 
1228c1bacbaeSThomas Gleixner static void irq_release_resources(struct irq_desc *desc)
1229c1bacbaeSThomas Gleixner {
1230c1bacbaeSThomas Gleixner 	struct irq_data *d = &desc->irq_data;
1231c1bacbaeSThomas Gleixner 	struct irq_chip *c = d->chip;
1232c1bacbaeSThomas Gleixner 
1233c1bacbaeSThomas Gleixner 	if (c->irq_release_resources)
1234c1bacbaeSThomas Gleixner 		c->irq_release_resources(d);
1235c1bacbaeSThomas Gleixner }
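/*
 * Illustrative sketch: irq_request_resources()/irq_release_resources() call
 * optional irq_chip hooks. GPIO based irqchips typically use them to claim
 * the pin as an interrupt line. Chip and helper names are hypothetical:
 *
 *	static int my_gpio_irq_request_resources(struct irq_data *d)
 *	{
 *		return my_gpio_lock_as_irq(irq_data_get_irq_chip_data(d),
 *					   irqd_to_hwirq(d));
 *	}
 *
 *	static struct irq_chip my_gpio_irq_chip = {
 *		.name			= "my-gpio",
 *		.irq_request_resources	= my_gpio_irq_request_resources,
 *		.irq_release_resources	= my_gpio_irq_release_resources,
 *	};
 */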
1236c1bacbaeSThomas Gleixner 
1237b525903cSJulien Thierry static bool irq_supports_nmi(struct irq_desc *desc)
1238b525903cSJulien Thierry {
1239b525903cSJulien Thierry 	struct irq_data *d = irq_desc_get_irq_data(desc);
1240b525903cSJulien Thierry 
1241b525903cSJulien Thierry #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1242b525903cSJulien Thierry 	/* Only IRQs directly managed by the root irqchip can be set as NMI */
1243b525903cSJulien Thierry 	if (d->parent_data)
1244b525903cSJulien Thierry 		return false;
1245b525903cSJulien Thierry #endif
1246b525903cSJulien Thierry 	/* Don't support NMIs for chips behind a slow bus */
1247b525903cSJulien Thierry 	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1248b525903cSJulien Thierry 		return false;
1249b525903cSJulien Thierry 
1250b525903cSJulien Thierry 	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1251b525903cSJulien Thierry }
1252b525903cSJulien Thierry 
1253b525903cSJulien Thierry static int irq_nmi_setup(struct irq_desc *desc)
1254b525903cSJulien Thierry {
1255b525903cSJulien Thierry 	struct irq_data *d = irq_desc_get_irq_data(desc);
1256b525903cSJulien Thierry 	struct irq_chip *c = d->chip;
1257b525903cSJulien Thierry 
1258b525903cSJulien Thierry 	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1259b525903cSJulien Thierry }
1260b525903cSJulien Thierry 
1261b525903cSJulien Thierry static void irq_nmi_teardown(struct irq_desc *desc)
1262b525903cSJulien Thierry {
1263b525903cSJulien Thierry 	struct irq_data *d = irq_desc_get_irq_data(desc);
1264b525903cSJulien Thierry 	struct irq_chip *c = d->chip;
1265b525903cSJulien Thierry 
1266b525903cSJulien Thierry 	if (c->irq_nmi_teardown)
1267b525903cSJulien Thierry 		c->irq_nmi_teardown(d);
1268b525903cSJulien Thierry }
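/*
 * Illustrative sketch: for the NMI checks above to succeed the root irqchip
 * has to advertise IRQCHIP_SUPPORTS_NMI and typically provides the
 * irq_nmi_setup()/irq_nmi_teardown() callbacks used above (irq_nmi_setup()
 * falls back to -EINVAL when absent). Chip and callback names are
 * hypothetical:
 *
 *	static struct irq_chip my_root_irq_chip = {
 *		.name			= "my-root",
 *		.flags			= IRQCHIP_SUPPORTS_NMI,
 *		.irq_nmi_setup		= my_chip_nmi_setup,
 *		.irq_nmi_teardown	= my_chip_nmi_teardown,
 *	};
 */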
1269b525903cSJulien Thierry 
12702a1d3ab8SThomas Gleixner static int
12712a1d3ab8SThomas Gleixner setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
12722a1d3ab8SThomas Gleixner {
12732a1d3ab8SThomas Gleixner 	struct task_struct *t;
12742a1d3ab8SThomas Gleixner 
12752a1d3ab8SThomas Gleixner 	if (!secondary) {
12762a1d3ab8SThomas Gleixner 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
12772a1d3ab8SThomas Gleixner 				   new->name);
12782a1d3ab8SThomas Gleixner 	} else {
12792a1d3ab8SThomas Gleixner 		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
12802a1d3ab8SThomas Gleixner 				   new->name);
12812a1d3ab8SThomas Gleixner 	}
12822a1d3ab8SThomas Gleixner 
12832a1d3ab8SThomas Gleixner 	if (IS_ERR(t))
12842a1d3ab8SThomas Gleixner 		return PTR_ERR(t);
12852a1d3ab8SThomas Gleixner 
1286*7a40798cSPeter Zijlstra 	sched_set_fifo(t);
12872a1d3ab8SThomas Gleixner 
12882a1d3ab8SThomas Gleixner 	/*
12892a1d3ab8SThomas Gleixner 	 * We keep the reference to the task struct even if
12902a1d3ab8SThomas Gleixner 	 * the thread dies to avoid that the interrupt code
12912a1d3ab8SThomas Gleixner 	 * references an already freed task_struct.
12922a1d3ab8SThomas Gleixner 	 */
12937b3c92b8SMatthew Wilcox (Oracle) 	new->thread = get_task_struct(t);
12942a1d3ab8SThomas Gleixner 	/*
12952a1d3ab8SThomas Gleixner 	 * Tell the thread to set its affinity. This is
12962a1d3ab8SThomas Gleixner 	 * important for shared interrupt handlers as we do
12972a1d3ab8SThomas Gleixner 	 * not invoke setup_affinity() for the secondary
12982a1d3ab8SThomas Gleixner 	 * handlers as everything is already set up. Even for
12992a1d3ab8SThomas Gleixner 	 * interrupts marked with IRQF_NOBALANCING this is
13002a1d3ab8SThomas Gleixner 	 * correct as we want the thread to move to the cpu(s)
13012a1d3ab8SThomas Gleixner 	 * on which the requesting code placed the interrupt.
13022a1d3ab8SThomas Gleixner 	 */
13032a1d3ab8SThomas Gleixner 	set_bit(IRQTF_AFFINITY, &new->thread_flags);
13042a1d3ab8SThomas Gleixner 	return 0;
13052a1d3ab8SThomas Gleixner }
13062a1d3ab8SThomas Gleixner 
13071da177e4SLinus Torvalds /*
13081da177e4SLinus Torvalds  * Internal function to register an irqaction - typically used to
13091da177e4SLinus Torvalds  * allocate special interrupts that are part of the architecture.
131019d39a38SThomas Gleixner  *
131119d39a38SThomas Gleixner  * Locking rules:
131219d39a38SThomas Gleixner  *
131319d39a38SThomas Gleixner  * desc->request_mutex	Provides serialization against a concurrent free_irq()
131419d39a38SThomas Gleixner  *   chip_bus_lock	Provides serialization for slow bus operations
131519d39a38SThomas Gleixner  *     desc->lock	Provides serialization against hard interrupts
131619d39a38SThomas Gleixner  *
131719d39a38SThomas Gleixner  * chip_bus_lock and desc->lock are sufficient for all other management and
131819d39a38SThomas Gleixner  * interrupt related functions. desc->request_mutex solely serializes
131919d39a38SThomas Gleixner  * request/free_irq().
13201da177e4SLinus Torvalds  */
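/*
 * A minimal sketch of the resulting nesting when all three levels are taken
 * (illustration derived from the rules above):
 *
 *	mutex_lock(&desc->request_mutex);
 *	chip_bus_lock(desc);
 *	raw_spin_lock_irqsave(&desc->lock, flags);
 *	...
 *	raw_spin_unlock_irqrestore(&desc->lock, flags);
 *	chip_bus_sync_unlock(desc);
 *	mutex_unlock(&desc->request_mutex);
 */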
1321d3c60047SThomas Gleixner static int
1322d3c60047SThomas Gleixner __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
13231da177e4SLinus Torvalds {
1324f17c7545SIngo Molnar 	struct irqaction *old, **old_ptr;
1325b5faba21SThomas Gleixner 	unsigned long flags, thread_mask = 0;
13263b8249e7SThomas Gleixner 	int ret, nested, shared = 0;
13271da177e4SLinus Torvalds 
13287d94f7caSYinghai Lu 	if (!desc)
1329c2b5a251SMatthew Wilcox 		return -EINVAL;
1330c2b5a251SMatthew Wilcox 
13316b8ff312SThomas Gleixner 	if (desc->irq_data.chip == &no_irq_chip)
13321da177e4SLinus Torvalds 		return -ENOSYS;
1333b6873807SSebastian Andrzej Siewior 	if (!try_module_get(desc->owner))
1334b6873807SSebastian Andrzej Siewior 		return -ENODEV;
13351da177e4SLinus Torvalds 
13362a1d3ab8SThomas Gleixner 	new->irq = irq;
13372a1d3ab8SThomas Gleixner 
13381da177e4SLinus Torvalds 	/*
13394b357daeSJon Hunter 	 * If the trigger type is not specified by the caller,
13404b357daeSJon Hunter 	 * then use the default for this interrupt.
13414b357daeSJon Hunter 	 */
13424b357daeSJon Hunter 	if (!(new->flags & IRQF_TRIGGER_MASK))
13434b357daeSJon Hunter 		new->flags |= irqd_get_trigger_type(&desc->irq_data);
13444b357daeSJon Hunter 
13454b357daeSJon Hunter 	/*
1346399b5da2SThomas Gleixner 	 * Check whether the interrupt nests into another interrupt
1347399b5da2SThomas Gleixner 	 * thread.
13483aa551c9SThomas Gleixner 	 */
13491ccb4e61SThomas Gleixner 	nested = irq_settings_is_nested_thread(desc);
1350399b5da2SThomas Gleixner 	if (nested) {
1351b6873807SSebastian Andrzej Siewior 		if (!new->thread_fn) {
1352b6873807SSebastian Andrzej Siewior 			ret = -EINVAL;
1353b6873807SSebastian Andrzej Siewior 			goto out_mput;
1354b6873807SSebastian Andrzej Siewior 		}
1355399b5da2SThomas Gleixner 		/*
1356399b5da2SThomas Gleixner 		 * Replace the primary handler which was provided from
1357399b5da2SThomas Gleixner 		 * the driver for non nested interrupt handling by the
1358399b5da2SThomas Gleixner 		 * dummy function which warns when called.
1359399b5da2SThomas Gleixner 		 */
1360399b5da2SThomas Gleixner 		new->handler = irq_nested_primary_handler;
13618d32a307SThomas Gleixner 	} else {
13622a1d3ab8SThomas Gleixner 		if (irq_settings_can_thread(desc)) {
13632a1d3ab8SThomas Gleixner 			ret = irq_setup_forced_threading(new);
13642a1d3ab8SThomas Gleixner 			if (ret)
13652a1d3ab8SThomas Gleixner 				goto out_mput;
13662a1d3ab8SThomas Gleixner 		}
1367399b5da2SThomas Gleixner 	}
1368399b5da2SThomas Gleixner 
1369399b5da2SThomas Gleixner 	/*
1370399b5da2SThomas Gleixner 	 * Create a handler thread when a thread function is supplied
1371399b5da2SThomas Gleixner 	 * and the interrupt does not nest into another interrupt
1372399b5da2SThomas Gleixner 	 * thread.
1373399b5da2SThomas Gleixner 	 */
1374399b5da2SThomas Gleixner 	if (new->thread_fn && !nested) {
13752a1d3ab8SThomas Gleixner 		ret = setup_irq_thread(new, irq, false);
13762a1d3ab8SThomas Gleixner 		if (ret)
1377b6873807SSebastian Andrzej Siewior 			goto out_mput;
13782a1d3ab8SThomas Gleixner 		if (new->secondary) {
13792a1d3ab8SThomas Gleixner 			ret = setup_irq_thread(new->secondary, irq, true);
13802a1d3ab8SThomas Gleixner 			if (ret)
13812a1d3ab8SThomas Gleixner 				goto out_thread;
1382b6873807SSebastian Andrzej Siewior 		}
13833aa551c9SThomas Gleixner 	}
13843aa551c9SThomas Gleixner 
13853aa551c9SThomas Gleixner 	/*
1386dc9b229aSThomas Gleixner 	 * Drivers are often written to work w/o knowledge about the
1387dc9b229aSThomas Gleixner 	 * underlying irq chip implementation, so a request for a
1388dc9b229aSThomas Gleixner 	 * threaded irq without a primary hard irq context handler
1389dc9b229aSThomas Gleixner 	 * requires the ONESHOT flag to be set. Some irq chips like
1390dc9b229aSThomas Gleixner 	 * MSI based interrupts are per se one shot safe. Check the
1391dc9b229aSThomas Gleixner 	 * chip flags, so we can avoid the unmask dance at the end of
1392dc9b229aSThomas Gleixner 	 * the threaded handler for those.
1393dc9b229aSThomas Gleixner 	 */
1394dc9b229aSThomas Gleixner 	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1395dc9b229aSThomas Gleixner 		new->flags &= ~IRQF_ONESHOT;
1396dc9b229aSThomas Gleixner 
139719d39a38SThomas Gleixner 	/*
139819d39a38SThomas Gleixner 	 * Protects against a concurrent __free_irq() call which might wait
1399519cc865SLukas Wunner 	 * for synchronize_hardirq() to complete without holding the optional
1400836557bdSLukas Wunner 	 * chip bus lock and desc->lock. Also protects against handing out
1401836557bdSLukas Wunner 	 * a recycled oneshot thread_mask bit while it's still in use by
1402836557bdSLukas Wunner 	 * its previous owner.
140319d39a38SThomas Gleixner 	 */
14049114014cSThomas Gleixner 	mutex_lock(&desc->request_mutex);
140519d39a38SThomas Gleixner 
140619d39a38SThomas Gleixner 	/*
140719d39a38SThomas Gleixner 	 * Acquire bus lock as the irq_request_resources() callback below
140819d39a38SThomas Gleixner 	 * might rely on the serialization or the magic power management
140919d39a38SThomas Gleixner 	 * functions which are abusing the irq_bus_lock() callback,
141019d39a38SThomas Gleixner 	 * functions which are abusing the irq_bus_lock() callback.
141119d39a38SThomas Gleixner 	chip_bus_lock(desc);
141219d39a38SThomas Gleixner 
141319d39a38SThomas Gleixner 	/* First installed action requests resources. */
141446e48e25SThomas Gleixner 	if (!desc->action) {
141546e48e25SThomas Gleixner 		ret = irq_request_resources(desc);
141646e48e25SThomas Gleixner 		if (ret) {
141746e48e25SThomas Gleixner 			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
141846e48e25SThomas Gleixner 			       new->name, irq, desc->irq_data.chip->name);
141919d39a38SThomas Gleixner 			goto out_bus_unlock;
142046e48e25SThomas Gleixner 		}
142146e48e25SThomas Gleixner 	}
14229114014cSThomas Gleixner 
1423dc9b229aSThomas Gleixner 	/*
14241da177e4SLinus Torvalds 	 * The following block of code has to be executed atomically,
142519d39a38SThomas Gleixner 	 * protected against a concurrent interrupt and any of the other
142619d39a38SThomas Gleixner 	 * management calls which are not serialized via
142719d39a38SThomas Gleixner 	 * desc->request_mutex or the optional bus lock.
14281da177e4SLinus Torvalds 	 */
1429239007b8SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
1430f17c7545SIngo Molnar 	old_ptr = &desc->action;
1431f17c7545SIngo Molnar 	old = *old_ptr;
143206fcb0c6SIngo Molnar 	if (old) {
1433e76de9f8SThomas Gleixner 		/*
1434e76de9f8SThomas Gleixner 		 * Can't share interrupts unless both agree to and are
1435e76de9f8SThomas Gleixner 		 * the same type (level, edge, polarity). So both flag
14363cca53b0SThomas Gleixner 		 * fields must have IRQF_SHARED set and the bits which
14379d591eddSThomas Gleixner 		 * set the trigger type must match. Also all must
14389d591eddSThomas Gleixner 		 * agree on ONESHOT.
1439b525903cSJulien Thierry 		 * Interrupt lines used for NMIs cannot be shared.
1440e76de9f8SThomas Gleixner 		 */
14414f8413a3SMarc Zyngier 		unsigned int oldtype;
14424f8413a3SMarc Zyngier 
1443b525903cSJulien Thierry 		if (desc->istate & IRQS_NMI) {
1444b525903cSJulien Thierry 			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1445b525903cSJulien Thierry 				new->name, irq, desc->irq_data.chip->name);
1446b525903cSJulien Thierry 			ret = -EINVAL;
1447b525903cSJulien Thierry 			goto out_unlock;
1448b525903cSJulien Thierry 		}
1449b525903cSJulien Thierry 
14504f8413a3SMarc Zyngier 		/*
14514f8413a3SMarc Zyngier 		 * If nobody did set the configuration before, inherit
14524f8413a3SMarc Zyngier 		 * the one provided by the requester.
14534f8413a3SMarc Zyngier 		 */
14544f8413a3SMarc Zyngier 		if (irqd_trigger_type_was_set(&desc->irq_data)) {
14554f8413a3SMarc Zyngier 			oldtype = irqd_get_trigger_type(&desc->irq_data);
14564f8413a3SMarc Zyngier 		} else {
14574f8413a3SMarc Zyngier 			oldtype = new->flags & IRQF_TRIGGER_MASK;
14584f8413a3SMarc Zyngier 			irqd_set_trigger_type(&desc->irq_data, oldtype);
14594f8413a3SMarc Zyngier 		}
1460382bd4deSHans de Goede 
14613cca53b0SThomas Gleixner 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
1462382bd4deSHans de Goede 		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1463f5d89470SThomas Gleixner 		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
1464f5163427SDimitri Sivanich 			goto mismatch;
1465f5163427SDimitri Sivanich 
1466f5163427SDimitri Sivanich 		/* All handlers must agree on per-cpuness */
14673cca53b0SThomas Gleixner 		if ((old->flags & IRQF_PERCPU) !=
14683cca53b0SThomas Gleixner 		    (new->flags & IRQF_PERCPU))
1469f5163427SDimitri Sivanich 			goto mismatch;
14701da177e4SLinus Torvalds 
14711da177e4SLinus Torvalds 		/* add new interrupt at end of irq queue */
14721da177e4SLinus Torvalds 		do {
147352abb700SThomas Gleixner 			/*
147452abb700SThomas Gleixner 			 * Or all existing action->thread_mask bits,
147552abb700SThomas Gleixner 			 * so we can find the next zero bit for this
147652abb700SThomas Gleixner 			 * new action.
147752abb700SThomas Gleixner 			 */
1478b5faba21SThomas Gleixner 			thread_mask |= old->thread_mask;
1479f17c7545SIngo Molnar 			old_ptr = &old->next;
1480f17c7545SIngo Molnar 			old = *old_ptr;
14811da177e4SLinus Torvalds 		} while (old);
14821da177e4SLinus Torvalds 		shared = 1;
14831da177e4SLinus Torvalds 	}
14841da177e4SLinus Torvalds 
1485b5faba21SThomas Gleixner 	/*
148652abb700SThomas Gleixner 	 * Setup the thread mask for this irqaction for ONESHOT. For
148752abb700SThomas Gleixner 	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
148852abb700SThomas Gleixner 	 * conditional in irq_wake_thread().
1489b5faba21SThomas Gleixner 	 */
149052abb700SThomas Gleixner 	if (new->flags & IRQF_ONESHOT) {
149152abb700SThomas Gleixner 		/*
149252abb700SThomas Gleixner 		 * Unlikely to have 32 or 64 irqs sharing one line,
149352abb700SThomas Gleixner 		 * but who knows.
149452abb700SThomas Gleixner 		 */
149552abb700SThomas Gleixner 		if (thread_mask == ~0UL) {
1496b5faba21SThomas Gleixner 			ret = -EBUSY;
1497cba4235eSThomas Gleixner 			goto out_unlock;
1498b5faba21SThomas Gleixner 		}
149952abb700SThomas Gleixner 		/*
150052abb700SThomas Gleixner 		 * The thread_mask for the action is or'ed to
150152abb700SThomas Gleixner 		 * desc->threads_active to indicate that the
150252abb700SThomas Gleixner 		 * IRQF_ONESHOT thread handler has been woken, but not
150352abb700SThomas Gleixner 		 * yet finished. The bit is cleared when a thread
150452abb700SThomas Gleixner 		 * completes. When all threads of a shared interrupt
150552abb700SThomas Gleixner 		 * line have completed desc->threads_active becomes
150652abb700SThomas Gleixner 		 * zero and the interrupt line is unmasked. See
150752abb700SThomas Gleixner 		 * handle.c:irq_wake_thread() for further information.
150852abb700SThomas Gleixner 		 *
150952abb700SThomas Gleixner 		 * If no thread is woken by primary (hard irq context)
151052abb700SThomas Gleixner 		 * interrupt handlers, then desc->threads_active is
151152abb700SThomas Gleixner 		 * also checked for zero to unmask the irq line in the
151252abb700SThomas Gleixner 		 * affected hard irq flow handlers
151352abb700SThomas Gleixner 		 * (handle_[fasteoi|level]_irq).
151452abb700SThomas Gleixner 		 *
151552abb700SThomas Gleixner 		 * The new action gets the first zero bit of
151652abb700SThomas Gleixner 		 * thread_mask assigned. See the loop above which or's
151752abb700SThomas Gleixner 		 * all existing action->thread_mask bits.
151852abb700SThomas Gleixner 		 */
1519ffc661c9SRasmus Villemoes 		new->thread_mask = 1UL << ffz(thread_mask);
15201c6c6952SThomas Gleixner 
1521dc9b229aSThomas Gleixner 	} else if (new->handler == irq_default_primary_handler &&
1522dc9b229aSThomas Gleixner 		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
15231c6c6952SThomas Gleixner 		/*
15241c6c6952SThomas Gleixner 		 * The interrupt was requested with handler = NULL, so
15251c6c6952SThomas Gleixner 		 * we use the default primary handler for it. But it
15261c6c6952SThomas Gleixner 		 * does not have the oneshot flag set. In combination
15271c6c6952SThomas Gleixner 		 * with level interrupts this is deadly, because the
15281c6c6952SThomas Gleixner 		 * default primary handler just wakes the thread, then
15291c6c6952SThomas Gleixner 		 * the irq line is reenabled, but the device still
15301c6c6952SThomas Gleixner 		 * has the level irq asserted. Rinse and repeat....
15311c6c6952SThomas Gleixner 		 *
15321c6c6952SThomas Gleixner 		 * While this works for edge type interrupts, we play
15331c6c6952SThomas Gleixner 		 * it safe and reject unconditionally because we can't
15341c6c6952SThomas Gleixner 		 * say for sure which type this interrupt really
15351c6c6952SThomas Gleixner 		 * has. The type flags are unreliable as the
15361c6c6952SThomas Gleixner 		 * underlying chip implementation can override them.
15371c6c6952SThomas Gleixner 		 */
1538025af39bSLuca Ceresoli 		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1539025af39bSLuca Ceresoli 		       new->name, irq);
15401c6c6952SThomas Gleixner 		ret = -EINVAL;
1541cba4235eSThomas Gleixner 		goto out_unlock;
154252abb700SThomas Gleixner 	}
1543b5faba21SThomas Gleixner 
15441da177e4SLinus Torvalds 	if (!shared) {
15453aa551c9SThomas Gleixner 		init_waitqueue_head(&desc->wait_for_threads);
15463aa551c9SThomas Gleixner 
154782736f4dSUwe Kleine-König 		/* Setup the type (level, edge polarity) if configured: */
154882736f4dSUwe Kleine-König 		if (new->flags & IRQF_TRIGGER_MASK) {
1549a1ff541aSJiang Liu 			ret = __irq_set_trigger(desc,
1550f2b662daSDavid Brownell 						new->flags & IRQF_TRIGGER_MASK);
155182736f4dSUwe Kleine-König 
155219d39a38SThomas Gleixner 			if (ret)
1553cba4235eSThomas Gleixner 				goto out_unlock;
1554091738a2SThomas Gleixner 		}
1555f75d222bSAhmed S. Darwish 
1556c942cee4SThomas Gleixner 		/*
1557c942cee4SThomas Gleixner 		 * Activate the interrupt. That activation must happen
1558c942cee4SThomas Gleixner 		 * independently of IRQ_NOAUTOEN. request_irq() can fail
1559c942cee4SThomas Gleixner 		 * and the callers are supposed to handle
1560c942cee4SThomas Gleixner 		 * that. enable_irq() of an interrupt requested with
1561c942cee4SThomas Gleixner 		 * IRQ_NOAUTOEN is not supposed to fail. The activation
1562c942cee4SThomas Gleixner 		 * keeps it in shutdown mode, it merely associates
1563c942cee4SThomas Gleixner 		 * resources if necessary and if that's not possible it
1564c942cee4SThomas Gleixner 		 * fails. Interrupts which are in managed shutdown mode
1565c942cee4SThomas Gleixner 		 * will simply ignore that activation request.
1566c942cee4SThomas Gleixner 		 */
1567c942cee4SThomas Gleixner 		ret = irq_activate(desc);
1568c942cee4SThomas Gleixner 		if (ret)
1569c942cee4SThomas Gleixner 			goto out_unlock;
1570c942cee4SThomas Gleixner 
1571009b4c3bSThomas Gleixner 		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
157232f4125eSThomas Gleixner 				  IRQS_ONESHOT | IRQS_WAITING);
157332f4125eSThomas Gleixner 		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
157494d39e1fSThomas Gleixner 
1575a005677bSThomas Gleixner 		if (new->flags & IRQF_PERCPU) {
1576a005677bSThomas Gleixner 			irqd_set(&desc->irq_data, IRQD_PER_CPU);
1577a005677bSThomas Gleixner 			irq_settings_set_per_cpu(desc);
1578a005677bSThomas Gleixner 		}
15796a58fb3bSThomas Gleixner 
1580b25c340cSThomas Gleixner 		if (new->flags & IRQF_ONESHOT)
15813d67baecSThomas Gleixner 			desc->istate |= IRQS_ONESHOT;
1582b25c340cSThomas Gleixner 
15832e051552SThomas Gleixner 		/* Exclude IRQ from balancing if requested */
15842e051552SThomas Gleixner 		if (new->flags & IRQF_NOBALANCING) {
15852e051552SThomas Gleixner 			irq_settings_set_no_balancing(desc);
15862e051552SThomas Gleixner 			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
15872e051552SThomas Gleixner 		}
15882e051552SThomas Gleixner 
158904c848d3SThomas Gleixner 		if (irq_settings_can_autoenable(desc)) {
15904cde9c6bSThomas Gleixner 			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
159104c848d3SThomas Gleixner 		} else {
159204c848d3SThomas Gleixner 			/*
159304c848d3SThomas Gleixner 			 * Shared interrupts do not go well with disabling
159404c848d3SThomas Gleixner 			 * auto enable. A sharing driver might request the
159504c848d3SThomas Gleixner 			 * interrupt while it is still disabled and then wait for
159604c848d3SThomas Gleixner 			 * interrupts forever.
159704c848d3SThomas Gleixner 			 */
159804c848d3SThomas Gleixner 			WARN_ON_ONCE(new->flags & IRQF_SHARED);
1599e76de9f8SThomas Gleixner 			/* Undo nested disables: */
1600e76de9f8SThomas Gleixner 			desc->depth = 1;
160104c848d3SThomas Gleixner 		}
160218404756SMax Krasnyansky 
1603876dbd4cSThomas Gleixner 	} else if (new->flags & IRQF_TRIGGER_MASK) {
1604876dbd4cSThomas Gleixner 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
16057ee7e87dSThomas Gleixner 		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1606876dbd4cSThomas Gleixner 
1607876dbd4cSThomas Gleixner 		if (nmsk != omsk)
1608876dbd4cSThomas Gleixner 			/* hope the handler works with current trigger mode */
1609a395d6a7SJoe Perches 			pr_warn("irq %d uses trigger mode %u; requested %u\n",
16107ee7e87dSThomas Gleixner 				irq, omsk, nmsk);
161194d39e1fSThomas Gleixner 	}
161282736f4dSUwe Kleine-König 
1613f17c7545SIngo Molnar 	*old_ptr = new;
161482736f4dSUwe Kleine-König 
1615cab303beSThomas Gleixner 	irq_pm_install_action(desc, new);
1616cab303beSThomas Gleixner 
16178528b0f1SLinus Torvalds 	/* Reset broken irq detection when installing new handler */
16188528b0f1SLinus Torvalds 	desc->irq_count = 0;
16198528b0f1SLinus Torvalds 	desc->irqs_unhandled = 0;
16201adb0850SThomas Gleixner 
16211adb0850SThomas Gleixner 	/*
16221adb0850SThomas Gleixner 	 * Check whether we disabled the irq via the spurious handler
16231adb0850SThomas Gleixner 	 * before. Reenable it and give it another chance.
16241adb0850SThomas Gleixner 	 */
16257acdd53eSThomas Gleixner 	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
16267acdd53eSThomas Gleixner 		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
162779ff1cdaSJiang Liu 		__enable_irq(desc);
16281adb0850SThomas Gleixner 	}
16291adb0850SThomas Gleixner 
1630239007b8SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
16313a90795eSThomas Gleixner 	chip_bus_sync_unlock(desc);
16329114014cSThomas Gleixner 	mutex_unlock(&desc->request_mutex);
16331da177e4SLinus Torvalds 
1634b2d3d61aSDaniel Lezcano 	irq_setup_timings(desc, new);
1635b2d3d61aSDaniel Lezcano 
163669ab8494SThomas Gleixner 	/*
163769ab8494SThomas Gleixner 	 * Strictly no need to wake it up, but hung_task complains
163869ab8494SThomas Gleixner 	 * when no hard interrupt wakes the thread up.
163969ab8494SThomas Gleixner 	 */
164069ab8494SThomas Gleixner 	if (new->thread)
164169ab8494SThomas Gleixner 		wake_up_process(new->thread);
16422a1d3ab8SThomas Gleixner 	if (new->secondary)
16432a1d3ab8SThomas Gleixner 		wake_up_process(new->secondary->thread);
164469ab8494SThomas Gleixner 
16452c6927a3SYinghai Lu 	register_irq_proc(irq, desc);
16461da177e4SLinus Torvalds 	new->dir = NULL;
16471da177e4SLinus Torvalds 	register_handler_proc(irq, new);
16481da177e4SLinus Torvalds 	return 0;
1649f5163427SDimitri Sivanich 
1650f5163427SDimitri Sivanich mismatch:
16513cca53b0SThomas Gleixner 	if (!(new->flags & IRQF_PROBE_SHARED)) {
165297fd75b7SAndrew Morton 		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1653f5d89470SThomas Gleixner 		       irq, new->flags, new->name, old->flags, old->name);
1654f5d89470SThomas Gleixner #ifdef CONFIG_DEBUG_SHIRQ
1655f5163427SDimitri Sivanich 		dump_stack();
16563f050447SAlan Cox #endif
1657f5d89470SThomas Gleixner 	}
16583aa551c9SThomas Gleixner 	ret = -EBUSY;
16593aa551c9SThomas Gleixner 
1660cba4235eSThomas Gleixner out_unlock:
16611c389795SDan Carpenter 	raw_spin_unlock_irqrestore(&desc->lock, flags);
16623b8249e7SThomas Gleixner 
166346e48e25SThomas Gleixner 	if (!desc->action)
166446e48e25SThomas Gleixner 		irq_release_resources(desc);
166519d39a38SThomas Gleixner out_bus_unlock:
166619d39a38SThomas Gleixner 	chip_bus_sync_unlock(desc);
16679114014cSThomas Gleixner 	mutex_unlock(&desc->request_mutex);
16689114014cSThomas Gleixner 
16693aa551c9SThomas Gleixner out_thread:
16703aa551c9SThomas Gleixner 	if (new->thread) {
16713aa551c9SThomas Gleixner 		struct task_struct *t = new->thread;
16723aa551c9SThomas Gleixner 
16733aa551c9SThomas Gleixner 		new->thread = NULL;
16743aa551c9SThomas Gleixner 		kthread_stop(t);
16753aa551c9SThomas Gleixner 		put_task_struct(t);
16763aa551c9SThomas Gleixner 	}
16772a1d3ab8SThomas Gleixner 	if (new->secondary && new->secondary->thread) {
16782a1d3ab8SThomas Gleixner 		struct task_struct *t = new->secondary->thread;
16792a1d3ab8SThomas Gleixner 
16802a1d3ab8SThomas Gleixner 		new->secondary->thread = NULL;
16812a1d3ab8SThomas Gleixner 		kthread_stop(t);
16822a1d3ab8SThomas Gleixner 		put_task_struct(t);
16832a1d3ab8SThomas Gleixner 	}
1684b6873807SSebastian Andrzej Siewior out_mput:
1685b6873807SSebastian Andrzej Siewior 	module_put(desc->owner);
16863aa551c9SThomas Gleixner 	return ret;
16871da177e4SLinus Torvalds }
16881da177e4SLinus Torvalds 
1689cbf94f06SMagnus Damm /*
1690cbf94f06SMagnus Damm  * Internal function to unregister an irqaction - used to free
1691cbf94f06SMagnus Damm  * regular and special interrupts that are part of the architecture.
16921da177e4SLinus Torvalds  */
169383ac4ca9SUwe Kleine König static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
16941da177e4SLinus Torvalds {
169583ac4ca9SUwe Kleine König 	unsigned irq = desc->irq_data.irq;
1696f17c7545SIngo Molnar 	struct irqaction *action, **action_ptr;
16971da177e4SLinus Torvalds 	unsigned long flags;
16981da177e4SLinus Torvalds 
1699ae88a23bSIngo Molnar 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
17007d94f7caSYinghai Lu 
17019114014cSThomas Gleixner 	mutex_lock(&desc->request_mutex);
1702abc7e40cSThomas Gleixner 	chip_bus_lock(desc);
1703239007b8SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
1704ae88a23bSIngo Molnar 
1705ae88a23bSIngo Molnar 	/*
1706ae88a23bSIngo Molnar 	 * There can be multiple actions per IRQ descriptor, find the right
1707ae88a23bSIngo Molnar 	 * one based on the dev_id:
1708ae88a23bSIngo Molnar 	 */
1709f17c7545SIngo Molnar 	action_ptr = &desc->action;
17101da177e4SLinus Torvalds 	for (;;) {
1711f17c7545SIngo Molnar 		action = *action_ptr;
17121da177e4SLinus Torvalds 
1713ae88a23bSIngo Molnar 		if (!action) {
1714ae88a23bSIngo Molnar 			WARN(1, "Trying to free already-free IRQ %d\n", irq);
1715239007b8SThomas Gleixner 			raw_spin_unlock_irqrestore(&desc->lock, flags);
1716abc7e40cSThomas Gleixner 			chip_bus_sync_unlock(desc);
171719d39a38SThomas Gleixner 			mutex_unlock(&desc->request_mutex);
1718f21cfb25SMagnus Damm 			return NULL;
1719ae88a23bSIngo Molnar 		}
17201da177e4SLinus Torvalds 
17218316e381SIngo Molnar 		if (action->dev_id == dev_id)
1722ae88a23bSIngo Molnar 			break;
1723f17c7545SIngo Molnar 		action_ptr = &action->next;
1724ae88a23bSIngo Molnar 	}
1725ae88a23bSIngo Molnar 
1726ae88a23bSIngo Molnar 	/* Found it - now remove it from the list of entries: */
1727f17c7545SIngo Molnar 	*action_ptr = action->next;
1728dbce706eSPaolo 'Blaisorblade' Giarrusso 
1729cab303beSThomas Gleixner 	irq_pm_remove_action(desc, action);
1730cab303beSThomas Gleixner 
1731ae88a23bSIngo Molnar 	/* If this was the last handler, shut down the IRQ line: */
1732c1bacbaeSThomas Gleixner 	if (!desc->action) {
1733e9849777SThomas Gleixner 		irq_settings_clr_disable_unlazy(desc);
17344001d8e8SThomas Gleixner 		/* Only shutdown. Deactivate after synchronize_hardirq() */
173546999238SThomas Gleixner 		irq_shutdown(desc);
1736c1bacbaeSThomas Gleixner 	}
17373aa551c9SThomas Gleixner 
1738e7a297b0SPeter P Waskiewicz Jr #ifdef CONFIG_SMP
1739e7a297b0SPeter P Waskiewicz Jr 	/* make sure affinity_hint is cleaned up */
1740e7a297b0SPeter P Waskiewicz Jr 	if (WARN_ON_ONCE(desc->affinity_hint))
1741e7a297b0SPeter P Waskiewicz Jr 		desc->affinity_hint = NULL;
1742e7a297b0SPeter P Waskiewicz Jr #endif
1743e7a297b0SPeter P Waskiewicz Jr 
1744239007b8SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
174519d39a38SThomas Gleixner 	/*
174619d39a38SThomas Gleixner 	 * Drop bus_lock here so the changes which were done in the chip
174719d39a38SThomas Gleixner 	 * callbacks above are synced out to the irq chips which hang
1748519cc865SLukas Wunner 	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
174919d39a38SThomas Gleixner 	 *
175019d39a38SThomas Gleixner 	 * Aside of that the bus_lock can also be taken from the threaded
175119d39a38SThomas Gleixner 	 * handler in irq_finalize_oneshot() which results in a deadlock
1752519cc865SLukas Wunner 	 * because kthread_stop() would wait forever for the thread to
175319d39a38SThomas Gleixner 	 * complete, which is blocked on the bus lock.
175419d39a38SThomas Gleixner 	 *
175519d39a38SThomas Gleixner 	 * The still held desc->request_mutex protects against a
175619d39a38SThomas Gleixner 	 * concurrent request_irq() of this irq so the release of resources
175719d39a38SThomas Gleixner 	 * and timing data is properly serialized.
175819d39a38SThomas Gleixner 	 */
1759abc7e40cSThomas Gleixner 	chip_bus_sync_unlock(desc);
1760ae88a23bSIngo Molnar 
17611da177e4SLinus Torvalds 	unregister_handler_proc(irq, action);
17621da177e4SLinus Torvalds 
176362e04686SThomas Gleixner 	/*
176462e04686SThomas Gleixner 	 * Make sure it's not being used on another CPU and if the chip
176562e04686SThomas Gleixner 	 * supports it also make sure that there is no (not yet serviced)
176662e04686SThomas Gleixner 	 * interrupt in flight at the hardware level.
176762e04686SThomas Gleixner 	 */
176862e04686SThomas Gleixner 	__synchronize_hardirq(desc, true);
1769ae88a23bSIngo Molnar 
17701d99493bSDavid Woodhouse #ifdef CONFIG_DEBUG_SHIRQ
17711d99493bSDavid Woodhouse 	/*
1772ae88a23bSIngo Molnar 	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1773ae88a23bSIngo Molnar 	 * event to happen even now it's being freed, so let's make sure that
1774ae88a23bSIngo Molnar 	 * event to happen even now that it's being freed, so let's make sure that
1775ae88a23bSIngo Molnar 	 *
1776ae88a23bSIngo Molnar 	 * ( We do this after actually deregistering it, to make sure that a
17770a13ec0bSJonathan Neuschäfer 	 *   'real' IRQ doesn't run in parallel with our fake. )
17781d99493bSDavid Woodhouse 	 */
17791d99493bSDavid Woodhouse 	if (action->flags & IRQF_SHARED) {
17801d99493bSDavid Woodhouse 		local_irq_save(flags);
17811d99493bSDavid Woodhouse 		action->handler(irq, dev_id);
17821d99493bSDavid Woodhouse 		local_irq_restore(flags);
17831d99493bSDavid Woodhouse 	}
17841d99493bSDavid Woodhouse #endif
17852d860ad7SLinus Torvalds 
1786519cc865SLukas Wunner 	/*
1787519cc865SLukas Wunner 	 * The action has already been removed above, but the thread writes
1788519cc865SLukas Wunner 	 * its oneshot mask bit when it completes. Though request_mutex is
1789519cc865SLukas Wunner 	 * held across this which prevents __setup_irq() from handing out
1790519cc865SLukas Wunner 	 * the same bit to a newly requested action.
1791519cc865SLukas Wunner 	 */
17922d860ad7SLinus Torvalds 	if (action->thread) {
17932d860ad7SLinus Torvalds 		kthread_stop(action->thread);
17942d860ad7SLinus Torvalds 		put_task_struct(action->thread);
17952a1d3ab8SThomas Gleixner 		if (action->secondary && action->secondary->thread) {
17962a1d3ab8SThomas Gleixner 			kthread_stop(action->secondary->thread);
17972a1d3ab8SThomas Gleixner 			put_task_struct(action->secondary->thread);
17982a1d3ab8SThomas Gleixner 		}
17992d860ad7SLinus Torvalds 	}
18002d860ad7SLinus Torvalds 
180119d39a38SThomas Gleixner 	/* Last action releases resources */
18022343877fSThomas Gleixner 	if (!desc->action) {
180319d39a38SThomas Gleixner 		/*
180419d39a38SThomas Gleixner 		 * Reacquire bus lock as irq_release_resources() might
180519d39a38SThomas Gleixner 		 * require it to deallocate resources over the slow bus.
180619d39a38SThomas Gleixner 		 */
180719d39a38SThomas Gleixner 		chip_bus_lock(desc);
18084001d8e8SThomas Gleixner 		/*
18094001d8e8SThomas Gleixner 		 * There is no interrupt on the fly anymore. Deactivate it
18104001d8e8SThomas Gleixner 		 * completely.
18114001d8e8SThomas Gleixner 		 */
18124001d8e8SThomas Gleixner 		raw_spin_lock_irqsave(&desc->lock, flags);
18134001d8e8SThomas Gleixner 		irq_domain_deactivate_irq(&desc->irq_data);
18144001d8e8SThomas Gleixner 		raw_spin_unlock_irqrestore(&desc->lock, flags);
18154001d8e8SThomas Gleixner 
181646e48e25SThomas Gleixner 		irq_release_resources(desc);
181719d39a38SThomas Gleixner 		chip_bus_sync_unlock(desc);
18182343877fSThomas Gleixner 		irq_remove_timings(desc);
18192343877fSThomas Gleixner 	}
182046e48e25SThomas Gleixner 
18219114014cSThomas Gleixner 	mutex_unlock(&desc->request_mutex);
18229114014cSThomas Gleixner 
1823be45beb2SJon Hunter 	irq_chip_pm_put(&desc->irq_data);
1824b6873807SSebastian Andrzej Siewior 	module_put(desc->owner);
18252a1d3ab8SThomas Gleixner 	kfree(action->secondary);
1826f21cfb25SMagnus Damm 	return action;
1827f21cfb25SMagnus Damm }
18281da177e4SLinus Torvalds 
18291da177e4SLinus Torvalds /**
1830f21cfb25SMagnus Damm  *	free_irq - free an interrupt allocated with request_irq
18311da177e4SLinus Torvalds  *	@irq: Interrupt line to free
18321da177e4SLinus Torvalds  *	@dev_id: Device identity to free
18331da177e4SLinus Torvalds  *
18341da177e4SLinus Torvalds  *	Remove an interrupt handler. The handler is removed and if the
18351da177e4SLinus Torvalds  *	interrupt line is no longer in use by any driver it is disabled.
18361da177e4SLinus Torvalds  *	On a shared IRQ the caller must ensure the interrupt is disabled
18371da177e4SLinus Torvalds  *	on the card it drives before calling this function. The function
18381da177e4SLinus Torvalds  *	does not return until any executing interrupts for this IRQ
18391da177e4SLinus Torvalds  *	have completed.
18401da177e4SLinus Torvalds  *
18411da177e4SLinus Torvalds  *	This function must not be called from interrupt context.
184225ce4be7SChristoph Hellwig  *
184325ce4be7SChristoph Hellwig  *	Returns the devname argument passed to request_irq.
18441da177e4SLinus Torvalds  */
184525ce4be7SChristoph Hellwig const void *free_irq(unsigned int irq, void *dev_id)
18461da177e4SLinus Torvalds {
184770aedd24SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
184825ce4be7SChristoph Hellwig 	struct irqaction *action;
184925ce4be7SChristoph Hellwig 	const char *devname;
185070aedd24SThomas Gleixner 
185131d9d9b6SMarc Zyngier 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
185225ce4be7SChristoph Hellwig 		return NULL;
185370aedd24SThomas Gleixner 
1854cd7eab44SBen Hutchings #ifdef CONFIG_SMP
1855cd7eab44SBen Hutchings 	if (WARN_ON(desc->affinity_notify))
1856cd7eab44SBen Hutchings 		desc->affinity_notify = NULL;
1857cd7eab44SBen Hutchings #endif
1858cd7eab44SBen Hutchings 
185983ac4ca9SUwe Kleine König 	action = __free_irq(desc, dev_id);
18602827a418SAlexandru Moise 
18612827a418SAlexandru Moise 	if (!action)
18622827a418SAlexandru Moise 		return NULL;
18632827a418SAlexandru Moise 
186425ce4be7SChristoph Hellwig 	devname = action->name;
186525ce4be7SChristoph Hellwig 	kfree(action);
186625ce4be7SChristoph Hellwig 	return devname;
18671da177e4SLinus Torvalds }
18681da177e4SLinus Torvalds EXPORT_SYMBOL(free_irq);
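/*
 * Illustrative sketch: typical driver teardown pairing request_irq() with
 * free_irq(). The device structure and helpers are hypothetical; as noted
 * above, a shared line must be quiesced on the card before it is freed.
 *
 *	static void my_dev_teardown(struct my_dev *md)
 *	{
 *		my_dev_mask_all_irqs(md);	// silence the card first
 *		free_irq(md->irq, md);		// same dev_id as request_irq()
 *	}
 */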
18691da177e4SLinus Torvalds 
1870b525903cSJulien Thierry /* This function must be called with desc->lock held */
1871b525903cSJulien Thierry static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
1872b525903cSJulien Thierry {
1873b525903cSJulien Thierry 	const char *devname = NULL;
1874b525903cSJulien Thierry 
1875b525903cSJulien Thierry 	desc->istate &= ~IRQS_NMI;
1876b525903cSJulien Thierry 
1877b525903cSJulien Thierry 	if (!WARN_ON(desc->action == NULL)) {
1878b525903cSJulien Thierry 		irq_pm_remove_action(desc, desc->action);
1879b525903cSJulien Thierry 		devname = desc->action->name;
1880b525903cSJulien Thierry 		unregister_handler_proc(irq, desc->action);
1881b525903cSJulien Thierry 
1882b525903cSJulien Thierry 		kfree(desc->action);
1883b525903cSJulien Thierry 		desc->action = NULL;
1884b525903cSJulien Thierry 	}
1885b525903cSJulien Thierry 
1886b525903cSJulien Thierry 	irq_settings_clr_disable_unlazy(desc);
18874001d8e8SThomas Gleixner 	irq_shutdown_and_deactivate(desc);
1888b525903cSJulien Thierry 
1889b525903cSJulien Thierry 	irq_release_resources(desc);
1890b525903cSJulien Thierry 
1891b525903cSJulien Thierry 	irq_chip_pm_put(&desc->irq_data);
1892b525903cSJulien Thierry 	module_put(desc->owner);
1893b525903cSJulien Thierry 
1894b525903cSJulien Thierry 	return devname;
1895b525903cSJulien Thierry }
1896b525903cSJulien Thierry 
1897b525903cSJulien Thierry const void *free_nmi(unsigned int irq, void *dev_id)
1898b525903cSJulien Thierry {
1899b525903cSJulien Thierry 	struct irq_desc *desc = irq_to_desc(irq);
1900b525903cSJulien Thierry 	unsigned long flags;
1901b525903cSJulien Thierry 	const void *devname;
1902b525903cSJulien Thierry 
1903b525903cSJulien Thierry 	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
1904b525903cSJulien Thierry 		return NULL;
1905b525903cSJulien Thierry 
1906b525903cSJulien Thierry 	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1907b525903cSJulien Thierry 		return NULL;
1908b525903cSJulien Thierry 
1909b525903cSJulien Thierry 	/* NMI still enabled */
1910b525903cSJulien Thierry 	if (WARN_ON(desc->depth == 0))
1911b525903cSJulien Thierry 		disable_nmi_nosync(irq);
1912b525903cSJulien Thierry 
1913b525903cSJulien Thierry 	raw_spin_lock_irqsave(&desc->lock, flags);
1914b525903cSJulien Thierry 
1915b525903cSJulien Thierry 	irq_nmi_teardown(desc);
1916b525903cSJulien Thierry 	devname = __cleanup_nmi(irq, desc);
1917b525903cSJulien Thierry 
1918b525903cSJulien Thierry 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1919b525903cSJulien Thierry 
1920b525903cSJulien Thierry 	return devname;
1921b525903cSJulien Thierry }
1922b525903cSJulien Thierry 
19231da177e4SLinus Torvalds /**
19243aa551c9SThomas Gleixner  *	request_threaded_irq - allocate an interrupt line
19251da177e4SLinus Torvalds  *	@irq: Interrupt line to allocate
19263aa551c9SThomas Gleixner  *	@handler: Function to be called when the IRQ occurs.
19273aa551c9SThomas Gleixner  *		  Primary handler for threaded interrupts
1928b25c340cSThomas Gleixner  *		  If NULL and thread_fn != NULL the default
1929b25c340cSThomas Gleixner  *		  primary handler is installed
19303aa551c9SThomas Gleixner  *	@thread_fn: Function called from the irq handler thread
19313aa551c9SThomas Gleixner  *		    If NULL, no irq thread is created
19321da177e4SLinus Torvalds  *	@irqflags: Interrupt type flags
19331da177e4SLinus Torvalds  *	@devname: An ascii name for the claiming device
19341da177e4SLinus Torvalds  *	@dev_id: A cookie passed back to the handler function
19351da177e4SLinus Torvalds  *
19361da177e4SLinus Torvalds  *	This call allocates interrupt resources and enables the
19371da177e4SLinus Torvalds  *	interrupt line and IRQ handling. From the point this
19381da177e4SLinus Torvalds  *	call is made your handler function may be invoked. Since
19391da177e4SLinus Torvalds  *	your handler function must clear any interrupt the board
19401da177e4SLinus Torvalds  *	raises, you must take care both to initialise your hardware
19411da177e4SLinus Torvalds  *	and to set up the interrupt handler in the right order.
19421da177e4SLinus Torvalds  *
19433aa551c9SThomas Gleixner  *	If you want to set up a threaded irq handler for your device
19446d21af4fSJavi Merino  *	then you need to supply @handler and @thread_fn. @handler is
19453aa551c9SThomas Gleixner  *	still called in hard interrupt context and has to check
19463aa551c9SThomas Gleixner  *	whether the interrupt originates from the device. If yes it
19473aa551c9SThomas Gleixner  *	needs to disable the interrupt on the device and return
194839a2eddbSSteven Rostedt  *	IRQ_WAKE_THREAD which will wake up the handler thread and run
19493aa551c9SThomas Gleixner  *	@thread_fn. This split handler design is necessary to support
19503aa551c9SThomas Gleixner  *	shared interrupts.
19513aa551c9SThomas Gleixner  *
19521da177e4SLinus Torvalds  *	Dev_id must be globally unique. Normally the address of the
19531da177e4SLinus Torvalds  *	device data structure is used as the cookie. Since the handler
19541da177e4SLinus Torvalds  *	receives this value it makes sense to use it.
19551da177e4SLinus Torvalds  *
19561da177e4SLinus Torvalds  *	If your interrupt is shared you must pass a non NULL dev_id
19571da177e4SLinus Torvalds  *	as this is required when freeing the interrupt.
19581da177e4SLinus Torvalds  *
19591da177e4SLinus Torvalds  *	Flags:
19601da177e4SLinus Torvalds  *
19613cca53b0SThomas Gleixner  *	IRQF_SHARED		Interrupt is shared
19620c5d1eb7SDavid Brownell  *	IRQF_TRIGGER_*		Specify active edge(s) or level
19631da177e4SLinus Torvalds  *
19641da177e4SLinus Torvalds  */
19653aa551c9SThomas Gleixner int request_threaded_irq(unsigned int irq, irq_handler_t handler,
19663aa551c9SThomas Gleixner 			 irq_handler_t thread_fn, unsigned long irqflags,
19673aa551c9SThomas Gleixner 			 const char *devname, void *dev_id)
19681da177e4SLinus Torvalds {
19691da177e4SLinus Torvalds 	struct irqaction *action;
197008678b08SYinghai Lu 	struct irq_desc *desc;
1971d3c60047SThomas Gleixner 	int retval;
19721da177e4SLinus Torvalds 
1973e237a551SChen Fan 	if (irq == IRQ_NOTCONNECTED)
1974e237a551SChen Fan 		return -ENOTCONN;
1975e237a551SChen Fan 
1976470c6623SDavid Brownell 	/*
19771da177e4SLinus Torvalds 	 * Sanity-check: shared interrupts must pass in a real dev-ID,
19781da177e4SLinus Torvalds 	 * otherwise we'll have trouble later trying to figure out
19791da177e4SLinus Torvalds 	 * which interrupt is which (messes up the interrupt freeing
19801da177e4SLinus Torvalds 	 * logic etc).
198117f48034SRafael J. Wysocki 	 *
198217f48034SRafael J. Wysocki 	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
198317f48034SRafael J. Wysocki 	 * it cannot be set along with IRQF_NO_SUSPEND.
19841da177e4SLinus Torvalds 	 */
198517f48034SRafael J. Wysocki 	if (((irqflags & IRQF_SHARED) && !dev_id) ||
198617f48034SRafael J. Wysocki 	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
198717f48034SRafael J. Wysocki 	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
19881da177e4SLinus Torvalds 		return -EINVAL;
19897d94f7caSYinghai Lu 
1990cb5bc832SYinghai Lu 	desc = irq_to_desc(irq);
19917d94f7caSYinghai Lu 	if (!desc)
19921da177e4SLinus Torvalds 		return -EINVAL;
19937d94f7caSYinghai Lu 
199431d9d9b6SMarc Zyngier 	if (!irq_settings_can_request(desc) ||
199531d9d9b6SMarc Zyngier 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
19966550c775SThomas Gleixner 		return -EINVAL;
1997b25c340cSThomas Gleixner 
1998b25c340cSThomas Gleixner 	if (!handler) {
1999b25c340cSThomas Gleixner 		if (!thread_fn)
20001da177e4SLinus Torvalds 			return -EINVAL;
2001b25c340cSThomas Gleixner 		handler = irq_default_primary_handler;
2002b25c340cSThomas Gleixner 	}
20031da177e4SLinus Torvalds 
200445535732SThomas Gleixner 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
20051da177e4SLinus Torvalds 	if (!action)
20061da177e4SLinus Torvalds 		return -ENOMEM;
20071da177e4SLinus Torvalds 
20081da177e4SLinus Torvalds 	action->handler = handler;
20093aa551c9SThomas Gleixner 	action->thread_fn = thread_fn;
20101da177e4SLinus Torvalds 	action->flags = irqflags;
20111da177e4SLinus Torvalds 	action->name = devname;
20121da177e4SLinus Torvalds 	action->dev_id = dev_id;
20131da177e4SLinus Torvalds 
2014be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
20154396f46cSShawn Lin 	if (retval < 0) {
20164396f46cSShawn Lin 		kfree(action);
2017be45beb2SJon Hunter 		return retval;
20184396f46cSShawn Lin 	}
2019be45beb2SJon Hunter 
2020d3c60047SThomas Gleixner 	retval = __setup_irq(irq, desc, action);
202170aedd24SThomas Gleixner 
20222a1d3ab8SThomas Gleixner 	if (retval) {
2023be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
20242a1d3ab8SThomas Gleixner 		kfree(action->secondary);
2025377bf1e4SAnton Vorontsov 		kfree(action);
20262a1d3ab8SThomas Gleixner 	}
2027377bf1e4SAnton Vorontsov 
20286d83f94dSThomas Gleixner #ifdef CONFIG_DEBUG_SHIRQ_FIXME
20296ce51c43SLuis Henriques 	if (!retval && (irqflags & IRQF_SHARED)) {
2030a304e1b8SDavid Woodhouse 		/*
2031a304e1b8SDavid Woodhouse 		 * It's a shared IRQ -- the driver ought to be prepared for it
2032a304e1b8SDavid Woodhouse 		 * to happen immediately, so let's make sure....
2033377bf1e4SAnton Vorontsov 		 * We disable the irq to make sure that a 'real' IRQ doesn't
2034377bf1e4SAnton Vorontsov 		 * run in parallel with our fake.
2035a304e1b8SDavid Woodhouse 		 */
2036a304e1b8SDavid Woodhouse 		unsigned long flags;
2037a304e1b8SDavid Woodhouse 
2038377bf1e4SAnton Vorontsov 		disable_irq(irq);
2039a304e1b8SDavid Woodhouse 		local_irq_save(flags);
2040377bf1e4SAnton Vorontsov 
2041a304e1b8SDavid Woodhouse 		handler(irq, dev_id);
2042377bf1e4SAnton Vorontsov 
2043a304e1b8SDavid Woodhouse 		local_irq_restore(flags);
2044377bf1e4SAnton Vorontsov 		enable_irq(irq);
2045a304e1b8SDavid Woodhouse 	}
2046a304e1b8SDavid Woodhouse #endif
20471da177e4SLinus Torvalds 	return retval;
20481da177e4SLinus Torvalds }
20493aa551c9SThomas Gleixner EXPORT_SYMBOL(request_threaded_irq);
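/*
 * Illustrative sketch: the split primary/threaded handler pattern served by
 * request_threaded_irq(). Device, accessors and field names are
 * hypothetical; the primary handler runs in hard interrupt context, the
 * threaded one may sleep.
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev_id)
 *	{
 *		struct my_dev *md = dev_id;
 *
 *		if (!my_dev_irq_pending(md))
 *			return IRQ_NONE;	// not ours (shared line)
 *
 *		my_dev_mask_irq(md);		// silence the device
 *		return IRQ_WAKE_THREAD;		// run my_slow_work()
 *	}
 *
 *	static irqreturn_t my_slow_work(int irq, void *dev_id)
 *	{
 *		struct my_dev *md = dev_id;
 *
 *		my_dev_process_events(md);	// may sleep, e.g. slow bus I/O
 *		my_dev_unmask_irq(md);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(md->irq, my_quick_check, my_slow_work,
 *				   IRQF_SHARED, "my-dev", md);
 */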
2050ae731f8dSMarc Zyngier 
2051ae731f8dSMarc Zyngier /**
2052ae731f8dSMarc Zyngier  *	request_any_context_irq - allocate an interrupt line
2053ae731f8dSMarc Zyngier  *	@irq: Interrupt line to allocate
2054ae731f8dSMarc Zyngier  *	@handler: Function to be called when the IRQ occurs.
2055ae731f8dSMarc Zyngier  *		  Threaded handler for threaded interrupts.
2056ae731f8dSMarc Zyngier  *	@flags: Interrupt type flags
2057ae731f8dSMarc Zyngier  *	@name: An ascii name for the claiming device
2058ae731f8dSMarc Zyngier  *	@dev_id: A cookie passed back to the handler function
2059ae731f8dSMarc Zyngier  *
2060ae731f8dSMarc Zyngier  *	This call allocates interrupt resources and enables the
2061ae731f8dSMarc Zyngier  *	interrupt line and IRQ handling. It selects either a
2062ae731f8dSMarc Zyngier  *	hardirq or threaded handling method depending on the
2063ae731f8dSMarc Zyngier  *	context.
2064ae731f8dSMarc Zyngier  *
2065ae731f8dSMarc Zyngier  *	On failure, it returns a negative value. On success,
2066ae731f8dSMarc Zyngier  *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
2067ae731f8dSMarc Zyngier  */
2068ae731f8dSMarc Zyngier int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2069ae731f8dSMarc Zyngier 			    unsigned long flags, const char *name, void *dev_id)
2070ae731f8dSMarc Zyngier {
2071e237a551SChen Fan 	struct irq_desc *desc;
2072ae731f8dSMarc Zyngier 	int ret;
2073ae731f8dSMarc Zyngier 
2074e237a551SChen Fan 	if (irq == IRQ_NOTCONNECTED)
2075e237a551SChen Fan 		return -ENOTCONN;
2076e237a551SChen Fan 
2077e237a551SChen Fan 	desc = irq_to_desc(irq);
2078ae731f8dSMarc Zyngier 	if (!desc)
2079ae731f8dSMarc Zyngier 		return -EINVAL;
2080ae731f8dSMarc Zyngier 
20811ccb4e61SThomas Gleixner 	if (irq_settings_is_nested_thread(desc)) {
2082ae731f8dSMarc Zyngier 		ret = request_threaded_irq(irq, NULL, handler,
2083ae731f8dSMarc Zyngier 					   flags, name, dev_id);
2084ae731f8dSMarc Zyngier 		return !ret ? IRQC_IS_NESTED : ret;
2085ae731f8dSMarc Zyngier 	}
2086ae731f8dSMarc Zyngier 
2087ae731f8dSMarc Zyngier 	ret = request_irq(irq, handler, flags, name, dev_id);
2088ae731f8dSMarc Zyngier 	return !ret ? IRQC_IS_HARDIRQ : ret;
2089ae731f8dSMarc Zyngier }
2090ae731f8dSMarc Zyngier EXPORT_SYMBOL_GPL(request_any_context_irq);
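
/*
 * A sketch of how the return value is consumed (the "foo" names are
 * hypothetical): unlike request_irq(), success is reported as a positive
 * code telling the caller in which context its handler will run.
 *
 *	ret = request_any_context_irq(irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	foo->nested = (ret == IRQC_IS_NESTED);
 *
 * IRQC_IS_HARDIRQ means foo_handler() runs in hard interrupt context,
 * IRQC_IS_NESTED means it is invoked from the parent interrupt's thread.
 */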
209131d9d9b6SMarc Zyngier 
2092b525903cSJulien Thierry /**
2093b525903cSJulien Thierry  *	request_nmi - allocate an interrupt line for NMI delivery
2094b525903cSJulien Thierry  *	@irq: Interrupt line to allocate
2095b525903cSJulien Thierry  *	@handler: Function to be called when the IRQ occurs.
2096b525903cSJulien Thierry  *		  Threaded handler for threaded interrupts.
2097b525903cSJulien Thierry  *	@irqflags: Interrupt type flags
2098b525903cSJulien Thierry  *	@name: An ascii name for the claiming device
2099b525903cSJulien Thierry  *	@dev_id: A cookie passed back to the handler function
2100b525903cSJulien Thierry  *
2101b525903cSJulien Thierry  *	This call allocates interrupt resources and enables the
2102b525903cSJulien Thierry  *	interrupt line and IRQ handling. It sets up the IRQ line
2103b525903cSJulien Thierry  *	to be handled as an NMI.
2104b525903cSJulien Thierry  *
2105b525903cSJulien Thierry  *	An interrupt line delivering NMIs cannot be shared and IRQ handling
2106b525903cSJulien Thierry  *	cannot be threaded.
2107b525903cSJulien Thierry  *
2108b525903cSJulien Thierry  *	Interrupt lines requested for NMI delivery must produce per-CPU
2109b525903cSJulien Thierry  *	interrupts and have the auto-enable setting disabled.
2110b525903cSJulien Thierry  *
2111b525903cSJulien Thierry  *	Dev_id must be globally unique. Normally the address of the
2112b525903cSJulien Thierry  *	device data structure is used as the cookie. Since the handler
2113b525903cSJulien Thierry  *	receives this value it makes sense to use it.
2114b525903cSJulien Thierry  *
2115b525903cSJulien Thierry  *	If the interrupt line cannot be used to deliver NMIs, the function
2116b525903cSJulien Thierry  *	will fail and return a negative value.
2117b525903cSJulien Thierry  */
2118b525903cSJulien Thierry int request_nmi(unsigned int irq, irq_handler_t handler,
2119b525903cSJulien Thierry 		unsigned long irqflags, const char *name, void *dev_id)
2120b525903cSJulien Thierry {
2121b525903cSJulien Thierry 	struct irqaction *action;
2122b525903cSJulien Thierry 	struct irq_desc *desc;
2123b525903cSJulien Thierry 	unsigned long flags;
2124b525903cSJulien Thierry 	int retval;
2125b525903cSJulien Thierry 
2126b525903cSJulien Thierry 	if (irq == IRQ_NOTCONNECTED)
2127b525903cSJulien Thierry 		return -ENOTCONN;
2128b525903cSJulien Thierry 
2129b525903cSJulien Thierry 	/* NMIs cannot be shared or used for polling */
2130b525903cSJulien Thierry 	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2131b525903cSJulien Thierry 		return -EINVAL;
2132b525903cSJulien Thierry 
2133b525903cSJulien Thierry 	if (!(irqflags & IRQF_PERCPU))
2134b525903cSJulien Thierry 		return -EINVAL;
2135b525903cSJulien Thierry 
2136b525903cSJulien Thierry 	if (!handler)
2137b525903cSJulien Thierry 		return -EINVAL;
2138b525903cSJulien Thierry 
2139b525903cSJulien Thierry 	desc = irq_to_desc(irq);
2140b525903cSJulien Thierry 
2141b525903cSJulien Thierry 	if (!desc || irq_settings_can_autoenable(desc) ||
2142b525903cSJulien Thierry 	    !irq_settings_can_request(desc) ||
2143b525903cSJulien Thierry 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2144b525903cSJulien Thierry 	    !irq_supports_nmi(desc))
2145b525903cSJulien Thierry 		return -EINVAL;
2146b525903cSJulien Thierry 
2147b525903cSJulien Thierry 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2148b525903cSJulien Thierry 	if (!action)
2149b525903cSJulien Thierry 		return -ENOMEM;
2150b525903cSJulien Thierry 
2151b525903cSJulien Thierry 	action->handler = handler;
2152b525903cSJulien Thierry 	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2153b525903cSJulien Thierry 	action->name = name;
2154b525903cSJulien Thierry 	action->dev_id = dev_id;
2155b525903cSJulien Thierry 
2156b525903cSJulien Thierry 	retval = irq_chip_pm_get(&desc->irq_data);
2157b525903cSJulien Thierry 	if (retval < 0)
2158b525903cSJulien Thierry 		goto err_out;
2159b525903cSJulien Thierry 
2160b525903cSJulien Thierry 	retval = __setup_irq(irq, desc, action);
2161b525903cSJulien Thierry 	if (retval)
2162b525903cSJulien Thierry 		goto err_irq_setup;
2163b525903cSJulien Thierry 
2164b525903cSJulien Thierry 	raw_spin_lock_irqsave(&desc->lock, flags);
2165b525903cSJulien Thierry 
2166b525903cSJulien Thierry 	/* Setup NMI state */
2167b525903cSJulien Thierry 	desc->istate |= IRQS_NMI;
2168b525903cSJulien Thierry 	retval = irq_nmi_setup(desc);
2169b525903cSJulien Thierry 	if (retval) {
2170b525903cSJulien Thierry 		__cleanup_nmi(irq, desc);
2171b525903cSJulien Thierry 		raw_spin_unlock_irqrestore(&desc->lock, flags);
2172b525903cSJulien Thierry 		return -EINVAL;
2173b525903cSJulien Thierry 	}
2174b525903cSJulien Thierry 
2175b525903cSJulien Thierry 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2176b525903cSJulien Thierry 
2177b525903cSJulien Thierry 	return 0;
2178b525903cSJulien Thierry 
2179b525903cSJulien Thierry err_irq_setup:
2180b525903cSJulien Thierry 	irq_chip_pm_put(&desc->irq_data);
2181b525903cSJulien Thierry err_out:
2182b525903cSJulien Thierry 	kfree(action);
2183b525903cSJulien Thierry 
2184b525903cSJulien Thierry 	return retval;
2185b525903cSJulien Thierry }
2186b525903cSJulien Thierry 
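/*
 * Bring-up sketch (the "foo" watchdog names are hypothetical; assumes the
 * irqchip supports NMI delivery): because NMI lines are never
 * auto-enabled, the line has to carry IRQ_NOAUTOEN before it is
 * requested, and is switched on explicitly afterwards.
 *
 *	irq_set_status_flags(irq, IRQ_NOAUTOEN);
 *	ret = request_nmi(irq, foo_watchdog_nmi, IRQF_PERCPU,
 *			  "foo-watchdog", foo);
 *	if (ret)
 *		return ret;
 *	enable_nmi(irq);
 */
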
21871e7c5fd2SMarc Zyngier void enable_percpu_irq(unsigned int irq, unsigned int type)
218831d9d9b6SMarc Zyngier {
218931d9d9b6SMarc Zyngier 	unsigned int cpu = smp_processor_id();
219031d9d9b6SMarc Zyngier 	unsigned long flags;
219131d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
219231d9d9b6SMarc Zyngier 
219331d9d9b6SMarc Zyngier 	if (!desc)
219431d9d9b6SMarc Zyngier 		return;
219531d9d9b6SMarc Zyngier 
2196f35ad083SMarc Zyngier 	/*
2197f35ad083SMarc Zyngier 	 * If the trigger type is not specified by the caller, then
2198f35ad083SMarc Zyngier 	 * use the default for this interrupt.
2199f35ad083SMarc Zyngier 	 */
22001e7c5fd2SMarc Zyngier 	type &= IRQ_TYPE_SENSE_MASK;
2201f35ad083SMarc Zyngier 	if (type == IRQ_TYPE_NONE)
2202f35ad083SMarc Zyngier 		type = irqd_get_trigger_type(&desc->irq_data);
2203f35ad083SMarc Zyngier 
22041e7c5fd2SMarc Zyngier 	if (type != IRQ_TYPE_NONE) {
22051e7c5fd2SMarc Zyngier 		int ret;
22061e7c5fd2SMarc Zyngier 
2207a1ff541aSJiang Liu 		ret = __irq_set_trigger(desc, type);
22081e7c5fd2SMarc Zyngier 
22091e7c5fd2SMarc Zyngier 		if (ret) {
221032cffddeSThomas Gleixner 			WARN(1, "failed to set type for IRQ%d\n", irq);
22111e7c5fd2SMarc Zyngier 			goto out;
22121e7c5fd2SMarc Zyngier 		}
22131e7c5fd2SMarc Zyngier 	}
22141e7c5fd2SMarc Zyngier 
221531d9d9b6SMarc Zyngier 	irq_percpu_enable(desc, cpu);
22161e7c5fd2SMarc Zyngier out:
221731d9d9b6SMarc Zyngier 	irq_put_desc_unlock(desc, flags);
221831d9d9b6SMarc Zyngier }
221936a5df85SChris Metcalf EXPORT_SYMBOL_GPL(enable_percpu_irq);
222031d9d9b6SMarc Zyngier 
22214b078c3fSJulien Thierry void enable_percpu_nmi(unsigned int irq, unsigned int type)
22224b078c3fSJulien Thierry {
22234b078c3fSJulien Thierry 	enable_percpu_irq(irq, type);
22244b078c3fSJulien Thierry }
22254b078c3fSJulien Thierry 
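/*
 * Because enable_percpu_irq() only acts on the local CPU, drivers
 * typically call it from a CPU hotplug startup callback so that every
 * CPU enables its own copy of the line (hypothetical "foo" names, sketch
 * only):
 *
 *	static int foo_starting_cpu(unsigned int cpu)
 *	{
 *		enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);
 *		return 0;
 *	}
 *
 *	static int foo_dying_cpu(unsigned int cpu)
 *	{
 *		disable_percpu_irq(foo_irq);
 *		return 0;
 *	}
 *
 *	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo/percpu:starting",
 *			  foo_starting_cpu, foo_dying_cpu);
 *
 * Passing IRQ_TYPE_NONE keeps whatever trigger type is already configured
 * for the line.
 */
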
2226f0cb3220SThomas Petazzoni /**
2227f0cb3220SThomas Petazzoni  * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
2228f0cb3220SThomas Petazzoni  * @irq:	Linux irq number to check for
2229f0cb3220SThomas Petazzoni  *
2230f0cb3220SThomas Petazzoni  * Must be called from a non-migratable context. Returns the enable
2231f0cb3220SThomas Petazzoni  * state of a per-CPU interrupt on the current CPU.
2232f0cb3220SThomas Petazzoni  */
2233f0cb3220SThomas Petazzoni bool irq_percpu_is_enabled(unsigned int irq)
2234f0cb3220SThomas Petazzoni {
2235f0cb3220SThomas Petazzoni 	unsigned int cpu = smp_processor_id();
2236f0cb3220SThomas Petazzoni 	struct irq_desc *desc;
2237f0cb3220SThomas Petazzoni 	unsigned long flags;
2238f0cb3220SThomas Petazzoni 	bool is_enabled;
2239f0cb3220SThomas Petazzoni 
2240f0cb3220SThomas Petazzoni 	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2241f0cb3220SThomas Petazzoni 	if (!desc)
2242f0cb3220SThomas Petazzoni 		return false;
2243f0cb3220SThomas Petazzoni 
2244f0cb3220SThomas Petazzoni 	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2245f0cb3220SThomas Petazzoni 	irq_put_desc_unlock(desc, flags);
2246f0cb3220SThomas Petazzoni 
2247f0cb3220SThomas Petazzoni 	return is_enabled;
2248f0cb3220SThomas Petazzoni }
2249f0cb3220SThomas Petazzoni EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
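
/*
 * A caller has to stay on one CPU while asking, e.g. (sketch):
 *
 *	get_cpu();
 *	enabled = irq_percpu_is_enabled(irq);
 *	put_cpu();
 */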
2250f0cb3220SThomas Petazzoni 
225131d9d9b6SMarc Zyngier void disable_percpu_irq(unsigned int irq)
225231d9d9b6SMarc Zyngier {
225331d9d9b6SMarc Zyngier 	unsigned int cpu = smp_processor_id();
225431d9d9b6SMarc Zyngier 	unsigned long flags;
225531d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
225631d9d9b6SMarc Zyngier 
225731d9d9b6SMarc Zyngier 	if (!desc)
225831d9d9b6SMarc Zyngier 		return;
225931d9d9b6SMarc Zyngier 
226031d9d9b6SMarc Zyngier 	irq_percpu_disable(desc, cpu);
226131d9d9b6SMarc Zyngier 	irq_put_desc_unlock(desc, flags);
226231d9d9b6SMarc Zyngier }
226336a5df85SChris Metcalf EXPORT_SYMBOL_GPL(disable_percpu_irq);
226431d9d9b6SMarc Zyngier 
22654b078c3fSJulien Thierry void disable_percpu_nmi(unsigned int irq)
22664b078c3fSJulien Thierry {
22674b078c3fSJulien Thierry 	disable_percpu_irq(irq);
22684b078c3fSJulien Thierry }
22694b078c3fSJulien Thierry 
227031d9d9b6SMarc Zyngier /*
227131d9d9b6SMarc Zyngier  * Internal function to unregister a percpu irqaction.
227231d9d9b6SMarc Zyngier  */
227331d9d9b6SMarc Zyngier static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
227431d9d9b6SMarc Zyngier {
227531d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
227631d9d9b6SMarc Zyngier 	struct irqaction *action;
227731d9d9b6SMarc Zyngier 	unsigned long flags;
227831d9d9b6SMarc Zyngier 
227931d9d9b6SMarc Zyngier 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
228031d9d9b6SMarc Zyngier 
228131d9d9b6SMarc Zyngier 	if (!desc)
228231d9d9b6SMarc Zyngier 		return NULL;
228331d9d9b6SMarc Zyngier 
228431d9d9b6SMarc Zyngier 	raw_spin_lock_irqsave(&desc->lock, flags);
228531d9d9b6SMarc Zyngier 
228631d9d9b6SMarc Zyngier 	action = desc->action;
228731d9d9b6SMarc Zyngier 	if (!action || action->percpu_dev_id != dev_id) {
228831d9d9b6SMarc Zyngier 		WARN(1, "Trying to free already-free IRQ %d\n", irq);
228931d9d9b6SMarc Zyngier 		goto bad;
229031d9d9b6SMarc Zyngier 	}
229131d9d9b6SMarc Zyngier 
229231d9d9b6SMarc Zyngier 	if (!cpumask_empty(desc->percpu_enabled)) {
229331d9d9b6SMarc Zyngier 		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
229431d9d9b6SMarc Zyngier 		     irq, cpumask_first(desc->percpu_enabled));
229531d9d9b6SMarc Zyngier 		goto bad;
229631d9d9b6SMarc Zyngier 	}
229731d9d9b6SMarc Zyngier 
229831d9d9b6SMarc Zyngier 	/* Found it - now remove it from the list of entries: */
229931d9d9b6SMarc Zyngier 	desc->action = NULL;
230031d9d9b6SMarc Zyngier 
23014b078c3fSJulien Thierry 	desc->istate &= ~IRQS_NMI;
23024b078c3fSJulien Thierry 
230331d9d9b6SMarc Zyngier 	raw_spin_unlock_irqrestore(&desc->lock, flags);
230431d9d9b6SMarc Zyngier 
230531d9d9b6SMarc Zyngier 	unregister_handler_proc(irq, action);
230631d9d9b6SMarc Zyngier 
2307be45beb2SJon Hunter 	irq_chip_pm_put(&desc->irq_data);
230831d9d9b6SMarc Zyngier 	module_put(desc->owner);
230931d9d9b6SMarc Zyngier 	return action;
231031d9d9b6SMarc Zyngier 
231131d9d9b6SMarc Zyngier bad:
231231d9d9b6SMarc Zyngier 	raw_spin_unlock_irqrestore(&desc->lock, flags);
231331d9d9b6SMarc Zyngier 	return NULL;
231431d9d9b6SMarc Zyngier }
231531d9d9b6SMarc Zyngier 
231631d9d9b6SMarc Zyngier /**
231731d9d9b6SMarc Zyngier  *	remove_percpu_irq - free a per-cpu interrupt
231831d9d9b6SMarc Zyngier  *	@irq: Interrupt line to free
231931d9d9b6SMarc Zyngier  *	@act: irqaction for the interrupt
232031d9d9b6SMarc Zyngier  *
232131d9d9b6SMarc Zyngier  * Used to remove interrupts statically set up by the early boot process.
232231d9d9b6SMarc Zyngier  */
232331d9d9b6SMarc Zyngier void remove_percpu_irq(unsigned int irq, struct irqaction *act)
232431d9d9b6SMarc Zyngier {
232531d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
232631d9d9b6SMarc Zyngier 
232731d9d9b6SMarc Zyngier 	if (desc && irq_settings_is_per_cpu_devid(desc))
232831d9d9b6SMarc Zyngier 	    __free_percpu_irq(irq, act->percpu_dev_id);
232931d9d9b6SMarc Zyngier }
233031d9d9b6SMarc Zyngier 
233131d9d9b6SMarc Zyngier /**
233231d9d9b6SMarc Zyngier  *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
233331d9d9b6SMarc Zyngier  *	@irq: Interrupt line to free
233431d9d9b6SMarc Zyngier  *	@dev_id: Device identity to free
233531d9d9b6SMarc Zyngier  *
233631d9d9b6SMarc Zyngier  *	Remove a percpu interrupt handler. The handler is removed, but
233731d9d9b6SMarc Zyngier  *	the interrupt line is not disabled. This must be done on each
233831d9d9b6SMarc Zyngier  *	CPU before calling this function. The function does not return
233931d9d9b6SMarc Zyngier  *	until any executing interrupts for this IRQ have completed.
234031d9d9b6SMarc Zyngier  *
234131d9d9b6SMarc Zyngier  *	This function must not be called from interrupt context.
234231d9d9b6SMarc Zyngier  */
234331d9d9b6SMarc Zyngier void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
234431d9d9b6SMarc Zyngier {
234531d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
234631d9d9b6SMarc Zyngier 
234731d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
234831d9d9b6SMarc Zyngier 		return;
234931d9d9b6SMarc Zyngier 
235031d9d9b6SMarc Zyngier 	chip_bus_lock(desc);
235131d9d9b6SMarc Zyngier 	kfree(__free_percpu_irq(irq, dev_id));
235231d9d9b6SMarc Zyngier 	chip_bus_sync_unlock(desc);
235331d9d9b6SMarc Zyngier }
2354aec2e2adSMaxime Ripard EXPORT_SYMBOL_GPL(free_percpu_irq);
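
/*
 * Teardown sketch (hypothetical "foo" names) honouring the ordering rule
 * above: every CPU disables its copy of the line first, then a single
 * call removes the handler.
 *
 *	static void foo_disable_local(void *unused)
 *	{
 *		disable_percpu_irq(foo_irq);
 *	}
 *
 *	on_each_cpu(foo_disable_local, NULL, 1);
 *	free_percpu_irq(foo_irq, &foo_percpu_data);
 */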
235531d9d9b6SMarc Zyngier 
23564b078c3fSJulien Thierry void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
23574b078c3fSJulien Thierry {
23584b078c3fSJulien Thierry 	struct irq_desc *desc = irq_to_desc(irq);
23594b078c3fSJulien Thierry 
23604b078c3fSJulien Thierry 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
23614b078c3fSJulien Thierry 		return;
23624b078c3fSJulien Thierry 
23634b078c3fSJulien Thierry 	if (WARN_ON(!(desc->istate & IRQS_NMI)))
23644b078c3fSJulien Thierry 		return;
23654b078c3fSJulien Thierry 
23664b078c3fSJulien Thierry 	kfree(__free_percpu_irq(irq, dev_id));
23674b078c3fSJulien Thierry }
23684b078c3fSJulien Thierry 
236931d9d9b6SMarc Zyngier /**
237031d9d9b6SMarc Zyngier  *	setup_percpu_irq - setup a per-cpu interrupt
237131d9d9b6SMarc Zyngier  *	@irq: Interrupt line to setup
237231d9d9b6SMarc Zyngier  *	@act: irqaction for the interrupt
237331d9d9b6SMarc Zyngier  *
237431d9d9b6SMarc Zyngier  * Used to statically set up per-cpu interrupts in the early boot process.
237531d9d9b6SMarc Zyngier  */
237631d9d9b6SMarc Zyngier int setup_percpu_irq(unsigned int irq, struct irqaction *act)
237731d9d9b6SMarc Zyngier {
237831d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
237931d9d9b6SMarc Zyngier 	int retval;
238031d9d9b6SMarc Zyngier 
238131d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
238231d9d9b6SMarc Zyngier 		return -EINVAL;
2383be45beb2SJon Hunter 
2384be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
2385be45beb2SJon Hunter 	if (retval < 0)
2386be45beb2SJon Hunter 		return retval;
2387be45beb2SJon Hunter 
238831d9d9b6SMarc Zyngier 	retval = __setup_irq(irq, desc, act);
238931d9d9b6SMarc Zyngier 
2390be45beb2SJon Hunter 	if (retval)
2391be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
2392be45beb2SJon Hunter 
239331d9d9b6SMarc Zyngier 	return retval;
239431d9d9b6SMarc Zyngier }
239531d9d9b6SMarc Zyngier 
239631d9d9b6SMarc Zyngier /**
2397c80081b9SDaniel Lezcano  *	__request_percpu_irq - allocate a percpu interrupt line
239831d9d9b6SMarc Zyngier  *	@irq: Interrupt line to allocate
239931d9d9b6SMarc Zyngier  *	@handler: Function to be called when the IRQ occurs.
2400c80081b9SDaniel Lezcano  *	@flags: Interrupt type flags (IRQF_TIMER only)
240131d9d9b6SMarc Zyngier  *	@devname: An ascii name for the claiming device
240231d9d9b6SMarc Zyngier  *	@dev_id: A percpu cookie passed back to the handler function
240331d9d9b6SMarc Zyngier  *
2404a1b7febdSMaxime Ripard  *	This call allocates interrupt resources and enables the
2405a1b7febdSMaxime Ripard  *	interrupt on the local CPU. If the interrupt is supposed to be
2406a1b7febdSMaxime Ripard  *	enabled on other CPUs, it has to be done on each CPU using
2407a1b7febdSMaxime Ripard  *	enable_percpu_irq().
240831d9d9b6SMarc Zyngier  *
240931d9d9b6SMarc Zyngier  *	Dev_id must be globally unique. It is a per-cpu variable, and
241031d9d9b6SMarc Zyngier  *	the handler gets called with the interrupted CPU's instance of
241131d9d9b6SMarc Zyngier  *	that variable.
241231d9d9b6SMarc Zyngier  */
2413c80081b9SDaniel Lezcano int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2414c80081b9SDaniel Lezcano 			 unsigned long flags, const char *devname,
2415c80081b9SDaniel Lezcano 			 void __percpu *dev_id)
241631d9d9b6SMarc Zyngier {
241731d9d9b6SMarc Zyngier 	struct irqaction *action;
241831d9d9b6SMarc Zyngier 	struct irq_desc *desc;
241931d9d9b6SMarc Zyngier 	int retval;
242031d9d9b6SMarc Zyngier 
242131d9d9b6SMarc Zyngier 	if (!dev_id)
242231d9d9b6SMarc Zyngier 		return -EINVAL;
242331d9d9b6SMarc Zyngier 
242431d9d9b6SMarc Zyngier 	desc = irq_to_desc(irq);
242531d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_can_request(desc) ||
242631d9d9b6SMarc Zyngier 	    !irq_settings_is_per_cpu_devid(desc))
242731d9d9b6SMarc Zyngier 		return -EINVAL;
242831d9d9b6SMarc Zyngier 
2429c80081b9SDaniel Lezcano 	if (flags && flags != IRQF_TIMER)
2430c80081b9SDaniel Lezcano 		return -EINVAL;
2431c80081b9SDaniel Lezcano 
243231d9d9b6SMarc Zyngier 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
243331d9d9b6SMarc Zyngier 	if (!action)
243431d9d9b6SMarc Zyngier 		return -ENOMEM;
243531d9d9b6SMarc Zyngier 
243631d9d9b6SMarc Zyngier 	action->handler = handler;
2437c80081b9SDaniel Lezcano 	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
243831d9d9b6SMarc Zyngier 	action->name = devname;
243931d9d9b6SMarc Zyngier 	action->percpu_dev_id = dev_id;
244031d9d9b6SMarc Zyngier 
2441be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
24424396f46cSShawn Lin 	if (retval < 0) {
24434396f46cSShawn Lin 		kfree(action);
2444be45beb2SJon Hunter 		return retval;
24454396f46cSShawn Lin 	}
2446be45beb2SJon Hunter 
244731d9d9b6SMarc Zyngier 	retval = __setup_irq(irq, desc, action);
244831d9d9b6SMarc Zyngier 
2449be45beb2SJon Hunter 	if (retval) {
2450be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
245131d9d9b6SMarc Zyngier 		kfree(action);
2452be45beb2SJon Hunter 	}
245331d9d9b6SMarc Zyngier 
245431d9d9b6SMarc Zyngier 	return retval;
245531d9d9b6SMarc Zyngier }
2456c80081b9SDaniel Lezcano EXPORT_SYMBOL_GPL(__request_percpu_irq);
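
/*
 * Allocation sketch (hypothetical "foo" names): the cookie is a per-CPU
 * variable, and most callers use the request_percpu_irq() wrapper, which
 * passes flags == 0.
 *
 *	static DEFINE_PER_CPU(struct foo_percpu, foo_percpu_data);
 *
 *	ret = request_percpu_irq(irq, foo_percpu_handler, "foo",
 *				 &foo_percpu_data);
 *	if (ret)
 *		return ret;
 *
 * As noted above, CPUs other than the requesting one still have to call
 * enable_percpu_irq() themselves before they receive the interrupt.
 */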
24571b7047edSMarc Zyngier 
24581b7047edSMarc Zyngier /**
24594b078c3fSJulien Thierry  *	request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
24604b078c3fSJulien Thierry  *	@irq: Interrupt line to allocate
24614b078c3fSJulien Thierry  *	@handler: Function to be called when the IRQ occurs.
24624b078c3fSJulien Thierry  *	@name: An ascii name for the claiming device
24634b078c3fSJulien Thierry  *	@dev_id: A percpu cookie passed back to the handler function
24644b078c3fSJulien Thierry  *
24654b078c3fSJulien Thierry  *	This call allocates interrupt resources for a per-CPU NMI. Per-CPU NMIs
2466a5186694SJulien Thierry  *	have to be set up on each CPU by calling prepare_percpu_nmi() before
2467a5186694SJulien Thierry  *	being enabled on the same CPU by using enable_percpu_nmi().
24684b078c3fSJulien Thierry  *
24694b078c3fSJulien Thierry  *	Dev_id must be globally unique. It is a per-cpu variable, and
24704b078c3fSJulien Thierry  *	the handler gets called with the interrupted CPU's instance of
24714b078c3fSJulien Thierry  *	that variable.
24724b078c3fSJulien Thierry  *
24734b078c3fSJulien Thierry  *	Interrupt lines requested for NMI delivery should have the auto-enable
24744b078c3fSJulien Thierry  *	setting disabled.
24754b078c3fSJulien Thierry  *
24764b078c3fSJulien Thierry  *	If the interrupt line cannot be used to deliver NMIs, the function
24774b078c3fSJulien Thierry  *	will fail, returning a negative value.
24784b078c3fSJulien Thierry  */
24794b078c3fSJulien Thierry int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
24804b078c3fSJulien Thierry 		       const char *name, void __percpu *dev_id)
24814b078c3fSJulien Thierry {
24824b078c3fSJulien Thierry 	struct irqaction *action;
24834b078c3fSJulien Thierry 	struct irq_desc *desc;
24844b078c3fSJulien Thierry 	unsigned long flags;
24854b078c3fSJulien Thierry 	int retval;
24864b078c3fSJulien Thierry 
24874b078c3fSJulien Thierry 	if (!handler)
24884b078c3fSJulien Thierry 		return -EINVAL;
24894b078c3fSJulien Thierry 
24904b078c3fSJulien Thierry 	desc = irq_to_desc(irq);
24914b078c3fSJulien Thierry 
24924b078c3fSJulien Thierry 	if (!desc || !irq_settings_can_request(desc) ||
24934b078c3fSJulien Thierry 	    !irq_settings_is_per_cpu_devid(desc) ||
24944b078c3fSJulien Thierry 	    irq_settings_can_autoenable(desc) ||
24954b078c3fSJulien Thierry 	    !irq_supports_nmi(desc))
24964b078c3fSJulien Thierry 		return -EINVAL;
24974b078c3fSJulien Thierry 
24984b078c3fSJulien Thierry 	/* The line cannot already be NMI */
24994b078c3fSJulien Thierry 	if (desc->istate & IRQS_NMI)
25004b078c3fSJulien Thierry 		return -EINVAL;
25014b078c3fSJulien Thierry 
25024b078c3fSJulien Thierry 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
25034b078c3fSJulien Thierry 	if (!action)
25044b078c3fSJulien Thierry 		return -ENOMEM;
25054b078c3fSJulien Thierry 
25064b078c3fSJulien Thierry 	action->handler = handler;
25074b078c3fSJulien Thierry 	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
25084b078c3fSJulien Thierry 		| IRQF_NOBALANCING;
25094b078c3fSJulien Thierry 	action->name = name;
25104b078c3fSJulien Thierry 	action->percpu_dev_id = dev_id;
25114b078c3fSJulien Thierry 
25124b078c3fSJulien Thierry 	retval = irq_chip_pm_get(&desc->irq_data);
25134b078c3fSJulien Thierry 	if (retval < 0)
25144b078c3fSJulien Thierry 		goto err_out;
25154b078c3fSJulien Thierry 
25164b078c3fSJulien Thierry 	retval = __setup_irq(irq, desc, action);
25174b078c3fSJulien Thierry 	if (retval)
25184b078c3fSJulien Thierry 		goto err_irq_setup;
25194b078c3fSJulien Thierry 
25204b078c3fSJulien Thierry 	raw_spin_lock_irqsave(&desc->lock, flags);
25214b078c3fSJulien Thierry 	desc->istate |= IRQS_NMI;
25224b078c3fSJulien Thierry 	raw_spin_unlock_irqrestore(&desc->lock, flags);
25234b078c3fSJulien Thierry 
25244b078c3fSJulien Thierry 	return 0;
25254b078c3fSJulien Thierry 
25264b078c3fSJulien Thierry err_irq_setup:
25274b078c3fSJulien Thierry 	irq_chip_pm_put(&desc->irq_data);
25284b078c3fSJulien Thierry err_out:
25294b078c3fSJulien Thierry 	kfree(action);
25304b078c3fSJulien Thierry 
25314b078c3fSJulien Thierry 	return retval;
25324b078c3fSJulien Thierry }
25334b078c3fSJulien Thierry 
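/*
 * Allocation sketch for a per-CPU NMI (hypothetical "foo" PMU names,
 * loosely following the pseudo-NMI PMU pattern): the line is requested
 * once, then prepared and enabled on each CPU separately; see the
 * lifecycle sketch after teardown_percpu_nmi() below.
 *
 *	static DEFINE_PER_CPU(struct foo_pmu_cpu, foo_pmu_cpu);
 *
 *	err = request_percpu_nmi(irq, foo_pmu_nmi_handler, "foo-pmu",
 *				 &foo_pmu_cpu);
 */
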
25344b078c3fSJulien Thierry /**
25354b078c3fSJulien Thierry  *	prepare_percpu_nmi - performs CPU local setup for NMI delivery
25364b078c3fSJulien Thierry  *	@irq: Interrupt line to prepare for NMI delivery
25374b078c3fSJulien Thierry  *
25384b078c3fSJulien Thierry  *	This call prepares an interrupt line to deliver NMI on the current CPU,
25394b078c3fSJulien Thierry  *	before that interrupt line gets enabled with enable_percpu_nmi().
25404b078c3fSJulien Thierry  *
25414b078c3fSJulien Thierry  *	As a CPU local operation, this should be called from non-preemptible
25424b078c3fSJulien Thierry  *	context.
25434b078c3fSJulien Thierry  *
25444b078c3fSJulien Thierry  *	If the interrupt line cannot be used to deliver NMIs, the function
25454b078c3fSJulien Thierry  *	will fail, returning a negative value.
25464b078c3fSJulien Thierry  */
25474b078c3fSJulien Thierry int prepare_percpu_nmi(unsigned int irq)
25484b078c3fSJulien Thierry {
25494b078c3fSJulien Thierry 	unsigned long flags;
25504b078c3fSJulien Thierry 	struct irq_desc *desc;
25514b078c3fSJulien Thierry 	int ret = 0;
25524b078c3fSJulien Thierry 
25534b078c3fSJulien Thierry 	WARN_ON(preemptible());
25544b078c3fSJulien Thierry 
25554b078c3fSJulien Thierry 	desc = irq_get_desc_lock(irq, &flags,
25564b078c3fSJulien Thierry 				 IRQ_GET_DESC_CHECK_PERCPU);
25574b078c3fSJulien Thierry 	if (!desc)
25584b078c3fSJulien Thierry 		return -EINVAL;
25594b078c3fSJulien Thierry 
25604b078c3fSJulien Thierry 	if (WARN(!(desc->istate & IRQS_NMI),
25614b078c3fSJulien Thierry 		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
25624b078c3fSJulien Thierry 		 irq)) {
25634b078c3fSJulien Thierry 		ret = -EINVAL;
25644b078c3fSJulien Thierry 		goto out;
25654b078c3fSJulien Thierry 	}
25664b078c3fSJulien Thierry 
25674b078c3fSJulien Thierry 	ret = irq_nmi_setup(desc);
25684b078c3fSJulien Thierry 	if (ret) {
25694b078c3fSJulien Thierry 		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
25704b078c3fSJulien Thierry 		goto out;
25714b078c3fSJulien Thierry 	}
25724b078c3fSJulien Thierry 
25734b078c3fSJulien Thierry out:
25744b078c3fSJulien Thierry 	irq_put_desc_unlock(desc, flags);
25754b078c3fSJulien Thierry 	return ret;
25764b078c3fSJulien Thierry }
25774b078c3fSJulien Thierry 
25784b078c3fSJulien Thierry /**
25794b078c3fSJulien Thierry  *	teardown_percpu_nmi - undoes NMI setup of IRQ line
25804b078c3fSJulien Thierry  *	@irq: Interrupt line from which CPU local NMI configuration should be
25814b078c3fSJulien Thierry  *	      removed
25824b078c3fSJulien Thierry  *
25834b078c3fSJulien Thierry  *	This call undoes the setup done by prepare_percpu_nmi().
25844b078c3fSJulien Thierry  *
25854b078c3fSJulien Thierry  *	IRQ line should not be enabled for the current CPU.
25864b078c3fSJulien Thierry  *
25874b078c3fSJulien Thierry  *	As a CPU local operation, this should be called from non-preemptible
25884b078c3fSJulien Thierry  *	context.
25894b078c3fSJulien Thierry  */
25904b078c3fSJulien Thierry void teardown_percpu_nmi(unsigned int irq)
25914b078c3fSJulien Thierry {
25924b078c3fSJulien Thierry 	unsigned long flags;
25934b078c3fSJulien Thierry 	struct irq_desc *desc;
25944b078c3fSJulien Thierry 
25954b078c3fSJulien Thierry 	WARN_ON(preemptible());
25964b078c3fSJulien Thierry 
25974b078c3fSJulien Thierry 	desc = irq_get_desc_lock(irq, &flags,
25984b078c3fSJulien Thierry 				 IRQ_GET_DESC_CHECK_PERCPU);
25994b078c3fSJulien Thierry 	if (!desc)
26004b078c3fSJulien Thierry 		return;
26014b078c3fSJulien Thierry 
26024b078c3fSJulien Thierry 	if (WARN_ON(!(desc->istate & IRQS_NMI)))
26034b078c3fSJulien Thierry 		goto out;
26044b078c3fSJulien Thierry 
26054b078c3fSJulien Thierry 	irq_nmi_teardown(desc);
26064b078c3fSJulien Thierry out:
26074b078c3fSJulien Thierry 	irq_put_desc_unlock(desc, flags);
26084b078c3fSJulien Thierry }
26094b078c3fSJulien Thierry 
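/*
 * Per-CPU lifecycle sketch (hypothetical "foo" names): once
 * request_percpu_nmi() has succeeded, each CPU prepares and enables its
 * copy of the line, and undoes both on the way down. This is typically
 * driven from STARTING-level CPU hotplug callbacks, which run on the
 * affected CPU with preemption disabled, as required here.
 *
 *	static int foo_cpu_starting(unsigned int cpu)
 *	{
 *		int ret = prepare_percpu_nmi(foo_nmi_irq);
 *
 *		if (ret)
 *			return ret;
 *		enable_percpu_nmi(foo_nmi_irq, IRQ_TYPE_NONE);
 *		return 0;
 *	}
 *
 *	static int foo_cpu_dying(unsigned int cpu)
 *	{
 *		disable_percpu_nmi(foo_nmi_irq);
 *		teardown_percpu_nmi(foo_nmi_irq);
 *		return 0;
 *	}
 */
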
261062e04686SThomas Gleixner int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
261162e04686SThomas Gleixner 			    bool *state)
261262e04686SThomas Gleixner {
261362e04686SThomas Gleixner 	struct irq_chip *chip;
261462e04686SThomas Gleixner 	int err = -EINVAL;
261562e04686SThomas Gleixner 
261662e04686SThomas Gleixner 	do {
261762e04686SThomas Gleixner 		chip = irq_data_get_irq_chip(data);
26181d0326f3SMarek Vasut 		if (WARN_ON_ONCE(!chip))
26191d0326f3SMarek Vasut 			return -ENODEV;
262062e04686SThomas Gleixner 		if (chip->irq_get_irqchip_state)
262162e04686SThomas Gleixner 			break;
262262e04686SThomas Gleixner #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
262362e04686SThomas Gleixner 		data = data->parent_data;
262462e04686SThomas Gleixner #else
262562e04686SThomas Gleixner 		data = NULL;
262662e04686SThomas Gleixner #endif
262762e04686SThomas Gleixner 	} while (data);
262862e04686SThomas Gleixner 
262962e04686SThomas Gleixner 	if (data)
263062e04686SThomas Gleixner 		err = chip->irq_get_irqchip_state(data, which, state);
263162e04686SThomas Gleixner 	return err;
263262e04686SThomas Gleixner }
263362e04686SThomas Gleixner 
26344b078c3fSJulien Thierry /**
26351b7047edSMarc Zyngier  *	irq_get_irqchip_state - returns the irqchip state of an interrupt.
26361b7047edSMarc Zyngier  *	@irq: Interrupt line that is forwarded to a VM
26371b7047edSMarc Zyngier  *	@which: One of IRQCHIP_STATE_* the caller wants to know about
26381b7047edSMarc Zyngier  *	@state: a pointer to a boolean where the state is to be stored
26391b7047edSMarc Zyngier  *
26401b7047edSMarc Zyngier  *	This call snapshots the internal irqchip state of an
26411b7047edSMarc Zyngier  *	interrupt, returning into @state the bit corresponding to
26421b7047edSMarc Zyngier  *	state @which.
26431b7047edSMarc Zyngier  *
26441b7047edSMarc Zyngier  *	This function should be called with preemption disabled if the
26451b7047edSMarc Zyngier  *	interrupt controller has per-cpu registers.
26461b7047edSMarc Zyngier  */
26471b7047edSMarc Zyngier int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
26481b7047edSMarc Zyngier 			  bool *state)
26491b7047edSMarc Zyngier {
26501b7047edSMarc Zyngier 	struct irq_desc *desc;
26511b7047edSMarc Zyngier 	struct irq_data *data;
26521b7047edSMarc Zyngier 	unsigned long flags;
26531b7047edSMarc Zyngier 	int err = -EINVAL;
26541b7047edSMarc Zyngier 
26551b7047edSMarc Zyngier 	desc = irq_get_desc_buslock(irq, &flags, 0);
26561b7047edSMarc Zyngier 	if (!desc)
26571b7047edSMarc Zyngier 		return err;
26581b7047edSMarc Zyngier 
26591b7047edSMarc Zyngier 	data = irq_desc_get_irq_data(desc);
26601b7047edSMarc Zyngier 
266162e04686SThomas Gleixner 	err = __irq_get_irqchip_state(data, which, state);
26621b7047edSMarc Zyngier 
26631b7047edSMarc Zyngier 	irq_put_desc_busunlock(desc, flags);
26641b7047edSMarc Zyngier 	return err;
26651b7047edSMarc Zyngier }
26661ee4fb3eSBjorn Andersson EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
26671b7047edSMarc Zyngier 
26681b7047edSMarc Zyngier /**
26691b7047edSMarc Zyngier  *	irq_set_irqchip_state - set the state of a forwarded interrupt.
26701b7047edSMarc Zyngier  *	@irq: Interrupt line that is forwarded to a VM
26711b7047edSMarc Zyngier  *	@which: State to be restored (one of IRQCHIP_STATE_*)
26721b7047edSMarc Zyngier  *	@val: Value corresponding to @which
26731b7047edSMarc Zyngier  *
26741b7047edSMarc Zyngier  *	This call sets the internal irqchip state of an interrupt,
26751b7047edSMarc Zyngier  *	depending on the value of @which.
26761b7047edSMarc Zyngier  *
26771b7047edSMarc Zyngier  *	This function should be called with preemption disabled if the
26781b7047edSMarc Zyngier  *	interrupt controller has per-cpu registers.
26791b7047edSMarc Zyngier  */
26801b7047edSMarc Zyngier int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
26811b7047edSMarc Zyngier 			  bool val)
26821b7047edSMarc Zyngier {
26831b7047edSMarc Zyngier 	struct irq_desc *desc;
26841b7047edSMarc Zyngier 	struct irq_data *data;
26851b7047edSMarc Zyngier 	struct irq_chip *chip;
26861b7047edSMarc Zyngier 	unsigned long flags;
26871b7047edSMarc Zyngier 	int err = -EINVAL;
26881b7047edSMarc Zyngier 
26891b7047edSMarc Zyngier 	desc = irq_get_desc_buslock(irq, &flags, 0);
26901b7047edSMarc Zyngier 	if (!desc)
26911b7047edSMarc Zyngier 		return err;
26921b7047edSMarc Zyngier 
26931b7047edSMarc Zyngier 	data = irq_desc_get_irq_data(desc);
26941b7047edSMarc Zyngier 
26951b7047edSMarc Zyngier 	do {
26961b7047edSMarc Zyngier 		chip = irq_data_get_irq_chip(data);
26971d0326f3SMarek Vasut 		if (WARN_ON_ONCE(!chip)) {
26971d0326f3SMarek Vasut 			/* Don't leak desc->lock and the bus lock on error */
26981d0326f3SMarek Vasut 			err = -ENODEV;
26981d0326f3SMarek Vasut 			goto out_unlock;
26981d0326f3SMarek Vasut 		}
26991b7047edSMarc Zyngier 		if (chip->irq_set_irqchip_state)
27001b7047edSMarc Zyngier 			break;
27011b7047edSMarc Zyngier #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
27021b7047edSMarc Zyngier 		data = data->parent_data;
27031b7047edSMarc Zyngier #else
27041b7047edSMarc Zyngier 		data = NULL;
27051b7047edSMarc Zyngier #endif
27061b7047edSMarc Zyngier 	} while (data);
27071b7047edSMarc Zyngier 
27081b7047edSMarc Zyngier 	if (data)
27091b7047edSMarc Zyngier 		err = chip->irq_set_irqchip_state(data, which, val);
27101b7047edSMarc Zyngier 
27111b7047edSMarc Zyngier out_unlock:
27111b7047edSMarc Zyngier 	irq_put_desc_busunlock(desc, flags);
27121b7047edSMarc Zyngier 	return err;
27131b7047edSMarc Zyngier }
27141ee4fb3eSBjorn Andersson EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
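
/*
 * Sketch of the forwarded-interrupt use case described above ("vcpu_irq"
 * is hypothetical): snapshot the pending state of a line handed to a
 * guest, and re-inject it later.
 *
 *	bool pending;
 *	int err;
 *
 *	err = irq_get_irqchip_state(vcpu_irq, IRQCHIP_STATE_PENDING, &pending);
 *	if (!err && pending)
 *		err = irq_set_irqchip_state(vcpu_irq, IRQCHIP_STATE_PENDING, true);
 */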
2715