xref: /openbmc/linux/kernel/irq/manage.c (revision cbf8699996a6e7f2f674b3a2a4cef9f666ff613e)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * linux/kernel/irq/manage.c
31da177e4SLinus Torvalds  *
4a34db9b2SIngo Molnar  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5a34db9b2SIngo Molnar  * Copyright (C) 2005-2006 Thomas Gleixner
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  * This file contains driver APIs to the irq subsystem.
81da177e4SLinus Torvalds  */
91da177e4SLinus Torvalds 
1097fd75b7SAndrew Morton #define pr_fmt(fmt) "genirq: " fmt
1197fd75b7SAndrew Morton 
121da177e4SLinus Torvalds #include <linux/irq.h>
133aa551c9SThomas Gleixner #include <linux/kthread.h>
141da177e4SLinus Torvalds #include <linux/module.h>
151da177e4SLinus Torvalds #include <linux/random.h>
161da177e4SLinus Torvalds #include <linux/interrupt.h>
171aeb272cSRobert P. J. Day #include <linux/slab.h>
183aa551c9SThomas Gleixner #include <linux/sched.h>
198bd75c77SClark Williams #include <linux/sched/rt.h>
200881e7bdSIngo Molnar #include <linux/sched/task.h>
21ae7e81c0SIngo Molnar #include <uapi/linux/sched/types.h>
224d1d61a6SOleg Nesterov #include <linux/task_work.h>
231da177e4SLinus Torvalds 
241da177e4SLinus Torvalds #include "internals.h"
251da177e4SLinus Torvalds 
268d32a307SThomas Gleixner #ifdef CONFIG_IRQ_FORCED_THREADING
278d32a307SThomas Gleixner __read_mostly bool force_irqthreads;
288d32a307SThomas Gleixner 
298d32a307SThomas Gleixner static int __init setup_forced_irqthreads(char *arg)
308d32a307SThomas Gleixner {
318d32a307SThomas Gleixner 	force_irqthreads = true;
328d32a307SThomas Gleixner 	return 0;
338d32a307SThomas Gleixner }
348d32a307SThomas Gleixner early_param("threadirqs", setup_forced_irqthreads);
358d32a307SThomas Gleixner #endif
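/*
 * Note: with CONFIG_IRQ_FORCED_THREADING=y, booting with "threadirqs" on
 * the kernel command line sets force_irqthreads and makes most handlers
 * run in threads; see irq_setup_forced_threading() below. Requests marked
 * IRQF_NO_THREAD, IRQF_PERCPU or IRQF_ONESHOT are left untouched there.
 */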
368d32a307SThomas Gleixner 
3718258f72SThomas Gleixner static void __synchronize_hardirq(struct irq_desc *desc)
381da177e4SLinus Torvalds {
3932f4125eSThomas Gleixner 	bool inprogress;
401da177e4SLinus Torvalds 
41a98ce5c6SHerbert Xu 	do {
42a98ce5c6SHerbert Xu 		unsigned long flags;
43a98ce5c6SHerbert Xu 
44a98ce5c6SHerbert Xu 		/*
45a98ce5c6SHerbert Xu 		 * Wait until we're out of the critical section.  This might
46a98ce5c6SHerbert Xu 		 * give the wrong answer due to the lack of memory barriers.
47a98ce5c6SHerbert Xu 		 */
4832f4125eSThomas Gleixner 		while (irqd_irq_inprogress(&desc->irq_data))
491da177e4SLinus Torvalds 			cpu_relax();
50a98ce5c6SHerbert Xu 
51a98ce5c6SHerbert Xu 		/* Ok, that indicated we're done: double-check carefully. */
52239007b8SThomas Gleixner 		raw_spin_lock_irqsave(&desc->lock, flags);
5332f4125eSThomas Gleixner 		inprogress = irqd_irq_inprogress(&desc->irq_data);
54239007b8SThomas Gleixner 		raw_spin_unlock_irqrestore(&desc->lock, flags);
55a98ce5c6SHerbert Xu 
56a98ce5c6SHerbert Xu 		/* Oops, that failed? */
5732f4125eSThomas Gleixner 	} while (inprogress);
5818258f72SThomas Gleixner }
593aa551c9SThomas Gleixner 
6018258f72SThomas Gleixner /**
6118258f72SThomas Gleixner  *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
6218258f72SThomas Gleixner  *	@irq: interrupt number to wait for
6318258f72SThomas Gleixner  *
6418258f72SThomas Gleixner  *	This function waits for any pending hard IRQ handlers for this
6518258f72SThomas Gleixner  *	interrupt to complete before returning. If you use this
6618258f72SThomas Gleixner  *	function while holding a resource the IRQ handler may need, you
6718258f72SThomas Gleixner  *	will deadlock. It does not take associated threaded handlers
6818258f72SThomas Gleixner  *	into account.
6918258f72SThomas Gleixner  *
7018258f72SThomas Gleixner  *	Do not use this for shutdown scenarios where you must be sure
7118258f72SThomas Gleixner  *	that all parts (hardirq and threaded handler) have completed.
7218258f72SThomas Gleixner  *
7302cea395SPeter Zijlstra  *	Returns: false if a threaded handler is active.
7402cea395SPeter Zijlstra  *
7518258f72SThomas Gleixner  *	This function may be called - with care - from IRQ context.
763aa551c9SThomas Gleixner  */
7702cea395SPeter Zijlstra bool synchronize_hardirq(unsigned int irq)
7818258f72SThomas Gleixner {
7918258f72SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
8018258f72SThomas Gleixner 
8102cea395SPeter Zijlstra 	if (desc) {
8218258f72SThomas Gleixner 		__synchronize_hardirq(desc);
8302cea395SPeter Zijlstra 		return !atomic_read(&desc->threads_active);
8402cea395SPeter Zijlstra 	}
8502cea395SPeter Zijlstra 
8602cea395SPeter Zijlstra 	return true;
8718258f72SThomas Gleixner }
8818258f72SThomas Gleixner EXPORT_SYMBOL(synchronize_hardirq);
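/*
 * Usage sketch (hypothetical driver code, not part of this file): wait for
 * the hard IRQ part only, e.g. when the state being torn down is touched by
 * the hardirq handler alone. "dev" and FOO_IRQ_ENABLE are made-up names.
 *
 *	writel(0, dev->regs + FOO_IRQ_ENABLE);
 *	if (!synchronize_hardirq(dev->irq))
 *		pr_debug("foo: threaded handler still running\n");
 */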
8918258f72SThomas Gleixner 
9018258f72SThomas Gleixner /**
9118258f72SThomas Gleixner  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
9218258f72SThomas Gleixner  *	@irq: interrupt number to wait for
9318258f72SThomas Gleixner  *
9418258f72SThomas Gleixner  *	This function waits for any pending IRQ handlers for this interrupt
9518258f72SThomas Gleixner  *	to complete before returning. If you use this function while
9618258f72SThomas Gleixner  *	holding a resource the IRQ handler may need, you will deadlock.
9718258f72SThomas Gleixner  *
9818258f72SThomas Gleixner  *	This function may be called - with care - from IRQ context.
9918258f72SThomas Gleixner  */
10018258f72SThomas Gleixner void synchronize_irq(unsigned int irq)
10118258f72SThomas Gleixner {
10218258f72SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
10318258f72SThomas Gleixner 
10418258f72SThomas Gleixner 	if (desc) {
10518258f72SThomas Gleixner 		__synchronize_hardirq(desc);
10618258f72SThomas Gleixner 		/*
10718258f72SThomas Gleixner 		 * We made sure that no hardirq handler is
10818258f72SThomas Gleixner 		 * running. Now verify that no threaded handlers are
10918258f72SThomas Gleixner 		 * active.
11018258f72SThomas Gleixner 		 */
11118258f72SThomas Gleixner 		wait_event(desc->wait_for_threads,
11218258f72SThomas Gleixner 			   !atomic_read(&desc->threads_active));
11318258f72SThomas Gleixner 	}
1141da177e4SLinus Torvalds }
1151da177e4SLinus Torvalds EXPORT_SYMBOL(synchronize_irq);
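/*
 * Usage sketch (hypothetical identifiers): make sure no handler, hard or
 * threaded, still observes state that is about to be torn down. This may
 * sleep, so it must be called from process context.
 *
 *	dev->shutting_down = true;
 *	synchronize_irq(dev->irq);
 *	foo_free_buffers(dev);
 */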
1161da177e4SLinus Torvalds 
1173aa551c9SThomas Gleixner #ifdef CONFIG_SMP
1183aa551c9SThomas Gleixner cpumask_var_t irq_default_affinity;
1193aa551c9SThomas Gleixner 
1209c255583SThomas Gleixner static bool __irq_can_set_affinity(struct irq_desc *desc)
121e019c249SJiang Liu {
122e019c249SJiang Liu 	if (!desc || !irqd_can_balance(&desc->irq_data) ||
123e019c249SJiang Liu 	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
1249c255583SThomas Gleixner 		return false;
1259c255583SThomas Gleixner 	return true;
126e019c249SJiang Liu }
127e019c249SJiang Liu 
128771ee3b0SThomas Gleixner /**
129771ee3b0SThomas Gleixner  *	irq_can_set_affinity - Check if the affinity of a given irq can be set
130771ee3b0SThomas Gleixner  *	@irq:		Interrupt to check
131771ee3b0SThomas Gleixner  *
132771ee3b0SThomas Gleixner  */
133771ee3b0SThomas Gleixner int irq_can_set_affinity(unsigned int irq)
134771ee3b0SThomas Gleixner {
135e019c249SJiang Liu 	return __irq_can_set_affinity(irq_to_desc(irq));
136771ee3b0SThomas Gleixner }
137771ee3b0SThomas Gleixner 
138591d2fb0SThomas Gleixner /**
1399c255583SThomas Gleixner  * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
1409c255583SThomas Gleixner  * @irq:	Interrupt to check
1419c255583SThomas Gleixner  *
1429c255583SThomas Gleixner  * Like irq_can_set_affinity() above, but additionally checks for the
1439c255583SThomas Gleixner  * AFFINITY_MANAGED flag.
1449c255583SThomas Gleixner  */
1459c255583SThomas Gleixner bool irq_can_set_affinity_usr(unsigned int irq)
1469c255583SThomas Gleixner {
1479c255583SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
1489c255583SThomas Gleixner 
1499c255583SThomas Gleixner 	return __irq_can_set_affinity(desc) &&
1509c255583SThomas Gleixner 		!irqd_affinity_is_managed(&desc->irq_data);
1519c255583SThomas Gleixner }
1529c255583SThomas Gleixner 
1539c255583SThomas Gleixner /**
154591d2fb0SThomas Gleixner  *	irq_set_thread_affinity - Notify irq threads to adjust affinity
155591d2fb0SThomas Gleixner  *	@desc:		irq descriptor whose affinity has changed
156591d2fb0SThomas Gleixner  *
157591d2fb0SThomas Gleixner  *	We just set IRQTF_AFFINITY and delegate the affinity setting
158591d2fb0SThomas Gleixner  *	to the interrupt thread itself. We cannot call
159591d2fb0SThomas Gleixner  *	set_cpus_allowed_ptr() here as we hold desc->lock and this
160591d2fb0SThomas Gleixner  *	code can be called from hard interrupt context.
161591d2fb0SThomas Gleixner  */
162591d2fb0SThomas Gleixner void irq_set_thread_affinity(struct irq_desc *desc)
1633aa551c9SThomas Gleixner {
164f944b5a7SDaniel Lezcano 	struct irqaction *action;
1653aa551c9SThomas Gleixner 
166f944b5a7SDaniel Lezcano 	for_each_action_of_desc(desc, action)
1673aa551c9SThomas Gleixner 		if (action->thread)
168591d2fb0SThomas Gleixner 			set_bit(IRQTF_AFFINITY, &action->thread_flags);
1693aa551c9SThomas Gleixner }
1703aa551c9SThomas Gleixner 
17119e1d4e9SThomas Gleixner static void irq_validate_effective_affinity(struct irq_data *data)
17219e1d4e9SThomas Gleixner {
17319e1d4e9SThomas Gleixner #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
17419e1d4e9SThomas Gleixner 	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
17519e1d4e9SThomas Gleixner 	struct irq_chip *chip = irq_data_get_irq_chip(data);
17619e1d4e9SThomas Gleixner 
17719e1d4e9SThomas Gleixner 	if (!cpumask_empty(m))
17819e1d4e9SThomas Gleixner 		return;
17919e1d4e9SThomas Gleixner 	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
18019e1d4e9SThomas Gleixner 		     chip->name, data->irq);
18119e1d4e9SThomas Gleixner #endif
18219e1d4e9SThomas Gleixner }
18319e1d4e9SThomas Gleixner 
184818b0f3bSJiang Liu int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
185818b0f3bSJiang Liu 			bool force)
186818b0f3bSJiang Liu {
187818b0f3bSJiang Liu 	struct irq_desc *desc = irq_data_to_desc(data);
188818b0f3bSJiang Liu 	struct irq_chip *chip = irq_data_get_irq_chip(data);
189818b0f3bSJiang Liu 	int ret;
190818b0f3bSJiang Liu 
191e43b3b58SThomas Gleixner 	if (!chip || !chip->irq_set_affinity)
192e43b3b58SThomas Gleixner 		return -EINVAL;
193e43b3b58SThomas Gleixner 
19401f8fa4fSThomas Gleixner 	ret = chip->irq_set_affinity(data, mask, force);
195818b0f3bSJiang Liu 	switch (ret) {
196818b0f3bSJiang Liu 	case IRQ_SET_MASK_OK:
1972cb62547SJiang Liu 	case IRQ_SET_MASK_OK_DONE:
1989df872faSJiang Liu 		cpumask_copy(desc->irq_common_data.affinity, mask);
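		/* fall through */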
199818b0f3bSJiang Liu 	case IRQ_SET_MASK_OK_NOCOPY:
20019e1d4e9SThomas Gleixner 		irq_validate_effective_affinity(data);
201818b0f3bSJiang Liu 		irq_set_thread_affinity(desc);
202818b0f3bSJiang Liu 		ret = 0;
203818b0f3bSJiang Liu 	}
204818b0f3bSJiang Liu 
205818b0f3bSJiang Liu 	return ret;
206818b0f3bSJiang Liu }
207818b0f3bSJiang Liu 
20801f8fa4fSThomas Gleixner int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
20901f8fa4fSThomas Gleixner 			    bool force)
210c2d0c555SDavid Daney {
211c2d0c555SDavid Daney 	struct irq_chip *chip = irq_data_get_irq_chip(data);
212c2d0c555SDavid Daney 	struct irq_desc *desc = irq_data_to_desc(data);
213c2d0c555SDavid Daney 	int ret = 0;
214c2d0c555SDavid Daney 
215c2d0c555SDavid Daney 	if (!chip || !chip->irq_set_affinity)
216c2d0c555SDavid Daney 		return -EINVAL;
217c2d0c555SDavid Daney 
2180ef5ca1eSThomas Gleixner 	if (irq_can_move_pcntxt(data)) {
21901f8fa4fSThomas Gleixner 		ret = irq_do_set_affinity(data, mask, force);
220c2d0c555SDavid Daney 	} else {
221c2d0c555SDavid Daney 		irqd_set_move_pending(data);
222c2d0c555SDavid Daney 		irq_copy_pending(desc, mask);
223c2d0c555SDavid Daney 	}
224c2d0c555SDavid Daney 
225c2d0c555SDavid Daney 	if (desc->affinity_notify) {
226c2d0c555SDavid Daney 		kref_get(&desc->affinity_notify->kref);
227c2d0c555SDavid Daney 		schedule_work(&desc->affinity_notify->work);
228c2d0c555SDavid Daney 	}
229c2d0c555SDavid Daney 	irqd_set(data, IRQD_AFFINITY_SET);
230c2d0c555SDavid Daney 
231c2d0c555SDavid Daney 	return ret;
232c2d0c555SDavid Daney }
233c2d0c555SDavid Daney 
23401f8fa4fSThomas Gleixner int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
235771ee3b0SThomas Gleixner {
23608678b08SYinghai Lu 	struct irq_desc *desc = irq_to_desc(irq);
237f6d87f4bSThomas Gleixner 	unsigned long flags;
238c2d0c555SDavid Daney 	int ret;
239771ee3b0SThomas Gleixner 
240c2d0c555SDavid Daney 	if (!desc)
241771ee3b0SThomas Gleixner 		return -EINVAL;
242771ee3b0SThomas Gleixner 
243239007b8SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
24401f8fa4fSThomas Gleixner 	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
245239007b8SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2461fa46f1fSThomas Gleixner 	return ret;
247771ee3b0SThomas Gleixner }
248771ee3b0SThomas Gleixner 
249e7a297b0SPeter P Waskiewicz Jr int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
250e7a297b0SPeter P Waskiewicz Jr {
251e7a297b0SPeter P Waskiewicz Jr 	unsigned long flags;
25231d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
253e7a297b0SPeter P Waskiewicz Jr 
254e7a297b0SPeter P Waskiewicz Jr 	if (!desc)
255e7a297b0SPeter P Waskiewicz Jr 		return -EINVAL;
256e7a297b0SPeter P Waskiewicz Jr 	desc->affinity_hint = m;
25702725e74SThomas Gleixner 	irq_put_desc_unlock(desc, flags);
258e2e64a93SJesse Brandeburg 	/* set the initial affinity to prevent every interrupt being on CPU0 */
2594fe7ffb7SJesse Brandeburg 	if (m)
260e2e64a93SJesse Brandeburg 		__irq_set_affinity(irq, m, false);
261e7a297b0SPeter P Waskiewicz Jr 	return 0;
262e7a297b0SPeter P Waskiewicz Jr }
263e7a297b0SPeter P Waskiewicz Jr EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
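/*
 * Usage sketch (hypothetical): a multi-queue driver spreading its MSI-X
 * vectors across CPUs and clearing the hints again before free_irq().
 * "pdev" and "nvec" are made-up driver state; note that a non-NULL mask
 * is also applied as the initial affinity by the code above.
 *
 *	for (i = 0; i < nvec; i++)
 *		irq_set_affinity_hint(pci_irq_vector(pdev, i),
 *				      cpumask_of(i % num_online_cpus()));
 *
 * and on teardown:
 *
 *	irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
 */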
264e7a297b0SPeter P Waskiewicz Jr 
265cd7eab44SBen Hutchings static void irq_affinity_notify(struct work_struct *work)
266cd7eab44SBen Hutchings {
267cd7eab44SBen Hutchings 	struct irq_affinity_notify *notify =
268cd7eab44SBen Hutchings 		container_of(work, struct irq_affinity_notify, work);
269cd7eab44SBen Hutchings 	struct irq_desc *desc = irq_to_desc(notify->irq);
270cd7eab44SBen Hutchings 	cpumask_var_t cpumask;
271cd7eab44SBen Hutchings 	unsigned long flags;
272cd7eab44SBen Hutchings 
2731fa46f1fSThomas Gleixner 	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
274cd7eab44SBen Hutchings 		goto out;
275cd7eab44SBen Hutchings 
276cd7eab44SBen Hutchings 	raw_spin_lock_irqsave(&desc->lock, flags);
2770ef5ca1eSThomas Gleixner 	if (irq_move_pending(&desc->irq_data))
2781fa46f1fSThomas Gleixner 		irq_get_pending(cpumask, desc);
279cd7eab44SBen Hutchings 	else
2809df872faSJiang Liu 		cpumask_copy(cpumask, desc->irq_common_data.affinity);
281cd7eab44SBen Hutchings 	raw_spin_unlock_irqrestore(&desc->lock, flags);
282cd7eab44SBen Hutchings 
283cd7eab44SBen Hutchings 	notify->notify(notify, cpumask);
284cd7eab44SBen Hutchings 
285cd7eab44SBen Hutchings 	free_cpumask_var(cpumask);
286cd7eab44SBen Hutchings out:
287cd7eab44SBen Hutchings 	kref_put(&notify->kref, notify->release);
288cd7eab44SBen Hutchings }
289cd7eab44SBen Hutchings 
290cd7eab44SBen Hutchings /**
291cd7eab44SBen Hutchings  *	irq_set_affinity_notifier - control notification of IRQ affinity changes
292cd7eab44SBen Hutchings  *	@irq:		Interrupt for which to enable/disable notification
293cd7eab44SBen Hutchings  *	@notify:	Context for notification, or %NULL to disable
294cd7eab44SBen Hutchings  *			notification.  Function pointers must be initialised;
295cd7eab44SBen Hutchings  *			the other fields will be initialised by this function.
296cd7eab44SBen Hutchings  *
297cd7eab44SBen Hutchings  *	Must be called in process context.  Notification may only be enabled
298cd7eab44SBen Hutchings  *	after the IRQ is allocated and must be disabled before the IRQ is
299cd7eab44SBen Hutchings  *	freed using free_irq().
300cd7eab44SBen Hutchings  */
301cd7eab44SBen Hutchings int
302cd7eab44SBen Hutchings irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
303cd7eab44SBen Hutchings {
304cd7eab44SBen Hutchings 	struct irq_desc *desc = irq_to_desc(irq);
305cd7eab44SBen Hutchings 	struct irq_affinity_notify *old_notify;
306cd7eab44SBen Hutchings 	unsigned long flags;
307cd7eab44SBen Hutchings 
308cd7eab44SBen Hutchings 	/* The release function is promised process context */
309cd7eab44SBen Hutchings 	might_sleep();
310cd7eab44SBen Hutchings 
311cd7eab44SBen Hutchings 	if (!desc)
312cd7eab44SBen Hutchings 		return -EINVAL;
313cd7eab44SBen Hutchings 
314cd7eab44SBen Hutchings 	/* Complete initialisation of *notify */
315cd7eab44SBen Hutchings 	if (notify) {
316cd7eab44SBen Hutchings 		notify->irq = irq;
317cd7eab44SBen Hutchings 		kref_init(&notify->kref);
318cd7eab44SBen Hutchings 		INIT_WORK(&notify->work, irq_affinity_notify);
319cd7eab44SBen Hutchings 	}
320cd7eab44SBen Hutchings 
321cd7eab44SBen Hutchings 	raw_spin_lock_irqsave(&desc->lock, flags);
322cd7eab44SBen Hutchings 	old_notify = desc->affinity_notify;
323cd7eab44SBen Hutchings 	desc->affinity_notify = notify;
324cd7eab44SBen Hutchings 	raw_spin_unlock_irqrestore(&desc->lock, flags);
325cd7eab44SBen Hutchings 
326cd7eab44SBen Hutchings 	if (old_notify)
327cd7eab44SBen Hutchings 		kref_put(&old_notify->kref, old_notify->release);
328cd7eab44SBen Hutchings 
329cd7eab44SBen Hutchings 	return 0;
330cd7eab44SBen Hutchings }
331cd7eab44SBen Hutchings EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
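/*
 * Usage sketch (hypothetical, all "foo_*" names invented): follow affinity
 * changes, e.g. to re-point flow steering. The notify callback runs from a
 * workqueue, so it may sleep.
 *
 *	static void foo_affinity_notify(struct irq_affinity_notify *notify,
 *					const cpumask_t *mask)
 *	{
 *		struct foo_queue *q = container_of(notify, struct foo_queue,
 *						   affinity_notify);
 *
 *		foo_retarget_queue(q, mask);
 *	}
 *
 *	static void foo_affinity_release(struct kref *ref)
 *	{
 *		// nothing dynamically allocated in this sketch
 *	}
 *
 *	q->affinity_notify.notify = foo_affinity_notify;
 *	q->affinity_notify.release = foo_affinity_release;
 *	irq_set_affinity_notifier(q->irq, &q->affinity_notify);
 *
 * and irq_set_affinity_notifier(q->irq, NULL) before free_irq().
 */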
332cd7eab44SBen Hutchings 
33318404756SMax Krasnyansky #ifndef CONFIG_AUTO_IRQ_AFFINITY
33418404756SMax Krasnyansky /*
33518404756SMax Krasnyansky  * Generic version of the affinity autoselector.
33618404756SMax Krasnyansky  */
33743564bd9SThomas Gleixner int irq_setup_affinity(struct irq_desc *desc)
33818404756SMax Krasnyansky {
339569bda8dSThomas Gleixner 	struct cpumask *set = irq_default_affinity;
340cba4235eSThomas Gleixner 	int ret, node = irq_desc_get_node(desc);
341cba4235eSThomas Gleixner 	static DEFINE_RAW_SPINLOCK(mask_lock);
342cba4235eSThomas Gleixner 	static struct cpumask mask;
343569bda8dSThomas Gleixner 
344b008207cSThomas Gleixner 	/* Excludes PER_CPU and NO_BALANCE interrupts */
345e019c249SJiang Liu 	if (!__irq_can_set_affinity(desc))
34618404756SMax Krasnyansky 		return 0;
34718404756SMax Krasnyansky 
348cba4235eSThomas Gleixner 	raw_spin_lock(&mask_lock);
349f6d87f4bSThomas Gleixner 	/*
3509332ef9dSMasahiro Yamada 	 * Preserve the managed affinity setting and a userspace affinity
35106ee6d57SThomas Gleixner 	 * setup, but make sure that one of the targets is online.
352f6d87f4bSThomas Gleixner 	 */
35306ee6d57SThomas Gleixner 	if (irqd_affinity_is_managed(&desc->irq_data) ||
35406ee6d57SThomas Gleixner 	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
3559df872faSJiang Liu 		if (cpumask_intersects(desc->irq_common_data.affinity,
356569bda8dSThomas Gleixner 				       cpu_online_mask))
3579df872faSJiang Liu 			set = desc->irq_common_data.affinity;
3580c6f8a8bSThomas Gleixner 		else
3592bdd1055SThomas Gleixner 			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
3602bdd1055SThomas Gleixner 	}
36118404756SMax Krasnyansky 
362cba4235eSThomas Gleixner 	cpumask_and(&mask, cpu_online_mask, set);
363241fc640SPrarit Bhargava 	if (node != NUMA_NO_NODE) {
364241fc640SPrarit Bhargava 		const struct cpumask *nodemask = cpumask_of_node(node);
365241fc640SPrarit Bhargava 
366241fc640SPrarit Bhargava 		/* make sure at least one of the cpus in nodemask is online */
367cba4235eSThomas Gleixner 		if (cpumask_intersects(&mask, nodemask))
368cba4235eSThomas Gleixner 			cpumask_and(&mask, &mask, nodemask);
369241fc640SPrarit Bhargava 	}
370cba4235eSThomas Gleixner 	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
371cba4235eSThomas Gleixner 	raw_spin_unlock(&mask_lock);
372cba4235eSThomas Gleixner 	return ret;
37318404756SMax Krasnyansky }
374f6d87f4bSThomas Gleixner #else
375a8a98eacSJiang Liu /* Wrapper for ALPHA specific affinity selector magic */
376cba4235eSThomas Gleixner int irq_setup_affinity(struct irq_desc *desc)
377f6d87f4bSThomas Gleixner {
378cba4235eSThomas Gleixner 	return irq_select_affinity(irq_desc_get_irq(desc));
379f6d87f4bSThomas Gleixner }
38018404756SMax Krasnyansky #endif
38118404756SMax Krasnyansky 
382f6d87f4bSThomas Gleixner /*
383cba4235eSThomas Gleixner  * Called when a bogus affinity is set via /proc/irq
384f6d87f4bSThomas Gleixner  */
385cba4235eSThomas Gleixner int irq_select_affinity_usr(unsigned int irq)
386f6d87f4bSThomas Gleixner {
387f6d87f4bSThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
388f6d87f4bSThomas Gleixner 	unsigned long flags;
389f6d87f4bSThomas Gleixner 	int ret;
390f6d87f4bSThomas Gleixner 
391239007b8SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
392cba4235eSThomas Gleixner 	ret = irq_setup_affinity(desc);
393239007b8SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
394f6d87f4bSThomas Gleixner 	return ret;
395f6d87f4bSThomas Gleixner }
3961da177e4SLinus Torvalds #endif
3971da177e4SLinus Torvalds 
398fcf1ae2fSFeng Wu /**
399fcf1ae2fSFeng Wu  *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
400fcf1ae2fSFeng Wu  *	@irq: interrupt number to set affinity
401250a53d6SChristoffer Dall  *	@vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
402250a53d6SChristoffer Dall  *	            specific data for percpu_devid interrupts
403fcf1ae2fSFeng Wu  *
404fcf1ae2fSFeng Wu  *	This function uses the vCPU specific data to set the vCPU
405fcf1ae2fSFeng Wu  *	affinity for an irq. The vCPU specific data is passed from
406fcf1ae2fSFeng Wu  *	outside, such as KVM. One example code path is as below:
407fcf1ae2fSFeng Wu  *	KVM -> IOMMU -> irq_set_vcpu_affinity().
408fcf1ae2fSFeng Wu  */
409fcf1ae2fSFeng Wu int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
410fcf1ae2fSFeng Wu {
411fcf1ae2fSFeng Wu 	unsigned long flags;
412fcf1ae2fSFeng Wu 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
413fcf1ae2fSFeng Wu 	struct irq_data *data;
414fcf1ae2fSFeng Wu 	struct irq_chip *chip;
415fcf1ae2fSFeng Wu 	int ret = -ENOSYS;
416fcf1ae2fSFeng Wu 
417fcf1ae2fSFeng Wu 	if (!desc)
418fcf1ae2fSFeng Wu 		return -EINVAL;
419fcf1ae2fSFeng Wu 
420fcf1ae2fSFeng Wu 	data = irq_desc_get_irq_data(desc);
4210abce64aSMarc Zyngier 	do {
422fcf1ae2fSFeng Wu 		chip = irq_data_get_irq_chip(data);
423fcf1ae2fSFeng Wu 		if (chip && chip->irq_set_vcpu_affinity)
4240abce64aSMarc Zyngier 			break;
4250abce64aSMarc Zyngier #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
4260abce64aSMarc Zyngier 		data = data->parent_data;
4270abce64aSMarc Zyngier #else
4280abce64aSMarc Zyngier 		data = NULL;
4290abce64aSMarc Zyngier #endif
4300abce64aSMarc Zyngier 	} while (data);
4310abce64aSMarc Zyngier 
4320abce64aSMarc Zyngier 	if (data)
433fcf1ae2fSFeng Wu 		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
434fcf1ae2fSFeng Wu 	irq_put_desc_unlock(desc, flags);
435fcf1ae2fSFeng Wu 
436fcf1ae2fSFeng Wu 	return ret;
437fcf1ae2fSFeng Wu }
438fcf1ae2fSFeng Wu EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
439fcf1ae2fSFeng Wu 
44079ff1cdaSJiang Liu void __disable_irq(struct irq_desc *desc)
4410a0c5168SRafael J. Wysocki {
4423aae994fSThomas Gleixner 	if (!desc->depth++)
44387923470SThomas Gleixner 		irq_disable(desc);
4440a0c5168SRafael J. Wysocki }
4450a0c5168SRafael J. Wysocki 
44602725e74SThomas Gleixner static int __disable_irq_nosync(unsigned int irq)
44702725e74SThomas Gleixner {
44802725e74SThomas Gleixner 	unsigned long flags;
44931d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
45002725e74SThomas Gleixner 
45102725e74SThomas Gleixner 	if (!desc)
45202725e74SThomas Gleixner 		return -EINVAL;
45379ff1cdaSJiang Liu 	__disable_irq(desc);
45402725e74SThomas Gleixner 	irq_put_desc_busunlock(desc, flags);
45502725e74SThomas Gleixner 	return 0;
45602725e74SThomas Gleixner }
45702725e74SThomas Gleixner 
4581da177e4SLinus Torvalds /**
4591da177e4SLinus Torvalds  *	disable_irq_nosync - disable an irq without waiting
4601da177e4SLinus Torvalds  *	@irq: Interrupt to disable
4611da177e4SLinus Torvalds  *
4621da177e4SLinus Torvalds  *	Disable the selected interrupt line.  Disables and Enables are
4631da177e4SLinus Torvalds  *	nested.
4641da177e4SLinus Torvalds  *	Unlike disable_irq(), this function does not ensure existing
4651da177e4SLinus Torvalds  *	instances of the IRQ handler have completed before returning.
4661da177e4SLinus Torvalds  *
4671da177e4SLinus Torvalds  *	This function may be called from IRQ context.
4681da177e4SLinus Torvalds  */
4691da177e4SLinus Torvalds void disable_irq_nosync(unsigned int irq)
4701da177e4SLinus Torvalds {
47102725e74SThomas Gleixner 	__disable_irq_nosync(irq);
4721da177e4SLinus Torvalds }
4731da177e4SLinus Torvalds EXPORT_SYMBOL(disable_irq_nosync);
4741da177e4SLinus Torvalds 
4751da177e4SLinus Torvalds /**
4761da177e4SLinus Torvalds  *	disable_irq - disable an irq and wait for completion
4771da177e4SLinus Torvalds  *	@irq: Interrupt to disable
4781da177e4SLinus Torvalds  *
4791da177e4SLinus Torvalds  *	Disable the selected interrupt line.  Enables and Disables are
4801da177e4SLinus Torvalds  *	nested.
4811da177e4SLinus Torvalds  *	This function waits for any pending IRQ handlers for this interrupt
4821da177e4SLinus Torvalds  *	to complete before returning. If you use this function while
4831da177e4SLinus Torvalds  *	holding a resource the IRQ handler may need, you will deadlock.
4841da177e4SLinus Torvalds  *
4851da177e4SLinus Torvalds  *	This function may be called - with care - from IRQ context.
4861da177e4SLinus Torvalds  */
4871da177e4SLinus Torvalds void disable_irq(unsigned int irq)
4881da177e4SLinus Torvalds {
48902725e74SThomas Gleixner 	if (!__disable_irq_nosync(irq))
4901da177e4SLinus Torvalds 		synchronize_irq(irq);
4911da177e4SLinus Torvalds }
4921da177e4SLinus Torvalds EXPORT_SYMBOL(disable_irq);
4931da177e4SLinus Torvalds 
49402cea395SPeter Zijlstra /**
49502cea395SPeter Zijlstra  *	disable_hardirq - disables an irq and waits for hardirq completion
49602cea395SPeter Zijlstra  *	@irq: Interrupt to disable
49702cea395SPeter Zijlstra  *
49802cea395SPeter Zijlstra  *	Disable the selected interrupt line.  Enables and Disables are
49902cea395SPeter Zijlstra  *	nested.
50002cea395SPeter Zijlstra  *	This function waits for any pending hard IRQ handlers for this
50102cea395SPeter Zijlstra  *	interrupt to complete before returning. If you use this function while
50202cea395SPeter Zijlstra  *	holding a resource the hard IRQ handler may need, you will deadlock.
50302cea395SPeter Zijlstra  *
50402cea395SPeter Zijlstra  *	When used to optimistically disable an interrupt from atomic context
50502cea395SPeter Zijlstra  *	the return value must be checked.
50602cea395SPeter Zijlstra  *
50702cea395SPeter Zijlstra  *	Returns: false if a threaded handler is active.
50802cea395SPeter Zijlstra  *
50902cea395SPeter Zijlstra  *	This function may be called - with care - from IRQ context.
51002cea395SPeter Zijlstra  */
51102cea395SPeter Zijlstra bool disable_hardirq(unsigned int irq)
51202cea395SPeter Zijlstra {
51302cea395SPeter Zijlstra 	if (!__disable_irq_nosync(irq))
51402cea395SPeter Zijlstra 		return synchronize_hardirq(irq);
51502cea395SPeter Zijlstra 
51602cea395SPeter Zijlstra 	return false;
51702cea395SPeter Zijlstra }
51802cea395SPeter Zijlstra EXPORT_SYMBOL_GPL(disable_hardirq);
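/*
 * Usage sketch (hypothetical): the "optimistic disable from atomic context"
 * case mentioned above - only poll when no threaded handler can still be
 * running. foo_poll() is a made-up routine.
 *
 *	if (disable_hardirq(dev->irq))
 *		foo_poll(dev);
 *	enable_irq(dev->irq);
 */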
51902cea395SPeter Zijlstra 
52079ff1cdaSJiang Liu void __enable_irq(struct irq_desc *desc)
5211adb0850SThomas Gleixner {
5221adb0850SThomas Gleixner 	switch (desc->depth) {
5231adb0850SThomas Gleixner 	case 0:
5240a0c5168SRafael J. Wysocki  err_out:
52579ff1cdaSJiang Liu 		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
52679ff1cdaSJiang Liu 		     irq_desc_get_irq(desc));
5271adb0850SThomas Gleixner 		break;
5281adb0850SThomas Gleixner 	case 1: {
529c531e836SThomas Gleixner 		if (desc->istate & IRQS_SUSPENDED)
5300a0c5168SRafael J. Wysocki 			goto err_out;
5311adb0850SThomas Gleixner 		/* Prevent probing on this irq: */
5321ccb4e61SThomas Gleixner 		irq_settings_set_noprobe(desc);
533201d7f47SThomas Gleixner 		/*
534201d7f47SThomas Gleixner 		 * Call irq_startup() not irq_enable() here because the
535201d7f47SThomas Gleixner 		 * interrupt might be marked NOAUTOEN. So irq_startup()
536201d7f47SThomas Gleixner 		 * needs to be invoked when it gets enabled the first
537201d7f47SThomas Gleixner 		 * time. If it was already started up, then irq_startup()
538201d7f47SThomas Gleixner 		 * will invoke irq_enable() under the hood.
539201d7f47SThomas Gleixner 		 */
540c942cee4SThomas Gleixner 		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
541201d7f47SThomas Gleixner 		break;
5421adb0850SThomas Gleixner 	}
5431adb0850SThomas Gleixner 	default:
5441adb0850SThomas Gleixner 		desc->depth--;
5451adb0850SThomas Gleixner 	}
5461adb0850SThomas Gleixner }
5471adb0850SThomas Gleixner 
5481da177e4SLinus Torvalds /**
5491da177e4SLinus Torvalds  *	enable_irq - enable handling of an irq
5501da177e4SLinus Torvalds  *	@irq: Interrupt to enable
5511da177e4SLinus Torvalds  *
5521da177e4SLinus Torvalds  *	Undoes the effect of one call to disable_irq().  If this
5531da177e4SLinus Torvalds  *	matches the last disable, processing of interrupts on this
5541da177e4SLinus Torvalds  *	IRQ line is re-enabled.
5551da177e4SLinus Torvalds  *
55670aedd24SThomas Gleixner  *	This function may be called from IRQ context only when
5576b8ff312SThomas Gleixner  *	desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock are NULL!
5581da177e4SLinus Torvalds  */
5591da177e4SLinus Torvalds void enable_irq(unsigned int irq)
5601da177e4SLinus Torvalds {
5611da177e4SLinus Torvalds 	unsigned long flags;
56231d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
5631da177e4SLinus Torvalds 
5647d94f7caSYinghai Lu 	if (!desc)
565c2b5a251SMatthew Wilcox 		return;
56650f7c032SThomas Gleixner 	if (WARN(!desc->irq_data.chip,
5672656c366SThomas Gleixner 		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
56802725e74SThomas Gleixner 		goto out;
5692656c366SThomas Gleixner 
57079ff1cdaSJiang Liu 	__enable_irq(desc);
57102725e74SThomas Gleixner out:
57202725e74SThomas Gleixner 	irq_put_desc_busunlock(desc, flags);
5731da177e4SLinus Torvalds }
5741da177e4SLinus Torvalds EXPORT_SYMBOL(enable_irq);
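/*
 * Sketch of the nesting rule (hypothetical call sequence): disables stack,
 * so the line is only unmasked again by the final matching enable_irq().
 *
 *	disable_irq(irq);	// depth 0 -> 1, line masked
 *	disable_irq(irq);	// depth 1 -> 2
 *	enable_irq(irq);	// depth 2 -> 1, still masked
 *	enable_irq(irq);	// depth 1 -> 0, line unmasked
 */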
5751da177e4SLinus Torvalds 
5760c5d1eb7SDavid Brownell static int set_irq_wake_real(unsigned int irq, unsigned int on)
5772db87321SUwe Kleine-König {
57808678b08SYinghai Lu 	struct irq_desc *desc = irq_to_desc(irq);
5792db87321SUwe Kleine-König 	int ret = -ENXIO;
5802db87321SUwe Kleine-König 
58160f96b41SSantosh Shilimkar 	if (irq_desc_get_chip(desc)->flags &  IRQCHIP_SKIP_SET_WAKE)
58260f96b41SSantosh Shilimkar 		return 0;
58360f96b41SSantosh Shilimkar 
5842f7e99bbSThomas Gleixner 	if (desc->irq_data.chip->irq_set_wake)
5852f7e99bbSThomas Gleixner 		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
5862db87321SUwe Kleine-König 
5872db87321SUwe Kleine-König 	return ret;
5882db87321SUwe Kleine-König }
5892db87321SUwe Kleine-König 
590ba9a2331SThomas Gleixner /**
591a0cd9ca2SThomas Gleixner  *	irq_set_irq_wake - control irq power management wakeup
592ba9a2331SThomas Gleixner  *	@irq:	interrupt to control
593ba9a2331SThomas Gleixner  *	@on:	enable/disable power management wakeup
594ba9a2331SThomas Gleixner  *
59515a647ebSDavid Brownell  *	Enable/disable power management wakeup mode, which is
59615a647ebSDavid Brownell  *	disabled by default.  Enables and disables must match,
59715a647ebSDavid Brownell  *	just as they match for non-wakeup mode support.
59815a647ebSDavid Brownell  *
59915a647ebSDavid Brownell  *	Wakeup mode lets this IRQ wake the system from sleep
60015a647ebSDavid Brownell  *	states like "suspend to RAM".
601ba9a2331SThomas Gleixner  */
602a0cd9ca2SThomas Gleixner int irq_set_irq_wake(unsigned int irq, unsigned int on)
603ba9a2331SThomas Gleixner {
604ba9a2331SThomas Gleixner 	unsigned long flags;
60531d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
6062db87321SUwe Kleine-König 	int ret = 0;
607ba9a2331SThomas Gleixner 
60813863a66SJesper Juhl 	if (!desc)
60913863a66SJesper Juhl 		return -EINVAL;
61013863a66SJesper Juhl 
61115a647ebSDavid Brownell 	/* wakeup-capable irqs can be shared between drivers that
61215a647ebSDavid Brownell 	 * don't need to have the same sleep mode behaviors.
61315a647ebSDavid Brownell 	 */
61415a647ebSDavid Brownell 	if (on) {
6152db87321SUwe Kleine-König 		if (desc->wake_depth++ == 0) {
6162db87321SUwe Kleine-König 			ret = set_irq_wake_real(irq, on);
6172db87321SUwe Kleine-König 			if (ret)
6182db87321SUwe Kleine-König 				desc->wake_depth = 0;
61915a647ebSDavid Brownell 			else
6207f94226fSThomas Gleixner 				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
6212db87321SUwe Kleine-König 		}
62215a647ebSDavid Brownell 	} else {
62315a647ebSDavid Brownell 		if (desc->wake_depth == 0) {
6247a2c4770SArjan van de Ven 			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
6252db87321SUwe Kleine-König 		} else if (--desc->wake_depth == 0) {
6262db87321SUwe Kleine-König 			ret = set_irq_wake_real(irq, on);
6272db87321SUwe Kleine-König 			if (ret)
6282db87321SUwe Kleine-König 				desc->wake_depth = 1;
62915a647ebSDavid Brownell 			else
6307f94226fSThomas Gleixner 				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
63115a647ebSDavid Brownell 		}
6322db87321SUwe Kleine-König 	}
63302725e74SThomas Gleixner 	irq_put_desc_busunlock(desc, flags);
634ba9a2331SThomas Gleixner 	return ret;
635ba9a2331SThomas Gleixner }
636a0cd9ca2SThomas Gleixner EXPORT_SYMBOL(irq_set_irq_wake);
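/*
 * Usage sketch (hypothetical "foo" driver): most drivers use the
 * enable_irq_wake()/disable_irq_wake() wrappers from <linux/interrupt.h>,
 * keeping the calls balanced across suspend/resume.
 *
 *	static int foo_suspend(struct device *d)
 *	{
 *		struct foo *foo = dev_get_drvdata(d);
 *
 *		if (device_may_wakeup(d))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *d)
 *	{
 *		struct foo *foo = dev_get_drvdata(d);
 *
 *		if (device_may_wakeup(d))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */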
637ba9a2331SThomas Gleixner 
6381da177e4SLinus Torvalds /*
6391da177e4SLinus Torvalds  * Internal function that tells the architecture code whether a
6401da177e4SLinus Torvalds  * particular irq has been exclusively allocated or is available
6411da177e4SLinus Torvalds  * for driver use.
6421da177e4SLinus Torvalds  */
6431da177e4SLinus Torvalds int can_request_irq(unsigned int irq, unsigned long irqflags)
6441da177e4SLinus Torvalds {
645cc8c3b78SThomas Gleixner 	unsigned long flags;
64631d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
64702725e74SThomas Gleixner 	int canrequest = 0;
6481da177e4SLinus Torvalds 
6497d94f7caSYinghai Lu 	if (!desc)
6507d94f7caSYinghai Lu 		return 0;
6517d94f7caSYinghai Lu 
65202725e74SThomas Gleixner 	if (irq_settings_can_request(desc)) {
6532779db8dSBen Hutchings 		if (!desc->action ||
6542779db8dSBen Hutchings 		    irqflags & desc->action->flags & IRQF_SHARED)
65502725e74SThomas Gleixner 			canrequest = 1;
65602725e74SThomas Gleixner 	}
65702725e74SThomas Gleixner 	irq_put_desc_unlock(desc, flags);
65802725e74SThomas Gleixner 	return canrequest;
6591da177e4SLinus Torvalds }
6601da177e4SLinus Torvalds 
661a1ff541aSJiang Liu int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
66282736f4dSUwe Kleine-König {
6636b8ff312SThomas Gleixner 	struct irq_chip *chip = desc->irq_data.chip;
664d4d5e089SThomas Gleixner 	int ret, unmask = 0;
66582736f4dSUwe Kleine-König 
666b2ba2c30SThomas Gleixner 	if (!chip || !chip->irq_set_type) {
66782736f4dSUwe Kleine-König 		/*
66882736f4dSUwe Kleine-König 		 * IRQF_TRIGGER_* but the PIC does not support multiple
66982736f4dSUwe Kleine-König 		 * flow-types?
67082736f4dSUwe Kleine-König 		 */
671a1ff541aSJiang Liu 		pr_debug("No set_type function for IRQ %d (%s)\n",
672a1ff541aSJiang Liu 			 irq_desc_get_irq(desc),
67382736f4dSUwe Kleine-König 			 chip ? (chip->name ? : "unknown") : "unknown");
67482736f4dSUwe Kleine-König 		return 0;
67582736f4dSUwe Kleine-König 	}
67682736f4dSUwe Kleine-König 
677d4d5e089SThomas Gleixner 	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
67832f4125eSThomas Gleixner 		if (!irqd_irq_masked(&desc->irq_data))
679d4d5e089SThomas Gleixner 			mask_irq(desc);
68032f4125eSThomas Gleixner 		if (!irqd_irq_disabled(&desc->irq_data))
681d4d5e089SThomas Gleixner 			unmask = 1;
682d4d5e089SThomas Gleixner 	}
683d4d5e089SThomas Gleixner 
68400b992deSAlexander Kuleshov 	/* Mask all flags except trigger mode */
68500b992deSAlexander Kuleshov 	flags &= IRQ_TYPE_SENSE_MASK;
686b2ba2c30SThomas Gleixner 	ret = chip->irq_set_type(&desc->irq_data, flags);
68782736f4dSUwe Kleine-König 
688876dbd4cSThomas Gleixner 	switch (ret) {
689876dbd4cSThomas Gleixner 	case IRQ_SET_MASK_OK:
6902cb62547SJiang Liu 	case IRQ_SET_MASK_OK_DONE:
691876dbd4cSThomas Gleixner 		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
692876dbd4cSThomas Gleixner 		irqd_set(&desc->irq_data, flags);
693876dbd4cSThomas Gleixner 
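		/* fall through */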
694876dbd4cSThomas Gleixner 	case IRQ_SET_MASK_OK_NOCOPY:
695876dbd4cSThomas Gleixner 		flags = irqd_get_trigger_type(&desc->irq_data);
696876dbd4cSThomas Gleixner 		irq_settings_set_trigger_mask(desc, flags);
697876dbd4cSThomas Gleixner 		irqd_clear(&desc->irq_data, IRQD_LEVEL);
698876dbd4cSThomas Gleixner 		irq_settings_clr_level(desc);
699876dbd4cSThomas Gleixner 		if (flags & IRQ_TYPE_LEVEL_MASK) {
700876dbd4cSThomas Gleixner 			irq_settings_set_level(desc);
701876dbd4cSThomas Gleixner 			irqd_set(&desc->irq_data, IRQD_LEVEL);
702876dbd4cSThomas Gleixner 		}
70346732475SThomas Gleixner 
704d4d5e089SThomas Gleixner 		ret = 0;
7058fff39e0SThomas Gleixner 		break;
706876dbd4cSThomas Gleixner 	default:
70797fd75b7SAndrew Morton 		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
708a1ff541aSJiang Liu 		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
7090c5d1eb7SDavid Brownell 	}
710d4d5e089SThomas Gleixner 	if (unmask)
711d4d5e089SThomas Gleixner 		unmask_irq(desc);
71282736f4dSUwe Kleine-König 	return ret;
71382736f4dSUwe Kleine-König }
71482736f4dSUwe Kleine-König 
715293a7a0aSThomas Gleixner #ifdef CONFIG_HARDIRQS_SW_RESEND
716293a7a0aSThomas Gleixner int irq_set_parent(int irq, int parent_irq)
717293a7a0aSThomas Gleixner {
718293a7a0aSThomas Gleixner 	unsigned long flags;
719293a7a0aSThomas Gleixner 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
720293a7a0aSThomas Gleixner 
721293a7a0aSThomas Gleixner 	if (!desc)
722293a7a0aSThomas Gleixner 		return -EINVAL;
723293a7a0aSThomas Gleixner 
724293a7a0aSThomas Gleixner 	desc->parent_irq = parent_irq;
725293a7a0aSThomas Gleixner 
726293a7a0aSThomas Gleixner 	irq_put_desc_unlock(desc, flags);
727293a7a0aSThomas Gleixner 	return 0;
728293a7a0aSThomas Gleixner }
7293118dac5SSudip Mukherjee EXPORT_SYMBOL_GPL(irq_set_parent);
730293a7a0aSThomas Gleixner #endif
731293a7a0aSThomas Gleixner 
732b25c340cSThomas Gleixner /*
733b25c340cSThomas Gleixner  * Default primary interrupt handler for threaded interrupts. Is
734b25c340cSThomas Gleixner  * assigned as primary handler when request_threaded_irq is called
735b25c340cSThomas Gleixner  * with handler == NULL. Useful for oneshot interrupts.
736b25c340cSThomas Gleixner  */
737b25c340cSThomas Gleixner static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
738b25c340cSThomas Gleixner {
739b25c340cSThomas Gleixner 	return IRQ_WAKE_THREAD;
740b25c340cSThomas Gleixner }
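/*
 * Usage sketch (hypothetical): this default handler is what backs the
 * common "NULL primary handler" pattern. IRQF_ONESHOT is required in that
 * case, which __setup_irq() enforces.
 *
 *	ret = request_threaded_irq(client->irq, NULL, foo_thread_handler,
 *				   IRQF_ONESHOT | IRQF_TRIGGER_LOW,
 *				   "foo", foo);
 */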
741b25c340cSThomas Gleixner 
742399b5da2SThomas Gleixner /*
743399b5da2SThomas Gleixner  * Primary handler for nested threaded interrupts. Should never be
744399b5da2SThomas Gleixner  * called.
745399b5da2SThomas Gleixner  */
746399b5da2SThomas Gleixner static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
747399b5da2SThomas Gleixner {
748399b5da2SThomas Gleixner 	WARN(1, "Primary handler called for nested irq %d\n", irq);
749399b5da2SThomas Gleixner 	return IRQ_NONE;
750399b5da2SThomas Gleixner }
751399b5da2SThomas Gleixner 
7522a1d3ab8SThomas Gleixner static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
7532a1d3ab8SThomas Gleixner {
7542a1d3ab8SThomas Gleixner 	WARN(1, "Secondary action handler called for irq %d\n", irq);
7552a1d3ab8SThomas Gleixner 	return IRQ_NONE;
7562a1d3ab8SThomas Gleixner }
7572a1d3ab8SThomas Gleixner 
7583aa551c9SThomas Gleixner static int irq_wait_for_interrupt(struct irqaction *action)
7593aa551c9SThomas Gleixner {
7603aa551c9SThomas Gleixner 	set_current_state(TASK_INTERRUPTIBLE);
761f48fe81eSThomas Gleixner 
762550acb19SIdo Yariv 	while (!kthread_should_stop()) {
763550acb19SIdo Yariv 
764f48fe81eSThomas Gleixner 		if (test_and_clear_bit(IRQTF_RUNTHREAD,
765f48fe81eSThomas Gleixner 				       &action->thread_flags)) {
7663aa551c9SThomas Gleixner 			__set_current_state(TASK_RUNNING);
7673aa551c9SThomas Gleixner 			return 0;
768f48fe81eSThomas Gleixner 		}
7693aa551c9SThomas Gleixner 		schedule();
770550acb19SIdo Yariv 		set_current_state(TASK_INTERRUPTIBLE);
7713aa551c9SThomas Gleixner 	}
772550acb19SIdo Yariv 	__set_current_state(TASK_RUNNING);
7733aa551c9SThomas Gleixner 	return -1;
7743aa551c9SThomas Gleixner }
7753aa551c9SThomas Gleixner 
776b25c340cSThomas Gleixner /*
777b25c340cSThomas Gleixner  * Oneshot interrupts keep the irq line masked until the threaded
778b25c340cSThomas Gleixner  * handler has finished. Unmask if the interrupt has not been disabled and
779b25c340cSThomas Gleixner  * is marked MASKED.
780b25c340cSThomas Gleixner  */
781b5faba21SThomas Gleixner static void irq_finalize_oneshot(struct irq_desc *desc,
782f3f79e38SAlexander Gordeev 				 struct irqaction *action)
783b25c340cSThomas Gleixner {
7842a1d3ab8SThomas Gleixner 	if (!(desc->istate & IRQS_ONESHOT) ||
7852a1d3ab8SThomas Gleixner 	    action->handler == irq_forced_secondary_handler)
786b5faba21SThomas Gleixner 		return;
7870b1adaa0SThomas Gleixner again:
7883876ec9eSThomas Gleixner 	chip_bus_lock(desc);
789239007b8SThomas Gleixner 	raw_spin_lock_irq(&desc->lock);
7900b1adaa0SThomas Gleixner 
7910b1adaa0SThomas Gleixner 	/*
7920b1adaa0SThomas Gleixner 	 * Implausible though it may be, we need to protect ourselves against
7930b1adaa0SThomas Gleixner 	 * the following scenario:
7940b1adaa0SThomas Gleixner 	 *
7950b1adaa0SThomas Gleixner 	 * The thread finishes faster than the hard interrupt handler
7960b1adaa0SThomas Gleixner 	 * on the other CPU. If we unmask the irq line then the
7970b1adaa0SThomas Gleixner 	 * interrupt can come in again, mask the line and leave due
798009b4c3bSThomas Gleixner 	 * to IRQS_INPROGRESS, and the irq line is masked forever.
799b5faba21SThomas Gleixner 	 *
800b5faba21SThomas Gleixner 	 * This also serializes the state of shared oneshot handlers
801b5faba21SThomas Gleixner 	 * versus "desc->threads_oneshot |= action->thread_mask;" in
802b5faba21SThomas Gleixner 	 * irq_wake_thread(). See the comment there which explains the
803b5faba21SThomas Gleixner 	 * serialization.
8040b1adaa0SThomas Gleixner 	 */
80532f4125eSThomas Gleixner 	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
8060b1adaa0SThomas Gleixner 		raw_spin_unlock_irq(&desc->lock);
8073876ec9eSThomas Gleixner 		chip_bus_sync_unlock(desc);
8080b1adaa0SThomas Gleixner 		cpu_relax();
8090b1adaa0SThomas Gleixner 		goto again;
8100b1adaa0SThomas Gleixner 	}
8110b1adaa0SThomas Gleixner 
812b5faba21SThomas Gleixner 	/*
813b5faba21SThomas Gleixner 	 * Now check again whether the thread should run. Otherwise
814b5faba21SThomas Gleixner 	 * we would clear the threads_oneshot bit of this thread which
815b5faba21SThomas Gleixner 	 * was just set.
816b5faba21SThomas Gleixner 	 */
817f3f79e38SAlexander Gordeev 	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
818b5faba21SThomas Gleixner 		goto out_unlock;
819b5faba21SThomas Gleixner 
820b5faba21SThomas Gleixner 	desc->threads_oneshot &= ~action->thread_mask;
821b5faba21SThomas Gleixner 
82232f4125eSThomas Gleixner 	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
82332f4125eSThomas Gleixner 	    irqd_irq_masked(&desc->irq_data))
824328a4978SThomas Gleixner 		unmask_threaded_irq(desc);
82532f4125eSThomas Gleixner 
826b5faba21SThomas Gleixner out_unlock:
827239007b8SThomas Gleixner 	raw_spin_unlock_irq(&desc->lock);
8283876ec9eSThomas Gleixner 	chip_bus_sync_unlock(desc);
829b25c340cSThomas Gleixner }
830b25c340cSThomas Gleixner 
83161f38261SBruno Premont #ifdef CONFIG_SMP
8323aa551c9SThomas Gleixner /*
833b04c644eSChuansheng Liu  * Check whether we need to change the affinity of the interrupt thread.
834591d2fb0SThomas Gleixner  */
835591d2fb0SThomas Gleixner static void
836591d2fb0SThomas Gleixner irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
837591d2fb0SThomas Gleixner {
838591d2fb0SThomas Gleixner 	cpumask_var_t mask;
83904aa530eSThomas Gleixner 	bool valid = true;
840591d2fb0SThomas Gleixner 
841591d2fb0SThomas Gleixner 	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
842591d2fb0SThomas Gleixner 		return;
843591d2fb0SThomas Gleixner 
844591d2fb0SThomas Gleixner 	/*
845591d2fb0SThomas Gleixner 	 * In case we are out of memory we set IRQTF_AFFINITY again and
846591d2fb0SThomas Gleixner 	 * try again next time
847591d2fb0SThomas Gleixner 	 */
848591d2fb0SThomas Gleixner 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
849591d2fb0SThomas Gleixner 		set_bit(IRQTF_AFFINITY, &action->thread_flags);
850591d2fb0SThomas Gleixner 		return;
851591d2fb0SThomas Gleixner 	}
852591d2fb0SThomas Gleixner 
853239007b8SThomas Gleixner 	raw_spin_lock_irq(&desc->lock);
85404aa530eSThomas Gleixner 	/*
85504aa530eSThomas Gleixner 	 * This code is triggered unconditionally. Check the affinity
85604aa530eSThomas Gleixner 	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
85704aa530eSThomas Gleixner 	 */
858*cbf86999SThomas Gleixner 	if (cpumask_available(desc->irq_common_data.affinity)) {
859*cbf86999SThomas Gleixner 		const struct cpumask *m;
860*cbf86999SThomas Gleixner 
861*cbf86999SThomas Gleixner 		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
862*cbf86999SThomas Gleixner 		cpumask_copy(mask, m);
863*cbf86999SThomas Gleixner 	} else {
86404aa530eSThomas Gleixner 		valid = false;
865*cbf86999SThomas Gleixner 	}
866239007b8SThomas Gleixner 	raw_spin_unlock_irq(&desc->lock);
867591d2fb0SThomas Gleixner 
86804aa530eSThomas Gleixner 	if (valid)
869591d2fb0SThomas Gleixner 		set_cpus_allowed_ptr(current, mask);
870591d2fb0SThomas Gleixner 	free_cpumask_var(mask);
871591d2fb0SThomas Gleixner }
87261f38261SBruno Premont #else
87361f38261SBruno Premont static inline void
87461f38261SBruno Premont irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
87561f38261SBruno Premont #endif
876591d2fb0SThomas Gleixner 
877591d2fb0SThomas Gleixner /*
8788d32a307SThomas Gleixner  * Interrupts which are not explicitly requested as threaded
8798d32a307SThomas Gleixner  * interrupts rely on the implicit bh/preempt disable of the hard irq
8808d32a307SThomas Gleixner  * context. So we need to disable bh here to avoid deadlocks and other
8818d32a307SThomas Gleixner  * side effects.
8828d32a307SThomas Gleixner  */
8833a43e05fSSebastian Andrzej Siewior static irqreturn_t
8848d32a307SThomas Gleixner irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
8858d32a307SThomas Gleixner {
8863a43e05fSSebastian Andrzej Siewior 	irqreturn_t ret;
8873a43e05fSSebastian Andrzej Siewior 
8888d32a307SThomas Gleixner 	local_bh_disable();
8893a43e05fSSebastian Andrzej Siewior 	ret = action->thread_fn(action->irq, action->dev_id);
890f3f79e38SAlexander Gordeev 	irq_finalize_oneshot(desc, action);
8918d32a307SThomas Gleixner 	local_bh_enable();
8923a43e05fSSebastian Andrzej Siewior 	return ret;
8938d32a307SThomas Gleixner }
8948d32a307SThomas Gleixner 
8958d32a307SThomas Gleixner /*
896f788e7bfSXie XiuQi  * Interrupts explicitly requested as threaded interrupts want to be
8978d32a307SThomas Gleixner  * preemptible - many of them need to sleep and wait for slow buses to
8988d32a307SThomas Gleixner  * complete.
8998d32a307SThomas Gleixner  */
9003a43e05fSSebastian Andrzej Siewior static irqreturn_t irq_thread_fn(struct irq_desc *desc,
9013a43e05fSSebastian Andrzej Siewior 		struct irqaction *action)
9028d32a307SThomas Gleixner {
9033a43e05fSSebastian Andrzej Siewior 	irqreturn_t ret;
9043a43e05fSSebastian Andrzej Siewior 
9053a43e05fSSebastian Andrzej Siewior 	ret = action->thread_fn(action->irq, action->dev_id);
906f3f79e38SAlexander Gordeev 	irq_finalize_oneshot(desc, action);
9073a43e05fSSebastian Andrzej Siewior 	return ret;
9088d32a307SThomas Gleixner }
9098d32a307SThomas Gleixner 
9107140ea19SIdo Yariv static void wake_threads_waitq(struct irq_desc *desc)
9117140ea19SIdo Yariv {
912c685689fSChuansheng Liu 	if (atomic_dec_and_test(&desc->threads_active))
9137140ea19SIdo Yariv 		wake_up(&desc->wait_for_threads);
9147140ea19SIdo Yariv }
9157140ea19SIdo Yariv 
91667d12145SAl Viro static void irq_thread_dtor(struct callback_head *unused)
9174d1d61a6SOleg Nesterov {
9184d1d61a6SOleg Nesterov 	struct task_struct *tsk = current;
9194d1d61a6SOleg Nesterov 	struct irq_desc *desc;
9204d1d61a6SOleg Nesterov 	struct irqaction *action;
9214d1d61a6SOleg Nesterov 
9224d1d61a6SOleg Nesterov 	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
9234d1d61a6SOleg Nesterov 		return;
9244d1d61a6SOleg Nesterov 
9254d1d61a6SOleg Nesterov 	action = kthread_data(tsk);
9264d1d61a6SOleg Nesterov 
927fb21affaSLinus Torvalds 	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
92819af395dSAlan Cox 	       tsk->comm, tsk->pid, action->irq);
9294d1d61a6SOleg Nesterov 
9304d1d61a6SOleg Nesterov 
9314d1d61a6SOleg Nesterov 	desc = irq_to_desc(action->irq);
9324d1d61a6SOleg Nesterov 	/*
9334d1d61a6SOleg Nesterov 	 * If IRQTF_RUNTHREAD is set, we need to decrement
9344d1d61a6SOleg Nesterov 	 * desc->threads_active and wake possible waiters.
9354d1d61a6SOleg Nesterov 	 */
9364d1d61a6SOleg Nesterov 	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
9374d1d61a6SOleg Nesterov 		wake_threads_waitq(desc);
9384d1d61a6SOleg Nesterov 
9394d1d61a6SOleg Nesterov 	/* Prevent a stale desc->threads_oneshot */
9404d1d61a6SOleg Nesterov 	irq_finalize_oneshot(desc, action);
9414d1d61a6SOleg Nesterov }
9424d1d61a6SOleg Nesterov 
9432a1d3ab8SThomas Gleixner static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
9442a1d3ab8SThomas Gleixner {
9452a1d3ab8SThomas Gleixner 	struct irqaction *secondary = action->secondary;
9462a1d3ab8SThomas Gleixner 
9472a1d3ab8SThomas Gleixner 	if (WARN_ON_ONCE(!secondary))
9482a1d3ab8SThomas Gleixner 		return;
9492a1d3ab8SThomas Gleixner 
9502a1d3ab8SThomas Gleixner 	raw_spin_lock_irq(&desc->lock);
9512a1d3ab8SThomas Gleixner 	__irq_wake_thread(desc, secondary);
9522a1d3ab8SThomas Gleixner 	raw_spin_unlock_irq(&desc->lock);
9532a1d3ab8SThomas Gleixner }
9542a1d3ab8SThomas Gleixner 
9558d32a307SThomas Gleixner /*
9563aa551c9SThomas Gleixner  * Interrupt handler thread
9573aa551c9SThomas Gleixner  */
9583aa551c9SThomas Gleixner static int irq_thread(void *data)
9593aa551c9SThomas Gleixner {
96067d12145SAl Viro 	struct callback_head on_exit_work;
9613aa551c9SThomas Gleixner 	struct irqaction *action = data;
9623aa551c9SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(action->irq);
9633a43e05fSSebastian Andrzej Siewior 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
9643a43e05fSSebastian Andrzej Siewior 			struct irqaction *action);
9653aa551c9SThomas Gleixner 
966540b60e2SAlexander Gordeev 	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
9678d32a307SThomas Gleixner 					&action->thread_flags))
9688d32a307SThomas Gleixner 		handler_fn = irq_forced_thread_fn;
9698d32a307SThomas Gleixner 	else
9708d32a307SThomas Gleixner 		handler_fn = irq_thread_fn;
9718d32a307SThomas Gleixner 
97241f9d29fSAl Viro 	init_task_work(&on_exit_work, irq_thread_dtor);
9734d1d61a6SOleg Nesterov 	task_work_add(current, &on_exit_work, false);
9743aa551c9SThomas Gleixner 
975f3de44edSSankara Muthukrishnan 	irq_thread_check_affinity(desc, action);
976f3de44edSSankara Muthukrishnan 
9773aa551c9SThomas Gleixner 	while (!irq_wait_for_interrupt(action)) {
9787140ea19SIdo Yariv 		irqreturn_t action_ret;
9793aa551c9SThomas Gleixner 
980591d2fb0SThomas Gleixner 		irq_thread_check_affinity(desc, action);
981591d2fb0SThomas Gleixner 
9823a43e05fSSebastian Andrzej Siewior 		action_ret = handler_fn(desc, action);
9831e77d0a1SThomas Gleixner 		if (action_ret == IRQ_HANDLED)
9841e77d0a1SThomas Gleixner 			atomic_inc(&desc->threads_handled);
9852a1d3ab8SThomas Gleixner 		if (action_ret == IRQ_WAKE_THREAD)
9862a1d3ab8SThomas Gleixner 			irq_wake_secondary(desc, action);
9877140ea19SIdo Yariv 
9887140ea19SIdo Yariv 		wake_threads_waitq(desc);
9893aa551c9SThomas Gleixner 	}
9903aa551c9SThomas Gleixner 
9917140ea19SIdo Yariv 	/*
9927140ea19SIdo Yariv 	 * This is the regular exit path. __free_irq() is stopping the
9937140ea19SIdo Yariv 	 * thread via kthread_stop() after calling
9947140ea19SIdo Yariv 	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
995e04268b0SThomas Gleixner 	 * oneshot mask bit can be set. We cannot verify that as we
996e04268b0SThomas Gleixner 	 * cannot touch the oneshot mask at this point anymore as
997e04268b0SThomas Gleixner 	 * __setup_irq() might have given out current's thread_mask
998e04268b0SThomas Gleixner 	 * again.
9993aa551c9SThomas Gleixner 	 */
10004d1d61a6SOleg Nesterov 	task_work_cancel(current, irq_thread_dtor);
10013aa551c9SThomas Gleixner 	return 0;
10023aa551c9SThomas Gleixner }
10033aa551c9SThomas Gleixner 
1004a92444c6SThomas Gleixner /**
1005a92444c6SThomas Gleixner  *	irq_wake_thread - wake the irq thread for the action identified by dev_id
1006a92444c6SThomas Gleixner  *	@irq:		Interrupt line
1007a92444c6SThomas Gleixner  *	@dev_id:	Device identity for which the thread should be woken
1008a92444c6SThomas Gleixner  *
1009a92444c6SThomas Gleixner  */
1010a92444c6SThomas Gleixner void irq_wake_thread(unsigned int irq, void *dev_id)
1011a92444c6SThomas Gleixner {
1012a92444c6SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
1013a92444c6SThomas Gleixner 	struct irqaction *action;
1014a92444c6SThomas Gleixner 	unsigned long flags;
1015a92444c6SThomas Gleixner 
1016a92444c6SThomas Gleixner 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1017a92444c6SThomas Gleixner 		return;
1018a92444c6SThomas Gleixner 
1019a92444c6SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
1020f944b5a7SDaniel Lezcano 	for_each_action_of_desc(desc, action) {
1021a92444c6SThomas Gleixner 		if (action->dev_id == dev_id) {
1022a92444c6SThomas Gleixner 			if (action->thread)
1023a92444c6SThomas Gleixner 				__irq_wake_thread(desc, action);
1024a92444c6SThomas Gleixner 			break;
1025a92444c6SThomas Gleixner 		}
1026a92444c6SThomas Gleixner 	}
1027a92444c6SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1028a92444c6SThomas Gleixner }
1029a92444c6SThomas Gleixner EXPORT_SYMBOL_GPL(irq_wake_thread);
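/*
 * Usage sketch (hypothetical): kick the threaded handler registered for
 * dev_id from some other context, e.g. when work for it was queued by a
 * different interrupt or a timer.
 *
 *	irq_wake_thread(dev->irq, dev);
 */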
1030a92444c6SThomas Gleixner 
10312a1d3ab8SThomas Gleixner static int irq_setup_forced_threading(struct irqaction *new)
10328d32a307SThomas Gleixner {
10338d32a307SThomas Gleixner 	if (!force_irqthreads)
10342a1d3ab8SThomas Gleixner 		return 0;
10358d32a307SThomas Gleixner 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
10362a1d3ab8SThomas Gleixner 		return 0;
10378d32a307SThomas Gleixner 
10388d32a307SThomas Gleixner 	new->flags |= IRQF_ONESHOT;
10398d32a307SThomas Gleixner 
10402a1d3ab8SThomas Gleixner 	/*
10412a1d3ab8SThomas Gleixner 	 * Handle the case where we have a real primary handler and a
10422a1d3ab8SThomas Gleixner 	 * thread handler. We force-thread them as well by creating a
10432a1d3ab8SThomas Gleixner 	 * secondary action.
10442a1d3ab8SThomas Gleixner 	 */
10452a1d3ab8SThomas Gleixner 	if (new->handler != irq_default_primary_handler && new->thread_fn) {
10462a1d3ab8SThomas Gleixner 		/* Allocate the secondary action */
10472a1d3ab8SThomas Gleixner 		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
10482a1d3ab8SThomas Gleixner 		if (!new->secondary)
10492a1d3ab8SThomas Gleixner 			return -ENOMEM;
10502a1d3ab8SThomas Gleixner 		new->secondary->handler = irq_forced_secondary_handler;
10512a1d3ab8SThomas Gleixner 		new->secondary->thread_fn = new->thread_fn;
10522a1d3ab8SThomas Gleixner 		new->secondary->dev_id = new->dev_id;
10532a1d3ab8SThomas Gleixner 		new->secondary->irq = new->irq;
10542a1d3ab8SThomas Gleixner 		new->secondary->name = new->name;
10552a1d3ab8SThomas Gleixner 	}
10562a1d3ab8SThomas Gleixner 	/* Deal with the primary handler */
10578d32a307SThomas Gleixner 	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
10588d32a307SThomas Gleixner 	new->thread_fn = new->handler;
10598d32a307SThomas Gleixner 	new->handler = irq_default_primary_handler;
10602a1d3ab8SThomas Gleixner 	return 0;
10618d32a307SThomas Gleixner }
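/*
 * Effect of the above (sketch, hypothetical "foo" names): with "threadirqs",
 * a plain request such as
 *
 *	request_irq(irq, foo_handler, 0, "foo", foo);
 *
 * behaves roughly like
 *
 *	request_threaded_irq(irq, irq_default_primary_handler, foo_handler,
 *			     IRQF_ONESHOT, "foo", foo);
 *
 * and a request that already has both a handler and a thread_fn gets an
 * internal secondary action so both run as threads.
 */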
10628d32a307SThomas Gleixner 
1063c1bacbaeSThomas Gleixner static int irq_request_resources(struct irq_desc *desc)
1064c1bacbaeSThomas Gleixner {
1065c1bacbaeSThomas Gleixner 	struct irq_data *d = &desc->irq_data;
1066c1bacbaeSThomas Gleixner 	struct irq_chip *c = d->chip;
1067c1bacbaeSThomas Gleixner 
1068c1bacbaeSThomas Gleixner 	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1069c1bacbaeSThomas Gleixner }
1070c1bacbaeSThomas Gleixner 
1071c1bacbaeSThomas Gleixner static void irq_release_resources(struct irq_desc *desc)
1072c1bacbaeSThomas Gleixner {
1073c1bacbaeSThomas Gleixner 	struct irq_data *d = &desc->irq_data;
1074c1bacbaeSThomas Gleixner 	struct irq_chip *c = d->chip;
1075c1bacbaeSThomas Gleixner 
1076c1bacbaeSThomas Gleixner 	if (c->irq_release_resources)
1077c1bacbaeSThomas Gleixner 		c->irq_release_resources(d);
1078c1bacbaeSThomas Gleixner }
1079c1bacbaeSThomas Gleixner 
10802a1d3ab8SThomas Gleixner static int
10812a1d3ab8SThomas Gleixner setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
10822a1d3ab8SThomas Gleixner {
10832a1d3ab8SThomas Gleixner 	struct task_struct *t;
10842a1d3ab8SThomas Gleixner 	struct sched_param param = {
10852a1d3ab8SThomas Gleixner 		.sched_priority = MAX_USER_RT_PRIO/2,
10862a1d3ab8SThomas Gleixner 	};
10872a1d3ab8SThomas Gleixner 
10882a1d3ab8SThomas Gleixner 	if (!secondary) {
10892a1d3ab8SThomas Gleixner 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
10902a1d3ab8SThomas Gleixner 				   new->name);
10912a1d3ab8SThomas Gleixner 	} else {
10922a1d3ab8SThomas Gleixner 		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
10932a1d3ab8SThomas Gleixner 				   new->name);
10942a1d3ab8SThomas Gleixner 		param.sched_priority -= 1;
10952a1d3ab8SThomas Gleixner 	}
10962a1d3ab8SThomas Gleixner 
10972a1d3ab8SThomas Gleixner 	if (IS_ERR(t))
10982a1d3ab8SThomas Gleixner 		return PTR_ERR(t);
10992a1d3ab8SThomas Gleixner 
11002a1d3ab8SThomas Gleixner 	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
11012a1d3ab8SThomas Gleixner 
11022a1d3ab8SThomas Gleixner 	/*
11032a1d3ab8SThomas Gleixner 	 * We keep the reference to the task struct even if
11042a1d3ab8SThomas Gleixner 	 * the thread dies to avoid that the interrupt code
11052a1d3ab8SThomas Gleixner 	 * references an already freed task_struct.
11062a1d3ab8SThomas Gleixner 	 */
11072a1d3ab8SThomas Gleixner 	get_task_struct(t);
11082a1d3ab8SThomas Gleixner 	new->thread = t;
11092a1d3ab8SThomas Gleixner 	/*
11102a1d3ab8SThomas Gleixner 	 * Tell the thread to set its affinity. This is
11112a1d3ab8SThomas Gleixner 	 * important for shared interrupt handlers as we do
11122a1d3ab8SThomas Gleixner 	 * not invoke setup_affinity() for the secondary
11132a1d3ab8SThomas Gleixner 	 * handlers as everything is already set up. Even for
11142a1d3ab8SThomas Gleixner 	 * interrupts marked with IRQF_NOBALANCING this is
11152a1d3ab8SThomas Gleixner 	 * correct as we want the thread to move to the cpu(s)
11162a1d3ab8SThomas Gleixner 	 * on which the requesting code placed the interrupt.
11172a1d3ab8SThomas Gleixner 	 */
11182a1d3ab8SThomas Gleixner 	set_bit(IRQTF_AFFINITY, &new->thread_flags);
11192a1d3ab8SThomas Gleixner 	return 0;
11202a1d3ab8SThomas Gleixner }
11212a1d3ab8SThomas Gleixner 
11221da177e4SLinus Torvalds /*
11231da177e4SLinus Torvalds  * Internal function to register an irqaction - typically used to
11241da177e4SLinus Torvalds  * allocate special interrupts that are part of the architecture.
112519d39a38SThomas Gleixner  *
112619d39a38SThomas Gleixner  * Locking rules:
112719d39a38SThomas Gleixner  *
112819d39a38SThomas Gleixner  * desc->request_mutex	Provides serialization against a concurrent free_irq()
112919d39a38SThomas Gleixner  *   chip_bus_lock	Provides serialization for slow bus operations
113019d39a38SThomas Gleixner  *     desc->lock	Provides serialization against hard interrupts
113119d39a38SThomas Gleixner  *
113219d39a38SThomas Gleixner  * chip_bus_lock and desc->lock are sufficient for all other management and
113319d39a38SThomas Gleixner  * interrupt related functions. desc->request_mutex solely serializes
113419d39a38SThomas Gleixner  * request/free_irq().
11351da177e4SLinus Torvalds  */
1136d3c60047SThomas Gleixner static int
1137d3c60047SThomas Gleixner __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
11381da177e4SLinus Torvalds {
1139f17c7545SIngo Molnar 	struct irqaction *old, **old_ptr;
1140b5faba21SThomas Gleixner 	unsigned long flags, thread_mask = 0;
11413b8249e7SThomas Gleixner 	int ret, nested, shared = 0;
11421da177e4SLinus Torvalds 
11437d94f7caSYinghai Lu 	if (!desc)
1144c2b5a251SMatthew Wilcox 		return -EINVAL;
1145c2b5a251SMatthew Wilcox 
11466b8ff312SThomas Gleixner 	if (desc->irq_data.chip == &no_irq_chip)
11471da177e4SLinus Torvalds 		return -ENOSYS;
1148b6873807SSebastian Andrzej Siewior 	if (!try_module_get(desc->owner))
1149b6873807SSebastian Andrzej Siewior 		return -ENODEV;
11501da177e4SLinus Torvalds 
11512a1d3ab8SThomas Gleixner 	new->irq = irq;
11522a1d3ab8SThomas Gleixner 
11531da177e4SLinus Torvalds 	/*
11544b357daeSJon Hunter 	 * If the trigger type is not specified by the caller,
11554b357daeSJon Hunter 	 * then use the default for this interrupt.
11564b357daeSJon Hunter 	 */
11574b357daeSJon Hunter 	if (!(new->flags & IRQF_TRIGGER_MASK))
11584b357daeSJon Hunter 		new->flags |= irqd_get_trigger_type(&desc->irq_data);
11594b357daeSJon Hunter 
11604b357daeSJon Hunter 	/*
1161399b5da2SThomas Gleixner 	 * Check whether the interrupt nests into another interrupt
1162399b5da2SThomas Gleixner 	 * thread.
11633aa551c9SThomas Gleixner 	 */
11641ccb4e61SThomas Gleixner 	nested = irq_settings_is_nested_thread(desc);
1165399b5da2SThomas Gleixner 	if (nested) {
1166b6873807SSebastian Andrzej Siewior 		if (!new->thread_fn) {
1167b6873807SSebastian Andrzej Siewior 			ret = -EINVAL;
1168b6873807SSebastian Andrzej Siewior 			goto out_mput;
1169b6873807SSebastian Andrzej Siewior 		}
1170399b5da2SThomas Gleixner 		/*
1171399b5da2SThomas Gleixner 		 * Replace the primary handler which was provided from
1172399b5da2SThomas Gleixner 		 * the driver for non nested interrupt handling by the
1173399b5da2SThomas Gleixner 		 * dummy function which warns when called.
1174399b5da2SThomas Gleixner 		 */
1175399b5da2SThomas Gleixner 		new->handler = irq_nested_primary_handler;
11768d32a307SThomas Gleixner 	} else {
11772a1d3ab8SThomas Gleixner 		if (irq_settings_can_thread(desc)) {
11782a1d3ab8SThomas Gleixner 			ret = irq_setup_forced_threading(new);
11792a1d3ab8SThomas Gleixner 			if (ret)
11802a1d3ab8SThomas Gleixner 				goto out_mput;
11812a1d3ab8SThomas Gleixner 		}
1182399b5da2SThomas Gleixner 	}
1183399b5da2SThomas Gleixner 
1184399b5da2SThomas Gleixner 	/*
1185399b5da2SThomas Gleixner 	 * Create a handler thread when a thread function is supplied
1186399b5da2SThomas Gleixner 	 * and the interrupt does not nest into another interrupt
1187399b5da2SThomas Gleixner 	 * thread.
1188399b5da2SThomas Gleixner 	 */
1189399b5da2SThomas Gleixner 	if (new->thread_fn && !nested) {
11902a1d3ab8SThomas Gleixner 		ret = setup_irq_thread(new, irq, false);
11912a1d3ab8SThomas Gleixner 		if (ret)
1192b6873807SSebastian Andrzej Siewior 			goto out_mput;
11932a1d3ab8SThomas Gleixner 		if (new->secondary) {
11942a1d3ab8SThomas Gleixner 			ret = setup_irq_thread(new->secondary, irq, true);
11952a1d3ab8SThomas Gleixner 			if (ret)
11962a1d3ab8SThomas Gleixner 				goto out_thread;
1197b6873807SSebastian Andrzej Siewior 		}
11983aa551c9SThomas Gleixner 	}
11993aa551c9SThomas Gleixner 
12003aa551c9SThomas Gleixner 	/*
1201dc9b229aSThomas Gleixner 	 * Drivers are often written to work w/o knowledge about the
1202dc9b229aSThomas Gleixner 	 * underlying irq chip implementation, so a request for a
1203dc9b229aSThomas Gleixner 	 * threaded irq without a primary hard irq context handler
1204dc9b229aSThomas Gleixner 	 * requires the ONESHOT flag to be set. Some irq chips like
1205dc9b229aSThomas Gleixner 	 * MSI based interrupts are per se one shot safe. Check the
1206dc9b229aSThomas Gleixner 	 * chip flags, so we can avoid the unmask dance at the end of
1207dc9b229aSThomas Gleixner 	 * the threaded handler for those.
1208dc9b229aSThomas Gleixner 	 */
1209dc9b229aSThomas Gleixner 	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1210dc9b229aSThomas Gleixner 		new->flags &= ~IRQF_ONESHOT;
1211dc9b229aSThomas Gleixner 
121219d39a38SThomas Gleixner 	/*
121319d39a38SThomas Gleixner 	 * Protects against a concurrent __free_irq() call which might wait
121419d39a38SThomas Gleixner 	 * for synchronize_irq() to complete without holding the optional
121519d39a38SThomas Gleixner 	 * chip bus lock and desc->lock.
121619d39a38SThomas Gleixner 	 */
12179114014cSThomas Gleixner 	mutex_lock(&desc->request_mutex);
121819d39a38SThomas Gleixner 
121919d39a38SThomas Gleixner 	/*
122019d39a38SThomas Gleixner 	 * Acquire bus lock as the irq_request_resources() callback below
122119d39a38SThomas Gleixner 	 * might rely on the serialization or the magic power management
122219d39a38SThomas Gleixner 	 * functions which are abusing the irq_bus_lock() callback,
122319d39a38SThomas Gleixner 	 * functions which are abusing the irq_bus_lock() callback.
122419d39a38SThomas Gleixner 	chip_bus_lock(desc);
122519d39a38SThomas Gleixner 
122619d39a38SThomas Gleixner 	/* First installed action requests resources. */
122746e48e25SThomas Gleixner 	if (!desc->action) {
122846e48e25SThomas Gleixner 		ret = irq_request_resources(desc);
122946e48e25SThomas Gleixner 		if (ret) {
123046e48e25SThomas Gleixner 			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
123146e48e25SThomas Gleixner 			       new->name, irq, desc->irq_data.chip->name);
123219d39a38SThomas Gleixner 			goto out_bus_unlock;
123346e48e25SThomas Gleixner 		}
123446e48e25SThomas Gleixner 	}
12359114014cSThomas Gleixner 
1236dc9b229aSThomas Gleixner 	/*
12371da177e4SLinus Torvalds 	 * The following block of code has to be executed atomically
123819d39a38SThomas Gleixner 	 * protected against a concurrent interrupt and any of the other
123919d39a38SThomas Gleixner 	 * management calls which are not serialized via
124019d39a38SThomas Gleixner 	 * desc->request_mutex or the optional bus lock.
12411da177e4SLinus Torvalds 	 */
1242239007b8SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
1243f17c7545SIngo Molnar 	old_ptr = &desc->action;
1244f17c7545SIngo Molnar 	old = *old_ptr;
124506fcb0c6SIngo Molnar 	if (old) {
1246e76de9f8SThomas Gleixner 		/*
1247e76de9f8SThomas Gleixner 		 * Can't share interrupts unless both agree to and are
1248e76de9f8SThomas Gleixner 		 * the same type (level, edge, polarity). So both flag
12493cca53b0SThomas Gleixner 		 * fields must have IRQF_SHARED set and the bits which
12509d591eddSThomas Gleixner 		 * set the trigger type must match. Also all must
12519d591eddSThomas Gleixner 		 * agree on ONESHOT.
1252e76de9f8SThomas Gleixner 		 */
12534f8413a3SMarc Zyngier 		unsigned int oldtype;
12544f8413a3SMarc Zyngier 
12554f8413a3SMarc Zyngier 		/*
12564f8413a3SMarc Zyngier 		 * If nobody did set the configuration before, inherit
12574f8413a3SMarc Zyngier 		 * the one provided by the requester.
12584f8413a3SMarc Zyngier 		 */
12594f8413a3SMarc Zyngier 		if (irqd_trigger_type_was_set(&desc->irq_data)) {
12604f8413a3SMarc Zyngier 			oldtype = irqd_get_trigger_type(&desc->irq_data);
12614f8413a3SMarc Zyngier 		} else {
12624f8413a3SMarc Zyngier 			oldtype = new->flags & IRQF_TRIGGER_MASK;
12634f8413a3SMarc Zyngier 			irqd_set_trigger_type(&desc->irq_data, oldtype);
12644f8413a3SMarc Zyngier 		}
1265382bd4deSHans de Goede 
12663cca53b0SThomas Gleixner 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
1267382bd4deSHans de Goede 		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1268f5d89470SThomas Gleixner 		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
1269f5163427SDimitri Sivanich 			goto mismatch;
1270f5163427SDimitri Sivanich 
1271f5163427SDimitri Sivanich 		/* All handlers must agree on per-cpuness */
12723cca53b0SThomas Gleixner 		if ((old->flags & IRQF_PERCPU) !=
12733cca53b0SThomas Gleixner 		    (new->flags & IRQF_PERCPU))
1274f5163427SDimitri Sivanich 			goto mismatch;
12751da177e4SLinus Torvalds 
12761da177e4SLinus Torvalds 		/* add new interrupt at end of irq queue */
12771da177e4SLinus Torvalds 		do {
127852abb700SThomas Gleixner 			/*
127952abb700SThomas Gleixner 			 * Or all existing action->thread_mask bits,
128052abb700SThomas Gleixner 			 * so we can find the next zero bit for this
128152abb700SThomas Gleixner 			 * new action.
128252abb700SThomas Gleixner 			 */
1283b5faba21SThomas Gleixner 			thread_mask |= old->thread_mask;
1284f17c7545SIngo Molnar 			old_ptr = &old->next;
1285f17c7545SIngo Molnar 			old = *old_ptr;
12861da177e4SLinus Torvalds 		} while (old);
12871da177e4SLinus Torvalds 		shared = 1;
12881da177e4SLinus Torvalds 	}
12891da177e4SLinus Torvalds 
1290b5faba21SThomas Gleixner 	/*
129152abb700SThomas Gleixner 	 * Setup the thread mask for this irqaction for ONESHOT. For
129252abb700SThomas Gleixner 	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
129352abb700SThomas Gleixner 	 * conditional in irq_wake_thread().
1294b5faba21SThomas Gleixner 	 */
129552abb700SThomas Gleixner 	if (new->flags & IRQF_ONESHOT) {
129652abb700SThomas Gleixner 		/*
129752abb700SThomas Gleixner 		 * Unlikely to have 32 (or 64) irqs sharing one line,
129852abb700SThomas Gleixner 		 * but who knows.
129952abb700SThomas Gleixner 		 */
130052abb700SThomas Gleixner 		if (thread_mask == ~0UL) {
1301b5faba21SThomas Gleixner 			ret = -EBUSY;
1302cba4235eSThomas Gleixner 			goto out_unlock;
1303b5faba21SThomas Gleixner 		}
130452abb700SThomas Gleixner 		/*
130552abb700SThomas Gleixner 		 * The thread_mask for the action is or'ed to
130652abb700SThomas Gleixner 		 * desc->thread_active to indicate that the
130752abb700SThomas Gleixner 		 * IRQF_ONESHOT thread handler has been woken, but not
130852abb700SThomas Gleixner 		 * yet finished. The bit is cleared when a thread
130952abb700SThomas Gleixner 		 * completes. When all threads of a shared interrupt
131052abb700SThomas Gleixner 		 * line have completed desc->threads_active becomes
131152abb700SThomas Gleixner 		 * zero and the interrupt line is unmasked. See
131252abb700SThomas Gleixner 		 * handle.c:irq_wake_thread() for further information.
131352abb700SThomas Gleixner 		 *
131452abb700SThomas Gleixner 		 * If no thread is woken by primary (hard irq context)
131552abb700SThomas Gleixner 		 * interrupt handlers, then desc->threads_active is
131652abb700SThomas Gleixner 		 * also checked for zero to unmask the irq line in the
131752abb700SThomas Gleixner 		 * affected hard irq flow handlers
131852abb700SThomas Gleixner 		 * (handle_[fasteoi|level]_irq).
131952abb700SThomas Gleixner 		 *
132052abb700SThomas Gleixner 		 * The new action gets the first zero bit of
132152abb700SThomas Gleixner 		 * thread_mask assigned. See the loop above which or's
132252abb700SThomas Gleixner 		 * all existing action->thread_mask bits.
132352abb700SThomas Gleixner 		 */
1324ffc661c9SRasmus Villemoes 		new->thread_mask = 1UL << ffz(thread_mask);
13251c6c6952SThomas Gleixner 
1326dc9b229aSThomas Gleixner 	} else if (new->handler == irq_default_primary_handler &&
1327dc9b229aSThomas Gleixner 		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
13281c6c6952SThomas Gleixner 		/*
13291c6c6952SThomas Gleixner 		 * The interrupt was requested with handler = NULL, so
13301c6c6952SThomas Gleixner 		 * we use the default primary handler for it. But it
13311c6c6952SThomas Gleixner 		 * does not have the oneshot flag set. In combination
13321c6c6952SThomas Gleixner 		 * with level interrupts this is deadly, because the
13331c6c6952SThomas Gleixner 		 * default primary handler just wakes the thread, then
13341c6c6952SThomas Gleixner 		 * the irq lines is reenabled, but the device still
13351c6c6952SThomas Gleixner 		 * the irq line is reenabled, but the device still
13361c6c6952SThomas Gleixner 		 *
13371c6c6952SThomas Gleixner 		 * While this works for edge type interrupts, we play
13381c6c6952SThomas Gleixner 		 * it safe and reject unconditionally because we can't
13391c6c6952SThomas Gleixner 		 * say for sure which type this interrupt really
13401c6c6952SThomas Gleixner 		 * has. The type flags are unreliable as the
13411c6c6952SThomas Gleixner 		 * underlying chip implementation can override them.
13421c6c6952SThomas Gleixner 		 */
134397fd75b7SAndrew Morton 		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
13441c6c6952SThomas Gleixner 		       irq);
13451c6c6952SThomas Gleixner 		ret = -EINVAL;
1346cba4235eSThomas Gleixner 		goto out_unlock;
134752abb700SThomas Gleixner 	}
1348b5faba21SThomas Gleixner 
13491da177e4SLinus Torvalds 	if (!shared) {
13503aa551c9SThomas Gleixner 		init_waitqueue_head(&desc->wait_for_threads);
13513aa551c9SThomas Gleixner 
135282736f4dSUwe Kleine-König 		/* Setup the type (level, edge polarity) if configured: */
135382736f4dSUwe Kleine-König 		if (new->flags & IRQF_TRIGGER_MASK) {
1354a1ff541aSJiang Liu 			ret = __irq_set_trigger(desc,
1355f2b662daSDavid Brownell 						new->flags & IRQF_TRIGGER_MASK);
135682736f4dSUwe Kleine-König 
135719d39a38SThomas Gleixner 			if (ret)
1358cba4235eSThomas Gleixner 				goto out_unlock;
1359091738a2SThomas Gleixner 		}
1360f75d222bSAhmed S. Darwish 
1361c942cee4SThomas Gleixner 		/*
1362c942cee4SThomas Gleixner 		 * Activate the interrupt. That activation must happen
1363c942cee4SThomas Gleixner 		 * independently of IRQ_NOAUTOEN. request_irq() can fail
1364c942cee4SThomas Gleixner 		 * and the callers are supposed to handle
1365c942cee4SThomas Gleixner 		 * that. enable_irq() of an interrupt requested with
1366c942cee4SThomas Gleixner 		 * IRQ_NOAUTOEN is not supposed to fail. The activation
1367c942cee4SThomas Gleixner 		 * keeps it in shutdown mode, it merely associates
1368c942cee4SThomas Gleixner 		 * resources if necessary and if that's not possible it
1369c942cee4SThomas Gleixner 		 * fails. Interrupts which are in managed shutdown mode
1370c942cee4SThomas Gleixner 		 * will simply ignore that activation request.
1371c942cee4SThomas Gleixner 		 */
1372c942cee4SThomas Gleixner 		ret = irq_activate(desc);
1373c942cee4SThomas Gleixner 		if (ret)
1374c942cee4SThomas Gleixner 			goto out_unlock;
1375c942cee4SThomas Gleixner 
1376009b4c3bSThomas Gleixner 		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
137732f4125eSThomas Gleixner 				  IRQS_ONESHOT | IRQS_WAITING);
137832f4125eSThomas Gleixner 		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
137994d39e1fSThomas Gleixner 
1380a005677bSThomas Gleixner 		if (new->flags & IRQF_PERCPU) {
1381a005677bSThomas Gleixner 			irqd_set(&desc->irq_data, IRQD_PER_CPU);
1382a005677bSThomas Gleixner 			irq_settings_set_per_cpu(desc);
1383a005677bSThomas Gleixner 		}
13846a58fb3bSThomas Gleixner 
1385b25c340cSThomas Gleixner 		if (new->flags & IRQF_ONESHOT)
13863d67baecSThomas Gleixner 			desc->istate |= IRQS_ONESHOT;
1387b25c340cSThomas Gleixner 
13882e051552SThomas Gleixner 		/* Exclude IRQ from balancing if requested */
13892e051552SThomas Gleixner 		if (new->flags & IRQF_NOBALANCING) {
13902e051552SThomas Gleixner 			irq_settings_set_no_balancing(desc);
13912e051552SThomas Gleixner 			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
13922e051552SThomas Gleixner 		}
13932e051552SThomas Gleixner 
139404c848d3SThomas Gleixner 		if (irq_settings_can_autoenable(desc)) {
13954cde9c6bSThomas Gleixner 			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
139604c848d3SThomas Gleixner 		} else {
139704c848d3SThomas Gleixner 			/*
139804c848d3SThomas Gleixner 			 * Shared interrupts do not go well with disabling
139904c848d3SThomas Gleixner 			 * auto enable. A device sharing the line might request
140004c848d3SThomas Gleixner 			 * it while it's still disabled and then wait for
140104c848d3SThomas Gleixner 			 * interrupts forever.
140204c848d3SThomas Gleixner 			 */
140304c848d3SThomas Gleixner 			WARN_ON_ONCE(new->flags & IRQF_SHARED);
1404e76de9f8SThomas Gleixner 			/* Undo nested disables: */
1405e76de9f8SThomas Gleixner 			desc->depth = 1;
140604c848d3SThomas Gleixner 		}
140718404756SMax Krasnyansky 
1408876dbd4cSThomas Gleixner 	} else if (new->flags & IRQF_TRIGGER_MASK) {
1409876dbd4cSThomas Gleixner 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
14107ee7e87dSThomas Gleixner 		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1411876dbd4cSThomas Gleixner 
1412876dbd4cSThomas Gleixner 		if (nmsk != omsk)
1413876dbd4cSThomas Gleixner 			/* hope the handler works with the current trigger mode */
1414a395d6a7SJoe Perches 			pr_warn("irq %d uses trigger mode %u; requested %u\n",
14157ee7e87dSThomas Gleixner 				irq, omsk, nmsk);
141694d39e1fSThomas Gleixner 	}
141782736f4dSUwe Kleine-König 
1418f17c7545SIngo Molnar 	*old_ptr = new;
141982736f4dSUwe Kleine-König 
1420cab303beSThomas Gleixner 	irq_pm_install_action(desc, new);
1421cab303beSThomas Gleixner 
14228528b0f1SLinus Torvalds 	/* Reset broken irq detection when installing new handler */
14238528b0f1SLinus Torvalds 	desc->irq_count = 0;
14248528b0f1SLinus Torvalds 	desc->irqs_unhandled = 0;
14251adb0850SThomas Gleixner 
14261adb0850SThomas Gleixner 	/*
14271adb0850SThomas Gleixner 	 * Check whether we disabled the irq via the spurious handler
14281adb0850SThomas Gleixner 	 * before. Reenable it and give it another chance.
14291adb0850SThomas Gleixner 	 */
14307acdd53eSThomas Gleixner 	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
14317acdd53eSThomas Gleixner 		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
143279ff1cdaSJiang Liu 		__enable_irq(desc);
14331adb0850SThomas Gleixner 	}
14341adb0850SThomas Gleixner 
1435239007b8SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
14363a90795eSThomas Gleixner 	chip_bus_sync_unlock(desc);
14379114014cSThomas Gleixner 	mutex_unlock(&desc->request_mutex);
14381da177e4SLinus Torvalds 
1439b2d3d61aSDaniel Lezcano 	irq_setup_timings(desc, new);
1440b2d3d61aSDaniel Lezcano 
144169ab8494SThomas Gleixner 	/*
144269ab8494SThomas Gleixner 	 * Strictly no need to wake it up, but hung_task complains
144369ab8494SThomas Gleixner 	 * when no hard interrupt wakes the thread up.
144469ab8494SThomas Gleixner 	 */
144569ab8494SThomas Gleixner 	if (new->thread)
144669ab8494SThomas Gleixner 		wake_up_process(new->thread);
14472a1d3ab8SThomas Gleixner 	if (new->secondary)
14482a1d3ab8SThomas Gleixner 		wake_up_process(new->secondary->thread);
144969ab8494SThomas Gleixner 
14502c6927a3SYinghai Lu 	register_irq_proc(irq, desc);
14511da177e4SLinus Torvalds 	new->dir = NULL;
14521da177e4SLinus Torvalds 	register_handler_proc(irq, new);
14531da177e4SLinus Torvalds 	return 0;
1454f5163427SDimitri Sivanich 
1455f5163427SDimitri Sivanich mismatch:
14563cca53b0SThomas Gleixner 	if (!(new->flags & IRQF_PROBE_SHARED)) {
145797fd75b7SAndrew Morton 		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1458f5d89470SThomas Gleixner 		       irq, new->flags, new->name, old->flags, old->name);
1459f5d89470SThomas Gleixner #ifdef CONFIG_DEBUG_SHIRQ
1460f5163427SDimitri Sivanich 		dump_stack();
14613f050447SAlan Cox #endif
1462f5d89470SThomas Gleixner 	}
14633aa551c9SThomas Gleixner 	ret = -EBUSY;
14643aa551c9SThomas Gleixner 
1465cba4235eSThomas Gleixner out_unlock:
14661c389795SDan Carpenter 	raw_spin_unlock_irqrestore(&desc->lock, flags);
14673b8249e7SThomas Gleixner 
146846e48e25SThomas Gleixner 	if (!desc->action)
146946e48e25SThomas Gleixner 		irq_release_resources(desc);
147019d39a38SThomas Gleixner out_bus_unlock:
147119d39a38SThomas Gleixner 	chip_bus_sync_unlock(desc);
14729114014cSThomas Gleixner 	mutex_unlock(&desc->request_mutex);
14739114014cSThomas Gleixner 
14743aa551c9SThomas Gleixner out_thread:
14753aa551c9SThomas Gleixner 	if (new->thread) {
14763aa551c9SThomas Gleixner 		struct task_struct *t = new->thread;
14773aa551c9SThomas Gleixner 
14783aa551c9SThomas Gleixner 		new->thread = NULL;
14793aa551c9SThomas Gleixner 		kthread_stop(t);
14803aa551c9SThomas Gleixner 		put_task_struct(t);
14813aa551c9SThomas Gleixner 	}
14822a1d3ab8SThomas Gleixner 	if (new->secondary && new->secondary->thread) {
14832a1d3ab8SThomas Gleixner 		struct task_struct *t = new->secondary->thread;
14842a1d3ab8SThomas Gleixner 
14852a1d3ab8SThomas Gleixner 		new->secondary->thread = NULL;
14862a1d3ab8SThomas Gleixner 		kthread_stop(t);
14872a1d3ab8SThomas Gleixner 		put_task_struct(t);
14882a1d3ab8SThomas Gleixner 	}
1489b6873807SSebastian Andrzej Siewior out_mput:
1490b6873807SSebastian Andrzej Siewior 	module_put(desc->owner);
14913aa551c9SThomas Gleixner 	return ret;
14921da177e4SLinus Torvalds }
14931da177e4SLinus Torvalds 
14941da177e4SLinus Torvalds /**
1495d3c60047SThomas Gleixner  *	setup_irq - setup an interrupt
1496d3c60047SThomas Gleixner  *	@irq: Interrupt line to setup
1497d3c60047SThomas Gleixner  *	@act: irqaction for the interrupt
1498d3c60047SThomas Gleixner  *
1499d3c60047SThomas Gleixner  * Used to statically setup interrupts in the early boot process.
1500d3c60047SThomas Gleixner  */
1501d3c60047SThomas Gleixner int setup_irq(unsigned int irq, struct irqaction *act)
1502d3c60047SThomas Gleixner {
1503986c011dSDavid Daney 	int retval;
1504d3c60047SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
1505d3c60047SThomas Gleixner 
15069b5d585dSJon Hunter 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
150731d9d9b6SMarc Zyngier 		return -EINVAL;
1508be45beb2SJon Hunter 
1509be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
1510be45beb2SJon Hunter 	if (retval < 0)
1511be45beb2SJon Hunter 		return retval;
1512be45beb2SJon Hunter 
1513986c011dSDavid Daney 	retval = __setup_irq(irq, desc, act);
1514986c011dSDavid Daney 
1515be45beb2SJon Hunter 	if (retval)
1516be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
1517be45beb2SJon Hunter 
1518986c011dSDavid Daney 	return retval;
1519d3c60047SThomas Gleixner }
1520eb53b4e8SMagnus Damm EXPORT_SYMBOL_GPL(setup_irq);
1521d3c60047SThomas Gleixner 
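/*
 * Illustrative sketch, hypothetical names: an architecture wiring up an
 * early boot timer with a statically allocated irqaction, the typical
 * setup_irq() use case before request_irq() is convenient.
 */
static irqreturn_t foo_timer_interrupt(int irq, void *dev_id)
{
	/* tick handling would go here */
	return IRQ_HANDLED;
}

static struct irqaction foo_timer_irqaction = {
	.handler	= foo_timer_interrupt,
	.flags		= IRQF_TIMER,
	.name		= "foo_timer",
};

static void __init foo_time_init(void)
{
	setup_irq(0, &foo_timer_irqaction);	/* irq 0: assumed timer line */
}
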
1522cbf94f06SMagnus Damm /*
1523cbf94f06SMagnus Damm  * Internal function to unregister an irqaction - used to free
1524cbf94f06SMagnus Damm  * regular and special interrupts that are part of the architecture.
15251da177e4SLinus Torvalds  */
1526cbf94f06SMagnus Damm static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
15271da177e4SLinus Torvalds {
1528d3c60047SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
1529f17c7545SIngo Molnar 	struct irqaction *action, **action_ptr;
15301da177e4SLinus Torvalds 	unsigned long flags;
15311da177e4SLinus Torvalds 
1532ae88a23bSIngo Molnar 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
15337d94f7caSYinghai Lu 
15347d94f7caSYinghai Lu 	if (!desc)
1535f21cfb25SMagnus Damm 		return NULL;
15361da177e4SLinus Torvalds 
15379114014cSThomas Gleixner 	mutex_lock(&desc->request_mutex);
1538abc7e40cSThomas Gleixner 	chip_bus_lock(desc);
1539239007b8SThomas Gleixner 	raw_spin_lock_irqsave(&desc->lock, flags);
1540ae88a23bSIngo Molnar 
1541ae88a23bSIngo Molnar 	/*
1542ae88a23bSIngo Molnar 	 * There can be multiple actions per IRQ descriptor, find the right
1543ae88a23bSIngo Molnar 	 * one based on the dev_id:
1544ae88a23bSIngo Molnar 	 */
1545f17c7545SIngo Molnar 	action_ptr = &desc->action;
15461da177e4SLinus Torvalds 	for (;;) {
1547f17c7545SIngo Molnar 		action = *action_ptr;
15481da177e4SLinus Torvalds 
1549ae88a23bSIngo Molnar 		if (!action) {
1550ae88a23bSIngo Molnar 			WARN(1, "Trying to free already-free IRQ %d\n", irq);
1551239007b8SThomas Gleixner 			raw_spin_unlock_irqrestore(&desc->lock, flags);
1552abc7e40cSThomas Gleixner 			chip_bus_sync_unlock(desc);
155319d39a38SThomas Gleixner 			mutex_unlock(&desc->request_mutex);
1554f21cfb25SMagnus Damm 			return NULL;
1555ae88a23bSIngo Molnar 		}
15561da177e4SLinus Torvalds 
15578316e381SIngo Molnar 		if (action->dev_id == dev_id)
1558ae88a23bSIngo Molnar 			break;
1559f17c7545SIngo Molnar 		action_ptr = &action->next;
1560ae88a23bSIngo Molnar 	}
1561ae88a23bSIngo Molnar 
1562ae88a23bSIngo Molnar 	/* Found it - now remove it from the list of entries: */
1563f17c7545SIngo Molnar 	*action_ptr = action->next;
1564dbce706eSPaolo 'Blaisorblade' Giarrusso 
1565cab303beSThomas Gleixner 	irq_pm_remove_action(desc, action);
1566cab303beSThomas Gleixner 
1567ae88a23bSIngo Molnar 	/* If this was the last handler, shut down the IRQ line: */
1568c1bacbaeSThomas Gleixner 	if (!desc->action) {
1569e9849777SThomas Gleixner 		irq_settings_clr_disable_unlazy(desc);
157046999238SThomas Gleixner 		irq_shutdown(desc);
1571c1bacbaeSThomas Gleixner 	}
15723aa551c9SThomas Gleixner 
1573e7a297b0SPeter P Waskiewicz Jr #ifdef CONFIG_SMP
1574e7a297b0SPeter P Waskiewicz Jr 	/* make sure affinity_hint is cleaned up */
1575e7a297b0SPeter P Waskiewicz Jr 	if (WARN_ON_ONCE(desc->affinity_hint))
1576e7a297b0SPeter P Waskiewicz Jr 		desc->affinity_hint = NULL;
1577e7a297b0SPeter P Waskiewicz Jr #endif
1578e7a297b0SPeter P Waskiewicz Jr 
1579239007b8SThomas Gleixner 	raw_spin_unlock_irqrestore(&desc->lock, flags);
158019d39a38SThomas Gleixner 	/*
158119d39a38SThomas Gleixner 	 * Drop bus_lock here so the changes which were done in the chip
158219d39a38SThomas Gleixner 	 * callbacks above are synced out to the irq chips which hang
158319d39a38SThomas Gleixner 	 * behind a slow bus (I2C, SPI) before calling synchronize_irq().
158419d39a38SThomas Gleixner 	 *
158519d39a38SThomas Gleixner 	 * Aside of that the bus_lock can also be taken from the threaded
158619d39a38SThomas Gleixner 	 * handler in irq_finalize_oneshot() which results in a deadlock
158719d39a38SThomas Gleixner 	 * because synchronize_irq() would wait forever for the thread to
158819d39a38SThomas Gleixner 	 * complete, which is blocked on the bus lock.
158919d39a38SThomas Gleixner 	 *
159019d39a38SThomas Gleixner 	 * The still held desc->request_mutex protects against a
159119d39a38SThomas Gleixner 	 * concurrent request_irq() of this irq so the release of resources
159219d39a38SThomas Gleixner 	 * and timing data is properly serialized.
159319d39a38SThomas Gleixner 	 */
1594abc7e40cSThomas Gleixner 	chip_bus_sync_unlock(desc);
1595ae88a23bSIngo Molnar 
15961da177e4SLinus Torvalds 	unregister_handler_proc(irq, action);
15971da177e4SLinus Torvalds 
1598ae88a23bSIngo Molnar 	/* Make sure it's not being used on another CPU: */
15991da177e4SLinus Torvalds 	synchronize_irq(irq);
1600ae88a23bSIngo Molnar 
16011d99493bSDavid Woodhouse #ifdef CONFIG_DEBUG_SHIRQ
16021d99493bSDavid Woodhouse 	/*
1603ae88a23bSIngo Molnar 	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1604ae88a23bSIngo Molnar 	 * event to happen even now that it's being freed, so let's make sure that
1605ae88a23bSIngo Molnar 	 * is so by doing an extra call to the handler ....
1606ae88a23bSIngo Molnar 	 *
1607ae88a23bSIngo Molnar 	 * ( We do this after actually deregistering it, to make sure that a
1608ae88a23bSIngo Molnar 	 *   'real' IRQ doesn't run in parallel with our fake. )
16091d99493bSDavid Woodhouse 	 */
16101d99493bSDavid Woodhouse 	if (action->flags & IRQF_SHARED) {
16111d99493bSDavid Woodhouse 		local_irq_save(flags);
16121d99493bSDavid Woodhouse 		action->handler(irq, dev_id);
16131d99493bSDavid Woodhouse 		local_irq_restore(flags);
16141d99493bSDavid Woodhouse 	}
16151d99493bSDavid Woodhouse #endif
16162d860ad7SLinus Torvalds 
16172d860ad7SLinus Torvalds 	if (action->thread) {
16182d860ad7SLinus Torvalds 		kthread_stop(action->thread);
16192d860ad7SLinus Torvalds 		put_task_struct(action->thread);
16202a1d3ab8SThomas Gleixner 		if (action->secondary && action->secondary->thread) {
16212a1d3ab8SThomas Gleixner 			kthread_stop(action->secondary->thread);
16222a1d3ab8SThomas Gleixner 			put_task_struct(action->secondary->thread);
16232a1d3ab8SThomas Gleixner 		}
16242d860ad7SLinus Torvalds 	}
16252d860ad7SLinus Torvalds 
162619d39a38SThomas Gleixner 	/* Last action releases resources */
16272343877fSThomas Gleixner 	if (!desc->action) {
162819d39a38SThomas Gleixner 		/*
162919d39a38SThomas Gleixner 		 * Reacquire bus lock as irq_release_resources() might
163019d39a38SThomas Gleixner 		 * require it to deallocate resources over the slow bus.
163119d39a38SThomas Gleixner 		 */
163219d39a38SThomas Gleixner 		chip_bus_lock(desc);
163346e48e25SThomas Gleixner 		irq_release_resources(desc);
163419d39a38SThomas Gleixner 		chip_bus_sync_unlock(desc);
16352343877fSThomas Gleixner 		irq_remove_timings(desc);
16362343877fSThomas Gleixner 	}
163746e48e25SThomas Gleixner 
16389114014cSThomas Gleixner 	mutex_unlock(&desc->request_mutex);
16399114014cSThomas Gleixner 
1640be45beb2SJon Hunter 	irq_chip_pm_put(&desc->irq_data);
1641b6873807SSebastian Andrzej Siewior 	module_put(desc->owner);
16422a1d3ab8SThomas Gleixner 	kfree(action->secondary);
1643f21cfb25SMagnus Damm 	return action;
1644f21cfb25SMagnus Damm }
16451da177e4SLinus Torvalds 
16461da177e4SLinus Torvalds /**
1647cbf94f06SMagnus Damm  *	remove_irq - free an interrupt
1648cbf94f06SMagnus Damm  *	@irq: Interrupt line to free
1649cbf94f06SMagnus Damm  *	@act: irqaction for the interrupt
1650cbf94f06SMagnus Damm  *
1651cbf94f06SMagnus Damm  * Used to remove interrupts statically setup by the early boot process.
1652cbf94f06SMagnus Damm  */
1653cbf94f06SMagnus Damm void remove_irq(unsigned int irq, struct irqaction *act)
1654cbf94f06SMagnus Damm {
165531d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
165631d9d9b6SMarc Zyngier 
165731d9d9b6SMarc Zyngier 	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1658cbf94f06SMagnus Damm 		__free_irq(irq, act->dev_id);
1659cbf94f06SMagnus Damm }
1660eb53b4e8SMagnus Damm EXPORT_SYMBOL_GPL(remove_irq);
1661cbf94f06SMagnus Damm 
1662cbf94f06SMagnus Damm /**
1663f21cfb25SMagnus Damm  *	free_irq - free an interrupt allocated with request_irq
16641da177e4SLinus Torvalds  *	@irq: Interrupt line to free
16651da177e4SLinus Torvalds  *	@dev_id: Device identity to free
16661da177e4SLinus Torvalds  *
16671da177e4SLinus Torvalds  *	Remove an interrupt handler. The handler is removed and if the
16681da177e4SLinus Torvalds  *	interrupt line is no longer in use by any driver it is disabled.
16691da177e4SLinus Torvalds  *	On a shared IRQ the caller must ensure the interrupt is disabled
16701da177e4SLinus Torvalds  *	on the card it drives before calling this function. The function
16711da177e4SLinus Torvalds  *	does not return until any executing interrupts for this IRQ
16721da177e4SLinus Torvalds  *	have completed.
16731da177e4SLinus Torvalds  *
16741da177e4SLinus Torvalds  *	This function must not be called from interrupt context.
167525ce4be7SChristoph Hellwig  *
167625ce4be7SChristoph Hellwig  *	Returns the devname argument passed to request_irq.
16771da177e4SLinus Torvalds  */
167825ce4be7SChristoph Hellwig const void *free_irq(unsigned int irq, void *dev_id)
16791da177e4SLinus Torvalds {
168070aedd24SThomas Gleixner 	struct irq_desc *desc = irq_to_desc(irq);
168125ce4be7SChristoph Hellwig 	struct irqaction *action;
168225ce4be7SChristoph Hellwig 	const char *devname;
168370aedd24SThomas Gleixner 
168431d9d9b6SMarc Zyngier 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
168525ce4be7SChristoph Hellwig 		return NULL;
168670aedd24SThomas Gleixner 
1687cd7eab44SBen Hutchings #ifdef CONFIG_SMP
1688cd7eab44SBen Hutchings 	if (WARN_ON(desc->affinity_notify))
1689cd7eab44SBen Hutchings 		desc->affinity_notify = NULL;
1690cd7eab44SBen Hutchings #endif
1691cd7eab44SBen Hutchings 
169225ce4be7SChristoph Hellwig 	action = __free_irq(irq, dev_id);
16932827a418SAlexandru Moise 
16942827a418SAlexandru Moise 	if (!action)
16952827a418SAlexandru Moise 		return NULL;
16962827a418SAlexandru Moise 
169725ce4be7SChristoph Hellwig 	devname = action->name;
169825ce4be7SChristoph Hellwig 	kfree(action);
169925ce4be7SChristoph Hellwig 	return devname;
17001da177e4SLinus Torvalds }
17011da177e4SLinus Torvalds EXPORT_SYMBOL(free_irq);
17021da177e4SLinus Torvalds 
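/*
 * Illustrative sketch, hypothetical names: tearing down a handler installed
 * with request_irq()/request_threaded_irq(). free_irq() hands back the
 * devname that was passed in when the interrupt was requested.
 */
static void foo_shutdown_irq(unsigned int irq, void *dev_id)
{
	const void *devname = free_irq(irq, dev_id);

	/* Must not be called from interrupt context, see below. */
	pr_debug("released IRQ handler %s\n", (const char *)devname);
}
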
17031da177e4SLinus Torvalds /**
17043aa551c9SThomas Gleixner  *	request_threaded_irq - allocate an interrupt line
17051da177e4SLinus Torvalds  *	@irq: Interrupt line to allocate
17063aa551c9SThomas Gleixner  *	@handler: Function to be called when the IRQ occurs.
17073aa551c9SThomas Gleixner  *		  Primary handler for threaded interrupts
1708b25c340cSThomas Gleixner  *		  If NULL and thread_fn != NULL the default
1709b25c340cSThomas Gleixner  *		  primary handler is installed
17103aa551c9SThomas Gleixner  *	@thread_fn: Function called from the irq handler thread
17113aa551c9SThomas Gleixner  *		    If NULL, no irq thread is created
17121da177e4SLinus Torvalds  *	@irqflags: Interrupt type flags
17131da177e4SLinus Torvalds  *	@devname: An ascii name for the claiming device
17141da177e4SLinus Torvalds  *	@dev_id: A cookie passed back to the handler function
17151da177e4SLinus Torvalds  *
17161da177e4SLinus Torvalds  *	This call allocates interrupt resources and enables the
17171da177e4SLinus Torvalds  *	interrupt line and IRQ handling. From the point this
17181da177e4SLinus Torvalds  *	call is made your handler function may be invoked. Since
17191da177e4SLinus Torvalds  *	your handler function must clear any interrupt the board
17201da177e4SLinus Torvalds  *	raises, you must take care both to initialise your hardware
17211da177e4SLinus Torvalds  *	and to set up the interrupt handler in the right order.
17221da177e4SLinus Torvalds  *
17233aa551c9SThomas Gleixner  *	If you want to set up a threaded irq handler for your device
17246d21af4fSJavi Merino  *	then you need to supply @handler and @thread_fn. @handler is
17253aa551c9SThomas Gleixner  *	still called in hard interrupt context and has to check
17263aa551c9SThomas Gleixner  *	whether the interrupt originates from the device. If yes it
17273aa551c9SThomas Gleixner  *	needs to disable the interrupt on the device and return
172839a2eddbSSteven Rostedt  *	IRQ_WAKE_THREAD which will wake up the handler thread and run
17293aa551c9SThomas Gleixner  *	@thread_fn. This split handler design is necessary to support
17303aa551c9SThomas Gleixner  *	shared interrupts.
17313aa551c9SThomas Gleixner  *
17321da177e4SLinus Torvalds  *	Dev_id must be globally unique. Normally the address of the
17331da177e4SLinus Torvalds  *	device data structure is used as the cookie. Since the handler
17341da177e4SLinus Torvalds  *	receives this value it makes sense to use it.
17351da177e4SLinus Torvalds  *
17361da177e4SLinus Torvalds  *	If your interrupt is shared you must pass a non NULL dev_id
17371da177e4SLinus Torvalds  *	as this is required when freeing the interrupt.
17381da177e4SLinus Torvalds  *
17391da177e4SLinus Torvalds  *	Flags:
17401da177e4SLinus Torvalds  *
17413cca53b0SThomas Gleixner  *	IRQF_SHARED		Interrupt is shared
17420c5d1eb7SDavid Brownell  *	IRQF_TRIGGER_*		Specify active edge(s) or level
17431da177e4SLinus Torvalds  *
17441da177e4SLinus Torvalds  */
17453aa551c9SThomas Gleixner int request_threaded_irq(unsigned int irq, irq_handler_t handler,
17463aa551c9SThomas Gleixner 			 irq_handler_t thread_fn, unsigned long irqflags,
17473aa551c9SThomas Gleixner 			 const char *devname, void *dev_id)
17481da177e4SLinus Torvalds {
17491da177e4SLinus Torvalds 	struct irqaction *action;
175008678b08SYinghai Lu 	struct irq_desc *desc;
1751d3c60047SThomas Gleixner 	int retval;
17521da177e4SLinus Torvalds 
1753e237a551SChen Fan 	if (irq == IRQ_NOTCONNECTED)
1754e237a551SChen Fan 		return -ENOTCONN;
1755e237a551SChen Fan 
1756470c6623SDavid Brownell 	/*
17571da177e4SLinus Torvalds 	 * Sanity-check: shared interrupts must pass in a real dev-ID,
17581da177e4SLinus Torvalds 	 * otherwise we'll have trouble later trying to figure out
17591da177e4SLinus Torvalds 	 * which interrupt is which (messes up the interrupt freeing
17601da177e4SLinus Torvalds 	 * logic etc).
176117f48034SRafael J. Wysocki 	 *
176217f48034SRafael J. Wysocki 	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
176317f48034SRafael J. Wysocki 	 * it cannot be set along with IRQF_NO_SUSPEND.
17641da177e4SLinus Torvalds 	 */
176517f48034SRafael J. Wysocki 	if (((irqflags & IRQF_SHARED) && !dev_id) ||
176617f48034SRafael J. Wysocki 	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
176717f48034SRafael J. Wysocki 	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
17681da177e4SLinus Torvalds 		return -EINVAL;
17697d94f7caSYinghai Lu 
1770cb5bc832SYinghai Lu 	desc = irq_to_desc(irq);
17717d94f7caSYinghai Lu 	if (!desc)
17721da177e4SLinus Torvalds 		return -EINVAL;
17737d94f7caSYinghai Lu 
177431d9d9b6SMarc Zyngier 	if (!irq_settings_can_request(desc) ||
177531d9d9b6SMarc Zyngier 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
17766550c775SThomas Gleixner 		return -EINVAL;
1777b25c340cSThomas Gleixner 
1778b25c340cSThomas Gleixner 	if (!handler) {
1779b25c340cSThomas Gleixner 		if (!thread_fn)
17801da177e4SLinus Torvalds 			return -EINVAL;
1781b25c340cSThomas Gleixner 		handler = irq_default_primary_handler;
1782b25c340cSThomas Gleixner 	}
17831da177e4SLinus Torvalds 
178445535732SThomas Gleixner 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
17851da177e4SLinus Torvalds 	if (!action)
17861da177e4SLinus Torvalds 		return -ENOMEM;
17871da177e4SLinus Torvalds 
17881da177e4SLinus Torvalds 	action->handler = handler;
17893aa551c9SThomas Gleixner 	action->thread_fn = thread_fn;
17901da177e4SLinus Torvalds 	action->flags = irqflags;
17911da177e4SLinus Torvalds 	action->name = devname;
17921da177e4SLinus Torvalds 	action->dev_id = dev_id;
17931da177e4SLinus Torvalds 
1794be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
17954396f46cSShawn Lin 	if (retval < 0) {
17964396f46cSShawn Lin 		kfree(action);
1797be45beb2SJon Hunter 		return retval;
17984396f46cSShawn Lin 	}
1799be45beb2SJon Hunter 
1800d3c60047SThomas Gleixner 	retval = __setup_irq(irq, desc, action);
180170aedd24SThomas Gleixner 
18022a1d3ab8SThomas Gleixner 	if (retval) {
1803be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
18042a1d3ab8SThomas Gleixner 		kfree(action->secondary);
1805377bf1e4SAnton Vorontsov 		kfree(action);
18062a1d3ab8SThomas Gleixner 	}
1807377bf1e4SAnton Vorontsov 
18086d83f94dSThomas Gleixner #ifdef CONFIG_DEBUG_SHIRQ_FIXME
18096ce51c43SLuis Henriques 	if (!retval && (irqflags & IRQF_SHARED)) {
1810a304e1b8SDavid Woodhouse 		/*
1811a304e1b8SDavid Woodhouse 		 * It's a shared IRQ -- the driver ought to be prepared for it
1812a304e1b8SDavid Woodhouse 		 * to happen immediately, so let's make sure....
1813377bf1e4SAnton Vorontsov 		 * We disable the irq to make sure that a 'real' IRQ doesn't
1814377bf1e4SAnton Vorontsov 		 * run in parallel with our fake.
1815a304e1b8SDavid Woodhouse 		 */
1816a304e1b8SDavid Woodhouse 		unsigned long flags;
1817a304e1b8SDavid Woodhouse 
1818377bf1e4SAnton Vorontsov 		disable_irq(irq);
1819a304e1b8SDavid Woodhouse 		local_irq_save(flags);
1820377bf1e4SAnton Vorontsov 
1821a304e1b8SDavid Woodhouse 		handler(irq, dev_id);
1822377bf1e4SAnton Vorontsov 
1823a304e1b8SDavid Woodhouse 		local_irq_restore(flags);
1824377bf1e4SAnton Vorontsov 		enable_irq(irq);
1825a304e1b8SDavid Woodhouse 	}
1826a304e1b8SDavid Woodhouse #endif
18271da177e4SLinus Torvalds 	return retval;
18281da177e4SLinus Torvalds }
18293aa551c9SThomas Gleixner EXPORT_SYMBOL(request_threaded_irq);
1830ae731f8dSMarc Zyngier 
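/*
 * Illustrative sketch, hypothetical names (bar_dev, bar_*): the split
 * primary/threaded handler pattern described above. The primary handler
 * runs in hard irq context and defers the slow work to the irq thread.
 */
struct bar_dev {
	unsigned int irq;
};

static irqreturn_t bar_hard_handler(int irq, void *dev_id)
{
	/* check/quiesce the device here, then defer to the thread */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t bar_thread_fn(int irq, void *dev_id)
{
	/* runs in the "irq/<nr>-bar" kthread; may sleep */
	return IRQ_HANDLED;
}

static int bar_setup_irq(struct bar_dev *bar)
{
	return request_threaded_irq(bar->irq, bar_hard_handler, bar_thread_fn,
				    0, "bar", bar);
}
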
1831ae731f8dSMarc Zyngier /**
1832ae731f8dSMarc Zyngier  *	request_any_context_irq - allocate an interrupt line
1833ae731f8dSMarc Zyngier  *	@irq: Interrupt line to allocate
1834ae731f8dSMarc Zyngier  *	@handler: Function to be called when the IRQ occurs.
1835ae731f8dSMarc Zyngier  *		  Threaded handler for threaded interrupts.
1836ae731f8dSMarc Zyngier  *	@flags: Interrupt type flags
1837ae731f8dSMarc Zyngier  *	@name: An ascii name for the claiming device
1838ae731f8dSMarc Zyngier  *	@dev_id: A cookie passed back to the handler function
1839ae731f8dSMarc Zyngier  *
1840ae731f8dSMarc Zyngier  *	This call allocates interrupt resources and enables the
1841ae731f8dSMarc Zyngier  *	interrupt line and IRQ handling. It selects either a
1842ae731f8dSMarc Zyngier  *	hardirq or threaded handling method depending on the
1843ae731f8dSMarc Zyngier  *	context.
1844ae731f8dSMarc Zyngier  *
1845ae731f8dSMarc Zyngier  *	On failure, it returns a negative value. On success,
1846ae731f8dSMarc Zyngier  *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1847ae731f8dSMarc Zyngier  */
1848ae731f8dSMarc Zyngier int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1849ae731f8dSMarc Zyngier 			    unsigned long flags, const char *name, void *dev_id)
1850ae731f8dSMarc Zyngier {
1851e237a551SChen Fan 	struct irq_desc *desc;
1852ae731f8dSMarc Zyngier 	int ret;
1853ae731f8dSMarc Zyngier 
1854e237a551SChen Fan 	if (irq == IRQ_NOTCONNECTED)
1855e237a551SChen Fan 		return -ENOTCONN;
1856e237a551SChen Fan 
1857e237a551SChen Fan 	desc = irq_to_desc(irq);
1858ae731f8dSMarc Zyngier 	if (!desc)
1859ae731f8dSMarc Zyngier 		return -EINVAL;
1860ae731f8dSMarc Zyngier 
18611ccb4e61SThomas Gleixner 	if (irq_settings_is_nested_thread(desc)) {
1862ae731f8dSMarc Zyngier 		ret = request_threaded_irq(irq, NULL, handler,
1863ae731f8dSMarc Zyngier 					   flags, name, dev_id);
1864ae731f8dSMarc Zyngier 		return !ret ? IRQC_IS_NESTED : ret;
1865ae731f8dSMarc Zyngier 	}
1866ae731f8dSMarc Zyngier 
1867ae731f8dSMarc Zyngier 	ret = request_irq(irq, handler, flags, name, dev_id);
1868ae731f8dSMarc Zyngier 	return !ret ? IRQC_IS_HARDIRQ : ret;
1869ae731f8dSMarc Zyngier }
1870ae731f8dSMarc Zyngier EXPORT_SYMBOL_GPL(request_any_context_irq);
187131d9d9b6SMarc Zyngier 
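/*
 * Illustrative sketch, hypothetical names: a driver that may sit behind a
 * nested irq chip (e.g. on I2C) and therefore cannot know whether it gets
 * hardirq or threaded handling; both return values indicate success.
 */
static irqreturn_t baz_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int baz_request(unsigned int irq, void *dev_id)
{
	int ret = request_any_context_irq(irq, baz_handler, 0, "baz", dev_id);

	if (ret < 0)
		return ret;
	/* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED */
	return 0;
}
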
18721e7c5fd2SMarc Zyngier void enable_percpu_irq(unsigned int irq, unsigned int type)
187331d9d9b6SMarc Zyngier {
187431d9d9b6SMarc Zyngier 	unsigned int cpu = smp_processor_id();
187531d9d9b6SMarc Zyngier 	unsigned long flags;
187631d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
187731d9d9b6SMarc Zyngier 
187831d9d9b6SMarc Zyngier 	if (!desc)
187931d9d9b6SMarc Zyngier 		return;
188031d9d9b6SMarc Zyngier 
1881f35ad083SMarc Zyngier 	/*
1882f35ad083SMarc Zyngier 	 * If the trigger type is not specified by the caller, then
1883f35ad083SMarc Zyngier 	 * use the default for this interrupt.
1884f35ad083SMarc Zyngier 	 */
18851e7c5fd2SMarc Zyngier 	type &= IRQ_TYPE_SENSE_MASK;
1886f35ad083SMarc Zyngier 	if (type == IRQ_TYPE_NONE)
1887f35ad083SMarc Zyngier 		type = irqd_get_trigger_type(&desc->irq_data);
1888f35ad083SMarc Zyngier 
18891e7c5fd2SMarc Zyngier 	if (type != IRQ_TYPE_NONE) {
18901e7c5fd2SMarc Zyngier 		int ret;
18911e7c5fd2SMarc Zyngier 
1892a1ff541aSJiang Liu 		ret = __irq_set_trigger(desc, type);
18931e7c5fd2SMarc Zyngier 
18941e7c5fd2SMarc Zyngier 		if (ret) {
189532cffddeSThomas Gleixner 			WARN(1, "failed to set type for IRQ%d\n", irq);
18961e7c5fd2SMarc Zyngier 			goto out;
18971e7c5fd2SMarc Zyngier 		}
18981e7c5fd2SMarc Zyngier 	}
18991e7c5fd2SMarc Zyngier 
190031d9d9b6SMarc Zyngier 	irq_percpu_enable(desc, cpu);
19011e7c5fd2SMarc Zyngier out:
190231d9d9b6SMarc Zyngier 	irq_put_desc_unlock(desc, flags);
190331d9d9b6SMarc Zyngier }
190436a5df85SChris Metcalf EXPORT_SYMBOL_GPL(enable_percpu_irq);
190531d9d9b6SMarc Zyngier 
1906f0cb3220SThomas Petazzoni /**
1907f0cb3220SThomas Petazzoni  * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
1908f0cb3220SThomas Petazzoni  * @irq:	Linux irq number to check for
1909f0cb3220SThomas Petazzoni  *
1910f0cb3220SThomas Petazzoni  * Must be called from a non-migratable context. Returns the enable
1911f0cb3220SThomas Petazzoni  * state of a per cpu interrupt on the current cpu.
1912f0cb3220SThomas Petazzoni  */
1913f0cb3220SThomas Petazzoni bool irq_percpu_is_enabled(unsigned int irq)
1914f0cb3220SThomas Petazzoni {
1915f0cb3220SThomas Petazzoni 	unsigned int cpu = smp_processor_id();
1916f0cb3220SThomas Petazzoni 	struct irq_desc *desc;
1917f0cb3220SThomas Petazzoni 	unsigned long flags;
1918f0cb3220SThomas Petazzoni 	bool is_enabled;
1919f0cb3220SThomas Petazzoni 
1920f0cb3220SThomas Petazzoni 	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1921f0cb3220SThomas Petazzoni 	if (!desc)
1922f0cb3220SThomas Petazzoni 		return false;
1923f0cb3220SThomas Petazzoni 
1924f0cb3220SThomas Petazzoni 	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
1925f0cb3220SThomas Petazzoni 	irq_put_desc_unlock(desc, flags);
1926f0cb3220SThomas Petazzoni 
1927f0cb3220SThomas Petazzoni 	return is_enabled;
1928f0cb3220SThomas Petazzoni }
1929f0cb3220SThomas Petazzoni EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
1930f0cb3220SThomas Petazzoni 
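/*
 * Illustrative sketch, hypothetical names: a CPU hot-unplug callback that
 * only disables the per-cpu line if the outgoing CPU had enabled it. It
 * runs on the CPU in question, satisfying the non-migratable requirement.
 */
static unsigned int qux_percpu_irq;	/* assumed to be set up elsewhere */

static int qux_cpu_dying(unsigned int cpu)
{
	if (irq_percpu_is_enabled(qux_percpu_irq))
		disable_percpu_irq(qux_percpu_irq);
	return 0;
}
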
193131d9d9b6SMarc Zyngier void disable_percpu_irq(unsigned int irq)
193231d9d9b6SMarc Zyngier {
193331d9d9b6SMarc Zyngier 	unsigned int cpu = smp_processor_id();
193431d9d9b6SMarc Zyngier 	unsigned long flags;
193531d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
193631d9d9b6SMarc Zyngier 
193731d9d9b6SMarc Zyngier 	if (!desc)
193831d9d9b6SMarc Zyngier 		return;
193931d9d9b6SMarc Zyngier 
194031d9d9b6SMarc Zyngier 	irq_percpu_disable(desc, cpu);
194131d9d9b6SMarc Zyngier 	irq_put_desc_unlock(desc, flags);
194231d9d9b6SMarc Zyngier }
194336a5df85SChris Metcalf EXPORT_SYMBOL_GPL(disable_percpu_irq);
194431d9d9b6SMarc Zyngier 
194531d9d9b6SMarc Zyngier /*
194631d9d9b6SMarc Zyngier  * Internal function to unregister a percpu irqaction.
194731d9d9b6SMarc Zyngier  */
194831d9d9b6SMarc Zyngier static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
194931d9d9b6SMarc Zyngier {
195031d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
195131d9d9b6SMarc Zyngier 	struct irqaction *action;
195231d9d9b6SMarc Zyngier 	unsigned long flags;
195331d9d9b6SMarc Zyngier 
195431d9d9b6SMarc Zyngier 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
195531d9d9b6SMarc Zyngier 
195631d9d9b6SMarc Zyngier 	if (!desc)
195731d9d9b6SMarc Zyngier 		return NULL;
195831d9d9b6SMarc Zyngier 
195931d9d9b6SMarc Zyngier 	raw_spin_lock_irqsave(&desc->lock, flags);
196031d9d9b6SMarc Zyngier 
196131d9d9b6SMarc Zyngier 	action = desc->action;
196231d9d9b6SMarc Zyngier 	if (!action || action->percpu_dev_id != dev_id) {
196331d9d9b6SMarc Zyngier 		WARN(1, "Trying to free already-free IRQ %d\n", irq);
196431d9d9b6SMarc Zyngier 		goto bad;
196531d9d9b6SMarc Zyngier 	}
196631d9d9b6SMarc Zyngier 
196731d9d9b6SMarc Zyngier 	if (!cpumask_empty(desc->percpu_enabled)) {
196831d9d9b6SMarc Zyngier 		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
196931d9d9b6SMarc Zyngier 		     irq, cpumask_first(desc->percpu_enabled));
197031d9d9b6SMarc Zyngier 		goto bad;
197131d9d9b6SMarc Zyngier 	}
197231d9d9b6SMarc Zyngier 
197331d9d9b6SMarc Zyngier 	/* Found it - now remove it from the list of entries: */
197431d9d9b6SMarc Zyngier 	desc->action = NULL;
197531d9d9b6SMarc Zyngier 
197631d9d9b6SMarc Zyngier 	raw_spin_unlock_irqrestore(&desc->lock, flags);
197731d9d9b6SMarc Zyngier 
197831d9d9b6SMarc Zyngier 	unregister_handler_proc(irq, action);
197931d9d9b6SMarc Zyngier 
1980be45beb2SJon Hunter 	irq_chip_pm_put(&desc->irq_data);
198131d9d9b6SMarc Zyngier 	module_put(desc->owner);
198231d9d9b6SMarc Zyngier 	return action;
198331d9d9b6SMarc Zyngier 
198431d9d9b6SMarc Zyngier bad:
198531d9d9b6SMarc Zyngier 	raw_spin_unlock_irqrestore(&desc->lock, flags);
198631d9d9b6SMarc Zyngier 	return NULL;
198731d9d9b6SMarc Zyngier }
198831d9d9b6SMarc Zyngier 
198931d9d9b6SMarc Zyngier /**
199031d9d9b6SMarc Zyngier  *	remove_percpu_irq - free a per-cpu interrupt
199131d9d9b6SMarc Zyngier  *	@irq: Interrupt line to free
199231d9d9b6SMarc Zyngier  *	@act: irqaction for the interrupt
199331d9d9b6SMarc Zyngier  *
199431d9d9b6SMarc Zyngier  * Used to remove interrupts statically setup by the early boot process.
199531d9d9b6SMarc Zyngier  */
199631d9d9b6SMarc Zyngier void remove_percpu_irq(unsigned int irq, struct irqaction *act)
199731d9d9b6SMarc Zyngier {
199831d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
199931d9d9b6SMarc Zyngier 
200031d9d9b6SMarc Zyngier 	if (desc && irq_settings_is_per_cpu_devid(desc))
200131d9d9b6SMarc Zyngier 		__free_percpu_irq(irq, act->percpu_dev_id);
200231d9d9b6SMarc Zyngier }
200331d9d9b6SMarc Zyngier 
200431d9d9b6SMarc Zyngier /**
200531d9d9b6SMarc Zyngier  *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
200631d9d9b6SMarc Zyngier  *	@irq: Interrupt line to free
200731d9d9b6SMarc Zyngier  *	@dev_id: Device identity to free
200831d9d9b6SMarc Zyngier  *
200931d9d9b6SMarc Zyngier  *	Remove a percpu interrupt handler. The handler is removed, but
201031d9d9b6SMarc Zyngier  *	the interrupt line is not disabled. This must be done on each
201131d9d9b6SMarc Zyngier  *	CPU before calling this function. The function does not return
201231d9d9b6SMarc Zyngier  *	until any executing interrupts for this IRQ have completed.
201331d9d9b6SMarc Zyngier  *
201431d9d9b6SMarc Zyngier  *	This function must not be called from interrupt context.
201531d9d9b6SMarc Zyngier  */
201631d9d9b6SMarc Zyngier void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
201731d9d9b6SMarc Zyngier {
201831d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
201931d9d9b6SMarc Zyngier 
202031d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
202131d9d9b6SMarc Zyngier 		return;
202231d9d9b6SMarc Zyngier 
202331d9d9b6SMarc Zyngier 	chip_bus_lock(desc);
202431d9d9b6SMarc Zyngier 	kfree(__free_percpu_irq(irq, dev_id));
202531d9d9b6SMarc Zyngier 	chip_bus_sync_unlock(desc);
202631d9d9b6SMarc Zyngier }
2027aec2e2adSMaxime Ripard EXPORT_SYMBOL_GPL(free_percpu_irq);
202831d9d9b6SMarc Zyngier 
202931d9d9b6SMarc Zyngier /**
203031d9d9b6SMarc Zyngier  *	setup_percpu_irq - setup a per-cpu interrupt
203131d9d9b6SMarc Zyngier  *	@irq: Interrupt line to setup
203231d9d9b6SMarc Zyngier  *	@act: irqaction for the interrupt
203331d9d9b6SMarc Zyngier  *
203431d9d9b6SMarc Zyngier  * Used to statically setup per-cpu interrupts in the early boot process.
203531d9d9b6SMarc Zyngier  */
203631d9d9b6SMarc Zyngier int setup_percpu_irq(unsigned int irq, struct irqaction *act)
203731d9d9b6SMarc Zyngier {
203831d9d9b6SMarc Zyngier 	struct irq_desc *desc = irq_to_desc(irq);
203931d9d9b6SMarc Zyngier 	int retval;
204031d9d9b6SMarc Zyngier 
204131d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
204231d9d9b6SMarc Zyngier 		return -EINVAL;
2043be45beb2SJon Hunter 
2044be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
2045be45beb2SJon Hunter 	if (retval < 0)
2046be45beb2SJon Hunter 		return retval;
2047be45beb2SJon Hunter 
204831d9d9b6SMarc Zyngier 	retval = __setup_irq(irq, desc, act);
204931d9d9b6SMarc Zyngier 
2050be45beb2SJon Hunter 	if (retval)
2051be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
2052be45beb2SJon Hunter 
205331d9d9b6SMarc Zyngier 	return retval;
205431d9d9b6SMarc Zyngier }
205531d9d9b6SMarc Zyngier 
205631d9d9b6SMarc Zyngier /**
2057c80081b9SDaniel Lezcano  *	__request_percpu_irq - allocate a percpu interrupt line
205831d9d9b6SMarc Zyngier  *	@irq: Interrupt line to allocate
205931d9d9b6SMarc Zyngier  *	@handler: Function to be called when the IRQ occurs.
2060c80081b9SDaniel Lezcano  *	@flags: Interrupt type flags (IRQF_TIMER only)
206131d9d9b6SMarc Zyngier  *	@devname: An ascii name for the claiming device
206231d9d9b6SMarc Zyngier  *	@dev_id: A percpu cookie passed back to the handler function
206331d9d9b6SMarc Zyngier  *
2064a1b7febdSMaxime Ripard  *	This call allocates interrupt resources and enables the
2065a1b7febdSMaxime Ripard  *	interrupt on the local CPU. If the interrupt is supposed to be
2066a1b7febdSMaxime Ripard  *	enabled on other CPUs, it has to be done on each CPU using
2067a1b7febdSMaxime Ripard  *	enable_percpu_irq().
206831d9d9b6SMarc Zyngier  *
206931d9d9b6SMarc Zyngier  *	Dev_id must be globally unique. It is a per-cpu variable, and
207031d9d9b6SMarc Zyngier  *	the handler gets called with the interrupted CPU's instance of
207131d9d9b6SMarc Zyngier  *	that variable.
207231d9d9b6SMarc Zyngier  */
2073c80081b9SDaniel Lezcano int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2074c80081b9SDaniel Lezcano 			 unsigned long flags, const char *devname,
2075c80081b9SDaniel Lezcano 			 void __percpu *dev_id)
207631d9d9b6SMarc Zyngier {
207731d9d9b6SMarc Zyngier 	struct irqaction *action;
207831d9d9b6SMarc Zyngier 	struct irq_desc *desc;
207931d9d9b6SMarc Zyngier 	int retval;
208031d9d9b6SMarc Zyngier 
208131d9d9b6SMarc Zyngier 	if (!dev_id)
208231d9d9b6SMarc Zyngier 		return -EINVAL;
208331d9d9b6SMarc Zyngier 
208431d9d9b6SMarc Zyngier 	desc = irq_to_desc(irq);
208531d9d9b6SMarc Zyngier 	if (!desc || !irq_settings_can_request(desc) ||
208631d9d9b6SMarc Zyngier 	    !irq_settings_is_per_cpu_devid(desc))
208731d9d9b6SMarc Zyngier 		return -EINVAL;
208831d9d9b6SMarc Zyngier 
2089c80081b9SDaniel Lezcano 	if (flags && flags != IRQF_TIMER)
2090c80081b9SDaniel Lezcano 		return -EINVAL;
2091c80081b9SDaniel Lezcano 
209231d9d9b6SMarc Zyngier 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
209331d9d9b6SMarc Zyngier 	if (!action)
209431d9d9b6SMarc Zyngier 		return -ENOMEM;
209531d9d9b6SMarc Zyngier 
209631d9d9b6SMarc Zyngier 	action->handler = handler;
2097c80081b9SDaniel Lezcano 	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
209831d9d9b6SMarc Zyngier 	action->name = devname;
209931d9d9b6SMarc Zyngier 	action->percpu_dev_id = dev_id;
210031d9d9b6SMarc Zyngier 
2101be45beb2SJon Hunter 	retval = irq_chip_pm_get(&desc->irq_data);
21024396f46cSShawn Lin 	if (retval < 0) {
21034396f46cSShawn Lin 		kfree(action);
2104be45beb2SJon Hunter 		return retval;
21054396f46cSShawn Lin 	}
2106be45beb2SJon Hunter 
210731d9d9b6SMarc Zyngier 	retval = __setup_irq(irq, desc, action);
210831d9d9b6SMarc Zyngier 
2109be45beb2SJon Hunter 	if (retval) {
2110be45beb2SJon Hunter 		irq_chip_pm_put(&desc->irq_data);
211131d9d9b6SMarc Zyngier 		kfree(action);
2112be45beb2SJon Hunter 	}
211331d9d9b6SMarc Zyngier 
211431d9d9b6SMarc Zyngier 	return retval;
211531d9d9b6SMarc Zyngier }
2116c80081b9SDaniel Lezcano EXPORT_SYMBOL_GPL(__request_percpu_irq);
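/*
 * Editorial sketch, not part of this file: a hypothetical driver using
 * the request_percpu_irq() wrapper, which calls __request_percpu_irq()
 * with flags == 0.  my_pmu_state, my_pmu_handler and my_pmu_probe are
 * illustrative names only.
 *
 *	struct my_pmu_state {
 *		u64 events;
 *	};
 *
 *	static DEFINE_PER_CPU(struct my_pmu_state, my_pmu_state);
 *
 *	static irqreturn_t my_pmu_handler(int irq, void *dev_id)
 *	{
 *		// dev_id is the interrupted CPU's instance of my_pmu_state
 *		struct my_pmu_state *st = dev_id;
 *
 *		st->events++;
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int my_pmu_probe(unsigned int irq)
 *	{
 *		int err;
 *
 *		err = request_percpu_irq(irq, my_pmu_handler, "my_pmu",
 *					 &my_pmu_state);
 *		if (err)
 *			return err;
 *
 *		// Every CPU that should receive the interrupt must call
 *		// enable_percpu_irq() on itself, typically from a CPU
 *		// hotplug "online" callback.
 *		enable_percpu_irq(irq, IRQ_TYPE_NONE);
 *		return 0;
 *	}
 */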
21171b7047edSMarc Zyngier 
21181b7047edSMarc Zyngier /**
21191b7047edSMarc Zyngier  *	irq_get_irqchip_state - returns the irqchip state of an interrupt.
21201b7047edSMarc Zyngier  *	@irq: Interrupt line that is forwarded to a VM
21211b7047edSMarc Zyngier  *	@which: One of IRQCHIP_STATE_* the caller wants to know about
21221b7047edSMarc Zyngier  *	@state: a pointer to a boolean where the state is to be stored
21231b7047edSMarc Zyngier  *
21241b7047edSMarc Zyngier  *	This call snapshots the internal irqchip state of an
21251b7047edSMarc Zyngier  *	interrupt, returning into @state the bit corresponding to
21261b7047edSMarc Zyngier  *	state @which.
21271b7047edSMarc Zyngier  *
21281b7047edSMarc Zyngier  *	This function should be called with preemption disabled if the
21291b7047edSMarc Zyngier  *	interrupt controller has per-cpu registers.
21301b7047edSMarc Zyngier  */
21311b7047edSMarc Zyngier int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
21321b7047edSMarc Zyngier 			  bool *state)
21331b7047edSMarc Zyngier {
21341b7047edSMarc Zyngier 	struct irq_desc *desc;
21351b7047edSMarc Zyngier 	struct irq_data *data;
21361b7047edSMarc Zyngier 	struct irq_chip *chip;
21371b7047edSMarc Zyngier 	unsigned long flags;
21381b7047edSMarc Zyngier 	int err = -EINVAL;
21391b7047edSMarc Zyngier 
21401b7047edSMarc Zyngier 	desc = irq_get_desc_buslock(irq, &flags, 0);
21411b7047edSMarc Zyngier 	if (!desc)
21421b7047edSMarc Zyngier 		return err;
21431b7047edSMarc Zyngier 
21441b7047edSMarc Zyngier 	data = irq_desc_get_irq_data(desc);
21451b7047edSMarc Zyngier 
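	/*
	 * Walk the irq_data hierarchy (parent_data only exists when
	 * CONFIG_IRQ_DOMAIN_HIERARCHY is enabled) until a chip that
	 * implements irq_get_irqchip_state is found; if none is found,
	 * data ends up NULL and -EINVAL is returned.
	 */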
21461b7047edSMarc Zyngier 	do {
21471b7047edSMarc Zyngier 		chip = irq_data_get_irq_chip(data);
21481b7047edSMarc Zyngier 		if (chip->irq_get_irqchip_state)
21491b7047edSMarc Zyngier 			break;
21501b7047edSMarc Zyngier #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
21511b7047edSMarc Zyngier 		data = data->parent_data;
21521b7047edSMarc Zyngier #else
21531b7047edSMarc Zyngier 		data = NULL;
21541b7047edSMarc Zyngier #endif
21551b7047edSMarc Zyngier 	} while (data);
21561b7047edSMarc Zyngier 
21571b7047edSMarc Zyngier 	if (data)
21581b7047edSMarc Zyngier 		err = chip->irq_get_irqchip_state(data, which, state);
21591b7047edSMarc Zyngier 
21601b7047edSMarc Zyngier 	irq_put_desc_busunlock(desc, flags);
21611b7047edSMarc Zyngier 	return err;
21621b7047edSMarc Zyngier }
21631ee4fb3eSBjorn Andersson EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
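/*
 * Editorial sketch, not part of this file: snapshotting the pending bit
 * of a line that has been forwarded to a guest.  The variable names are
 * illustrative.
 *
 *	bool pending = false;
 *	int err;
 *
 *	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	if (!err && pending)
 *		pr_debug("irq %u is still pending at the irqchip\n", irq);
 */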
21641b7047edSMarc Zyngier 
21651b7047edSMarc Zyngier /**
21661b7047edSMarc Zyngier  *	irq_set_irqchip_state - set the state of a forwarded interrupt.
21671b7047edSMarc Zyngier  *	@irq: Interrupt line that is forwarded to a VM
21681b7047edSMarc Zyngier  *	@which: State to be restored (one of IRQCHIP_STATE_*)
21691b7047edSMarc Zyngier  *	@val: Value corresponding to @which
21701b7047edSMarc Zyngier  *
21711b7047edSMarc Zyngier  *	This call sets the internal irqchip state of an interrupt,
21721b7047edSMarc Zyngier  *	depending on the value of @which.
21731b7047edSMarc Zyngier  *
21741b7047edSMarc Zyngier  *	This function should be called with preemption disabled if the
21751b7047edSMarc Zyngier  *	interrupt controller has per-cpu registers.
21761b7047edSMarc Zyngier  */
21771b7047edSMarc Zyngier int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
21781b7047edSMarc Zyngier 			  bool val)
21791b7047edSMarc Zyngier {
21801b7047edSMarc Zyngier 	struct irq_desc *desc;
21811b7047edSMarc Zyngier 	struct irq_data *data;
21821b7047edSMarc Zyngier 	struct irq_chip *chip;
21831b7047edSMarc Zyngier 	unsigned long flags;
21841b7047edSMarc Zyngier 	int err = -EINVAL;
21851b7047edSMarc Zyngier 
21861b7047edSMarc Zyngier 	desc = irq_get_desc_buslock(irq, &flags, 0);
21871b7047edSMarc Zyngier 	if (!desc)
21881b7047edSMarc Zyngier 		return err;
21891b7047edSMarc Zyngier 
21901b7047edSMarc Zyngier 	data = irq_desc_get_irq_data(desc);
21911b7047edSMarc Zyngier 
21921b7047edSMarc Zyngier 	do {
21931b7047edSMarc Zyngier 		chip = irq_data_get_irq_chip(data);
21941b7047edSMarc Zyngier 		if (chip->irq_set_irqchip_state)
21951b7047edSMarc Zyngier 			break;
21961b7047edSMarc Zyngier #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
21971b7047edSMarc Zyngier 		data = data->parent_data;
21981b7047edSMarc Zyngier #else
21991b7047edSMarc Zyngier 		data = NULL;
22001b7047edSMarc Zyngier #endif
22011b7047edSMarc Zyngier 	} while (data);
22021b7047edSMarc Zyngier 
22031b7047edSMarc Zyngier 	if (data)
22041b7047edSMarc Zyngier 		err = chip->irq_set_irqchip_state(data, which, val);
22051b7047edSMarc Zyngier 
22061b7047edSMarc Zyngier 	irq_put_desc_busunlock(desc, flags);
22071b7047edSMarc Zyngier 	return err;
22081b7047edSMarc Zyngier }
22091ee4fb3eSBjorn Andersson EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
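/*
 * Editorial sketch, not part of this file: restoring a previously saved
 * pending bit when a forwarded interrupt is handed back to the VM.  The
 * saved_pending variable is illustrative.
 *
 *	int err;
 *
 *	err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING,
 *				    saved_pending);
 *	if (err)
 *		pr_warn("failed to restore pending state of irq %u\n", irq);
 */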
2210