xref: /openbmc/linux/kernel/irq/manage.c (revision 0e17c50f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4  * Copyright (C) 2005-2006 Thomas Gleixner
5  *
6  * This file contains driver APIs to the irq subsystem.
7  */
8 
9 #define pr_fmt(fmt) "genirq: " fmt
10 
11 #include <linux/irq.h>
12 #include <linux/kthread.h>
13 #include <linux/module.h>
14 #include <linux/random.h>
15 #include <linux/interrupt.h>
16 #include <linux/irqdomain.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/sched/rt.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/isolation.h>
22 #include <uapi/linux/sched/types.h>
23 #include <linux/task_work.h>
24 
25 #include "internals.h"
26 
27 #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
28 __read_mostly bool force_irqthreads;
29 EXPORT_SYMBOL_GPL(force_irqthreads);
30 
31 static int __init setup_forced_irqthreads(char *arg)
32 {
33 	force_irqthreads = true;
34 	return 0;
35 }
36 early_param("threadirqs", setup_forced_irqthreads);
37 #endif
38 
39 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
40 {
41 	struct irq_data *irqd = irq_desc_get_irq_data(desc);
42 	bool inprogress;
43 
44 	do {
45 		unsigned long flags;
46 
47 		/*
48 		 * Wait until we're out of the critical section.  This might
49 		 * give the wrong answer due to the lack of memory barriers.
50 		 */
51 		while (irqd_irq_inprogress(&desc->irq_data))
52 			cpu_relax();
53 
54 		/* Ok, that indicated we're done: double-check carefully. */
55 		raw_spin_lock_irqsave(&desc->lock, flags);
56 		inprogress = irqd_irq_inprogress(&desc->irq_data);
57 
58 		/*
59 		 * If requested and supported, check at the chip whether it
60 		 * is in flight at the hardware level, i.e. already pending
61 		 * in a CPU and waiting for service and acknowledge.
62 		 */
63 		if (!inprogress && sync_chip) {
64 			/*
65 			 * Ignore the return code. inprogress is only updated
66 			 * when the chip supports it.
67 			 */
68 			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
69 						&inprogress);
70 		}
71 		raw_spin_unlock_irqrestore(&desc->lock, flags);
72 
73 		/* Oops, that failed? */
74 	} while (inprogress);
75 }
76 
77 /**
78  *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
79  *	@irq: interrupt number to wait for
80  *
81  *	This function waits for any pending hard IRQ handlers for this
82  *	interrupt to complete before returning. If you use this
83  *	function while holding a resource the IRQ handler may need you
84  *	will deadlock. It does not take associated threaded handlers
85  *	into account.
86  *
87  *	Do not use this for shutdown scenarios where you must be sure
88  *	that all parts (hardirq and threaded handler) have completed.
89  *
90  *	Returns: false if a threaded handler is active.
91  *
92  *	This function may be called - with care - from IRQ context.
93  *
94  *	It does not check whether there is an interrupt in flight at the
95  *	hardware level, but not serviced yet, as this might deadlock when
96  *	called with interrupts disabled and the target CPU of the interrupt
97  *	is the current CPU.
98  */
99 bool synchronize_hardirq(unsigned int irq)
100 {
101 	struct irq_desc *desc = irq_to_desc(irq);
102 
103 	if (desc) {
104 		__synchronize_hardirq(desc, false);
105 		return !atomic_read(&desc->threads_active);
106 	}
107 
108 	return true;
109 }
110 EXPORT_SYMBOL(synchronize_hardirq);
111 
112 /**
113  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
114  *	@irq: interrupt number to wait for
115  *
116  *	This function waits for any pending IRQ handlers for this interrupt
117  *	to complete before returning. If you use this function while
118  *	holding a resource the IRQ handler may need, you will deadlock.
119  *
120  *	Can only be called from preemptible code as it might sleep when
121  *	an interrupt thread is associated to @irq.
122  *
123  *	It optionally makes sure (when the irq chip supports that method)
124  *	that the interrupt is not pending in any CPU and waiting for
125  *	service.
126  */
127 void synchronize_irq(unsigned int irq)
128 {
129 	struct irq_desc *desc = irq_to_desc(irq);
130 
131 	if (desc) {
132 		__synchronize_hardirq(desc, true);
133 		/*
134 		 * We made sure that no hardirq handler is
135 		 * running. Now verify that no threaded handlers are
136 		 * active.
137 		 */
138 		wait_event(desc->wait_for_threads,
139 			   !atomic_read(&desc->threads_active));
140 	}
141 }
142 EXPORT_SYMBOL(synchronize_irq);
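
/*
 * Hedged usage sketch (not part of this file): a driver tear-down path
 * typically quiesces the device first and then waits for any handler
 * still running on another CPU. The device structure and helper names
 * below are hypothetical.
 *
 *	static void foo_shutdown(struct foo_dev *foo)
 *	{
 *		foo_mask_device_interrupts(foo);   // stop new interrupts at the source
 *		synchronize_irq(foo->irq);         // wait for hardirq and threaded handlers
 *		// now it is safe to tear down resources the handlers used
 *	}
 *
 * Must run in preemptible context, as synchronize_irq() may sleep when a
 * threaded handler is attached to foo->irq.
 */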
143 
144 #ifdef CONFIG_SMP
145 cpumask_var_t irq_default_affinity;
146 
147 static bool __irq_can_set_affinity(struct irq_desc *desc)
148 {
149 	if (!desc || !irqd_can_balance(&desc->irq_data) ||
150 	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
151 		return false;
152 	return true;
153 }
154 
155 /**
156  *	irq_can_set_affinity - Check if the affinity of a given irq can be set
157  *	@irq:		Interrupt to check
158  *
159  */
160 int irq_can_set_affinity(unsigned int irq)
161 {
162 	return __irq_can_set_affinity(irq_to_desc(irq));
163 }
164 
165 /**
166  * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
167  * @irq:	Interrupt to check
168  *
169  * Like irq_can_set_affinity() above, but additionally checks for the
170  * AFFINITY_MANAGED flag.
171  */
172 bool irq_can_set_affinity_usr(unsigned int irq)
173 {
174 	struct irq_desc *desc = irq_to_desc(irq);
175 
176 	return __irq_can_set_affinity(desc) &&
177 		!irqd_affinity_is_managed(&desc->irq_data);
178 }
179 
180 /**
181  *	irq_set_thread_affinity - Notify irq threads to adjust affinity
182  *	@desc:		irq descriptor whose affinity changed
183  *
184  *	We just set IRQTF_AFFINITY and delegate the affinity setting
185  *	to the interrupt thread itself. We can not call
186  *	set_cpus_allowed_ptr() here as we hold desc->lock and this
187  *	code can be called from hard interrupt context.
188  */
189 void irq_set_thread_affinity(struct irq_desc *desc)
190 {
191 	struct irqaction *action;
192 
193 	for_each_action_of_desc(desc, action)
194 		if (action->thread)
195 			set_bit(IRQTF_AFFINITY, &action->thread_flags);
196 }
197 
198 static void irq_validate_effective_affinity(struct irq_data *data)
199 {
200 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
201 	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
202 	struct irq_chip *chip = irq_data_get_irq_chip(data);
203 
204 	if (!cpumask_empty(m))
205 		return;
206 	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
207 		     chip->name, data->irq);
208 #endif
209 }
210 
211 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
212 			bool force)
213 {
214 	struct irq_desc *desc = irq_data_to_desc(data);
215 	struct irq_chip *chip = irq_data_get_irq_chip(data);
216 	int ret;
217 
218 	if (!chip || !chip->irq_set_affinity)
219 		return -EINVAL;
220 
221 	/*
222 	 * If this is a managed interrupt and housekeeping is enabled on
223 	 * it, check whether the requested affinity mask intersects with
224 	 * a housekeeping CPU. If so, then remove the isolated CPUs from
225 	 * the mask and just keep the housekeeping CPU(s). This prevents
226 	 * the affinity setter from routing the interrupt to an isolated
227 	 * CPU, so that I/O submitted from a housekeeping CPU does not cause
228 	 * interrupts on an isolated one.
229 	 *
230 	 * If the masks do not intersect or include online CPU(s) then
231 	 * keep the requested mask. The isolated target CPUs are only
232 	 * receiving interrupts when the I/O operation was submitted
233 	 * directly from them.
234 	 *
235 	 * If all housekeeping CPUs in the affinity mask are offline, the
236 	 * interrupt will be migrated by the CPU hotplug code once a
237 	 * housekeeping CPU which belongs to the affinity mask comes
238 	 * online.
239 	 */
240 	if (irqd_affinity_is_managed(data) &&
241 	    housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
242 		const struct cpumask *hk_mask, *prog_mask;
243 
244 		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
245 		static struct cpumask tmp_mask;
246 
247 		hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
248 
249 		raw_spin_lock(&tmp_mask_lock);
250 		cpumask_and(&tmp_mask, mask, hk_mask);
251 		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
252 			prog_mask = mask;
253 		else
254 			prog_mask = &tmp_mask;
255 		ret = chip->irq_set_affinity(data, prog_mask, force);
256 		raw_spin_unlock(&tmp_mask_lock);
257 	} else {
258 		ret = chip->irq_set_affinity(data, mask, force);
259 	}
260 	switch (ret) {
261 	case IRQ_SET_MASK_OK:
262 	case IRQ_SET_MASK_OK_DONE:
263 		cpumask_copy(desc->irq_common_data.affinity, mask);
264 		/* fall through */
265 	case IRQ_SET_MASK_OK_NOCOPY:
266 		irq_validate_effective_affinity(data);
267 		irq_set_thread_affinity(desc);
268 		ret = 0;
269 	}
270 
271 	return ret;
272 }
273 
274 #ifdef CONFIG_GENERIC_PENDING_IRQ
275 static inline int irq_set_affinity_pending(struct irq_data *data,
276 					   const struct cpumask *dest)
277 {
278 	struct irq_desc *desc = irq_data_to_desc(data);
279 
280 	irqd_set_move_pending(data);
281 	irq_copy_pending(desc, dest);
282 	return 0;
283 }
284 #else
285 static inline int irq_set_affinity_pending(struct irq_data *data,
286 					   const struct cpumask *dest)
287 {
288 	return -EBUSY;
289 }
290 #endif
291 
292 static int irq_try_set_affinity(struct irq_data *data,
293 				const struct cpumask *dest, bool force)
294 {
295 	int ret = irq_do_set_affinity(data, dest, force);
296 
297 	/*
298 	 * In case that the underlying vector management is busy and the
299 	 * architecture supports the generic pending mechanism then utilize
300 	 * this to avoid returning an error to user space.
301 	 */
302 	if (ret == -EBUSY && !force)
303 		ret = irq_set_affinity_pending(data, dest);
304 	return ret;
305 }
306 
307 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
308 			    bool force)
309 {
310 	struct irq_chip *chip = irq_data_get_irq_chip(data);
311 	struct irq_desc *desc = irq_data_to_desc(data);
312 	int ret = 0;
313 
314 	if (!chip || !chip->irq_set_affinity)
315 		return -EINVAL;
316 
317 	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
318 		ret = irq_try_set_affinity(data, mask, force);
319 	} else {
320 		irqd_set_move_pending(data);
321 		irq_copy_pending(desc, mask);
322 	}
323 
324 	if (desc->affinity_notify) {
325 		kref_get(&desc->affinity_notify->kref);
326 		schedule_work(&desc->affinity_notify->work);
327 	}
328 	irqd_set(data, IRQD_AFFINITY_SET);
329 
330 	return ret;
331 }
332 
333 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
334 {
335 	struct irq_desc *desc = irq_to_desc(irq);
336 	unsigned long flags;
337 	int ret;
338 
339 	if (!desc)
340 		return -EINVAL;
341 
342 	raw_spin_lock_irqsave(&desc->lock, flags);
343 	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
344 	raw_spin_unlock_irqrestore(&desc->lock, flags);
345 	return ret;
346 }
347 
348 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
349 {
350 	unsigned long flags;
351 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
352 
353 	if (!desc)
354 		return -EINVAL;
355 	desc->affinity_hint = m;
356 	irq_put_desc_unlock(desc, flags);
357 	/* set the initial affinity to prevent every interrupt being on CPU0 */
358 	if (m)
359 		__irq_set_affinity(irq, m, false);
360 	return 0;
361 }
362 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
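
/*
 * Hedged usage sketch (not part of this file): a multi-queue driver can
 * spread its vectors by hinting one CPU per queue; irqbalance reads the
 * hint via /proc/irq/<n>/affinity_hint. Names below are hypothetical.
 *
 *	for (i = 0; i < nr_queues; i++)
 *		irq_set_affinity_hint(queue_irq[i], cpumask_of(i % num_online_cpus()));
 *
 *	// and on teardown, before free_irq():
 *	for (i = 0; i < nr_queues; i++)
 *		irq_set_affinity_hint(queue_irq[i], NULL);
 *
 * Clearing the hint before free_irq() avoids tripping the affinity_hint
 * warning in __free_irq() below.
 */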
363 
364 static void irq_affinity_notify(struct work_struct *work)
365 {
366 	struct irq_affinity_notify *notify =
367 		container_of(work, struct irq_affinity_notify, work);
368 	struct irq_desc *desc = irq_to_desc(notify->irq);
369 	cpumask_var_t cpumask;
370 	unsigned long flags;
371 
372 	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
373 		goto out;
374 
375 	raw_spin_lock_irqsave(&desc->lock, flags);
376 	if (irq_move_pending(&desc->irq_data))
377 		irq_get_pending(cpumask, desc);
378 	else
379 		cpumask_copy(cpumask, desc->irq_common_data.affinity);
380 	raw_spin_unlock_irqrestore(&desc->lock, flags);
381 
382 	notify->notify(notify, cpumask);
383 
384 	free_cpumask_var(cpumask);
385 out:
386 	kref_put(&notify->kref, notify->release);
387 }
388 
389 /**
390  *	irq_set_affinity_notifier - control notification of IRQ affinity changes
391  *	@irq:		Interrupt for which to enable/disable notification
392  *	@notify:	Context for notification, or %NULL to disable
393  *			notification.  Function pointers must be initialised;
394  *			the other fields will be initialised by this function.
395  *
396  *	Must be called in process context.  Notification may only be enabled
397  *	after the IRQ is allocated and must be disabled before the IRQ is
398  *	freed using free_irq().
399  */
400 int
401 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
402 {
403 	struct irq_desc *desc = irq_to_desc(irq);
404 	struct irq_affinity_notify *old_notify;
405 	unsigned long flags;
406 
407 	/* The release function is promised process context */
408 	might_sleep();
409 
410 	if (!desc || desc->istate & IRQS_NMI)
411 		return -EINVAL;
412 
413 	/* Complete initialisation of *notify */
414 	if (notify) {
415 		notify->irq = irq;
416 		kref_init(&notify->kref);
417 		INIT_WORK(&notify->work, irq_affinity_notify);
418 	}
419 
420 	raw_spin_lock_irqsave(&desc->lock, flags);
421 	old_notify = desc->affinity_notify;
422 	desc->affinity_notify = notify;
423 	raw_spin_unlock_irqrestore(&desc->lock, flags);
424 
425 	if (old_notify) {
426 		cancel_work_sync(&old_notify->work);
427 		kref_put(&old_notify->kref, old_notify->release);
428 	}
429 
430 	return 0;
431 }
432 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
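
/*
 * Hedged usage sketch (not part of this file): a driver that wants to track
 * affinity changes embeds a struct irq_affinity_notify and supplies only the
 * notify/release callbacks; the remaining fields are filled in by
 * irq_set_affinity_notifier(). Device and function names are hypothetical.
 *
 *	static void foo_affinity_notify(struct irq_affinity_notify *notify,
 *					const cpumask_t *mask)
 *	{
 *		struct foo_dev *foo = container_of(notify, struct foo_dev, notify);
 *
 *		cpumask_copy(&foo->cached_mask, mask);
 *	}
 *
 *	static void foo_affinity_release(struct kref *ref)
 *	{
 *		// last reference dropped; nothing dynamic to free in this sketch
 *	}
 *
 *	// at request time:
 *	foo->notify.notify = foo_affinity_notify;
 *	foo->notify.release = foo_affinity_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->notify);
 *
 *	// and before free_irq():
 *	irq_set_affinity_notifier(foo->irq, NULL);
 */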
433 
434 #ifndef CONFIG_AUTO_IRQ_AFFINITY
435 /*
436  * Generic version of the affinity autoselector.
437  */
438 int irq_setup_affinity(struct irq_desc *desc)
439 {
440 	struct cpumask *set = irq_default_affinity;
441 	int ret, node = irq_desc_get_node(desc);
442 	static DEFINE_RAW_SPINLOCK(mask_lock);
443 	static struct cpumask mask;
444 
445 	/* Excludes PER_CPU and NO_BALANCE interrupts */
446 	if (!__irq_can_set_affinity(desc))
447 		return 0;
448 
449 	raw_spin_lock(&mask_lock);
450 	/*
451 	 * Preserve the managed affinity setting and a userspace affinity
452 	 * setup, but make sure that one of the targets is online.
453 	 */
454 	if (irqd_affinity_is_managed(&desc->irq_data) ||
455 	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
456 		if (cpumask_intersects(desc->irq_common_data.affinity,
457 				       cpu_online_mask))
458 			set = desc->irq_common_data.affinity;
459 		else
460 			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
461 	}
462 
463 	cpumask_and(&mask, cpu_online_mask, set);
464 	if (cpumask_empty(&mask))
465 		cpumask_copy(&mask, cpu_online_mask);
466 
467 	if (node != NUMA_NO_NODE) {
468 		const struct cpumask *nodemask = cpumask_of_node(node);
469 
470 		/* make sure at least one of the cpus in nodemask is online */
471 		if (cpumask_intersects(&mask, nodemask))
472 			cpumask_and(&mask, &mask, nodemask);
473 	}
474 	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
475 	raw_spin_unlock(&mask_lock);
476 	return ret;
477 }
478 #else
479 /* Wrapper for ALPHA specific affinity selector magic */
480 int irq_setup_affinity(struct irq_desc *desc)
481 {
482 	return irq_select_affinity(irq_desc_get_irq(desc));
483 }
484 #endif
485 
486 /*
487  * Called when a bogus affinity is set via /proc/irq
488  */
489 int irq_select_affinity_usr(unsigned int irq)
490 {
491 	struct irq_desc *desc = irq_to_desc(irq);
492 	unsigned long flags;
493 	int ret;
494 
495 	raw_spin_lock_irqsave(&desc->lock, flags);
496 	ret = irq_setup_affinity(desc);
497 	raw_spin_unlock_irqrestore(&desc->lock, flags);
498 	return ret;
499 }
500 #endif
501 
502 /**
503  *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
504  *	@irq: interrupt number to set affinity
505  *	@vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
506  *	            specific data for percpu_devid interrupts
507  *
508  *	This function uses the vCPU specific data to set the vCPU
509  *	affinity for an irq. The vCPU specific data is passed from
510  *	outside, such as KVM. One example code path is as below:
511  *	KVM -> IOMMU -> irq_set_vcpu_affinity().
512  */
513 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
514 {
515 	unsigned long flags;
516 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
517 	struct irq_data *data;
518 	struct irq_chip *chip;
519 	int ret = -ENOSYS;
520 
521 	if (!desc)
522 		return -EINVAL;
523 
524 	data = irq_desc_get_irq_data(desc);
525 	do {
526 		chip = irq_data_get_irq_chip(data);
527 		if (chip && chip->irq_set_vcpu_affinity)
528 			break;
529 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
530 		data = data->parent_data;
531 #else
532 		data = NULL;
533 #endif
534 	} while (data);
535 
536 	if (data)
537 		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
538 	irq_put_desc_unlock(desc, flags);
539 
540 	return ret;
541 }
542 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
543 
544 void __disable_irq(struct irq_desc *desc)
545 {
546 	if (!desc->depth++)
547 		irq_disable(desc);
548 }
549 
550 static int __disable_irq_nosync(unsigned int irq)
551 {
552 	unsigned long flags;
553 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
554 
555 	if (!desc)
556 		return -EINVAL;
557 	__disable_irq(desc);
558 	irq_put_desc_busunlock(desc, flags);
559 	return 0;
560 }
561 
562 /**
563  *	disable_irq_nosync - disable an irq without waiting
564  *	@irq: Interrupt to disable
565  *
566  *	Disable the selected interrupt line.  Disables and Enables are
567  *	nested.
568  *	Unlike disable_irq(), this function does not ensure existing
569  *	instances of the IRQ handler have completed before returning.
570  *
571  *	This function may be called from IRQ context.
572  */
573 void disable_irq_nosync(unsigned int irq)
574 {
575 	__disable_irq_nosync(irq);
576 }
577 EXPORT_SYMBOL(disable_irq_nosync);
578 
579 /**
580  *	disable_irq - disable an irq and wait for completion
581  *	@irq: Interrupt to disable
582  *
583  *	Disable the selected interrupt line.  Enables and Disables are
584  *	nested.
585  *	This function waits for any pending IRQ handlers for this interrupt
586  *	to complete before returning. If you use this function while
587  *	holding a resource the IRQ handler may need, you will deadlock.
588  *
589  *	This function may be called - with care - from IRQ context.
590  */
591 void disable_irq(unsigned int irq)
592 {
593 	if (!__disable_irq_nosync(irq))
594 		synchronize_irq(irq);
595 }
596 EXPORT_SYMBOL(disable_irq);
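
/*
 * Hedged usage sketch (not part of this file): disable_irq()/enable_irq()
 * nest, so a driver can bracket a reconfiguration that must not race with
 * its handler. Names are hypothetical; this must run in preemptible context
 * because disable_irq() may wait for a threaded handler.
 *
 *	disable_irq(foo->irq);          // no handler instance runs after this returns
 *	foo_rewrite_ring_buffers(foo);
 *	enable_irq(foo->irq);           // matches the disable above
 */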
597 
598 /**
599  *	disable_hardirq - disables an irq and waits for hardirq completion
600  *	@irq: Interrupt to disable
601  *
602  *	Disable the selected interrupt line.  Enables and Disables are
603  *	nested.
604  *	This function waits for any pending hard IRQ handlers for this
605  *	interrupt to complete before returning. If you use this function while
606  *	holding a resource the hard IRQ handler may need, you will deadlock.
607  *
608  *	When used to optimistically disable an interrupt from atomic context
609  *	the return value must be checked.
610  *
611  *	Returns: false if a threaded handler is active.
612  *
613  *	This function may be called - with care - from IRQ context.
614  */
615 bool disable_hardirq(unsigned int irq)
616 {
617 	if (!__disable_irq_nosync(irq))
618 		return synchronize_hardirq(irq);
619 
620 	return false;
621 }
622 EXPORT_SYMBOL_GPL(disable_hardirq);
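
/*
 * Hedged usage sketch (not part of this file): disable_hardirq() is the
 * variant for optimistically disabling from atomic context, e.g. poll-style
 * paths. The return value must be checked because a threaded handler may
 * still be running. Names are hypothetical.
 *
 *	if (disable_hardirq(foo->irq))
 *		foo_poll_device(foo);   // hard handler quiesced; thread may still run
 *	enable_irq(foo->irq);
 */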
623 
624 /**
625  *	disable_nmi_nosync - disable an nmi without waiting
626  *	@irq: Interrupt to disable
627  *
628  *	Disable the selected interrupt line. Disables and enables are
629  *	nested.
630  *	The interrupt to disable must have been requested through request_nmi.
631  *	Unlike disable_nmi(), this function does not ensure existing
632  *	instances of the IRQ handler have completed before returning.
633  */
634 void disable_nmi_nosync(unsigned int irq)
635 {
636 	disable_irq_nosync(irq);
637 }
638 
639 void __enable_irq(struct irq_desc *desc)
640 {
641 	switch (desc->depth) {
642 	case 0:
643  err_out:
644 		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
645 		     irq_desc_get_irq(desc));
646 		break;
647 	case 1: {
648 		if (desc->istate & IRQS_SUSPENDED)
649 			goto err_out;
650 		/* Prevent probing on this irq: */
651 		irq_settings_set_noprobe(desc);
652 		/*
653 		 * Call irq_startup() not irq_enable() here because the
654 		 * interrupt might be marked NOAUTOEN. So irq_startup()
655 		 * needs to be invoked when it gets enabled the first
656 		 * time. If it was already started up, then irq_startup()
657 		 * will invoke irq_enable() under the hood.
658 		 */
659 		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
660 		break;
661 	}
662 	default:
663 		desc->depth--;
664 	}
665 }
666 
667 /**
668  *	enable_irq - enable handling of an irq
669  *	@irq: Interrupt to enable
670  *
671  *	Undoes the effect of one call to disable_irq().  If this
672  *	matches the last disable, processing of interrupts on this
673  *	IRQ line is re-enabled.
674  *
675  *	This function may be called from IRQ context only when
676  *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
677  */
678 void enable_irq(unsigned int irq)
679 {
680 	unsigned long flags;
681 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
682 
683 	if (!desc)
684 		return;
685 	if (WARN(!desc->irq_data.chip,
686 		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
687 		goto out;
688 
689 	__enable_irq(desc);
690 out:
691 	irq_put_desc_busunlock(desc, flags);
692 }
693 EXPORT_SYMBOL(enable_irq);
694 
695 /**
696  *	enable_nmi - enable handling of an nmi
697  *	@irq: Interrupt to enable
698  *
699  *	The interrupt to enable must have been requested through request_nmi.
700  *	Undoes the effect of one call to disable_nmi(). If this
701  *	matches the last disable, processing of interrupts on this
702  *	IRQ line is re-enabled.
703  */
704 void enable_nmi(unsigned int irq)
705 {
706 	enable_irq(irq);
707 }
708 
709 static int set_irq_wake_real(unsigned int irq, unsigned int on)
710 {
711 	struct irq_desc *desc = irq_to_desc(irq);
712 	int ret = -ENXIO;
713 
714 	if (irq_desc_get_chip(desc)->flags &  IRQCHIP_SKIP_SET_WAKE)
715 		return 0;
716 
717 	if (desc->irq_data.chip->irq_set_wake)
718 		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
719 
720 	return ret;
721 }
722 
723 /**
724  *	irq_set_irq_wake - control irq power management wakeup
725  *	@irq:	interrupt to control
726  *	@on:	enable/disable power management wakeup
727  *
728  *	Enable/disable power management wakeup mode, which is
729  *	disabled by default.  Enables and disables must match,
730  *	just as they match for non-wakeup mode support.
731  *
732  *	Wakeup mode lets this IRQ wake the system from sleep
733  *	states like "suspend to RAM".
734  *
735  *	Note: irq enable/disable state is completely orthogonal
736  *	to the enable/disable state of irq wake. An irq can be
737  *	disabled with disable_irq() and still wake the system as
738  *	long as the irq has wake enabled. If this does not hold,
739  *	then the underlying irq chip and the related driver need
740  *	to be investigated.
741  */
742 int irq_set_irq_wake(unsigned int irq, unsigned int on)
743 {
744 	unsigned long flags;
745 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
746 	int ret = 0;
747 
748 	if (!desc)
749 		return -EINVAL;
750 
751 	/* Don't use NMIs as wake up interrupts please */
752 	if (desc->istate & IRQS_NMI) {
753 		ret = -EINVAL;
754 		goto out_unlock;
755 	}
756 
757 	/* wakeup-capable irqs can be shared between drivers that
758 	 * don't need to have the same sleep mode behaviors.
759 	 */
760 	if (on) {
761 		if (desc->wake_depth++ == 0) {
762 			ret = set_irq_wake_real(irq, on);
763 			if (ret)
764 				desc->wake_depth = 0;
765 			else
766 				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
767 		}
768 	} else {
769 		if (desc->wake_depth == 0) {
770 			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
771 		} else if (--desc->wake_depth == 0) {
772 			ret = set_irq_wake_real(irq, on);
773 			if (ret)
774 				desc->wake_depth = 1;
775 			else
776 				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
777 		}
778 	}
779 
780 out_unlock:
781 	irq_put_desc_busunlock(desc, flags);
782 	return ret;
783 }
784 EXPORT_SYMBOL(irq_set_irq_wake);
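
/*
 * Hedged usage sketch (not part of this file): a driver whose interrupt
 * should wake the system typically flips wake mode in its suspend/resume
 * callbacks. enable_irq_wake()/disable_irq_wake() are thin wrappers around
 * irq_set_irq_wake(); the device names below are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */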
785 
786 /*
787  * Internal function that tells the architecture code whether a
788  * particular irq has been exclusively allocated or is available
789  * for driver use.
790  */
791 int can_request_irq(unsigned int irq, unsigned long irqflags)
792 {
793 	unsigned long flags;
794 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
795 	int canrequest = 0;
796 
797 	if (!desc)
798 		return 0;
799 
800 	if (irq_settings_can_request(desc)) {
801 		if (!desc->action ||
802 		    irqflags & desc->action->flags & IRQF_SHARED)
803 			canrequest = 1;
804 	}
805 	irq_put_desc_unlock(desc, flags);
806 	return canrequest;
807 }
808 
809 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
810 {
811 	struct irq_chip *chip = desc->irq_data.chip;
812 	int ret, unmask = 0;
813 
814 	if (!chip || !chip->irq_set_type) {
815 		/*
816 		 * IRQF_TRIGGER_* but the PIC does not support multiple
817 		 * flow-types?
818 		 */
819 		pr_debug("No set_type function for IRQ %d (%s)\n",
820 			 irq_desc_get_irq(desc),
821 			 chip ? (chip->name ? : "unknown") : "unknown");
822 		return 0;
823 	}
824 
825 	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
826 		if (!irqd_irq_masked(&desc->irq_data))
827 			mask_irq(desc);
828 		if (!irqd_irq_disabled(&desc->irq_data))
829 			unmask = 1;
830 	}
831 
832 	/* Mask all flags except trigger mode */
833 	flags &= IRQ_TYPE_SENSE_MASK;
834 	ret = chip->irq_set_type(&desc->irq_data, flags);
835 
836 	switch (ret) {
837 	case IRQ_SET_MASK_OK:
838 	case IRQ_SET_MASK_OK_DONE:
839 		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
840 		irqd_set(&desc->irq_data, flags);
841 		/* fall through */
842 
843 	case IRQ_SET_MASK_OK_NOCOPY:
844 		flags = irqd_get_trigger_type(&desc->irq_data);
845 		irq_settings_set_trigger_mask(desc, flags);
846 		irqd_clear(&desc->irq_data, IRQD_LEVEL);
847 		irq_settings_clr_level(desc);
848 		if (flags & IRQ_TYPE_LEVEL_MASK) {
849 			irq_settings_set_level(desc);
850 			irqd_set(&desc->irq_data, IRQD_LEVEL);
851 		}
852 
853 		ret = 0;
854 		break;
855 	default:
856 		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
857 		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
858 	}
859 	if (unmask)
860 		unmask_irq(desc);
861 	return ret;
862 }
863 
864 #ifdef CONFIG_HARDIRQS_SW_RESEND
865 int irq_set_parent(int irq, int parent_irq)
866 {
867 	unsigned long flags;
868 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
869 
870 	if (!desc)
871 		return -EINVAL;
872 
873 	desc->parent_irq = parent_irq;
874 
875 	irq_put_desc_unlock(desc, flags);
876 	return 0;
877 }
878 EXPORT_SYMBOL_GPL(irq_set_parent);
879 #endif
880 
881 /*
882  * Default primary interrupt handler for threaded interrupts. Is
883  * assigned as primary handler when request_threaded_irq is called
884  * with handler == NULL. Useful for oneshot interrupts.
885  */
886 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
887 {
888 	return IRQ_WAKE_THREAD;
889 }
890 
891 /*
892  * Primary handler for nested threaded interrupts. Should never be
893  * called.
894  */
895 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
896 {
897 	WARN(1, "Primary handler called for nested irq %d\n", irq);
898 	return IRQ_NONE;
899 }
900 
901 static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
902 {
903 	WARN(1, "Secondary action handler called for irq %d\n", irq);
904 	return IRQ_NONE;
905 }
906 
907 static int irq_wait_for_interrupt(struct irqaction *action)
908 {
909 	for (;;) {
910 		set_current_state(TASK_INTERRUPTIBLE);
911 
912 		if (kthread_should_stop()) {
913 			/* may need to run one last time */
914 			if (test_and_clear_bit(IRQTF_RUNTHREAD,
915 					       &action->thread_flags)) {
916 				__set_current_state(TASK_RUNNING);
917 				return 0;
918 			}
919 			__set_current_state(TASK_RUNNING);
920 			return -1;
921 		}
922 
923 		if (test_and_clear_bit(IRQTF_RUNTHREAD,
924 				       &action->thread_flags)) {
925 			__set_current_state(TASK_RUNNING);
926 			return 0;
927 		}
928 		schedule();
929 	}
930 }
931 
932 /*
933  * Oneshot interrupts keep the irq line masked until the threaded
934  * handler has finished. Unmask if the interrupt has not been disabled and
935  * is marked MASKED.
936  */
937 static void irq_finalize_oneshot(struct irq_desc *desc,
938 				 struct irqaction *action)
939 {
940 	if (!(desc->istate & IRQS_ONESHOT) ||
941 	    action->handler == irq_forced_secondary_handler)
942 		return;
943 again:
944 	chip_bus_lock(desc);
945 	raw_spin_lock_irq(&desc->lock);
946 
947 	/*
948 	 * Implausible though it may be, we need to protect ourselves against
949 	 * the following scenario:
950 	 *
951 	 * The thread finishes faster than the hard interrupt handler
952 	 * on the other CPU. If we unmask the irq line then the
953 	 * interrupt can come in again, mask the line and leave due
954 	 * to IRQS_INPROGRESS, and the irq line stays masked forever.
955 	 *
956 	 * This also serializes the state of shared oneshot handlers
957 	 * versus "desc->threads_oneshot |= action->thread_mask;" in
958 	 * irq_wake_thread(). See the comment there which explains the
959 	 * serialization.
960 	 */
961 	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
962 		raw_spin_unlock_irq(&desc->lock);
963 		chip_bus_sync_unlock(desc);
964 		cpu_relax();
965 		goto again;
966 	}
967 
968 	/*
969 	 * Now check again, whether the thread should run. Otherwise
970 	 * we would clear the threads_oneshot bit of this thread which
971 	 * was just set.
972 	 */
973 	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
974 		goto out_unlock;
975 
976 	desc->threads_oneshot &= ~action->thread_mask;
977 
978 	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
979 	    irqd_irq_masked(&desc->irq_data))
980 		unmask_threaded_irq(desc);
981 
982 out_unlock:
983 	raw_spin_unlock_irq(&desc->lock);
984 	chip_bus_sync_unlock(desc);
985 }
986 
987 #ifdef CONFIG_SMP
988 /*
989  * Check whether we need to change the affinity of the interrupt thread.
990  */
991 static void
992 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
993 {
994 	cpumask_var_t mask;
995 	bool valid = true;
996 
997 	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
998 		return;
999 
1000 	/*
1001 	 * In case we are out of memory, set IRQTF_AFFINITY again and
1002 	 * retry the next time around.
1003 	 */
1004 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1005 		set_bit(IRQTF_AFFINITY, &action->thread_flags);
1006 		return;
1007 	}
1008 
1009 	raw_spin_lock_irq(&desc->lock);
1010 	/*
1011 	 * This code is triggered unconditionally. Check the affinity
1012 	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
1013 	 */
1014 	if (cpumask_available(desc->irq_common_data.affinity)) {
1015 		const struct cpumask *m;
1016 
1017 		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1018 		cpumask_copy(mask, m);
1019 	} else {
1020 		valid = false;
1021 	}
1022 	raw_spin_unlock_irq(&desc->lock);
1023 
1024 	if (valid)
1025 		set_cpus_allowed_ptr(current, mask);
1026 	free_cpumask_var(mask);
1027 }
1028 #else
1029 static inline void
1030 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
1031 #endif
1032 
1033 /*
1034  * Interrupts which are not explicitly requested as threaded
1035  * interrupts rely on the implicit bh/preempt disable of the hard irq
1036  * context. So we need to disable bh here to avoid deadlocks and other
1037  * side effects.
1038  */
1039 static irqreturn_t
1040 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
1041 {
1042 	irqreturn_t ret;
1043 
1044 	local_bh_disable();
1045 	ret = action->thread_fn(action->irq, action->dev_id);
1046 	if (ret == IRQ_HANDLED)
1047 		atomic_inc(&desc->threads_handled);
1048 
1049 	irq_finalize_oneshot(desc, action);
1050 	local_bh_enable();
1051 	return ret;
1052 }
1053 
1054 /*
1055  * Interrupts explicitly requested as threaded interrupts want to be
1056  * preemptible - many of them need to sleep and wait for slow buses to
1057  * complete.
1058  */
1059 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
1060 		struct irqaction *action)
1061 {
1062 	irqreturn_t ret;
1063 
1064 	ret = action->thread_fn(action->irq, action->dev_id);
1065 	if (ret == IRQ_HANDLED)
1066 		atomic_inc(&desc->threads_handled);
1067 
1068 	irq_finalize_oneshot(desc, action);
1069 	return ret;
1070 }
1071 
1072 static void wake_threads_waitq(struct irq_desc *desc)
1073 {
1074 	if (atomic_dec_and_test(&desc->threads_active))
1075 		wake_up(&desc->wait_for_threads);
1076 }
1077 
1078 static void irq_thread_dtor(struct callback_head *unused)
1079 {
1080 	struct task_struct *tsk = current;
1081 	struct irq_desc *desc;
1082 	struct irqaction *action;
1083 
1084 	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
1085 		return;
1086 
1087 	action = kthread_data(tsk);
1088 
1089 	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
1090 	       tsk->comm, tsk->pid, action->irq);
1091 
1092 
1093 	desc = irq_to_desc(action->irq);
1094 	/*
1095 	 * If IRQTF_RUNTHREAD is set, we need to decrement
1096 	 * desc->threads_active and wake possible waiters.
1097 	 */
1098 	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1099 		wake_threads_waitq(desc);
1100 
1101 	/* Prevent a stale desc->threads_oneshot */
1102 	irq_finalize_oneshot(desc, action);
1103 }
1104 
1105 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1106 {
1107 	struct irqaction *secondary = action->secondary;
1108 
1109 	if (WARN_ON_ONCE(!secondary))
1110 		return;
1111 
1112 	raw_spin_lock_irq(&desc->lock);
1113 	__irq_wake_thread(desc, secondary);
1114 	raw_spin_unlock_irq(&desc->lock);
1115 }
1116 
1117 /*
1118  * Interrupt handler thread
1119  */
1120 static int irq_thread(void *data)
1121 {
1122 	struct callback_head on_exit_work;
1123 	struct irqaction *action = data;
1124 	struct irq_desc *desc = irq_to_desc(action->irq);
1125 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
1126 			struct irqaction *action);
1127 
1128 	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
1129 					&action->thread_flags))
1130 		handler_fn = irq_forced_thread_fn;
1131 	else
1132 		handler_fn = irq_thread_fn;
1133 
1134 	init_task_work(&on_exit_work, irq_thread_dtor);
1135 	task_work_add(current, &on_exit_work, false);
1136 
1137 	irq_thread_check_affinity(desc, action);
1138 
1139 	while (!irq_wait_for_interrupt(action)) {
1140 		irqreturn_t action_ret;
1141 
1142 		irq_thread_check_affinity(desc, action);
1143 
1144 		action_ret = handler_fn(desc, action);
1145 		if (action_ret == IRQ_WAKE_THREAD)
1146 			irq_wake_secondary(desc, action);
1147 
1148 		wake_threads_waitq(desc);
1149 	}
1150 
1151 	/*
1152 	 * This is the regular exit path. __free_irq() is stopping the
1153 	 * thread via kthread_stop() after calling
1154 	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1155 	 * oneshot mask bit can be set.
1156 	 */
1157 	task_work_cancel(current, irq_thread_dtor);
1158 	return 0;
1159 }
1160 
1161 /**
1162  *	irq_wake_thread - wake the irq thread for the action identified by dev_id
1163  *	@irq:		Interrupt line
1164  *	@dev_id:	Device identity for which the thread should be woken
1165  *
1166  */
1167 void irq_wake_thread(unsigned int irq, void *dev_id)
1168 {
1169 	struct irq_desc *desc = irq_to_desc(irq);
1170 	struct irqaction *action;
1171 	unsigned long flags;
1172 
1173 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1174 		return;
1175 
1176 	raw_spin_lock_irqsave(&desc->lock, flags);
1177 	for_each_action_of_desc(desc, action) {
1178 		if (action->dev_id == dev_id) {
1179 			if (action->thread)
1180 				__irq_wake_thread(desc, action);
1181 			break;
1182 		}
1183 	}
1184 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1185 }
1186 EXPORT_SYMBOL_GPL(irq_wake_thread);
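
/*
 * Hedged usage sketch (not part of this file): irq_wake_thread() lets a
 * driver kick the threaded handler of its own action from a path that
 * noticed pending work outside the hard interrupt handler. Names are
 * hypothetical; dev_id must match the one passed to request_threaded_irq().
 *
 *	if (foo_has_deferred_work(foo))
 *		irq_wake_thread(foo->irq, foo);
 */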
1187 
1188 static int irq_setup_forced_threading(struct irqaction *new)
1189 {
1190 	if (!force_irqthreads)
1191 		return 0;
1192 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1193 		return 0;
1194 
1195 	/*
1196 	 * No further action required for interrupts which are requested as
1197 	 * threaded interrupts already
1198 	 */
1199 	if (new->handler == irq_default_primary_handler)
1200 		return 0;
1201 
1202 	new->flags |= IRQF_ONESHOT;
1203 
1204 	/*
1205 	 * Handle the case where we have a real primary handler and a
1206 	 * thread handler. We force thread them as well by creating a
1207 	 * secondary action.
1208 	 */
1209 	if (new->handler && new->thread_fn) {
1210 		/* Allocate the secondary action */
1211 		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1212 		if (!new->secondary)
1213 			return -ENOMEM;
1214 		new->secondary->handler = irq_forced_secondary_handler;
1215 		new->secondary->thread_fn = new->thread_fn;
1216 		new->secondary->dev_id = new->dev_id;
1217 		new->secondary->irq = new->irq;
1218 		new->secondary->name = new->name;
1219 	}
1220 	/* Deal with the primary handler */
1221 	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1222 	new->thread_fn = new->handler;
1223 	new->handler = irq_default_primary_handler;
1224 	return 0;
1225 }
1226 
1227 static int irq_request_resources(struct irq_desc *desc)
1228 {
1229 	struct irq_data *d = &desc->irq_data;
1230 	struct irq_chip *c = d->chip;
1231 
1232 	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1233 }
1234 
1235 static void irq_release_resources(struct irq_desc *desc)
1236 {
1237 	struct irq_data *d = &desc->irq_data;
1238 	struct irq_chip *c = d->chip;
1239 
1240 	if (c->irq_release_resources)
1241 		c->irq_release_resources(d);
1242 }
1243 
1244 static bool irq_supports_nmi(struct irq_desc *desc)
1245 {
1246 	struct irq_data *d = irq_desc_get_irq_data(desc);
1247 
1248 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1249 	/* Only IRQs directly managed by the root irqchip can be set as NMI */
1250 	if (d->parent_data)
1251 		return false;
1252 #endif
1253 	/* Don't support NMIs for chips behind a slow bus */
1254 	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1255 		return false;
1256 
1257 	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1258 }
1259 
1260 static int irq_nmi_setup(struct irq_desc *desc)
1261 {
1262 	struct irq_data *d = irq_desc_get_irq_data(desc);
1263 	struct irq_chip *c = d->chip;
1264 
1265 	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1266 }
1267 
1268 static void irq_nmi_teardown(struct irq_desc *desc)
1269 {
1270 	struct irq_data *d = irq_desc_get_irq_data(desc);
1271 	struct irq_chip *c = d->chip;
1272 
1273 	if (c->irq_nmi_teardown)
1274 		c->irq_nmi_teardown(d);
1275 }
1276 
1277 static int
1278 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1279 {
1280 	struct task_struct *t;
1281 	struct sched_param param = {
1282 		.sched_priority = MAX_USER_RT_PRIO/2,
1283 	};
1284 
1285 	if (!secondary) {
1286 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1287 				   new->name);
1288 	} else {
1289 		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1290 				   new->name);
1291 		param.sched_priority -= 1;
1292 	}
1293 
1294 	if (IS_ERR(t))
1295 		return PTR_ERR(t);
1296 
1297 	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1298 
1299 	/*
1300 	 * We keep the reference to the task struct even if
1301 	 * the thread dies, so that the interrupt code never
1302 	 * references an already freed task_struct.
1303 	 */
1304 	new->thread = get_task_struct(t);
1305 	/*
1306 	 * Tell the thread to set its affinity. This is
1307 	 * important for shared interrupt handlers as we do
1308 	 * not invoke setup_affinity() for the secondary
1309 	 * handlers since everything is already set up. Even for
1310 	 * interrupts marked with IRQF_NO_BALANCE this is
1311 	 * correct as we want the thread to move to the cpu(s)
1312 	 * on which the requesting code placed the interrupt.
1313 	 */
1314 	set_bit(IRQTF_AFFINITY, &new->thread_flags);
1315 	return 0;
1316 }
1317 
1318 /*
1319  * Internal function to register an irqaction - typically used to
1320  * allocate special interrupts that are part of the architecture.
1321  *
1322  * Locking rules:
1323  *
1324  * desc->request_mutex	Provides serialization against a concurrent free_irq()
1325  *   chip_bus_lock	Provides serialization for slow bus operations
1326  *     desc->lock	Provides serialization against hard interrupts
1327  *
1328  * chip_bus_lock and desc->lock are sufficient for all other management and
1329  * interrupt related functions. desc->request_mutex solely serializes
1330  * request/free_irq().
1331  */
1332 static int
1333 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1334 {
1335 	struct irqaction *old, **old_ptr;
1336 	unsigned long flags, thread_mask = 0;
1337 	int ret, nested, shared = 0;
1338 
1339 	if (!desc)
1340 		return -EINVAL;
1341 
1342 	if (desc->irq_data.chip == &no_irq_chip)
1343 		return -ENOSYS;
1344 	if (!try_module_get(desc->owner))
1345 		return -ENODEV;
1346 
1347 	new->irq = irq;
1348 
1349 	/*
1350 	 * If the trigger type is not specified by the caller,
1351 	 * then use the default for this interrupt.
1352 	 */
1353 	if (!(new->flags & IRQF_TRIGGER_MASK))
1354 		new->flags |= irqd_get_trigger_type(&desc->irq_data);
1355 
1356 	/*
1357 	 * Check whether the interrupt nests into another interrupt
1358 	 * thread.
1359 	 */
1360 	nested = irq_settings_is_nested_thread(desc);
1361 	if (nested) {
1362 		if (!new->thread_fn) {
1363 			ret = -EINVAL;
1364 			goto out_mput;
1365 		}
1366 		/*
1367 		 * Replace the primary handler which was provided from
1368 		 * the driver for non nested interrupt handling by the
1369 		 * dummy function which warns when called.
1370 		 */
1371 		new->handler = irq_nested_primary_handler;
1372 	} else {
1373 		if (irq_settings_can_thread(desc)) {
1374 			ret = irq_setup_forced_threading(new);
1375 			if (ret)
1376 				goto out_mput;
1377 		}
1378 	}
1379 
1380 	/*
1381 	 * Create a handler thread when a thread function is supplied
1382 	 * and the interrupt does not nest into another interrupt
1383 	 * thread.
1384 	 */
1385 	if (new->thread_fn && !nested) {
1386 		ret = setup_irq_thread(new, irq, false);
1387 		if (ret)
1388 			goto out_mput;
1389 		if (new->secondary) {
1390 			ret = setup_irq_thread(new->secondary, irq, true);
1391 			if (ret)
1392 				goto out_thread;
1393 		}
1394 	}
1395 
1396 	/*
1397 	 * Drivers are often written to work without knowledge about the
1398 	 * underlying irq chip implementation, so a request for a
1399 	 * threaded irq without a primary hard irq context handler
1400 	 * requires the ONESHOT flag to be set. Some irq chips like
1401 	 * MSI based interrupts are per se one shot safe. Check the
1402 	 * chip flags, so we can avoid the unmask dance at the end of
1403 	 * the threaded handler for those.
1404 	 */
1405 	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1406 		new->flags &= ~IRQF_ONESHOT;
1407 
1408 	/*
1409 	 * Protects against a concurrent __free_irq() call which might wait
1410 	 * for synchronize_hardirq() to complete without holding the optional
1411 	 * chip bus lock and desc->lock. Also protects against handing out
1412 	 * a recycled oneshot thread_mask bit while it's still in use by
1413 	 * its previous owner.
1414 	 */
1415 	mutex_lock(&desc->request_mutex);
1416 
1417 	/*
1418 	 * Acquire bus lock as the irq_request_resources() callback below
1419 	 * might rely on the serialization or the magic power management
1420 	 * functions which are abusing the irq_bus_lock() callback,
1421 	 */
1422 	chip_bus_lock(desc);
1423 
1424 	/* First installed action requests resources. */
1425 	if (!desc->action) {
1426 		ret = irq_request_resources(desc);
1427 		if (ret) {
1428 			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1429 			       new->name, irq, desc->irq_data.chip->name);
1430 			goto out_bus_unlock;
1431 		}
1432 	}
1433 
1434 	/*
1435 	 * The following block of code has to be executed atomically
1436 	 * protected against a concurrent interrupt and any of the other
1437 	 * management calls which are not serialized via
1438 	 * desc->request_mutex or the optional bus lock.
1439 	 */
1440 	raw_spin_lock_irqsave(&desc->lock, flags);
1441 	old_ptr = &desc->action;
1442 	old = *old_ptr;
1443 	if (old) {
1444 		/*
1445 		 * Can't share interrupts unless both agree to and are
1446 		 * the same type (level, edge, polarity). So both flag
1447 		 * fields must have IRQF_SHARED set and the bits which
1448 		 * set the trigger type must match. Also all must
1449 		 * agree on ONESHOT.
1450 		 * Interrupt lines used for NMIs cannot be shared.
1451 		 */
1452 		unsigned int oldtype;
1453 
1454 		if (desc->istate & IRQS_NMI) {
1455 			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1456 				new->name, irq, desc->irq_data.chip->name);
1457 			ret = -EINVAL;
1458 			goto out_unlock;
1459 		}
1460 
1461 		/*
1462 		 * If nobody set the configuration before, inherit
1463 		 * the one provided by the requester.
1464 		 */
1465 		if (irqd_trigger_type_was_set(&desc->irq_data)) {
1466 			oldtype = irqd_get_trigger_type(&desc->irq_data);
1467 		} else {
1468 			oldtype = new->flags & IRQF_TRIGGER_MASK;
1469 			irqd_set_trigger_type(&desc->irq_data, oldtype);
1470 		}
1471 
1472 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
1473 		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1474 		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
1475 			goto mismatch;
1476 
1477 		/* All handlers must agree on per-cpuness */
1478 		if ((old->flags & IRQF_PERCPU) !=
1479 		    (new->flags & IRQF_PERCPU))
1480 			goto mismatch;
1481 
1482 		/* add new interrupt at end of irq queue */
1483 		do {
1484 			/*
1485 			 * Or all existing action->thread_mask bits,
1486 			 * so we can find the next zero bit for this
1487 			 * new action.
1488 			 */
1489 			thread_mask |= old->thread_mask;
1490 			old_ptr = &old->next;
1491 			old = *old_ptr;
1492 		} while (old);
1493 		shared = 1;
1494 	}
1495 
1496 	/*
1497 	 * Setup the thread mask for this irqaction for ONESHOT. For
1498 	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1499 	 * conditional in irq_wake_thread().
1500 	 */
1501 	if (new->flags & IRQF_ONESHOT) {
1502 		/*
1503 		 * Unlikely to have 32 resp 64 irqs sharing one line,
1504 		 * but who knows.
1505 		 */
1506 		if (thread_mask == ~0UL) {
1507 			ret = -EBUSY;
1508 			goto out_unlock;
1509 		}
1510 		/*
1511 		 * The thread_mask for the action is or'ed to
1512 		 * desc->threads_active to indicate that the
1513 		 * IRQF_ONESHOT thread handler has been woken, but not
1514 		 * yet finished. The bit is cleared when a thread
1515 		 * completes. When all threads of a shared interrupt
1516 		 * line have completed desc->threads_active becomes
1517 		 * zero and the interrupt line is unmasked. See
1518 		 * handle.c:irq_wake_thread() for further information.
1519 		 *
1520 		 * If no thread is woken by primary (hard irq context)
1521 		 * interrupt handlers, then desc->threads_active is
1522 		 * also checked for zero to unmask the irq line in the
1523 		 * affected hard irq flow handlers
1524 		 * (handle_[fasteoi|level]_irq).
1525 		 *
1526 		 * The new action gets the first zero bit of
1527 		 * thread_mask assigned. See the loop above which or's
1528 		 * all existing action->thread_mask bits.
1529 		 */
1530 		new->thread_mask = 1UL << ffz(thread_mask);
1531 
1532 	} else if (new->handler == irq_default_primary_handler &&
1533 		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1534 		/*
1535 		 * The interrupt was requested with handler = NULL, so
1536 		 * we use the default primary handler for it. But it
1537 		 * does not have the oneshot flag set. In combination
1538 		 * with level interrupts this is deadly, because the
1539 		 * default primary handler just wakes the thread, then
1540 		 * the irq line is re-enabled, but the device still
1541 		 * has the level irq asserted. Rinse and repeat....
1542 		 *
1543 		 * While this works for edge type interrupts, we play
1544 		 * it safe and reject unconditionally because we can't
1545 		 * say for sure which type this interrupt really
1546 		 * has. The type flags are unreliable as the
1547 		 * underlying chip implementation can override them.
1548 		 */
1549 		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1550 		       new->name, irq);
1551 		ret = -EINVAL;
1552 		goto out_unlock;
1553 	}
1554 
1555 	if (!shared) {
1556 		init_waitqueue_head(&desc->wait_for_threads);
1557 
1558 		/* Setup the type (level, edge, polarity) if configured: */
1559 		if (new->flags & IRQF_TRIGGER_MASK) {
1560 			ret = __irq_set_trigger(desc,
1561 						new->flags & IRQF_TRIGGER_MASK);
1562 
1563 			if (ret)
1564 				goto out_unlock;
1565 		}
1566 
1567 		/*
1568 		 * Activate the interrupt. That activation must happen
1569 		 * independently of IRQ_NOAUTOEN. request_irq() can fail
1570 		 * and the callers are supposed to handle
1571 		 * that. enable_irq() of an interrupt requested with
1572 		 * IRQ_NOAUTOEN is not supposed to fail. The activation
1573 		 * keeps it in shutdown mode, it merely associates
1574 		 * resources if necessary and if that's not possible it
1575 		 * fails. Interrupts which are in managed shutdown mode
1576 		 * will simply ignore that activation request.
1577 		 */
1578 		ret = irq_activate(desc);
1579 		if (ret)
1580 			goto out_unlock;
1581 
1582 		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1583 				  IRQS_ONESHOT | IRQS_WAITING);
1584 		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1585 
1586 		if (new->flags & IRQF_PERCPU) {
1587 			irqd_set(&desc->irq_data, IRQD_PER_CPU);
1588 			irq_settings_set_per_cpu(desc);
1589 		}
1590 
1591 		if (new->flags & IRQF_ONESHOT)
1592 			desc->istate |= IRQS_ONESHOT;
1593 
1594 		/* Exclude IRQ from balancing if requested */
1595 		if (new->flags & IRQF_NOBALANCING) {
1596 			irq_settings_set_no_balancing(desc);
1597 			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1598 		}
1599 
1600 		if (irq_settings_can_autoenable(desc)) {
1601 			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1602 		} else {
1603 			/*
1604 			 * Shared interrupts do not go well with disabling
1605 			 * auto enable. A sharing driver might request the irq
1606 			 * while it's still disabled and then wait for
1607 			 * interrupts forever.
1608 			 */
1609 			WARN_ON_ONCE(new->flags & IRQF_SHARED);
1610 			/* Undo nested disables: */
1611 			desc->depth = 1;
1612 		}
1613 
1614 	} else if (new->flags & IRQF_TRIGGER_MASK) {
1615 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1616 		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1617 
1618 		if (nmsk != omsk)
1619 			/* hope the handler works with current  trigger mode */
1620 			pr_warn("irq %d uses trigger mode %u; requested %u\n",
1621 				irq, omsk, nmsk);
1622 	}
1623 
1624 	*old_ptr = new;
1625 
1626 	irq_pm_install_action(desc, new);
1627 
1628 	/* Reset broken irq detection when installing new handler */
1629 	desc->irq_count = 0;
1630 	desc->irqs_unhandled = 0;
1631 
1632 	/*
1633 	 * Check whether we disabled the irq via the spurious handler
1634 	 * before. Reenable it and give it another chance.
1635 	 */
1636 	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1637 		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1638 		__enable_irq(desc);
1639 	}
1640 
1641 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1642 	chip_bus_sync_unlock(desc);
1643 	mutex_unlock(&desc->request_mutex);
1644 
1645 	irq_setup_timings(desc, new);
1646 
1647 	/*
1648 	 * Strictly no need to wake it up, but hung_task complains
1649 	 * when no hard interrupt wakes the thread up.
1650 	 */
1651 	if (new->thread)
1652 		wake_up_process(new->thread);
1653 	if (new->secondary)
1654 		wake_up_process(new->secondary->thread);
1655 
1656 	register_irq_proc(irq, desc);
1657 	new->dir = NULL;
1658 	register_handler_proc(irq, new);
1659 	return 0;
1660 
1661 mismatch:
1662 	if (!(new->flags & IRQF_PROBE_SHARED)) {
1663 		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1664 		       irq, new->flags, new->name, old->flags, old->name);
1665 #ifdef CONFIG_DEBUG_SHIRQ
1666 		dump_stack();
1667 #endif
1668 	}
1669 	ret = -EBUSY;
1670 
1671 out_unlock:
1672 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1673 
1674 	if (!desc->action)
1675 		irq_release_resources(desc);
1676 out_bus_unlock:
1677 	chip_bus_sync_unlock(desc);
1678 	mutex_unlock(&desc->request_mutex);
1679 
1680 out_thread:
1681 	if (new->thread) {
1682 		struct task_struct *t = new->thread;
1683 
1684 		new->thread = NULL;
1685 		kthread_stop(t);
1686 		put_task_struct(t);
1687 	}
1688 	if (new->secondary && new->secondary->thread) {
1689 		struct task_struct *t = new->secondary->thread;
1690 
1691 		new->secondary->thread = NULL;
1692 		kthread_stop(t);
1693 		put_task_struct(t);
1694 	}
1695 out_mput:
1696 	module_put(desc->owner);
1697 	return ret;
1698 }
1699 
1700 /**
1701  *	setup_irq - setup an interrupt
1702  *	@irq: Interrupt line to setup
1703  *	@act: irqaction for the interrupt
1704  *
1705  * Used to statically setup interrupts in the early boot process.
1706  */
1707 int setup_irq(unsigned int irq, struct irqaction *act)
1708 {
1709 	int retval;
1710 	struct irq_desc *desc = irq_to_desc(irq);
1711 
1712 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1713 		return -EINVAL;
1714 
1715 	retval = irq_chip_pm_get(&desc->irq_data);
1716 	if (retval < 0)
1717 		return retval;
1718 
1719 	retval = __setup_irq(irq, desc, act);
1720 
1721 	if (retval)
1722 		irq_chip_pm_put(&desc->irq_data);
1723 
1724 	return retval;
1725 }
1726 EXPORT_SYMBOL_GPL(setup_irq);
1727 
1728 /*
1729  * Internal function to unregister an irqaction - used to free
1730  * regular and special interrupts that are part of the architecture.
1731  */
1732 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1733 {
1734 	unsigned irq = desc->irq_data.irq;
1735 	struct irqaction *action, **action_ptr;
1736 	unsigned long flags;
1737 
1738 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1739 
1740 	mutex_lock(&desc->request_mutex);
1741 	chip_bus_lock(desc);
1742 	raw_spin_lock_irqsave(&desc->lock, flags);
1743 
1744 	/*
1745 	 * There can be multiple actions per IRQ descriptor; find the right
1746 	 * one based on the dev_id:
1747 	 */
1748 	action_ptr = &desc->action;
1749 	for (;;) {
1750 		action = *action_ptr;
1751 
1752 		if (!action) {
1753 			WARN(1, "Trying to free already-free IRQ %d\n", irq);
1754 			raw_spin_unlock_irqrestore(&desc->lock, flags);
1755 			chip_bus_sync_unlock(desc);
1756 			mutex_unlock(&desc->request_mutex);
1757 			return NULL;
1758 		}
1759 
1760 		if (action->dev_id == dev_id)
1761 			break;
1762 		action_ptr = &action->next;
1763 	}
1764 
1765 	/* Found it - now remove it from the list of entries: */
1766 	*action_ptr = action->next;
1767 
1768 	irq_pm_remove_action(desc, action);
1769 
1770 	/* If this was the last handler, shut down the IRQ line: */
1771 	if (!desc->action) {
1772 		irq_settings_clr_disable_unlazy(desc);
1773 		/* Only shutdown. Deactivate after synchronize_hardirq() */
1774 		irq_shutdown(desc);
1775 	}
1776 
1777 #ifdef CONFIG_SMP
1778 	/* make sure affinity_hint is cleaned up */
1779 	if (WARN_ON_ONCE(desc->affinity_hint))
1780 		desc->affinity_hint = NULL;
1781 #endif
1782 
1783 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1784 	/*
1785 	 * Drop bus_lock here so the changes which were done in the chip
1786 	 * callbacks above are synced out to the irq chips which hang
1787 	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
1788 	 *
1789 	 * Aside of that the bus_lock can also be taken from the threaded
1790 	 * handler in irq_finalize_oneshot() which results in a deadlock
1791 	 * because kthread_stop() would wait forever for the thread to
1792 	 * complete, which is blocked on the bus lock.
1793 	 *
1794 	 * The still held desc->request_mutex protects against a
1795 	 * concurrent request_irq() of this irq so the release of resources
1796 	 * and timing data is properly serialized.
1797 	 */
1798 	chip_bus_sync_unlock(desc);
1799 
1800 	unregister_handler_proc(irq, action);
1801 
1802 	/*
1803 	 * Make sure it's not being used on another CPU and if the chip
1804 	 * supports it also make sure that there is no (not yet serviced)
1805 	 * interrupt in flight at the hardware level.
1806 	 */
1807 	__synchronize_hardirq(desc, true);
1808 
1809 #ifdef CONFIG_DEBUG_SHIRQ
1810 	/*
1811 	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1812 	 * event to happen even while it is being freed, so let's make sure
1813 	 * that is so by doing an extra call to the handler ....
1814 	 *
1815 	 * ( We do this after actually deregistering it, to make sure that a
1816 	 *   'real' IRQ doesn't run in parallel with our fake. )
1817 	 */
1818 	if (action->flags & IRQF_SHARED) {
1819 		local_irq_save(flags);
1820 		action->handler(irq, dev_id);
1821 		local_irq_restore(flags);
1822 	}
1823 #endif
1824 
1825 	/*
1826 	 * The action has already been removed above, but the thread writes
1827 	 * its oneshot mask bit when it completes. However, request_mutex is
1828 	 * held across this, which prevents __setup_irq() from handing out
1829 	 * the same bit to a newly requested action.
1830 	 */
1831 	if (action->thread) {
1832 		kthread_stop(action->thread);
1833 		put_task_struct(action->thread);
1834 		if (action->secondary && action->secondary->thread) {
1835 			kthread_stop(action->secondary->thread);
1836 			put_task_struct(action->secondary->thread);
1837 		}
1838 	}
1839 
1840 	/* Last action releases resources */
1841 	if (!desc->action) {
1842 		/*
1843 		 * Reacquire bus lock as irq_release_resources() might
1844 		 * require it to deallocate resources over the slow bus.
1845 		 */
1846 		chip_bus_lock(desc);
1847 		/*
1848 		 * There is no interrupt on the fly anymore. Deactivate it
1849 		 * completely.
1850 		 */
1851 		raw_spin_lock_irqsave(&desc->lock, flags);
1852 		irq_domain_deactivate_irq(&desc->irq_data);
1853 		raw_spin_unlock_irqrestore(&desc->lock, flags);
1854 
1855 		irq_release_resources(desc);
1856 		chip_bus_sync_unlock(desc);
1857 		irq_remove_timings(desc);
1858 	}
1859 
1860 	mutex_unlock(&desc->request_mutex);
1861 
1862 	irq_chip_pm_put(&desc->irq_data);
1863 	module_put(desc->owner);
1864 	kfree(action->secondary);
1865 	return action;
1866 }
1867 
1868 /**
1869  *	remove_irq - free an interrupt
1870  *	@irq: Interrupt line to free
1871  *	@act: irqaction for the interrupt
1872  *
1873  * Used to remove interrupts statically set up by the early boot process.
1874  */
1875 void remove_irq(unsigned int irq, struct irqaction *act)
1876 {
1877 	struct irq_desc *desc = irq_to_desc(irq);
1878 
1879 	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1880 		__free_irq(desc, act->dev_id);
1881 }
1882 EXPORT_SYMBOL_GPL(remove_irq);
1883 
1884 /**
1885  *	free_irq - free an interrupt allocated with request_irq
1886  *	@irq: Interrupt line to free
1887  *	@dev_id: Device identity to free
1888  *
1889  *	Remove an interrupt handler. The handler is removed and if the
1890  *	interrupt line is no longer in use by any driver it is disabled.
1891  *	On a shared IRQ the caller must ensure the interrupt is disabled
1892  *	on the card it drives before calling this function. The function
1893  *	does not return until any executing interrupts for this IRQ
1894  *	have completed.
1895  *
1896  *	This function must not be called from interrupt context.
1897  *
1898  *	Returns the devname argument passed to request_irq.
1899  */
1900 const void *free_irq(unsigned int irq, void *dev_id)
1901 {
1902 	struct irq_desc *desc = irq_to_desc(irq);
1903 	struct irqaction *action;
1904 	const char *devname;
1905 
1906 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1907 		return NULL;
1908 
1909 #ifdef CONFIG_SMP
1910 	if (WARN_ON(desc->affinity_notify))
1911 		desc->affinity_notify = NULL;
1912 #endif
1913 
1914 	action = __free_irq(desc, dev_id);
1915 
1916 	if (!action)
1917 		return NULL;
1918 
1919 	devname = action->name;
1920 	kfree(action);
1921 	return devname;
1922 }
1923 EXPORT_SYMBOL(free_irq);
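
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the dev_id handed to free_irq() must be the same cookie that was
 * passed to request_irq(), which is how the right action is picked on a
 * shared line. The driver structure, register layout and names are
 * hypothetical; such code lives in a driver, not in this file.
 */
#if 0	/* usage sketch only, never compiled */
struct foo_device {
	void __iomem *regs;
	int irq;
};

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	/* Check and clear the device's interrupt cause (hypothetical layout). */
	if (!readl(foo->regs))
		return IRQ_NONE;	/* not ours - required on shared lines */
	writel(0, foo->regs);
	return IRQ_HANDLED;
}

static int foo_attach(struct foo_device *foo)
{
	/* The cookie (foo) identifies this handler on the shared line. */
	return request_irq(foo->irq, foo_interrupt, IRQF_SHARED, "foo", foo);
}

static void foo_detach(struct foo_device *foo)
{
	/* Must match the dev_id used in request_irq() above. */
	free_irq(foo->irq, foo);
}
#endif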
1924 
1925 /* This function must be called with desc->lock held */
1926 static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
1927 {
1928 	const char *devname = NULL;
1929 
1930 	desc->istate &= ~IRQS_NMI;
1931 
1932 	if (!WARN_ON(desc->action == NULL)) {
1933 		irq_pm_remove_action(desc, desc->action);
1934 		devname = desc->action->name;
1935 		unregister_handler_proc(irq, desc->action);
1936 
1937 		kfree(desc->action);
1938 		desc->action = NULL;
1939 	}
1940 
1941 	irq_settings_clr_disable_unlazy(desc);
1942 	irq_shutdown_and_deactivate(desc);
1943 
1944 	irq_release_resources(desc);
1945 
1946 	irq_chip_pm_put(&desc->irq_data);
1947 	module_put(desc->owner);
1948 
1949 	return devname;
1950 }
1951 
1952 const void *free_nmi(unsigned int irq, void *dev_id)
1953 {
1954 	struct irq_desc *desc = irq_to_desc(irq);
1955 	unsigned long flags;
1956 	const void *devname;
1957 
1958 	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
1959 		return NULL;
1960 
1961 	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1962 		return NULL;
1963 
1964 	/* NMI still enabled */
1965 	if (WARN_ON(desc->depth == 0))
1966 		disable_nmi_nosync(irq);
1967 
1968 	raw_spin_lock_irqsave(&desc->lock, flags);
1969 
1970 	irq_nmi_teardown(desc);
1971 	devname = __cleanup_nmi(irq, desc);
1972 
1973 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1974 
1975 	return devname;
1976 }
1977 
1978 /**
1979  *	request_threaded_irq - allocate an interrupt line
1980  *	@irq: Interrupt line to allocate
1981  *	@handler: Function to be called when the IRQ occurs.
1982  *		  Primary handler for threaded interrupts
1983  *		  If NULL and thread_fn != NULL the default
1984  *		  primary handler is installed
1985  *	@thread_fn: Function called from the irq handler thread
1986  *		    If NULL, no irq thread is created
1987  *	@irqflags: Interrupt type flags
1988  *	@devname: An ascii name for the claiming device
1989  *	@dev_id: A cookie passed back to the handler function
1990  *
1991  *	This call allocates interrupt resources and enables the
1992  *	interrupt line and IRQ handling. From the point this
1993  *	call is made your handler function may be invoked. Since
1994  *	your handler function must clear any interrupt the board
1995  *	raises, you must take care both to initialise your hardware
1996  *	and to set up the interrupt handler in the right order.
1997  *
1998  *	If you want to set up a threaded irq handler for your device
1999  *	then you need to supply @handler and @thread_fn. @handler is
2000  *	still called in hard interrupt context and has to check
2001  *	whether the interrupt originates from the device. If yes it
2002  *	needs to disable the interrupt on the device and return
2003  *	IRQ_WAKE_THREAD which will wake up the handler thread and run
2004  *	@thread_fn. This split handler design is necessary to support
2005  *	shared interrupts.
2006  *
2007  *	Dev_id must be globally unique. Normally the address of the
2008  *	device data structure is used as the cookie. Since the handler
2009  *	receives this value it makes sense to use it.
2010  *
2011  *	If your interrupt is shared you must pass a non-NULL dev_id
2012  *	as this is required when freeing the interrupt.
2013  *
2014  *	Flags:
2015  *
2016  *	IRQF_SHARED		Interrupt is shared
2017  *	IRQF_TRIGGER_*		Specify active edge(s) or level
2018  *
2019  */
2020 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
2021 			 irq_handler_t thread_fn, unsigned long irqflags,
2022 			 const char *devname, void *dev_id)
2023 {
2024 	struct irqaction *action;
2025 	struct irq_desc *desc;
2026 	int retval;
2027 
2028 	if (irq == IRQ_NOTCONNECTED)
2029 		return -ENOTCONN;
2030 
2031 	/*
2032 	 * Sanity-check: shared interrupts must pass in a real dev-ID,
2033 	 * otherwise we'll have trouble later trying to figure out
2034 	 * which interrupt is which (messes up the interrupt freeing
2035 	 * logic etc).
2036 	 *
2037 	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
2038 	 * it cannot be set along with IRQF_NO_SUSPEND.
2039 	 */
2040 	if (((irqflags & IRQF_SHARED) && !dev_id) ||
2041 	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
2042 	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
2043 		return -EINVAL;
2044 
2045 	desc = irq_to_desc(irq);
2046 	if (!desc)
2047 		return -EINVAL;
2048 
2049 	if (!irq_settings_can_request(desc) ||
2050 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2051 		return -EINVAL;
2052 
2053 	if (!handler) {
2054 		if (!thread_fn)
2055 			return -EINVAL;
2056 		handler = irq_default_primary_handler;
2057 	}
2058 
2059 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2060 	if (!action)
2061 		return -ENOMEM;
2062 
2063 	action->handler = handler;
2064 	action->thread_fn = thread_fn;
2065 	action->flags = irqflags;
2066 	action->name = devname;
2067 	action->dev_id = dev_id;
2068 
2069 	retval = irq_chip_pm_get(&desc->irq_data);
2070 	if (retval < 0) {
2071 		kfree(action);
2072 		return retval;
2073 	}
2074 
2075 	retval = __setup_irq(irq, desc, action);
2076 
2077 	if (retval) {
2078 		irq_chip_pm_put(&desc->irq_data);
2079 		kfree(action->secondary);
2080 		kfree(action);
2081 	}
2082 
2083 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
2084 	if (!retval && (irqflags & IRQF_SHARED)) {
2085 		/*
2086 		 * It's a shared IRQ -- the driver ought to be prepared for it
2087 		 * to happen immediately, so let's make sure....
2088 		 * We disable the irq to make sure that a 'real' IRQ doesn't
2089 		 * run in parallel with our fake.
2090 		 */
2091 		unsigned long flags;
2092 
2093 		disable_irq(irq);
2094 		local_irq_save(flags);
2095 
2096 		handler(irq, dev_id);
2097 
2098 		local_irq_restore(flags);
2099 		enable_irq(irq);
2100 	}
2101 #endif
2102 	return retval;
2103 }
2104 EXPORT_SYMBOL(request_threaded_irq);
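
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the split-handler pattern described above - a hard handler that
 * only checks and quiesces the device and returns IRQ_WAKE_THREAD, and a
 * thread handler that may sleep. Device structure, register offsets and
 * names are hypothetical.
 */
#if 0	/* usage sketch only, never compiled */
struct bar_device {
	void __iomem *regs;
	int irq;
	struct mutex lock;
};

/* Hard handler: runs in hard interrupt context, must not sleep. */
static irqreturn_t bar_hardirq(int irq, void *dev_id)
{
	struct bar_device *bar = dev_id;

	if (!readl(bar->regs))			/* hypothetical status register */
		return IRQ_NONE;		/* not our interrupt */
	writel(0, bar->regs + 4);		/* mask the source on the device */
	return IRQ_WAKE_THREAD;			/* defer the real work */
}

/* Thread handler: runs in a kernel thread and may sleep (slow bus access). */
static irqreturn_t bar_thread(int irq, void *dev_id)
{
	struct bar_device *bar = dev_id;

	mutex_lock(&bar->lock);
	/* ... process the event, possibly over I2C/SPI ... */
	mutex_unlock(&bar->lock);
	writel(1, bar->regs + 4);		/* unmask the source again */
	return IRQ_HANDLED;
}

static int bar_attach(struct bar_device *bar)
{
	return request_threaded_irq(bar->irq, bar_hardirq, bar_thread,
				    IRQF_SHARED, "bar", bar);
}
#endif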
2105 
2106 /**
2107  *	request_any_context_irq - allocate an interrupt line
2108  *	@irq: Interrupt line to allocate
2109  *	@handler: Function to be called when the IRQ occurs.
2110  *		  Threaded handler for threaded interrupts.
2111  *	@flags: Interrupt type flags
2112  *	@name: An ascii name for the claiming device
2113  *	@dev_id: A cookie passed back to the handler function
2114  *
2115  *	This call allocates interrupt resources and enables the
2116  *	interrupt line and IRQ handling. It selects either a
2117  *	hardirq or threaded handling method depending on the
2118  *	context.
2119  *
2120  *	On failure, it returns a negative value. On success,
2121  *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
2122  */
2123 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2124 			    unsigned long flags, const char *name, void *dev_id)
2125 {
2126 	struct irq_desc *desc;
2127 	int ret;
2128 
2129 	if (irq == IRQ_NOTCONNECTED)
2130 		return -ENOTCONN;
2131 
2132 	desc = irq_to_desc(irq);
2133 	if (!desc)
2134 		return -EINVAL;
2135 
2136 	if (irq_settings_is_nested_thread(desc)) {
2137 		ret = request_threaded_irq(irq, NULL, handler,
2138 					   flags, name, dev_id);
2139 		return !ret ? IRQC_IS_NESTED : ret;
2140 	}
2141 
2142 	ret = request_irq(irq, handler, flags, name, dev_id);
2143 	return !ret ? IRQC_IS_HARDIRQ : ret;
2144 }
2145 EXPORT_SYMBOL_GPL(request_any_context_irq);
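
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): request_any_context_irq() is convenient for drivers whose parent
 * interrupt controller may be a threaded (nested) irqchip, e.g. a GPIO
 * expander behind I2C. The positive return value tells the caller which
 * context was chosen. Names are hypothetical.
 */
#if 0	/* usage sketch only, never compiled */
static irqreturn_t baz_handler(int irq, void *dev_id)
{
	/* Runs in hardirq or in thread context, depending on the parent chip. */
	return IRQ_HANDLED;
}

static int baz_attach(unsigned int irq, void *cookie)
{
	int ret;

	ret = request_any_context_irq(irq, baz_handler, 0, "baz", cookie);
	if (ret < 0)
		return ret;

	/* On success ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED. */
	pr_info("baz: irq %u handled %s\n", irq,
		ret == IRQC_IS_NESTED ? "in a nested thread" : "in hardirq context");
	return 0;
}
#endif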
2146 
2147 /**
2148  *	request_nmi - allocate an interrupt line for NMI delivery
2149  *	@irq: Interrupt line to allocate
2150  *	@handler: Function to be called when the IRQ occurs.
2151  *		  Threaded handler for threaded interrupts.
2152  *	@irqflags: Interrupt type flags
2153  *	@name: An ascii name for the claiming device
2154  *	@dev_id: A cookie passed back to the handler function
2155  *
2156  *	This call allocates interrupt resources and enables the
2157  *	interrupt line and IRQ handling. It sets up the IRQ line
2158  *	to be handled as an NMI.
2159  *
2160  *	An interrupt line delivering NMIs cannot be shared and IRQ handling
2161  *	cannot be threaded.
2162  *
2163  *	Interrupt lines requested for NMI delivery must produce per-CPU
2164  *	interrupts and must have automatic enabling disabled.
2165  *
2166  *	Dev_id must be globally unique. Normally the address of the
2167  *	device data structure is used as the cookie. Since the handler
2168  *	receives this value it makes sense to use it.
2169  *
2170  *	If the interrupt line cannot be used to deliver NMIs, the function
2171  *	will fail and return a negative value.
2172  */
2173 int request_nmi(unsigned int irq, irq_handler_t handler,
2174 		unsigned long irqflags, const char *name, void *dev_id)
2175 {
2176 	struct irqaction *action;
2177 	struct irq_desc *desc;
2178 	unsigned long flags;
2179 	int retval;
2180 
2181 	if (irq == IRQ_NOTCONNECTED)
2182 		return -ENOTCONN;
2183 
2184 	/* NMIs cannot be shared or used for polling */
2185 	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2186 		return -EINVAL;
2187 
2188 	if (!(irqflags & IRQF_PERCPU))
2189 		return -EINVAL;
2190 
2191 	if (!handler)
2192 		return -EINVAL;
2193 
2194 	desc = irq_to_desc(irq);
2195 
2196 	if (!desc || irq_settings_can_autoenable(desc) ||
2197 	    !irq_settings_can_request(desc) ||
2198 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2199 	    !irq_supports_nmi(desc))
2200 		return -EINVAL;
2201 
2202 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2203 	if (!action)
2204 		return -ENOMEM;
2205 
2206 	action->handler = handler;
2207 	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2208 	action->name = name;
2209 	action->dev_id = dev_id;
2210 
2211 	retval = irq_chip_pm_get(&desc->irq_data);
2212 	if (retval < 0)
2213 		goto err_out;
2214 
2215 	retval = __setup_irq(irq, desc, action);
2216 	if (retval)
2217 		goto err_irq_setup;
2218 
2219 	raw_spin_lock_irqsave(&desc->lock, flags);
2220 
2221 	/* Setup NMI state */
2222 	desc->istate |= IRQS_NMI;
2223 	retval = irq_nmi_setup(desc);
2224 	if (retval) {
2225 		__cleanup_nmi(irq, desc);
2226 		raw_spin_unlock_irqrestore(&desc->lock, flags);
2227 		return -EINVAL;
2228 	}
2229 
2230 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2231 
2232 	return 0;
2233 
2234 err_irq_setup:
2235 	irq_chip_pm_put(&desc->irq_data);
2236 err_out:
2237 	kfree(action);
2238 
2239 	return retval;
2240 }
2241 
2242 void enable_percpu_irq(unsigned int irq, unsigned int type)
2243 {
2244 	unsigned int cpu = smp_processor_id();
2245 	unsigned long flags;
2246 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2247 
2248 	if (!desc)
2249 		return;
2250 
2251 	/*
2252 	 * If the trigger type is not specified by the caller, then
2253 	 * use the default for this interrupt.
2254 	 */
2255 	type &= IRQ_TYPE_SENSE_MASK;
2256 	if (type == IRQ_TYPE_NONE)
2257 		type = irqd_get_trigger_type(&desc->irq_data);
2258 
2259 	if (type != IRQ_TYPE_NONE) {
2260 		int ret;
2261 
2262 		ret = __irq_set_trigger(desc, type);
2263 
2264 		if (ret) {
2265 			WARN(1, "failed to set type for IRQ%d\n", irq);
2266 			goto out;
2267 		}
2268 	}
2269 
2270 	irq_percpu_enable(desc, cpu);
2271 out:
2272 	irq_put_desc_unlock(desc, flags);
2273 }
2274 EXPORT_SYMBOL_GPL(enable_percpu_irq);
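
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): enable_percpu_irq() and disable_percpu_irq() act only on the
 * calling CPU, so a driver has to run them on every CPU of interest, for
 * instance via on_each_cpu() or from CPU hotplug callbacks. The names and
 * the way the IRQ number is passed around are hypothetical.
 */
#if 0	/* usage sketch only, never compiled */
static void qux_enable_on_cpu(void *info)
{
	unsigned int irq = *(unsigned int *)info;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);	/* keep the default trigger */
}

static void qux_disable_on_cpu(void *info)
{
	disable_percpu_irq(*(unsigned int *)info);
}

static void qux_enable_everywhere(unsigned int irq)
{
	on_each_cpu(qux_enable_on_cpu, &irq, 1);
}

static void qux_disable_everywhere(unsigned int irq)
{
	on_each_cpu(qux_disable_on_cpu, &irq, 1);
}
#endif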
2275 
2276 void enable_percpu_nmi(unsigned int irq, unsigned int type)
2277 {
2278 	enable_percpu_irq(irq, type);
2279 }
2280 
2281 /**
2282  * irq_percpu_is_enabled - Check whether the per-CPU IRQ is enabled
2283  * @irq:	Linux irq number to check for
2284  *
2285  * Must be called from a non-migratable context. Returns the enable
2286  * state of a per-CPU interrupt on the current CPU.
2287  */
2288 bool irq_percpu_is_enabled(unsigned int irq)
2289 {
2290 	unsigned int cpu = smp_processor_id();
2291 	struct irq_desc *desc;
2292 	unsigned long flags;
2293 	bool is_enabled;
2294 
2295 	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2296 	if (!desc)
2297 		return false;
2298 
2299 	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2300 	irq_put_desc_unlock(desc, flags);
2301 
2302 	return is_enabled;
2303 }
2304 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
2305 
2306 void disable_percpu_irq(unsigned int irq)
2307 {
2308 	unsigned int cpu = smp_processor_id();
2309 	unsigned long flags;
2310 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2311 
2312 	if (!desc)
2313 		return;
2314 
2315 	irq_percpu_disable(desc, cpu);
2316 	irq_put_desc_unlock(desc, flags);
2317 }
2318 EXPORT_SYMBOL_GPL(disable_percpu_irq);
2319 
2320 void disable_percpu_nmi(unsigned int irq)
2321 {
2322 	disable_percpu_irq(irq);
2323 }
2324 
2325 /*
2326  * Internal function to unregister a percpu irqaction.
2327  */
2328 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2329 {
2330 	struct irq_desc *desc = irq_to_desc(irq);
2331 	struct irqaction *action;
2332 	unsigned long flags;
2333 
2334 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2335 
2336 	if (!desc)
2337 		return NULL;
2338 
2339 	raw_spin_lock_irqsave(&desc->lock, flags);
2340 
2341 	action = desc->action;
2342 	if (!action || action->percpu_dev_id != dev_id) {
2343 		WARN(1, "Trying to free already-free IRQ %d\n", irq);
2344 		goto bad;
2345 	}
2346 
2347 	if (!cpumask_empty(desc->percpu_enabled)) {
2348 		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2349 		     irq, cpumask_first(desc->percpu_enabled));
2350 		goto bad;
2351 	}
2352 
2353 	/* Found it - now remove it from the list of entries: */
2354 	desc->action = NULL;
2355 
2356 	desc->istate &= ~IRQS_NMI;
2357 
2358 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2359 
2360 	unregister_handler_proc(irq, action);
2361 
2362 	irq_chip_pm_put(&desc->irq_data);
2363 	module_put(desc->owner);
2364 	return action;
2365 
2366 bad:
2367 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2368 	return NULL;
2369 }
2370 
2371 /**
2372  *	remove_percpu_irq - free a per-cpu interrupt
2373  *	@irq: Interrupt line to free
2374  *	@act: irqaction for the interrupt
2375  *
2376  * Used to remove interrupts statically set up by the early boot process.
2377  */
2378 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2379 {
2380 	struct irq_desc *desc = irq_to_desc(irq);
2381 
2382 	if (desc && irq_settings_is_per_cpu_devid(desc))
2383 		__free_percpu_irq(irq, act->percpu_dev_id);
2384 }
2385 
2386 /**
2387  *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
2388  *	@irq: Interrupt line to free
2389  *	@dev_id: Device identity to free
2390  *
2391  *	Remove a percpu interrupt handler. The handler is removed, but
2392  *	the interrupt line is not disabled. This must be done on each
2393  *	CPU before calling this function. The function does not return
2394  *	until any executing interrupts for this IRQ have completed.
2395  *
2396  *	This function must not be called from interrupt context.
2397  */
2398 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2399 {
2400 	struct irq_desc *desc = irq_to_desc(irq);
2401 
2402 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
2403 		return;
2404 
2405 	chip_bus_lock(desc);
2406 	kfree(__free_percpu_irq(irq, dev_id));
2407 	chip_bus_sync_unlock(desc);
2408 }
2409 EXPORT_SYMBOL_GPL(free_percpu_irq);
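
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): as the comment above requires, the line is disabled on every CPU
 * before free_percpu_irq() is called. The names are hypothetical and the
 * per-CPU cookie is assumed to match the one used at request time.
 */
#if 0	/* usage sketch only, never compiled */
static void quux_disable_on_cpu(void *info)
{
	disable_percpu_irq(*(unsigned int *)info);
}

static void quux_teardown(unsigned int irq, void __percpu *pcpu_cookie)
{
	/* Disable on each CPU first, then release the handler. */
	on_each_cpu(quux_disable_on_cpu, &irq, 1);
	free_percpu_irq(irq, pcpu_cookie);
}
#endif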
2410 
2411 void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2412 {
2413 	struct irq_desc *desc = irq_to_desc(irq);
2414 
2415 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
2416 		return;
2417 
2418 	if (WARN_ON(!(desc->istate & IRQS_NMI)))
2419 		return;
2420 
2421 	kfree(__free_percpu_irq(irq, dev_id));
2422 }
2423 
2424 /**
2425  *	setup_percpu_irq - setup a per-cpu interrupt
2426  *	@irq: Interrupt line to setup
2427  *	@act: irqaction for the interrupt
2428  *
2429  * Used to statically set up per-cpu interrupts in the early boot process.
2430  */
2431 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2432 {
2433 	struct irq_desc *desc = irq_to_desc(irq);
2434 	int retval;
2435 
2436 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
2437 		return -EINVAL;
2438 
2439 	retval = irq_chip_pm_get(&desc->irq_data);
2440 	if (retval < 0)
2441 		return retval;
2442 
2443 	retval = __setup_irq(irq, desc, act);
2444 
2445 	if (retval)
2446 		irq_chip_pm_put(&desc->irq_data);
2447 
2448 	return retval;
2449 }
2450 
2451 /**
2452  *	__request_percpu_irq - allocate a percpu interrupt line
2453  *	@irq: Interrupt line to allocate
2454  *	@handler: Function to be called when the IRQ occurs.
2455  *	@flags: Interrupt type flags (IRQF_TIMER only)
2456  *	@devname: An ascii name for the claiming device
2457  *	@dev_id: A percpu cookie passed back to the handler function
2458  *
2459  *	This call allocates interrupt resources and enables the
2460  *	interrupt on the local CPU. If the interrupt is supposed to be
2461  *	enabled on other CPUs, it has to be done on each CPU using
2462  *	enable_percpu_irq().
2463  *
2464  *	Dev_id must be globally unique. It is a per-cpu variable, and
2465  *	the handler gets called with the interrupted CPU's instance of
2466  *	that variable.
2467  */
2468 int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2469 			 unsigned long flags, const char *devname,
2470 			 void __percpu *dev_id)
2471 {
2472 	struct irqaction *action;
2473 	struct irq_desc *desc;
2474 	int retval;
2475 
2476 	if (!dev_id)
2477 		return -EINVAL;
2478 
2479 	desc = irq_to_desc(irq);
2480 	if (!desc || !irq_settings_can_request(desc) ||
2481 	    !irq_settings_is_per_cpu_devid(desc))
2482 		return -EINVAL;
2483 
2484 	if (flags && flags != IRQF_TIMER)
2485 		return -EINVAL;
2486 
2487 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2488 	if (!action)
2489 		return -ENOMEM;
2490 
2491 	action->handler = handler;
2492 	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2493 	action->name = devname;
2494 	action->percpu_dev_id = dev_id;
2495 
2496 	retval = irq_chip_pm_get(&desc->irq_data);
2497 	if (retval < 0) {
2498 		kfree(action);
2499 		return retval;
2500 	}
2501 
2502 	retval = __setup_irq(irq, desc, action);
2503 
2504 	if (retval) {
2505 		irq_chip_pm_put(&desc->irq_data);
2506 		kfree(action);
2507 	}
2508 
2509 	return retval;
2510 }
2511 EXPORT_SYMBOL_GPL(__request_percpu_irq);
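
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a typical caller uses the request_percpu_irq() wrapper with a
 * per-CPU variable as the cookie; the handler then receives the instance
 * belonging to the CPU that took the interrupt. Names and the counter
 * layout are hypothetical.
 */
#if 0	/* usage sketch only, never compiled */
struct corge_pcpu_state {
	u64 count;
};

static DEFINE_PER_CPU(struct corge_pcpu_state, corge_state);

static irqreturn_t corge_percpu_handler(int irq, void *dev_id)
{
	struct corge_pcpu_state *st = dev_id;	/* this CPU's instance */

	st->count++;
	return IRQ_HANDLED;
}

static int corge_init(unsigned int irq)
{
	int ret;

	ret = request_percpu_irq(irq, corge_percpu_handler, "corge",
				 &corge_state);
	if (ret)
		return ret;

	/* Delivery still has to be switched on per CPU, see enable_percpu_irq(). */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}
#endif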
2512 
2513 /**
2514  *	request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
2515  *	@irq: Interrupt line to allocate
2516  *	@handler: Function to be called when the IRQ occurs.
2517  *	@name: An ascii name for the claiming device
2518  *	@dev_id: A percpu cookie passed back to the handler function
2519  *
2520  *	This call allocates interrupt resources for a per-CPU NMI. Per-CPU NMIs
2521  *	have to be set up on each CPU by calling prepare_percpu_nmi() before
2522  *	being enabled on the same CPU by using enable_percpu_nmi().
2523  *
2524  *	Dev_id must be globally unique. It is a per-cpu variable, and
2525  *	the handler gets called with the interrupted CPU's instance of
2526  *	that variable.
2527  *
2528  *	Interrupt lines requested for NMI delivery should have automatic
2529  *	enabling disabled.
2530  *
2531  *	If the interrupt line cannot be used to deliver NMIs, the function
2532  *	will fail and return a negative value.
2533  */
2534 int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2535 		       const char *name, void __percpu *dev_id)
2536 {
2537 	struct irqaction *action;
2538 	struct irq_desc *desc;
2539 	unsigned long flags;
2540 	int retval;
2541 
2542 	if (!handler)
2543 		return -EINVAL;
2544 
2545 	desc = irq_to_desc(irq);
2546 
2547 	if (!desc || !irq_settings_can_request(desc) ||
2548 	    !irq_settings_is_per_cpu_devid(desc) ||
2549 	    irq_settings_can_autoenable(desc) ||
2550 	    !irq_supports_nmi(desc))
2551 		return -EINVAL;
2552 
2553 	/* The line cannot already be NMI */
2554 	if (desc->istate & IRQS_NMI)
2555 		return -EINVAL;
2556 
2557 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2558 	if (!action)
2559 		return -ENOMEM;
2560 
2561 	action->handler = handler;
2562 	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2563 		| IRQF_NOBALANCING;
2564 	action->name = name;
2565 	action->percpu_dev_id = dev_id;
2566 
2567 	retval = irq_chip_pm_get(&desc->irq_data);
2568 	if (retval < 0)
2569 		goto err_out;
2570 
2571 	retval = __setup_irq(irq, desc, action);
2572 	if (retval)
2573 		goto err_irq_setup;
2574 
2575 	raw_spin_lock_irqsave(&desc->lock, flags);
2576 	desc->istate |= IRQS_NMI;
2577 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2578 
2579 	return 0;
2580 
2581 err_irq_setup:
2582 	irq_chip_pm_put(&desc->irq_data);
2583 err_out:
2584 	kfree(action);
2585 
2586 	return retval;
2587 }
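
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): rough lifecycle of a per-CPU NMI - request once, then prepare and
 * enable on each CPU locally. Real users (e.g. PMU drivers) add error
 * handling and CPU hotplug integration; all names here are hypothetical.
 */
#if 0	/* usage sketch only, never compiled */
static DEFINE_PER_CPU(unsigned long, grault_nmi_count);

static irqreturn_t grault_nmi_handler(int irq, void *dev_id)
{
	unsigned long *count = dev_id;	/* this CPU's counter */

	(*count)++;
	return IRQ_HANDLED;
}

static void grault_setup_this_cpu(void *info)
{
	unsigned int irq = *(unsigned int *)info;

	/* Both calls are CPU-local and run here in non-preemptible context. */
	if (!prepare_percpu_nmi(irq))
		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
}

static int grault_init(unsigned int irq)
{
	int ret;

	ret = request_percpu_nmi(irq, grault_nmi_handler, "grault",
				 &grault_nmi_count);
	if (ret)
		return ret;

	on_each_cpu(grault_setup_this_cpu, &irq, 1);
	return 0;
}
#endif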
2588 
2589 /**
2590  *	prepare_percpu_nmi - performs CPU local setup for NMI delivery
2591  *	@irq: Interrupt line to prepare for NMI delivery
2592  *
2593  *	This call prepares an interrupt line to deliver NMI on the current CPU,
2594  *	before that interrupt line gets enabled with enable_percpu_nmi().
2595  *
2596  *	As a CPU local operation, this should be called from non-preemptible
2597  *	context.
2598  *
2599  *	If the interrupt line cannot be used to deliver NMIs, the function
2600  *	will fail and return a negative value.
2601  */
2602 int prepare_percpu_nmi(unsigned int irq)
2603 {
2604 	unsigned long flags;
2605 	struct irq_desc *desc;
2606 	int ret = 0;
2607 
2608 	WARN_ON(preemptible());
2609 
2610 	desc = irq_get_desc_lock(irq, &flags,
2611 				 IRQ_GET_DESC_CHECK_PERCPU);
2612 	if (!desc)
2613 		return -EINVAL;
2614 
2615 	if (WARN(!(desc->istate & IRQS_NMI),
2616 		 "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
2617 		 irq)) {
2618 		ret = -EINVAL;
2619 		goto out;
2620 	}
2621 
2622 	ret = irq_nmi_setup(desc);
2623 	if (ret) {
2624 		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
2625 		goto out;
2626 	}
2627 
2628 out:
2629 	irq_put_desc_unlock(desc, flags);
2630 	return ret;
2631 }
2632 
2633 /**
2634  *	teardown_percpu_nmi - undoes NMI setup of IRQ line
2635  *	@irq: Interrupt line from which CPU local NMI configuration should be
2636  *	      removed
2637  *
2638  *	This call undoes the setup done by prepare_percpu_nmi().
2639  *
2640  *	IRQ line should not be enabled for the current CPU.
2641  *
2642  *	As a CPU local operation, this should be called from non-preemptible
2643  *	context.
2644  */
2645 void teardown_percpu_nmi(unsigned int irq)
2646 {
2647 	unsigned long flags;
2648 	struct irq_desc *desc;
2649 
2650 	WARN_ON(preemptible());
2651 
2652 	desc = irq_get_desc_lock(irq, &flags,
2653 				 IRQ_GET_DESC_CHECK_PERCPU);
2654 	if (!desc)
2655 		return;
2656 
2657 	if (WARN_ON(!(desc->istate & IRQS_NMI)))
2658 		goto out;
2659 
2660 	irq_nmi_teardown(desc);
2661 out:
2662 	irq_put_desc_unlock(desc, flags);
2663 }
2664 
2665 int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
2666 			    bool *state)
2667 {
2668 	struct irq_chip *chip;
2669 	int err = -EINVAL;
2670 
2671 	do {
2672 		chip = irq_data_get_irq_chip(data);
2673 		if (chip->irq_get_irqchip_state)
2674 			break;
2675 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2676 		data = data->parent_data;
2677 #else
2678 		data = NULL;
2679 #endif
2680 	} while (data);
2681 
2682 	if (data)
2683 		err = chip->irq_get_irqchip_state(data, which, state);
2684 	return err;
2685 }
2686 
2687 /**
2688  *	irq_get_irqchip_state - returns the irqchip state of an interrupt.
2689  *	@irq: Interrupt line that is forwarded to a VM
2690  *	@which: One of IRQCHIP_STATE_* the caller wants to know about
2691  *	@state: a pointer to a boolean where the state is to be stored
2692  *
2693  *	This call snapshots the internal irqchip state of an
2694  *	interrupt, returning into @state the bit corresponding to
2695  *	state @which.
2696  *
2697  *	This function should be called with preemption disabled if the
2698  *	interrupt controller has per-cpu registers.
2699  */
2700 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2701 			  bool *state)
2702 {
2703 	struct irq_desc *desc;
2704 	struct irq_data *data;
2705 	unsigned long flags;
2706 	int err = -EINVAL;
2707 
2708 	desc = irq_get_desc_buslock(irq, &flags, 0);
2709 	if (!desc)
2710 		return err;
2711 
2712 	data = irq_desc_get_irq_data(desc);
2713 
2714 	err = __irq_get_irqchip_state(data, which, state);
2715 
2716 	irq_put_desc_busunlock(desc, flags);
2717 	return err;
2718 }
2719 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
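
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): polling whether a line is still pending at the irqchip, e.g.
 * before resampling a level-triggered interrupt forwarded to a guest.
 * The helper name is hypothetical.
 */
#if 0	/* usage sketch only, never compiled */
static bool garply_line_pending(unsigned int irq)
{
	bool pending = false;

	if (irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
		return false;	/* chip does not expose this state */

	return pending;
}
#endif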
2720 
2721 /**
2722  *	irq_set_irqchip_state - set the state of a forwarded interrupt.
2723  *	@irq: Interrupt line that is forwarded to a VM
2724  *	@which: State to be restored (one of IRQCHIP_STATE_*)
2725  *	@val: Value corresponding to @which
2726  *
2727  *	This call sets the internal irqchip state of an interrupt,
2728  *	depending on the value of @which.
2729  *
2730  *	This function should be called with preemption disabled if the
2731  *	interrupt controller has per-cpu registers.
2732  */
2733 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2734 			  bool val)
2735 {
2736 	struct irq_desc *desc;
2737 	struct irq_data *data;
2738 	struct irq_chip *chip;
2739 	unsigned long flags;
2740 	int err = -EINVAL;
2741 
2742 	desc = irq_get_desc_buslock(irq, &flags, 0);
2743 	if (!desc)
2744 		return err;
2745 
2746 	data = irq_desc_get_irq_data(desc);
2747 
2748 	do {
2749 		chip = irq_data_get_irq_chip(data);
2750 		if (chip->irq_set_irqchip_state)
2751 			break;
2752 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2753 		data = data->parent_data;
2754 #else
2755 		data = NULL;
2756 #endif
2757 	} while (data);
2758 
2759 	if (data)
2760 		err = chip->irq_set_irqchip_state(data, which, val);
2761 
2762 	irq_put_desc_busunlock(desc, flags);
2763 	return err;
2764 }
2765 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
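
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): clearing a stale pending bit at the irqchip before (re)starting a
 * device, so an interrupt latched while the line was idle is not delivered.
 * Whether this is needed depends on the chip; the helper name is
 * hypothetical.
 */
#if 0	/* usage sketch only, never compiled */
static void waldo_clear_stale_pending(unsigned int irq)
{
	int err;

	err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
	if (err && err != -EINVAL)
		pr_warn("irq %u: failed to clear pending state (%d)\n", irq, err);
}
#endif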
2766