xref: /openbmc/linux/kernel/irq/manage.c (revision 174cd4b1)
1 /*
2  * linux/kernel/irq/manage.c
3  *
4  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5  * Copyright (C) 2005-2006 Thomas Gleixner
6  *
7  * This file contains driver APIs to the irq subsystem.
8  */
9 
10 #define pr_fmt(fmt) "genirq: " fmt
11 
12 #include <linux/irq.h>
13 #include <linux/kthread.h>
14 #include <linux/module.h>
15 #include <linux/random.h>
16 #include <linux/interrupt.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/sched/rt.h>
20 #include <uapi/linux/sched/types.h>
21 #include <linux/task_work.h>
22 
23 #include "internals.h"
24 
25 #ifdef CONFIG_IRQ_FORCED_THREADING
26 __read_mostly bool force_irqthreads;
27 
28 static int __init setup_forced_irqthreads(char *arg)
29 {
30 	force_irqthreads = true;
31 	return 0;
32 }
33 early_param("threadirqs", setup_forced_irqthreads);
34 #endif
35 
36 static void __synchronize_hardirq(struct irq_desc *desc)
37 {
38 	bool inprogress;
39 
40 	do {
41 		unsigned long flags;
42 
43 		/*
44 		 * Wait until we're out of the critical section.  This might
45 		 * give the wrong answer due to the lack of memory barriers.
46 		 */
47 		while (irqd_irq_inprogress(&desc->irq_data))
48 			cpu_relax();
49 
50 		/* Ok, that indicated we're done: double-check carefully. */
51 		raw_spin_lock_irqsave(&desc->lock, flags);
52 		inprogress = irqd_irq_inprogress(&desc->irq_data);
53 		raw_spin_unlock_irqrestore(&desc->lock, flags);
54 
55 		/* Oops, that failed? */
56 	} while (inprogress);
57 }
58 
59 /**
60  *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
61  *	@irq: interrupt number to wait for
62  *
63  *	This function waits for any pending hard IRQ handlers for this
64  *	interrupt to complete before returning. If you use this
65  *	function while holding a resource the IRQ handler may need you
66  *	will deadlock. It does not take associated threaded handlers
67  *	into account.
68  *
69  *	Do not use this for shutdown scenarios where you must be sure
70  *	that all parts (hardirq and threaded handler) have completed.
71  *
72  *	Returns: false if a threaded handler is active.
73  *
74  *	This function may be called - with care - from IRQ context.
75  */
76 bool synchronize_hardirq(unsigned int irq)
77 {
78 	struct irq_desc *desc = irq_to_desc(irq);
79 
80 	if (desc) {
81 		__synchronize_hardirq(desc);
82 		return !atomic_read(&desc->threads_active);
83 	}
84 
85 	return true;
86 }
87 EXPORT_SYMBOL(synchronize_hardirq);
88 
89 /**
90  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
91  *	@irq: interrupt number to wait for
92  *
93  *	This function waits for any pending IRQ handlers for this interrupt
94  *	to complete before returning. If you use this function while
95  *	holding a resource the IRQ handler may need you will deadlock.
96  *
97  *	This function may be called - with care - from IRQ context.
98  */
99 void synchronize_irq(unsigned int irq)
100 {
101 	struct irq_desc *desc = irq_to_desc(irq);
102 
103 	if (desc) {
104 		__synchronize_hardirq(desc);
105 		/*
106 		 * We made sure that no hardirq handler is
107 		 * running. Now verify that no threaded handlers are
108 		 * active.
109 		 */
110 		wait_event(desc->wait_for_threads,
111 			   !atomic_read(&desc->threads_active));
112 	}
113 }
114 EXPORT_SYMBOL(synchronize_irq);
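
/*
 * Minimal usage sketch for synchronize_irq(): a hypothetical "foo" driver
 * quiescing its interrupt before tearing down handler data.  foo_device,
 * foo_hw_irq_disable() and foo_free_ring() are illustrative assumptions,
 * not APIs provided by this file.
 *
 *	static void foo_stop(struct foo_device *foo)
 *	{
 *		foo_hw_irq_disable(foo);
 *		synchronize_irq(foo->irq);
 *		foo_free_ring(foo);
 *	}
 *
 * foo_hw_irq_disable() stops the device from raising new interrupts,
 * synchronize_irq() waits for any handler still running on another CPU,
 * and only then is it safe to free data the handler dereferences.  This
 * must not be called while holding a lock the handler takes, as the
 * kerneldoc above points out.
 */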
115 
116 #ifdef CONFIG_SMP
117 cpumask_var_t irq_default_affinity;
118 
119 static bool __irq_can_set_affinity(struct irq_desc *desc)
120 {
121 	if (!desc || !irqd_can_balance(&desc->irq_data) ||
122 	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
123 		return false;
124 	return true;
125 }
126 
127 /**
128  *	irq_can_set_affinity - Check if the affinity of a given irq can be set
129  *	@irq:		Interrupt to check
130  *
131  */
132 int irq_can_set_affinity(unsigned int irq)
133 {
134 	return __irq_can_set_affinity(irq_to_desc(irq));
135 }
136 
137 /**
138  * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
139  * @irq:	Interrupt to check
140  *
141  * Like irq_can_set_affinity() above, but additionally checks for the
142  * AFFINITY_MANAGED flag.
143  */
144 bool irq_can_set_affinity_usr(unsigned int irq)
145 {
146 	struct irq_desc *desc = irq_to_desc(irq);
147 
148 	return __irq_can_set_affinity(desc) &&
149 		!irqd_affinity_is_managed(&desc->irq_data);
150 }
151 
152 /**
153  *	irq_set_thread_affinity - Notify irq threads to adjust affinity
154  *	@desc:		irq descriptor whose affinity has changed
155  *
156  *	We just set IRQTF_AFFINITY and delegate the affinity setting
157  *	to the interrupt thread itself. We can not call
158  *	set_cpus_allowed_ptr() here as we hold desc->lock and this
159  *	code can be called from hard interrupt context.
160  */
161 void irq_set_thread_affinity(struct irq_desc *desc)
162 {
163 	struct irqaction *action;
164 
165 	for_each_action_of_desc(desc, action)
166 		if (action->thread)
167 			set_bit(IRQTF_AFFINITY, &action->thread_flags);
168 }
169 
170 #ifdef CONFIG_GENERIC_PENDING_IRQ
171 static inline bool irq_can_move_pcntxt(struct irq_data *data)
172 {
173 	return irqd_can_move_in_process_context(data);
174 }
175 static inline bool irq_move_pending(struct irq_data *data)
176 {
177 	return irqd_is_setaffinity_pending(data);
178 }
179 static inline void
180 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
181 {
182 	cpumask_copy(desc->pending_mask, mask);
183 }
184 static inline void
185 irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
186 {
187 	cpumask_copy(mask, desc->pending_mask);
188 }
189 #else
190 static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
191 static inline bool irq_move_pending(struct irq_data *data) { return false; }
192 static inline void
193 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
194 static inline void
195 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
196 #endif
197 
198 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
199 			bool force)
200 {
201 	struct irq_desc *desc = irq_data_to_desc(data);
202 	struct irq_chip *chip = irq_data_get_irq_chip(data);
203 	int ret;
204 
205 	ret = chip->irq_set_affinity(data, mask, force);
206 	switch (ret) {
207 	case IRQ_SET_MASK_OK:
208 	case IRQ_SET_MASK_OK_DONE:
209 		cpumask_copy(desc->irq_common_data.affinity, mask);
210 	case IRQ_SET_MASK_OK_NOCOPY:
211 		irq_set_thread_affinity(desc);
212 		ret = 0;
213 	}
214 
215 	return ret;
216 }
217 
218 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
219 			    bool force)
220 {
221 	struct irq_chip *chip = irq_data_get_irq_chip(data);
222 	struct irq_desc *desc = irq_data_to_desc(data);
223 	int ret = 0;
224 
225 	if (!chip || !chip->irq_set_affinity)
226 		return -EINVAL;
227 
228 	if (irq_can_move_pcntxt(data)) {
229 		ret = irq_do_set_affinity(data, mask, force);
230 	} else {
231 		irqd_set_move_pending(data);
232 		irq_copy_pending(desc, mask);
233 	}
234 
235 	if (desc->affinity_notify) {
236 		kref_get(&desc->affinity_notify->kref);
237 		schedule_work(&desc->affinity_notify->work);
238 	}
239 	irqd_set(data, IRQD_AFFINITY_SET);
240 
241 	return ret;
242 }
243 
244 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
245 {
246 	struct irq_desc *desc = irq_to_desc(irq);
247 	unsigned long flags;
248 	int ret;
249 
250 	if (!desc)
251 		return -EINVAL;
252 
253 	raw_spin_lock_irqsave(&desc->lock, flags);
254 	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
255 	raw_spin_unlock_irqrestore(&desc->lock, flags);
256 	return ret;
257 }
258 
259 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
260 {
261 	unsigned long flags;
262 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
263 
264 	if (!desc)
265 		return -EINVAL;
266 	desc->affinity_hint = m;
267 	irq_put_desc_unlock(desc, flags);
268 	/* set the initial affinity to prevent every interrupt being on CPU0 */
269 	if (m)
270 		__irq_set_affinity(irq, m, false);
271 	return 0;
272 }
273 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
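
/*
 * Usage sketch for irq_set_affinity_hint(): a hypothetical multi-queue
 * driver hinting one CPU per queue vector so userspace irqbalance can
 * follow it.  foo_device, foo->nr_queues and foo->queue_irq[] are
 * illustrative assumptions.
 *
 *	static void foo_set_hints(struct foo_device *foo)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < foo->nr_queues; i++)
 *			irq_set_affinity_hint(foo->queue_irq[i],
 *					      cpumask_of(i % num_online_cpus()));
 *	}
 *
 * The hint must be cleared with irq_set_affinity_hint(irq, NULL) before
 * the matching free_irq(), otherwise __free_irq() warns about a stale
 * affinity_hint.
 */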
274 
275 static void irq_affinity_notify(struct work_struct *work)
276 {
277 	struct irq_affinity_notify *notify =
278 		container_of(work, struct irq_affinity_notify, work);
279 	struct irq_desc *desc = irq_to_desc(notify->irq);
280 	cpumask_var_t cpumask;
281 	unsigned long flags;
282 
283 	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
284 		goto out;
285 
286 	raw_spin_lock_irqsave(&desc->lock, flags);
287 	if (irq_move_pending(&desc->irq_data))
288 		irq_get_pending(cpumask, desc);
289 	else
290 		cpumask_copy(cpumask, desc->irq_common_data.affinity);
291 	raw_spin_unlock_irqrestore(&desc->lock, flags);
292 
293 	notify->notify(notify, cpumask);
294 
295 	free_cpumask_var(cpumask);
296 out:
297 	kref_put(&notify->kref, notify->release);
298 }
299 
300 /**
301  *	irq_set_affinity_notifier - control notification of IRQ affinity changes
302  *	@irq:		Interrupt for which to enable/disable notification
303  *	@notify:	Context for notification, or %NULL to disable
304  *			notification.  Function pointers must be initialised;
305  *			the other fields will be initialised by this function.
306  *
307  *	Must be called in process context.  Notification may only be enabled
308  *	after the IRQ is allocated and must be disabled before the IRQ is
309  *	freed using free_irq().
310  */
311 int
312 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
313 {
314 	struct irq_desc *desc = irq_to_desc(irq);
315 	struct irq_affinity_notify *old_notify;
316 	unsigned long flags;
317 
318 	/* The release function is promised process context */
319 	might_sleep();
320 
321 	if (!desc)
322 		return -EINVAL;
323 
324 	/* Complete initialisation of *notify */
325 	if (notify) {
326 		notify->irq = irq;
327 		kref_init(&notify->kref);
328 		INIT_WORK(&notify->work, irq_affinity_notify);
329 	}
330 
331 	raw_spin_lock_irqsave(&desc->lock, flags);
332 	old_notify = desc->affinity_notify;
333 	desc->affinity_notify = notify;
334 	raw_spin_unlock_irqrestore(&desc->lock, flags);
335 
336 	if (old_notify)
337 		kref_put(&old_notify->kref, old_notify->release);
338 
339 	return 0;
340 }
341 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
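
/*
 * Usage sketch for irq_set_affinity_notifier(): a hypothetical driver that
 * rebalances per-CPU state when the irq affinity changes.  foo_device
 * (with an embedded struct irq_affinity_notify) and foo_rebalance() are
 * illustrative assumptions.
 *
 *	static void foo_affinity_notify(struct irq_affinity_notify *notify,
 *					const cpumask_t *mask)
 *	{
 *		struct foo_device *foo = container_of(notify, struct foo_device,
 *						      affinity_notify);
 *
 *		foo_rebalance(foo, mask);
 *	}
 *
 *	static void foo_affinity_release(struct kref *ref)
 *	{
 *		// nothing to free, the notifier is embedded in foo_device
 *	}
 *
 *	foo->affinity_notify.notify = foo_affinity_notify;
 *	foo->affinity_notify.release = foo_affinity_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->affinity_notify);
 *
 * The notifier must be removed with irq_set_affinity_notifier(irq, NULL)
 * before free_irq(), as required by the kerneldoc above.
 */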
342 
343 #ifndef CONFIG_AUTO_IRQ_AFFINITY
344 /*
345  * Generic version of the affinity autoselector.
346  */
347 static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
348 {
349 	struct cpumask *set = irq_default_affinity;
350 	int node = irq_desc_get_node(desc);
351 
352 	/* Excludes PER_CPU and NO_BALANCE interrupts */
353 	if (!__irq_can_set_affinity(desc))
354 		return 0;
355 
356 	/*
357 	 * Preserve the managed affinity setting and a userspace affinity
358 	 * setup, but make sure that one of the targets is online.
359 	 */
360 	if (irqd_affinity_is_managed(&desc->irq_data) ||
361 	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
362 		if (cpumask_intersects(desc->irq_common_data.affinity,
363 				       cpu_online_mask))
364 			set = desc->irq_common_data.affinity;
365 		else
366 			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
367 	}
368 
369 	cpumask_and(mask, cpu_online_mask, set);
370 	if (node != NUMA_NO_NODE) {
371 		const struct cpumask *nodemask = cpumask_of_node(node);
372 
373 		/* make sure at least one of the cpus in nodemask is online */
374 		if (cpumask_intersects(mask, nodemask))
375 			cpumask_and(mask, mask, nodemask);
376 	}
377 	irq_do_set_affinity(&desc->irq_data, mask, false);
378 	return 0;
379 }
380 #else
381 /* Wrapper for ALPHA specific affinity selector magic */
382 static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
383 {
384 	return irq_select_affinity(irq_desc_get_irq(d));
385 }
386 #endif
387 
388 /*
389  * Called when affinity is set via /proc/irq
390  */
391 int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
392 {
393 	struct irq_desc *desc = irq_to_desc(irq);
394 	unsigned long flags;
395 	int ret;
396 
397 	raw_spin_lock_irqsave(&desc->lock, flags);
398 	ret = setup_affinity(desc, mask);
399 	raw_spin_unlock_irqrestore(&desc->lock, flags);
400 	return ret;
401 }
402 
403 #else
404 static inline int
405 setup_affinity(struct irq_desc *desc, struct cpumask *mask)
406 {
407 	return 0;
408 }
409 #endif
410 
411 /**
412  *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
413  *	@irq: interrupt number to set affinity
414  *	@vcpu_info: vCPU specific data
415  *
416  *	This function uses the vCPU specific data to set the vCPU
417  *	affinity for an irq. The vCPU specific data is passed from
418  *	outside, such as KVM. One example code path is as below:
419  *	KVM -> IOMMU -> irq_set_vcpu_affinity().
420  */
421 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
422 {
423 	unsigned long flags;
424 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
425 	struct irq_data *data;
426 	struct irq_chip *chip;
427 	int ret = -ENOSYS;
428 
429 	if (!desc)
430 		return -EINVAL;
431 
432 	data = irq_desc_get_irq_data(desc);
433 	chip = irq_data_get_irq_chip(data);
434 	if (chip && chip->irq_set_vcpu_affinity)
435 		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
436 	irq_put_desc_unlock(desc, flags);
437 
438 	return ret;
439 }
440 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
441 
442 void __disable_irq(struct irq_desc *desc)
443 {
444 	if (!desc->depth++)
445 		irq_disable(desc);
446 }
447 
448 static int __disable_irq_nosync(unsigned int irq)
449 {
450 	unsigned long flags;
451 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
452 
453 	if (!desc)
454 		return -EINVAL;
455 	__disable_irq(desc);
456 	irq_put_desc_busunlock(desc, flags);
457 	return 0;
458 }
459 
460 /**
461  *	disable_irq_nosync - disable an irq without waiting
462  *	@irq: Interrupt to disable
463  *
464  *	Disable the selected interrupt line.  Disables and Enables are
465  *	nested.
466  *	Unlike disable_irq(), this function does not ensure existing
467  *	instances of the IRQ handler have completed before returning.
468  *
469  *	This function may be called from IRQ context.
470  */
471 void disable_irq_nosync(unsigned int irq)
472 {
473 	__disable_irq_nosync(irq);
474 }
475 EXPORT_SYMBOL(disable_irq_nosync);
476 
477 /**
478  *	disable_irq - disable an irq and wait for completion
479  *	@irq: Interrupt to disable
480  *
481  *	Disable the selected interrupt line.  Enables and Disables are
482  *	nested.
483  *	This function waits for any pending IRQ handlers for this interrupt
484  *	to complete before returning. If you use this function while
485  *	holding a resource the IRQ handler may need you will deadlock.
486  *
487  *	This function may be called - with care - from IRQ context.
488  */
489 void disable_irq(unsigned int irq)
490 {
491 	if (!__disable_irq_nosync(irq))
492 		synchronize_irq(irq);
493 }
494 EXPORT_SYMBOL(disable_irq);
495 
496 /**
497  *	disable_hardirq - disables an irq and waits for hardirq completion
498  *	@irq: Interrupt to disable
499  *
500  *	Disable the selected interrupt line.  Enables and Disables are
501  *	nested.
502  *	This function waits for any pending hard IRQ handlers for this
503  *	interrupt to complete before returning. If you use this function while
504  *	holding a resource the hard IRQ handler may need you will deadlock.
505  *
506  *	When used to optimistically disable an interrupt from atomic context
507  *	the return value must be checked.
508  *
509  *	Returns: false if a threaded handler is active.
510  *
511  *	This function may be called - with care - from IRQ context.
512  */
513 bool disable_hardirq(unsigned int irq)
514 {
515 	if (!__disable_irq_nosync(irq))
516 		return synchronize_hardirq(irq);
517 
518 	return false;
519 }
520 EXPORT_SYMBOL_GPL(disable_hardirq);
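
/*
 * Usage sketch for disable_hardirq(): optimistically disabling an
 * interrupt from atomic context, netpoll style.  foo->irq and
 * foo_poll_locked() are illustrative assumptions.
 *
 *	if (disable_hardirq(foo->irq)) {
 *		// no hardirq handler is running, safe to poll by hand
 *		foo_poll_locked(foo);
 *	}
 *	enable_irq(foo->irq);
 *
 * The return value must be checked: false means a threaded handler is
 * still active, so the caller cannot assume exclusive access to the
 * device.  enable_irq() is called either way to balance the disable.
 */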
521 
522 void __enable_irq(struct irq_desc *desc)
523 {
524 	switch (desc->depth) {
525 	case 0:
526  err_out:
527 		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
528 		     irq_desc_get_irq(desc));
529 		break;
530 	case 1: {
531 		if (desc->istate & IRQS_SUSPENDED)
532 			goto err_out;
533 		/* Prevent probing on this irq: */
534 		irq_settings_set_noprobe(desc);
535 		irq_enable(desc);
536 		check_irq_resend(desc);
537 		/* fall-through */
538 	}
539 	default:
540 		desc->depth--;
541 	}
542 }
543 
544 /**
545  *	enable_irq - enable handling of an irq
546  *	@irq: Interrupt to enable
547  *
548  *	Undoes the effect of one call to disable_irq().  If this
549  *	matches the last disable, processing of interrupts on this
550  *	IRQ line is re-enabled.
551  *
552  *	This function may be called from IRQ context only when
553  *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
554  */
555 void enable_irq(unsigned int irq)
556 {
557 	unsigned long flags;
558 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
559 
560 	if (!desc)
561 		return;
562 	if (WARN(!desc->irq_data.chip,
563 		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
564 		goto out;
565 
566 	__enable_irq(desc);
567 out:
568 	irq_put_desc_busunlock(desc, flags);
569 }
570 EXPORT_SYMBOL(enable_irq);
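
/*
 * Sketch of the disable_irq()/enable_irq() nesting rules: disables and
 * enables are reference counted via desc->depth, so a line disabled twice
 * must be enabled twice before interrupts are delivered again.  foo->irq
 * is an illustrative assumption.
 *
 *	disable_irq(foo->irq);		// depth 0 -> 1, line masked
 *	disable_irq(foo->irq);		// depth 1 -> 2, still masked
 *	enable_irq(foo->irq);		// depth 2 -> 1, still masked
 *	enable_irq(foo->irq);		// depth 1 -> 0, line unmasked
 *
 * One more enable_irq() here would hit the "Unbalanced enable" warning in
 * __enable_irq() above.
 */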
571 
572 static int set_irq_wake_real(unsigned int irq, unsigned int on)
573 {
574 	struct irq_desc *desc = irq_to_desc(irq);
575 	int ret = -ENXIO;
576 
577 	if (irq_desc_get_chip(desc)->flags &  IRQCHIP_SKIP_SET_WAKE)
578 		return 0;
579 
580 	if (desc->irq_data.chip->irq_set_wake)
581 		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
582 
583 	return ret;
584 }
585 
586 /**
587  *	irq_set_irq_wake - control irq power management wakeup
588  *	@irq:	interrupt to control
589  *	@on:	enable/disable power management wakeup
590  *
591  *	Enable/disable power management wakeup mode, which is
592  *	disabled by default.  Enables and disables must match,
593  *	just as they match for non-wakeup mode support.
594  *
595  *	Wakeup mode lets this IRQ wake the system from sleep
596  *	states like "suspend to RAM".
597  */
598 int irq_set_irq_wake(unsigned int irq, unsigned int on)
599 {
600 	unsigned long flags;
601 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
602 	int ret = 0;
603 
604 	if (!desc)
605 		return -EINVAL;
606 
607 	/* wakeup-capable irqs can be shared between drivers that
608 	 * don't need to have the same sleep mode behaviors.
609 	 */
610 	if (on) {
611 		if (desc->wake_depth++ == 0) {
612 			ret = set_irq_wake_real(irq, on);
613 			if (ret)
614 				desc->wake_depth = 0;
615 			else
616 				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
617 		}
618 	} else {
619 		if (desc->wake_depth == 0) {
620 			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
621 		} else if (--desc->wake_depth == 0) {
622 			ret = set_irq_wake_real(irq, on);
623 			if (ret)
624 				desc->wake_depth = 1;
625 			else
626 				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
627 		}
628 	}
629 	irq_put_desc_busunlock(desc, flags);
630 	return ret;
631 }
632 EXPORT_SYMBOL(irq_set_irq_wake);
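
/*
 * Usage sketch for irq_set_irq_wake(): a hypothetical driver arming its
 * interrupt as a wakeup source across suspend, using the common
 * enable_irq_wake()/disable_irq_wake() wrappers around this function.
 * foo_device and foo->irq are illustrative assumptions.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 * Enables and disables must match, as the wake_depth accounting above
 * enforces.
 */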
633 
634 /*
635  * Internal function that tells the architecture code whether a
636  * particular irq has been exclusively allocated or is available
637  * for driver use.
638  */
639 int can_request_irq(unsigned int irq, unsigned long irqflags)
640 {
641 	unsigned long flags;
642 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
643 	int canrequest = 0;
644 
645 	if (!desc)
646 		return 0;
647 
648 	if (irq_settings_can_request(desc)) {
649 		if (!desc->action ||
650 		    irqflags & desc->action->flags & IRQF_SHARED)
651 			canrequest = 1;
652 	}
653 	irq_put_desc_unlock(desc, flags);
654 	return canrequest;
655 }
656 
657 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
658 {
659 	struct irq_chip *chip = desc->irq_data.chip;
660 	int ret, unmask = 0;
661 
662 	if (!chip || !chip->irq_set_type) {
663 		/*
664 		 * IRQF_TRIGGER_* but the PIC does not support multiple
665 		 * flow-types?
666 		 */
667 		pr_debug("No set_type function for IRQ %d (%s)\n",
668 			 irq_desc_get_irq(desc),
669 			 chip ? (chip->name ? : "unknown") : "unknown");
670 		return 0;
671 	}
672 
673 	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
674 		if (!irqd_irq_masked(&desc->irq_data))
675 			mask_irq(desc);
676 		if (!irqd_irq_disabled(&desc->irq_data))
677 			unmask = 1;
678 	}
679 
680 	/* Mask all flags except trigger mode */
681 	flags &= IRQ_TYPE_SENSE_MASK;
682 	ret = chip->irq_set_type(&desc->irq_data, flags);
683 
684 	switch (ret) {
685 	case IRQ_SET_MASK_OK:
686 	case IRQ_SET_MASK_OK_DONE:
687 		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
688 		irqd_set(&desc->irq_data, flags);
689 
690 	case IRQ_SET_MASK_OK_NOCOPY:
691 		flags = irqd_get_trigger_type(&desc->irq_data);
692 		irq_settings_set_trigger_mask(desc, flags);
693 		irqd_clear(&desc->irq_data, IRQD_LEVEL);
694 		irq_settings_clr_level(desc);
695 		if (flags & IRQ_TYPE_LEVEL_MASK) {
696 			irq_settings_set_level(desc);
697 			irqd_set(&desc->irq_data, IRQD_LEVEL);
698 		}
699 
700 		ret = 0;
701 		break;
702 	default:
703 		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
704 		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
705 	}
706 	if (unmask)
707 		unmask_irq(desc);
708 	return ret;
709 }
710 
711 #ifdef CONFIG_HARDIRQS_SW_RESEND
712 int irq_set_parent(int irq, int parent_irq)
713 {
714 	unsigned long flags;
715 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
716 
717 	if (!desc)
718 		return -EINVAL;
719 
720 	desc->parent_irq = parent_irq;
721 
722 	irq_put_desc_unlock(desc, flags);
723 	return 0;
724 }
725 EXPORT_SYMBOL_GPL(irq_set_parent);
726 #endif
727 
728 /*
729  * Default primary interrupt handler for threaded interrupts. Is
730  * assigned as primary handler when request_threaded_irq is called
731  * with handler == NULL. Useful for oneshot interrupts.
732  */
733 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
734 {
735 	return IRQ_WAKE_THREAD;
736 }
737 
738 /*
739  * Primary handler for nested threaded interrupts. Should never be
740  * called.
741  */
742 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
743 {
744 	WARN(1, "Primary handler called for nested irq %d\n", irq);
745 	return IRQ_NONE;
746 }
747 
748 static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
749 {
750 	WARN(1, "Secondary action handler called for irq %d\n", irq);
751 	return IRQ_NONE;
752 }
753 
754 static int irq_wait_for_interrupt(struct irqaction *action)
755 {
756 	set_current_state(TASK_INTERRUPTIBLE);
757 
758 	while (!kthread_should_stop()) {
759 
760 		if (test_and_clear_bit(IRQTF_RUNTHREAD,
761 				       &action->thread_flags)) {
762 			__set_current_state(TASK_RUNNING);
763 			return 0;
764 		}
765 		schedule();
766 		set_current_state(TASK_INTERRUPTIBLE);
767 	}
768 	__set_current_state(TASK_RUNNING);
769 	return -1;
770 }
771 
772 /*
773  * Oneshot interrupts keep the irq line masked until the threaded
774  * handler has finished. Unmask if the interrupt has not been disabled and
775  * is marked MASKED.
776  */
777 static void irq_finalize_oneshot(struct irq_desc *desc,
778 				 struct irqaction *action)
779 {
780 	if (!(desc->istate & IRQS_ONESHOT) ||
781 	    action->handler == irq_forced_secondary_handler)
782 		return;
783 again:
784 	chip_bus_lock(desc);
785 	raw_spin_lock_irq(&desc->lock);
786 
787 	/*
788 	 * Implausible though it may be, we need to protect ourselves against
789 	 * the following scenario:
790 	 *
791 	 * The thread can finish before the hard interrupt handler
792 	 * on the other CPU does. If we unmask the irq line then the
793 	 * interrupt can come in again, mask the line and bail out due
794 	 * to IRQS_INPROGRESS, leaving the irq line masked forever.
795 	 *
796 	 * This also serializes the state of shared oneshot handlers
797 	 * versus "desc->threads_oneshot |= action->thread_mask;" in
798 	 * irq_wake_thread(). See the comment there which explains the
799 	 * serialization.
800 	 */
801 	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
802 		raw_spin_unlock_irq(&desc->lock);
803 		chip_bus_sync_unlock(desc);
804 		cpu_relax();
805 		goto again;
806 	}
807 
808 	/*
809 	 * Now check again, whether the thread should run. Otherwise
810 	 * we would clear the threads_oneshot bit of this thread which
811 	 * was just set.
812 	 */
813 	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
814 		goto out_unlock;
815 
816 	desc->threads_oneshot &= ~action->thread_mask;
817 
818 	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
819 	    irqd_irq_masked(&desc->irq_data))
820 		unmask_threaded_irq(desc);
821 
822 out_unlock:
823 	raw_spin_unlock_irq(&desc->lock);
824 	chip_bus_sync_unlock(desc);
825 }
826 
827 #ifdef CONFIG_SMP
828 /*
829  * Check whether we need to change the affinity of the interrupt thread.
830  */
831 static void
832 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
833 {
834 	cpumask_var_t mask;
835 	bool valid = true;
836 
837 	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
838 		return;
839 
840 	/*
841 	 * In case we are out of memory we set IRQTF_AFFINITY again and
842 	 * try again next time
843 	 */
844 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
845 		set_bit(IRQTF_AFFINITY, &action->thread_flags);
846 		return;
847 	}
848 
849 	raw_spin_lock_irq(&desc->lock);
850 	/*
851 	 * This code is triggered unconditionally. Check the affinity
852 	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
853 	 */
854 	if (desc->irq_common_data.affinity)
855 		cpumask_copy(mask, desc->irq_common_data.affinity);
856 	else
857 		valid = false;
858 	raw_spin_unlock_irq(&desc->lock);
859 
860 	if (valid)
861 		set_cpus_allowed_ptr(current, mask);
862 	free_cpumask_var(mask);
863 }
864 #else
865 static inline void
866 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
867 #endif
868 
869 /*
870  * Interrupts which are not explicitly requested as threaded
871  * interrupts rely on the implicit bh/preempt disable of the hard irq
872  * context. So we need to disable bh here to avoid deadlocks and other
873  * side effects.
874  */
875 static irqreturn_t
876 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
877 {
878 	irqreturn_t ret;
879 
880 	local_bh_disable();
881 	ret = action->thread_fn(action->irq, action->dev_id);
882 	irq_finalize_oneshot(desc, action);
883 	local_bh_enable();
884 	return ret;
885 }
886 
887 /*
888  * Interrupts explicitly requested as threaded interrupts want to be
889  * preemptible - many of them need to sleep and wait for slow buses to
890  * complete.
891  */
892 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
893 		struct irqaction *action)
894 {
895 	irqreturn_t ret;
896 
897 	ret = action->thread_fn(action->irq, action->dev_id);
898 	irq_finalize_oneshot(desc, action);
899 	return ret;
900 }
901 
902 static void wake_threads_waitq(struct irq_desc *desc)
903 {
904 	if (atomic_dec_and_test(&desc->threads_active))
905 		wake_up(&desc->wait_for_threads);
906 }
907 
908 static void irq_thread_dtor(struct callback_head *unused)
909 {
910 	struct task_struct *tsk = current;
911 	struct irq_desc *desc;
912 	struct irqaction *action;
913 
914 	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
915 		return;
916 
917 	action = kthread_data(tsk);
918 
919 	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
920 	       tsk->comm, tsk->pid, action->irq);
921 
922 
923 	desc = irq_to_desc(action->irq);
924 	/*
925 	 * If IRQTF_RUNTHREAD is set, we need to decrement
926 	 * desc->threads_active and wake possible waiters.
927 	 */
928 	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
929 		wake_threads_waitq(desc);
930 
931 	/* Prevent a stale desc->threads_oneshot */
932 	irq_finalize_oneshot(desc, action);
933 }
934 
935 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
936 {
937 	struct irqaction *secondary = action->secondary;
938 
939 	if (WARN_ON_ONCE(!secondary))
940 		return;
941 
942 	raw_spin_lock_irq(&desc->lock);
943 	__irq_wake_thread(desc, secondary);
944 	raw_spin_unlock_irq(&desc->lock);
945 }
946 
947 /*
948  * Interrupt handler thread
949  */
950 static int irq_thread(void *data)
951 {
952 	struct callback_head on_exit_work;
953 	struct irqaction *action = data;
954 	struct irq_desc *desc = irq_to_desc(action->irq);
955 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
956 			struct irqaction *action);
957 
958 	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
959 					&action->thread_flags))
960 		handler_fn = irq_forced_thread_fn;
961 	else
962 		handler_fn = irq_thread_fn;
963 
964 	init_task_work(&on_exit_work, irq_thread_dtor);
965 	task_work_add(current, &on_exit_work, false);
966 
967 	irq_thread_check_affinity(desc, action);
968 
969 	while (!irq_wait_for_interrupt(action)) {
970 		irqreturn_t action_ret;
971 
972 		irq_thread_check_affinity(desc, action);
973 
974 		action_ret = handler_fn(desc, action);
975 		if (action_ret == IRQ_HANDLED)
976 			atomic_inc(&desc->threads_handled);
977 		if (action_ret == IRQ_WAKE_THREAD)
978 			irq_wake_secondary(desc, action);
979 
980 		wake_threads_waitq(desc);
981 	}
982 
983 	/*
984 	 * This is the regular exit path. __free_irq() is stopping the
985 	 * thread via kthread_stop() after calling
986 	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
987 	 * oneshot mask bit can be set. We cannot verify that as we
988 	 * cannot touch the oneshot mask at this point anymore as
989 	 * __setup_irq() might have given out current's thread_mask
990 	 * again.
991 	 */
992 	task_work_cancel(current, irq_thread_dtor);
993 	return 0;
994 }
995 
996 /**
997  *	irq_wake_thread - wake the irq thread for the action identified by dev_id
998  *	@irq:		Interrupt line
999  *	@dev_id:	Device identity for which the thread should be woken
1000  *
1001  */
1002 void irq_wake_thread(unsigned int irq, void *dev_id)
1003 {
1004 	struct irq_desc *desc = irq_to_desc(irq);
1005 	struct irqaction *action;
1006 	unsigned long flags;
1007 
1008 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1009 		return;
1010 
1011 	raw_spin_lock_irqsave(&desc->lock, flags);
1012 	for_each_action_of_desc(desc, action) {
1013 		if (action->dev_id == dev_id) {
1014 			if (action->thread)
1015 				__irq_wake_thread(desc, action);
1016 			break;
1017 		}
1018 	}
1019 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1020 }
1021 EXPORT_SYMBOL_GPL(irq_wake_thread);
1022 
1023 static int irq_setup_forced_threading(struct irqaction *new)
1024 {
1025 	if (!force_irqthreads)
1026 		return 0;
1027 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1028 		return 0;
1029 
1030 	new->flags |= IRQF_ONESHOT;
1031 
1032 	/*
1033 	 * Handle the case where we have a real primary handler and a
1034 	 * thread handler. We force-thread them as well by creating a
1035 	 * secondary action.
1036 	 */
1037 	if (new->handler != irq_default_primary_handler && new->thread_fn) {
1038 		/* Allocate the secondary action */
1039 		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1040 		if (!new->secondary)
1041 			return -ENOMEM;
1042 		new->secondary->handler = irq_forced_secondary_handler;
1043 		new->secondary->thread_fn = new->thread_fn;
1044 		new->secondary->dev_id = new->dev_id;
1045 		new->secondary->irq = new->irq;
1046 		new->secondary->name = new->name;
1047 	}
1048 	/* Deal with the primary handler */
1049 	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1050 	new->thread_fn = new->handler;
1051 	new->handler = irq_default_primary_handler;
1052 	return 0;
1053 }
1054 
1055 static int irq_request_resources(struct irq_desc *desc)
1056 {
1057 	struct irq_data *d = &desc->irq_data;
1058 	struct irq_chip *c = d->chip;
1059 
1060 	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1061 }
1062 
1063 static void irq_release_resources(struct irq_desc *desc)
1064 {
1065 	struct irq_data *d = &desc->irq_data;
1066 	struct irq_chip *c = d->chip;
1067 
1068 	if (c->irq_release_resources)
1069 		c->irq_release_resources(d);
1070 }
1071 
1072 static int
1073 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1074 {
1075 	struct task_struct *t;
1076 	struct sched_param param = {
1077 		.sched_priority = MAX_USER_RT_PRIO/2,
1078 	};
1079 
1080 	if (!secondary) {
1081 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1082 				   new->name);
1083 	} else {
1084 		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1085 				   new->name);
1086 		param.sched_priority -= 1;
1087 	}
1088 
1089 	if (IS_ERR(t))
1090 		return PTR_ERR(t);
1091 
1092 	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1093 
1094 	/*
1095 	 * We keep the reference to the task struct even if
1096 	 * the thread dies, to avoid the interrupt code
1097 	 * referencing an already freed task_struct.
1098 	 */
1099 	get_task_struct(t);
1100 	new->thread = t;
1101 	/*
1102 	 * Tell the thread to set its affinity. This is
1103 	 * important for shared interrupt handlers as we do
1104 	 * not invoke setup_affinity() for the secondary
1105 	 * handlers as everything is already set up. Even for
1106 	 * interrupts marked with IRQF_NOBALANCING this is
1107 	 * correct as we want the thread to move to the cpu(s)
1108 	 * on which the requesting code placed the interrupt.
1109 	 */
1110 	set_bit(IRQTF_AFFINITY, &new->thread_flags);
1111 	return 0;
1112 }
1113 
1114 /*
1115  * Internal function to register an irqaction - typically used to
1116  * allocate special interrupts that are part of the architecture.
1117  */
1118 static int
1119 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1120 {
1121 	struct irqaction *old, **old_ptr;
1122 	unsigned long flags, thread_mask = 0;
1123 	int ret, nested, shared = 0;
1124 	cpumask_var_t mask;
1125 
1126 	if (!desc)
1127 		return -EINVAL;
1128 
1129 	if (desc->irq_data.chip == &no_irq_chip)
1130 		return -ENOSYS;
1131 	if (!try_module_get(desc->owner))
1132 		return -ENODEV;
1133 
1134 	new->irq = irq;
1135 
1136 	/*
1137 	 * If the trigger type is not specified by the caller,
1138 	 * then use the default for this interrupt.
1139 	 */
1140 	if (!(new->flags & IRQF_TRIGGER_MASK))
1141 		new->flags |= irqd_get_trigger_type(&desc->irq_data);
1142 
1143 	/*
1144 	 * Check whether the interrupt nests into another interrupt
1145 	 * thread.
1146 	 */
1147 	nested = irq_settings_is_nested_thread(desc);
1148 	if (nested) {
1149 		if (!new->thread_fn) {
1150 			ret = -EINVAL;
1151 			goto out_mput;
1152 		}
1153 		/*
1154 		 * Replace the primary handler, which the driver provided
1155 		 * for non-nested interrupt handling, with the dummy
1156 		 * function which warns when called.
1157 		 */
1158 		new->handler = irq_nested_primary_handler;
1159 	} else {
1160 		if (irq_settings_can_thread(desc)) {
1161 			ret = irq_setup_forced_threading(new);
1162 			if (ret)
1163 				goto out_mput;
1164 		}
1165 	}
1166 
1167 	/*
1168 	 * Create a handler thread when a thread function is supplied
1169 	 * and the interrupt does not nest into another interrupt
1170 	 * thread.
1171 	 */
1172 	if (new->thread_fn && !nested) {
1173 		ret = setup_irq_thread(new, irq, false);
1174 		if (ret)
1175 			goto out_mput;
1176 		if (new->secondary) {
1177 			ret = setup_irq_thread(new->secondary, irq, true);
1178 			if (ret)
1179 				goto out_thread;
1180 		}
1181 	}
1182 
1183 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1184 		ret = -ENOMEM;
1185 		goto out_thread;
1186 	}
1187 
1188 	/*
1189 	 * Drivers are often written to work w/o knowledge about the
1190 	 * underlying irq chip implementation, so a request for a
1191 	 * threaded irq without a primary hard irq context handler
1192 	 * requires the ONESHOT flag to be set. Some irq chips like
1193 	 * MSI based interrupts are per se one shot safe. Check the
1194 	 * chip flags, so we can avoid the unmask dance at the end of
1195 	 * the threaded handler for those.
1196 	 */
1197 	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1198 		new->flags &= ~IRQF_ONESHOT;
1199 
1200 	/*
1201 	 * The following block of code has to be executed atomically
1202 	 */
1203 	raw_spin_lock_irqsave(&desc->lock, flags);
1204 	old_ptr = &desc->action;
1205 	old = *old_ptr;
1206 	if (old) {
1207 		/*
1208 		 * Can't share interrupts unless both agree to and are
1209 		 * the same type (level, edge, polarity). So both flag
1210 		 * fields must have IRQF_SHARED set and the bits which
1211 		 * set the trigger type must match. Also all must
1212 		 * agree on ONESHOT.
1213 		 */
1214 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
1215 		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
1216 		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
1217 			goto mismatch;
1218 
1219 		/* All handlers must agree on per-cpuness */
1220 		if ((old->flags & IRQF_PERCPU) !=
1221 		    (new->flags & IRQF_PERCPU))
1222 			goto mismatch;
1223 
1224 		/* add new interrupt at end of irq queue */
1225 		do {
1226 			/*
1227 			 * Or all existing action->thread_mask bits,
1228 			 * so we can find the next zero bit for this
1229 			 * new action.
1230 			 */
1231 			thread_mask |= old->thread_mask;
1232 			old_ptr = &old->next;
1233 			old = *old_ptr;
1234 		} while (old);
1235 		shared = 1;
1236 	}
1237 
1238 	/*
1239 	 * Setup the thread mask for this irqaction for ONESHOT. For
1240 	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1241 	 * conditional in irq_wake_thread().
1242 	 */
1243 	if (new->flags & IRQF_ONESHOT) {
1244 		/*
1245 		 * Unlikely to have 32 resp 64 irqs sharing one line,
1246 		 * but who knows.
1247 		 */
1248 		if (thread_mask == ~0UL) {
1249 			ret = -EBUSY;
1250 			goto out_mask;
1251 		}
1252 		/*
1253 		 * The thread_mask for the action is or'ed to
1254 		 * desc->threads_oneshot to indicate that the
1255 		 * IRQF_ONESHOT thread handler has been woken, but not
1256 		 * yet finished. The bit is cleared when a thread
1257 		 * completes. When all threads of a shared interrupt
1258 		 * line have completed desc->threads_active becomes
1259 		 * zero and the interrupt line is unmasked. See
1260 		 * handle.c:irq_wake_thread() for further information.
1261 		 *
1262 		 * If no thread is woken by primary (hard irq context)
1263 		 * interrupt handlers, then desc->threads_active is
1264 		 * also checked for zero to unmask the irq line in the
1265 		 * affected hard irq flow handlers
1266 		 * (handle_[fasteoi|level]_irq).
1267 		 *
1268 		 * The new action gets the first zero bit of
1269 		 * thread_mask assigned. See the loop above which or's
1270 		 * all existing action->thread_mask bits.
1271 		 */
1272 		new->thread_mask = 1 << ffz(thread_mask);
1273 
1274 	} else if (new->handler == irq_default_primary_handler &&
1275 		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1276 		/*
1277 		 * The interrupt was requested with handler = NULL, so
1278 		 * we use the default primary handler for it. But it
1279 		 * does not have the oneshot flag set. In combination
1280 		 * with level interrupts this is deadly, because the
1281 		 * default primary handler just wakes the thread, then
1282 		 * the irq line is reenabled, but the device still
1283 		 * has the level irq asserted. Rinse and repeat....
1284 		 *
1285 		 * While this works for edge type interrupts, we play
1286 		 * it safe and reject unconditionally because we can't
1287 		 * say for sure which type this interrupt really
1288 		 * has. The type flags are unreliable as the
1289 		 * underlying chip implementation can override them.
1290 		 */
1291 		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1292 		       irq);
1293 		ret = -EINVAL;
1294 		goto out_mask;
1295 	}
1296 
1297 	if (!shared) {
1298 		ret = irq_request_resources(desc);
1299 		if (ret) {
1300 			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1301 			       new->name, irq, desc->irq_data.chip->name);
1302 			goto out_mask;
1303 		}
1304 
1305 		init_waitqueue_head(&desc->wait_for_threads);
1306 
1307 		/* Setup the type (level, edge, polarity) if configured: */
1308 		if (new->flags & IRQF_TRIGGER_MASK) {
1309 			ret = __irq_set_trigger(desc,
1310 						new->flags & IRQF_TRIGGER_MASK);
1311 
1312 			if (ret)
1313 				goto out_mask;
1314 		}
1315 
1316 		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1317 				  IRQS_ONESHOT | IRQS_WAITING);
1318 		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1319 
1320 		if (new->flags & IRQF_PERCPU) {
1321 			irqd_set(&desc->irq_data, IRQD_PER_CPU);
1322 			irq_settings_set_per_cpu(desc);
1323 		}
1324 
1325 		if (new->flags & IRQF_ONESHOT)
1326 			desc->istate |= IRQS_ONESHOT;
1327 
1328 		if (irq_settings_can_autoenable(desc))
1329 			irq_startup(desc, true);
1330 		else
1331 			/* Undo nested disables: */
1332 			desc->depth = 1;
1333 
1334 		/* Exclude IRQ from balancing if requested */
1335 		if (new->flags & IRQF_NOBALANCING) {
1336 			irq_settings_set_no_balancing(desc);
1337 			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1338 		}
1339 
1340 		/* Set default affinity mask once everything is setup */
1341 		setup_affinity(desc, mask);
1342 
1343 	} else if (new->flags & IRQF_TRIGGER_MASK) {
1344 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1345 		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1346 
1347 		if (nmsk != omsk)
1348 			/* hope the handler works with the current trigger mode */
1349 			pr_warn("irq %d uses trigger mode %u; requested %u\n",
1350 				irq, omsk, nmsk);
1351 	}
1352 
1353 	*old_ptr = new;
1354 
1355 	irq_pm_install_action(desc, new);
1356 
1357 	/* Reset broken irq detection when installing new handler */
1358 	desc->irq_count = 0;
1359 	desc->irqs_unhandled = 0;
1360 
1361 	/*
1362 	 * Check whether we disabled the irq via the spurious handler
1363 	 * before. Reenable it and give it another chance.
1364 	 */
1365 	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1366 		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1367 		__enable_irq(desc);
1368 	}
1369 
1370 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1371 
1372 	/*
1373 	 * Strictly no need to wake it up, but hung_task complains
1374 	 * when no hard interrupt wakes the thread up.
1375 	 */
1376 	if (new->thread)
1377 		wake_up_process(new->thread);
1378 	if (new->secondary)
1379 		wake_up_process(new->secondary->thread);
1380 
1381 	register_irq_proc(irq, desc);
1382 	new->dir = NULL;
1383 	register_handler_proc(irq, new);
1384 	free_cpumask_var(mask);
1385 
1386 	return 0;
1387 
1388 mismatch:
1389 	if (!(new->flags & IRQF_PROBE_SHARED)) {
1390 		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1391 		       irq, new->flags, new->name, old->flags, old->name);
1392 #ifdef CONFIG_DEBUG_SHIRQ
1393 		dump_stack();
1394 #endif
1395 	}
1396 	ret = -EBUSY;
1397 
1398 out_mask:
1399 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1400 	free_cpumask_var(mask);
1401 
1402 out_thread:
1403 	if (new->thread) {
1404 		struct task_struct *t = new->thread;
1405 
1406 		new->thread = NULL;
1407 		kthread_stop(t);
1408 		put_task_struct(t);
1409 	}
1410 	if (new->secondary && new->secondary->thread) {
1411 		struct task_struct *t = new->secondary->thread;
1412 
1413 		new->secondary->thread = NULL;
1414 		kthread_stop(t);
1415 		put_task_struct(t);
1416 	}
1417 out_mput:
1418 	module_put(desc->owner);
1419 	return ret;
1420 }
1421 
1422 /**
1423  *	setup_irq - setup an interrupt
1424  *	@irq: Interrupt line to setup
1425  *	@act: irqaction for the interrupt
1426  *
1427  * Used to statically setup interrupts in the early boot process.
1428  */
1429 int setup_irq(unsigned int irq, struct irqaction *act)
1430 {
1431 	int retval;
1432 	struct irq_desc *desc = irq_to_desc(irq);
1433 
1434 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1435 		return -EINVAL;
1436 
1437 	retval = irq_chip_pm_get(&desc->irq_data);
1438 	if (retval < 0)
1439 		return retval;
1440 
1441 	chip_bus_lock(desc);
1442 	retval = __setup_irq(irq, desc, act);
1443 	chip_bus_sync_unlock(desc);
1444 
1445 	if (retval)
1446 		irq_chip_pm_put(&desc->irq_data);
1447 
1448 	return retval;
1449 }
1450 EXPORT_SYMBOL_GPL(setup_irq);
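
/*
 * Usage sketch for setup_irq(): early boot code (e.g. an architecture
 * timer) registering a statically allocated irqaction before the
 * allocators needed by request_irq() are available.  foo_timer_interrupt
 * and FOO_TIMER_IRQ are illustrative assumptions.
 *
 *	static struct irqaction foo_timer_irqaction = {
 *		.handler = foo_timer_interrupt,
 *		.flags	 = IRQF_TIMER,
 *		.name	 = "foo-timer",
 *	};
 *
 *	static void __init foo_time_init(void)
 *	{
 *		setup_irq(FOO_TIMER_IRQ, &foo_timer_irqaction);
 *	}
 */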
1451 
1452 /*
1453  * Internal function to unregister an irqaction - used to free
1454  * regular and special interrupts that are part of the architecture.
1455  */
1456 static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1457 {
1458 	struct irq_desc *desc = irq_to_desc(irq);
1459 	struct irqaction *action, **action_ptr;
1460 	unsigned long flags;
1461 
1462 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1463 
1464 	if (!desc)
1465 		return NULL;
1466 
1467 	chip_bus_lock(desc);
1468 	raw_spin_lock_irqsave(&desc->lock, flags);
1469 
1470 	/*
1471 	 * There can be multiple actions per IRQ descriptor, find the right
1472 	 * one based on the dev_id:
1473 	 */
1474 	action_ptr = &desc->action;
1475 	for (;;) {
1476 		action = *action_ptr;
1477 
1478 		if (!action) {
1479 			WARN(1, "Trying to free already-free IRQ %d\n", irq);
1480 			raw_spin_unlock_irqrestore(&desc->lock, flags);
1481 			chip_bus_sync_unlock(desc);
1482 			return NULL;
1483 		}
1484 
1485 		if (action->dev_id == dev_id)
1486 			break;
1487 		action_ptr = &action->next;
1488 	}
1489 
1490 	/* Found it - now remove it from the list of entries: */
1491 	*action_ptr = action->next;
1492 
1493 	irq_pm_remove_action(desc, action);
1494 
1495 	/* If this was the last handler, shut down the IRQ line: */
1496 	if (!desc->action) {
1497 		irq_settings_clr_disable_unlazy(desc);
1498 		irq_shutdown(desc);
1499 		irq_release_resources(desc);
1500 	}
1501 
1502 #ifdef CONFIG_SMP
1503 	/* make sure affinity_hint is cleaned up */
1504 	if (WARN_ON_ONCE(desc->affinity_hint))
1505 		desc->affinity_hint = NULL;
1506 #endif
1507 
1508 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1509 	chip_bus_sync_unlock(desc);
1510 
1511 	unregister_handler_proc(irq, action);
1512 
1513 	/* Make sure it's not being used on another CPU: */
1514 	synchronize_irq(irq);
1515 
1516 #ifdef CONFIG_DEBUG_SHIRQ
1517 	/*
1518 	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1519 	 * event to happen even now that it's being freed, so let's make sure that
1520 	 * is so by doing an extra call to the handler ....
1521 	 *
1522 	 * ( We do this after actually deregistering it, to make sure that a
1523 	 *   'real' IRQ doesn't run in parallel with our fake. )
1524 	 */
1525 	if (action->flags & IRQF_SHARED) {
1526 		local_irq_save(flags);
1527 		action->handler(irq, dev_id);
1528 		local_irq_restore(flags);
1529 	}
1530 #endif
1531 
1532 	if (action->thread) {
1533 		kthread_stop(action->thread);
1534 		put_task_struct(action->thread);
1535 		if (action->secondary && action->secondary->thread) {
1536 			kthread_stop(action->secondary->thread);
1537 			put_task_struct(action->secondary->thread);
1538 		}
1539 	}
1540 
1541 	irq_chip_pm_put(&desc->irq_data);
1542 	module_put(desc->owner);
1543 	kfree(action->secondary);
1544 	return action;
1545 }
1546 
1547 /**
1548  *	remove_irq - free an interrupt
1549  *	@irq: Interrupt line to free
1550  *	@act: irqaction for the interrupt
1551  *
1552  * Used to remove interrupts statically setup by the early boot process.
1553  */
1554 void remove_irq(unsigned int irq, struct irqaction *act)
1555 {
1556 	struct irq_desc *desc = irq_to_desc(irq);
1557 
1558 	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1559 	    __free_irq(irq, act->dev_id);
1560 }
1561 EXPORT_SYMBOL_GPL(remove_irq);
1562 
1563 /**
1564  *	free_irq - free an interrupt allocated with request_irq
1565  *	@irq: Interrupt line to free
1566  *	@dev_id: Device identity to free
1567  *
1568  *	Remove an interrupt handler. The handler is removed and if the
1569  *	interrupt line is no longer in use by any driver it is disabled.
1570  *	On a shared IRQ the caller must ensure the interrupt is disabled
1571  *	on the card it drives before calling this function. The function
1572  *	does not return until any executing interrupts for this IRQ
1573  *	have completed.
1574  *
1575  *	This function must not be called from interrupt context.
1576  */
1577 void free_irq(unsigned int irq, void *dev_id)
1578 {
1579 	struct irq_desc *desc = irq_to_desc(irq);
1580 
1581 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1582 		return;
1583 
1584 #ifdef CONFIG_SMP
1585 	if (WARN_ON(desc->affinity_notify))
1586 		desc->affinity_notify = NULL;
1587 #endif
1588 
1589 	kfree(__free_irq(irq, dev_id));
1590 }
1591 EXPORT_SYMBOL(free_irq);
1592 
1593 /**
1594  *	request_threaded_irq - allocate an interrupt line
1595  *	@irq: Interrupt line to allocate
1596  *	@handler: Function to be called when the IRQ occurs.
1597  *		  Primary handler for threaded interrupts
1598  *		  If NULL and thread_fn != NULL the default
1599  *		  primary handler is installed
1600  *	@thread_fn: Function called from the irq handler thread
1601  *		    If NULL, no irq thread is created
1602  *	@irqflags: Interrupt type flags
1603  *	@devname: An ascii name for the claiming device
1604  *	@dev_id: A cookie passed back to the handler function
1605  *
1606  *	This call allocates interrupt resources and enables the
1607  *	interrupt line and IRQ handling. From the point this
1608  *	call is made your handler function may be invoked. Since
1609  *	your handler function must clear any interrupt the board
1610  *	raises, you must take care both to initialise your hardware
1611  *	and to set up the interrupt handler in the right order.
1612  *
1613  *	If you want to set up a threaded irq handler for your device
1614  *	then you need to supply @handler and @thread_fn. @handler is
1615  *	still called in hard interrupt context and has to check
1616  *	whether the interrupt originates from the device. If yes it
1617  *	needs to disable the interrupt on the device and return
1618  *	IRQ_WAKE_THREAD which will wake up the handler thread and run
1619  *	@thread_fn. This split handler design is necessary to support
1620  *	shared interrupts.
1621  *
1622  *	Dev_id must be globally unique. Normally the address of the
1623  *	device data structure is used as the cookie. Since the handler
1624  *	receives this value it makes sense to use it.
1625  *
1626  *	If your interrupt is shared you must pass a non NULL dev_id
1627  *	as this is required when freeing the interrupt.
1628  *
1629  *	Flags:
1630  *
1631  *	IRQF_SHARED		Interrupt is shared
1632  *	IRQF_TRIGGER_*		Specify active edge(s) or level
1633  *
1634  */
1635 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1636 			 irq_handler_t thread_fn, unsigned long irqflags,
1637 			 const char *devname, void *dev_id)
1638 {
1639 	struct irqaction *action;
1640 	struct irq_desc *desc;
1641 	int retval;
1642 
1643 	if (irq == IRQ_NOTCONNECTED)
1644 		return -ENOTCONN;
1645 
1646 	/*
1647 	 * Sanity-check: shared interrupts must pass in a real dev-ID,
1648 	 * otherwise we'll have trouble later trying to figure out
1649 	 * which interrupt is which (messes up the interrupt freeing
1650 	 * logic etc).
1651 	 *
1652 	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1653 	 * it cannot be set along with IRQF_NO_SUSPEND.
1654 	 */
1655 	if (((irqflags & IRQF_SHARED) && !dev_id) ||
1656 	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1657 	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1658 		return -EINVAL;
1659 
1660 	desc = irq_to_desc(irq);
1661 	if (!desc)
1662 		return -EINVAL;
1663 
1664 	if (!irq_settings_can_request(desc) ||
1665 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1666 		return -EINVAL;
1667 
1668 	if (!handler) {
1669 		if (!thread_fn)
1670 			return -EINVAL;
1671 		handler = irq_default_primary_handler;
1672 	}
1673 
1674 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1675 	if (!action)
1676 		return -ENOMEM;
1677 
1678 	action->handler = handler;
1679 	action->thread_fn = thread_fn;
1680 	action->flags = irqflags;
1681 	action->name = devname;
1682 	action->dev_id = dev_id;
1683 
1684 	retval = irq_chip_pm_get(&desc->irq_data);
1685 	if (retval < 0) {
1686 		kfree(action);
1687 		return retval;
1688 	}
1689 
1690 	chip_bus_lock(desc);
1691 	retval = __setup_irq(irq, desc, action);
1692 	chip_bus_sync_unlock(desc);
1693 
1694 	if (retval) {
1695 		irq_chip_pm_put(&desc->irq_data);
1696 		kfree(action->secondary);
1697 		kfree(action);
1698 	}
1699 
1700 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1701 	if (!retval && (irqflags & IRQF_SHARED)) {
1702 		/*
1703 		 * It's a shared IRQ -- the driver ought to be prepared for it
1704 		 * to happen immediately, so let's make sure....
1705 		 * We disable the irq to make sure that a 'real' IRQ doesn't
1706 		 * run in parallel with our fake.
1707 		 */
1708 		unsigned long flags;
1709 
1710 		disable_irq(irq);
1711 		local_irq_save(flags);
1712 
1713 		handler(irq, dev_id);
1714 
1715 		local_irq_restore(flags);
1716 		enable_irq(irq);
1717 	}
1718 #endif
1719 	return retval;
1720 }
1721 EXPORT_SYMBOL(request_threaded_irq);
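
/*
 * Usage sketch for request_threaded_irq(): a hypothetical "foo" device
 * with a quick hard irq check and a sleeping thread handler.
 * foo_irq_pending(), foo_mask_device_irq() and foo_handle_work() are
 * illustrative assumptions.
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;	// not ours, line is shared
 *		foo_mask_device_irq(foo);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		foo_handle_work(foo);		// may sleep, e.g. bus access
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 *	...
 *	free_irq(foo->irq, foo);
 *
 * Passing handler == NULL instead installs irq_default_primary_handler(),
 * which then requires IRQF_ONESHOT (see the check in __setup_irq()).
 */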
1722 
1723 /**
1724  *	request_any_context_irq - allocate an interrupt line
1725  *	@irq: Interrupt line to allocate
1726  *	@handler: Function to be called when the IRQ occurs.
1727  *		  Threaded handler for threaded interrupts.
1728  *	@flags: Interrupt type flags
1729  *	@name: An ascii name for the claiming device
1730  *	@dev_id: A cookie passed back to the handler function
1731  *
1732  *	This call allocates interrupt resources and enables the
1733  *	interrupt line and IRQ handling. It selects either a
1734  *	hardirq or threaded handling method depending on the
1735  *	context.
1736  *
1737  *	On failure, it returns a negative value. On success,
1738  *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1739  */
1740 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1741 			    unsigned long flags, const char *name, void *dev_id)
1742 {
1743 	struct irq_desc *desc;
1744 	int ret;
1745 
1746 	if (irq == IRQ_NOTCONNECTED)
1747 		return -ENOTCONN;
1748 
1749 	desc = irq_to_desc(irq);
1750 	if (!desc)
1751 		return -EINVAL;
1752 
1753 	if (irq_settings_is_nested_thread(desc)) {
1754 		ret = request_threaded_irq(irq, NULL, handler,
1755 					   flags, name, dev_id);
1756 		return !ret ? IRQC_IS_NESTED : ret;
1757 	}
1758 
1759 	ret = request_irq(irq, handler, flags, name, dev_id);
1760 	return !ret ? IRQC_IS_HARDIRQ : ret;
1761 }
1762 EXPORT_SYMBOL_GPL(request_any_context_irq);
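
/*
 * Usage sketch for request_any_context_irq(): useful when the interrupt
 * may sit behind a slow-bus irq chip (e.g. a gpio expander) and therefore
 * end up as a nested thread.  foo_isr() and foo->irq are illustrative
 * assumptions.
 *
 *	ret = request_any_context_irq(foo->irq, foo_isr, IRQF_TRIGGER_FALLING,
 *				      "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	// ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED, both mean success
 *
 * foo_isr() must cope with either context, i.e. it may only sleep when
 * the interrupt was actually set up as a nested thread.
 */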
1763 
1764 void enable_percpu_irq(unsigned int irq, unsigned int type)
1765 {
1766 	unsigned int cpu = smp_processor_id();
1767 	unsigned long flags;
1768 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1769 
1770 	if (!desc)
1771 		return;
1772 
1773 	/*
1774 	 * If the trigger type is not specified by the caller, then
1775 	 * use the default for this interrupt.
1776 	 */
1777 	type &= IRQ_TYPE_SENSE_MASK;
1778 	if (type == IRQ_TYPE_NONE)
1779 		type = irqd_get_trigger_type(&desc->irq_data);
1780 
1781 	if (type != IRQ_TYPE_NONE) {
1782 		int ret;
1783 
1784 		ret = __irq_set_trigger(desc, type);
1785 
1786 		if (ret) {
1787 			WARN(1, "failed to set type for IRQ%d\n", irq);
1788 			goto out;
1789 		}
1790 	}
1791 
1792 	irq_percpu_enable(desc, cpu);
1793 out:
1794 	irq_put_desc_unlock(desc, flags);
1795 }
1796 EXPORT_SYMBOL_GPL(enable_percpu_irq);
1797 
1798 /**
1799  * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
1800  * @irq:	Linux irq number to check for
1801  *
1802  * Must be called from a non-migratable context. Returns the enable
1803  * state of a per cpu interrupt on the current cpu.
1804  */
1805 bool irq_percpu_is_enabled(unsigned int irq)
1806 {
1807 	unsigned int cpu = smp_processor_id();
1808 	struct irq_desc *desc;
1809 	unsigned long flags;
1810 	bool is_enabled;
1811 
1812 	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1813 	if (!desc)
1814 		return false;
1815 
1816 	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
1817 	irq_put_desc_unlock(desc, flags);
1818 
1819 	return is_enabled;
1820 }
1821 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
1822 
1823 void disable_percpu_irq(unsigned int irq)
1824 {
1825 	unsigned int cpu = smp_processor_id();
1826 	unsigned long flags;
1827 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1828 
1829 	if (!desc)
1830 		return;
1831 
1832 	irq_percpu_disable(desc, cpu);
1833 	irq_put_desc_unlock(desc, flags);
1834 }
1835 EXPORT_SYMBOL_GPL(disable_percpu_irq);
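
/*
 * Usage sketch for enable_percpu_irq()/disable_percpu_irq(): per-cpu
 * interrupts (local timer style) are enabled and disabled on each CPU
 * individually, typically from CPU hotplug callbacks.  foo_irq,
 * foo_starting_cpu() and foo_dying_cpu() are illustrative assumptions.
 *
 *	static int foo_starting_cpu(unsigned int cpu)
 *	{
 *		enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);
 *		return 0;
 *	}
 *
 *	static int foo_dying_cpu(unsigned int cpu)
 *	{
 *		disable_percpu_irq(foo_irq);
 *		return 0;
 *	}
 *
 * Both calls only affect the current CPU, so they must run on the CPU in
 * question from a non-migratable context.
 */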
1836 
1837 /*
1838  * Internal function to unregister a percpu irqaction.
1839  */
1840 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1841 {
1842 	struct irq_desc *desc = irq_to_desc(irq);
1843 	struct irqaction *action;
1844 	unsigned long flags;
1845 
1846 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1847 
1848 	if (!desc)
1849 		return NULL;
1850 
1851 	raw_spin_lock_irqsave(&desc->lock, flags);
1852 
1853 	action = desc->action;
1854 	if (!action || action->percpu_dev_id != dev_id) {
1855 		WARN(1, "Trying to free already-free IRQ %d\n", irq);
1856 		goto bad;
1857 	}
1858 
1859 	if (!cpumask_empty(desc->percpu_enabled)) {
1860 		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1861 		     irq, cpumask_first(desc->percpu_enabled));
1862 		goto bad;
1863 	}
1864 
1865 	/* Found it - now remove it from the list of entries: */
1866 	desc->action = NULL;
1867 
1868 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1869 
1870 	unregister_handler_proc(irq, action);
1871 
1872 	irq_chip_pm_put(&desc->irq_data);
1873 	module_put(desc->owner);
1874 	return action;
1875 
1876 bad:
1877 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1878 	return NULL;
1879 }
1880 
1881 /**
1882  *	remove_percpu_irq - free a per-cpu interrupt
1883  *	@irq: Interrupt line to free
1884  *	@act: irqaction for the interrupt
1885  *
1886  * Used to remove interrupts statically set up by the early boot process.
1887  */
1888 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1889 {
1890 	struct irq_desc *desc = irq_to_desc(irq);
1891 
1892 	if (desc && irq_settings_is_per_cpu_devid(desc))
1893 		__free_percpu_irq(irq, act->percpu_dev_id);
1894 }
1895 
1896 /**
1897  *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
1898  *	@irq: Interrupt line to free
1899  *	@dev_id: Device identity to free
1900  *
1901  *	Remove a percpu interrupt handler. The handler is removed, but
1902  *	the interrupt line is not disabled. This must be done on each
1903  *	CPU before calling this function. The function does not return
1904  *	until any executing interrupts for this IRQ have completed.
1905  *
1906  *	This function must not be called from interrupt context.
1907  */
1908 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1909 {
1910 	struct irq_desc *desc = irq_to_desc(irq);
1911 
1912 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
1913 		return;
1914 
1915 	chip_bus_lock(desc);
1916 	kfree(__free_percpu_irq(irq, dev_id));
1917 	chip_bus_sync_unlock(desc);
1918 }
1919 EXPORT_SYMBOL_GPL(free_percpu_irq);
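
/*
 * Illustrative teardown sketch (hypothetical names): the line must be
 * disabled on every CPU before free_percpu_irq() is called, since freeing
 * the handler does not disable the interrupt.
 *
 *	static void my_disable_on_cpu(void *info)
 *	{
 *		disable_percpu_irq(*(unsigned int *)info);
 *	}
 *
 *	static void my_teardown(unsigned int irq)
 *	{
 *		on_each_cpu(my_disable_on_cpu, &irq, 1);
 *		free_percpu_irq(irq, &my_percpu_data);
 *	}
 */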
1920 
1921 /**
1922  *	setup_percpu_irq - setup a per-cpu interrupt
1923  *	@irq: Interrupt line to setup
1924  *	@act: irqaction for the interrupt
1925  *
1926  * Used to statically set up per-cpu interrupts in the early boot process.
1927  */
1928 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1929 {
1930 	struct irq_desc *desc = irq_to_desc(irq);
1931 	int retval;
1932 
1933 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
1934 		return -EINVAL;
1935 
1936 	retval = irq_chip_pm_get(&desc->irq_data);
1937 	if (retval < 0)
1938 		return retval;
1939 
1940 	chip_bus_lock(desc);
1941 	retval = __setup_irq(irq, desc, act);
1942 	chip_bus_sync_unlock(desc);
1943 
1944 	if (retval)
1945 		irq_chip_pm_put(&desc->irq_data);
1946 
1947 	return retval;
1948 }
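
/*
 * Illustrative sketch (hypothetical names): early boot code that cannot
 * use request_percpu_irq() can register a statically allocated irqaction
 * instead, assuming the descriptor is marked per-cpu-devid.
 *
 *	static struct irqaction my_timer_irqaction = {
 *		.handler	= my_timer_interrupt,
 *		.flags		= IRQF_PERCPU | IRQF_TIMER,
 *		.name		= "my-timer",
 *		.percpu_dev_id	= &my_timer_data,
 *	};
 *
 *	setup_percpu_irq(my_timer_irq, &my_timer_irqaction);
 */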
1949 
1950 /**
1951  *	request_percpu_irq - allocate a percpu interrupt line
1952  *	@irq: Interrupt line to allocate
1953  *	@handler: Function to be called when the IRQ occurs.
1954  *	@devname: An ASCII name for the claiming device
1955  *	@dev_id: A percpu cookie passed back to the handler function
1956  *
1957  *	This call allocates interrupt resources and enables the
1958  *	interrupt on the local CPU. If the interrupt is supposed to be
1959  *	enabled on other CPUs, that has to be done on each of those
1960  *	CPUs by calling enable_percpu_irq().
1961  *
1962  *	Dev_id must be globally unique. It is a per-cpu variable, and
1963  *	the handler gets called with the interrupted CPU's instance of
1964  *	that variable.
1965  */
1966 int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1967 		       const char *devname, void __percpu *dev_id)
1968 {
1969 	struct irqaction *action;
1970 	struct irq_desc *desc;
1971 	int retval;
1972 
1973 	if (!dev_id)
1974 		return -EINVAL;
1975 
1976 	desc = irq_to_desc(irq);
1977 	if (!desc || !irq_settings_can_request(desc) ||
1978 	    !irq_settings_is_per_cpu_devid(desc))
1979 		return -EINVAL;
1980 
1981 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1982 	if (!action)
1983 		return -ENOMEM;
1984 
1985 	action->handler = handler;
1986 	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1987 	action->name = devname;
1988 	action->percpu_dev_id = dev_id;
1989 
1990 	retval = irq_chip_pm_get(&desc->irq_data);
1991 	if (retval < 0) {
1992 		kfree(action);
1993 		return retval;
1994 	}
1995 
1996 	chip_bus_lock(desc);
1997 	retval = __setup_irq(irq, desc, action);
1998 	chip_bus_sync_unlock(desc);
1999 
2000 	if (retval) {
2001 		irq_chip_pm_put(&desc->irq_data);
2002 		kfree(action);
2003 	}
2004 
2005 	return retval;
2006 }
2007 EXPORT_SYMBOL_GPL(request_percpu_irq);
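
/*
 * Illustrative sketch (hypothetical names): @dev_id is a per-cpu pointer,
 * and the handler is invoked with the interrupted CPU's instance of it.
 * enable_percpu_irq() still has to be called on every CPU that should
 * receive the interrupt, as noted above.
 *
 *	static DEFINE_PER_CPU(struct my_percpu_data, my_percpu_data);
 *
 *	static irqreturn_t my_percpu_handler(int irq, void *dev_id)
 *	{
 *		struct my_percpu_data *data = dev_id;	(this CPU's instance)
 *
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_percpu_irq(irq, my_percpu_handler, "my-percpu",
 *				 &my_percpu_data);
 *	if (!ret)
 *		enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */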
2008 
2009 /**
2010  *	irq_get_irqchip_state - returns the irqchip state of an interrupt.
2011  *	@irq: Interrupt line that is forwarded to a VM
2012  *	@which: One of IRQCHIP_STATE_* the caller wants to know about
2013  *	@state: a pointer to a boolean where the state is to be stored
2014  *
2015  *	This call snapshots the internal irqchip state of an
2016  *	interrupt, returning into @state the bit corresponding to
2017  *	state @which.
2018  *
2019  *	This function should be called with preemption disabled if the
2020  *	interrupt controller has per-cpu registers.
2021  */
2022 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2023 			  bool *state)
2024 {
2025 	struct irq_desc *desc;
2026 	struct irq_data *data;
2027 	struct irq_chip *chip;
2028 	unsigned long flags;
2029 	int err = -EINVAL;
2030 
2031 	desc = irq_get_desc_buslock(irq, &flags, 0);
2032 	if (!desc)
2033 		return err;
2034 
2035 	data = irq_desc_get_irq_data(desc);
2036 
2037 	do {
2038 		chip = irq_data_get_irq_chip(data);
2039 		if (chip->irq_get_irqchip_state)
2040 			break;
2041 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2042 		data = data->parent_data;
2043 #else
2044 		data = NULL;
2045 #endif
2046 	} while (data);
2047 
2048 	if (data)
2049 		err = chip->irq_get_irqchip_state(data, which, state);
2050 
2051 	irq_put_desc_busunlock(desc, flags);
2052 	return err;
2053 }
2054 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
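
/*
 * Illustrative sketch (hypothetical irq number): snapshot whether a
 * forwarded interrupt is currently pending at the irqchip level.
 *
 *	bool pending;
 *	int err;
 *
 *	err = irq_get_irqchip_state(my_irq, IRQCHIP_STATE_PENDING, &pending);
 *	if (!err && pending)
 *		(the line is pending in hardware)
 */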
2055 
2056 /**
2057  *	irq_set_irqchip_state - set the state of a forwarded interrupt.
2058  *	@irq: Interrupt line that is forwarded to a VM
2059  *	@which: State to be restored (one of IRQCHIP_STATE_*)
2060  *	@val: Value corresponding to @which
2061  *
2062  *	This call sets the internal irqchip state of an interrupt,
2063  *	depending on the value of @which.
2064  *
2065  *	This function should be called with preemption disabled if the
2066  *	interrupt controller has per-cpu registers.
2067  */
2068 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2069 			  bool val)
2070 {
2071 	struct irq_desc *desc;
2072 	struct irq_data *data;
2073 	struct irq_chip *chip;
2074 	unsigned long flags;
2075 	int err = -EINVAL;
2076 
2077 	desc = irq_get_desc_buslock(irq, &flags, 0);
2078 	if (!desc)
2079 		return err;
2080 
2081 	data = irq_desc_get_irq_data(desc);
2082 
2083 	do {
2084 		chip = irq_data_get_irq_chip(data);
2085 		if (chip->irq_set_irqchip_state)
2086 			break;
2087 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2088 		data = data->parent_data;
2089 #else
2090 		data = NULL;
2091 #endif
2092 	} while (data);
2093 
2094 	if (data)
2095 		err = chip->irq_set_irqchip_state(data, which, val);
2096 
2097 	irq_put_desc_busunlock(desc, flags);
2098 	return err;
2099 }
2100 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
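
/*
 * Illustrative sketch (hypothetical irq number): restore a previously
 * saved pending bit of a forwarded interrupt, e.g. when reloading VM
 * interrupt state.
 *
 *	int err;
 *
 *	err = irq_set_irqchip_state(my_irq, IRQCHIP_STATE_PENDING,
 *				    saved_pending);
 *	if (err)
 *		pr_warn("failed to restore irqchip state for IRQ %d: %d\n",
 *			my_irq, err);
 */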
2101