xref: /openbmc/linux/kernel/irq/chip.c (revision 239480ab)
1 /*
2  * linux/kernel/irq/chip.c
3  *
4  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5  * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
6  *
7  * This file contains the core interrupt handling code, for irq-chip
8  * based architectures.
9  *
10  * Detailed information is available in Documentation/DocBook/genericirq
11  */
12 
13 #include <linux/irq.h>
14 #include <linux/msi.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/kernel_stat.h>
18 #include <linux/irqdomain.h>
19 
20 #include <trace/events/irq.h>
21 
22 #include "internals.h"
23 
24 static irqreturn_t bad_chained_irq(int irq, void *dev_id)
25 {
26 	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
27 	return IRQ_NONE;
28 }
29 
30 /*
31  * Chained handlers should never call an action on their IRQ. This default
32  * action will emit a warning if that ever happens.
33  */
34 struct irqaction chained_action = {
35 	.handler = bad_chained_irq,
36 };
37 
38 /**
39  *	irq_set_chip - set the irq chip for an irq
40  *	@irq:	irq number
41  *	@chip:	pointer to irq chip description structure
42  */
43 int irq_set_chip(unsigned int irq, struct irq_chip *chip)
44 {
45 	unsigned long flags;
46 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
47 
48 	if (!desc)
49 		return -EINVAL;
50 
51 	if (!chip)
52 		chip = &no_irq_chip;
53 
54 	desc->irq_data.chip = chip;
55 	irq_put_desc_unlock(desc, flags);
56 	/*
57 	 * For !CONFIG_SPARSE_IRQ make the irq show up in
58 	 * allocated_irqs.
59 	 */
60 	irq_mark_irq(irq);
61 	return 0;
62 }
63 EXPORT_SYMBOL(irq_set_chip);
64 
65 /**
66  *	irq_set_irq_type - set the irq trigger type for an irq
67  *	@irq:	irq number
68  *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
69  */
70 int irq_set_irq_type(unsigned int irq, unsigned int type)
71 {
72 	unsigned long flags;
73 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
74 	int ret = 0;
75 
76 	if (!desc)
77 		return -EINVAL;
78 
79 	ret = __irq_set_trigger(desc, type);
80 	irq_put_desc_busunlock(desc, flags);
81 	return ret;
82 }
83 EXPORT_SYMBOL(irq_set_irq_type);
84 
85 /**
86  *	irq_set_handler_data - set irq handler data for an irq
87  *	@irq:	Interrupt number
88  *	@data:	Pointer to interrupt specific data
89  *
90  *	Set the per-IRQ handler data for an irq
91  */
92 int irq_set_handler_data(unsigned int irq, void *data)
93 {
94 	unsigned long flags;
95 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
96 
97 	if (!desc)
98 		return -EINVAL;
99 	desc->irq_common_data.handler_data = data;
100 	irq_put_desc_unlock(desc, flags);
101 	return 0;
102 }
103 EXPORT_SYMBOL(irq_set_handler_data);
104 
105 /**
106  *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
107  *	@irq_base:	Interrupt number base
108  *	@irq_offset:	Interrupt number offset
109  *	@entry:		Pointer to MSI descriptor data
110  *
111  *	Set the MSI descriptor entry for an irq at the given offset
112  */
113 int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
114 			 struct msi_desc *entry)
115 {
116 	unsigned long flags;
117 	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
118 
119 	if (!desc)
120 		return -EINVAL;
121 	desc->irq_common_data.msi_desc = entry;
122 	if (entry && !irq_offset)
123 		entry->irq = irq_base;
124 	irq_put_desc_unlock(desc, flags);
125 	return 0;
126 }
127 
128 /**
129  *	irq_set_msi_desc - set MSI descriptor data for an irq
130  *	@irq:	Interrupt number
131  *	@entry:	Pointer to MSI descriptor data
132  *
133  *	Set the MSI descriptor entry for an irq
134  */
135 int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
136 {
137 	return irq_set_msi_desc_off(irq, 0, entry);
138 }
139 
140 /**
141  *	irq_set_chip_data - set irq chip data for an irq
142  *	@irq:	Interrupt number
143  *	@data:	Pointer to chip specific data
144  *
145  *	Set the hardware irq chip data for an irq
146  */
147 int irq_set_chip_data(unsigned int irq, void *data)
148 {
149 	unsigned long flags;
150 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
151 
152 	if (!desc)
153 		return -EINVAL;
154 	desc->irq_data.chip_data = data;
155 	irq_put_desc_unlock(desc, flags);
156 	return 0;
157 }
158 EXPORT_SYMBOL(irq_set_chip_data);
159 
160 struct irq_data *irq_get_irq_data(unsigned int irq)
161 {
162 	struct irq_desc *desc = irq_to_desc(irq);
163 
164 	return desc ? &desc->irq_data : NULL;
165 }
166 EXPORT_SYMBOL_GPL(irq_get_irq_data);
167 
168 static void irq_state_clr_disabled(struct irq_desc *desc)
169 {
170 	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
171 }
172 
173 static void irq_state_set_disabled(struct irq_desc *desc)
174 {
175 	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
176 }
177 
178 static void irq_state_clr_masked(struct irq_desc *desc)
179 {
180 	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
181 }
182 
183 static void irq_state_set_masked(struct irq_desc *desc)
184 {
185 	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
186 }
187 
188 int irq_startup(struct irq_desc *desc, bool resend)
189 {
190 	int ret = 0;
191 
192 	irq_state_clr_disabled(desc);
193 	desc->depth = 0;
194 
195 	irq_domain_activate_irq(&desc->irq_data);
196 	if (desc->irq_data.chip->irq_startup) {
197 		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
198 		irq_state_clr_masked(desc);
199 	} else {
200 		irq_enable(desc);
201 	}
202 	if (resend)
203 		check_irq_resend(desc);
204 	return ret;
205 }
206 
207 void irq_shutdown(struct irq_desc *desc)
208 {
209 	irq_state_set_disabled(desc);
210 	desc->depth = 1;
211 	if (desc->irq_data.chip->irq_shutdown)
212 		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
213 	else if (desc->irq_data.chip->irq_disable)
214 		desc->irq_data.chip->irq_disable(&desc->irq_data);
215 	else
216 		desc->irq_data.chip->irq_mask(&desc->irq_data);
217 	irq_domain_deactivate_irq(&desc->irq_data);
218 	irq_state_set_masked(desc);
219 }
220 
221 void irq_enable(struct irq_desc *desc)
222 {
223 	irq_state_clr_disabled(desc);
224 	if (desc->irq_data.chip->irq_enable)
225 		desc->irq_data.chip->irq_enable(&desc->irq_data);
226 	else
227 		desc->irq_data.chip->irq_unmask(&desc->irq_data);
228 	irq_state_clr_masked(desc);
229 }
230 
231 /**
232  * irq_disable - Mark interrupt disabled
233  * @desc:	irq descriptor which should be disabled
234  *
235  * If the chip does not implement the irq_disable callback, we
236  * use a lazy disable approach. That means we mark the interrupt
237  * disabled, but leave the hardware unmasked. That's an
238  * optimization because we avoid the hardware access for the
239  * common case where no interrupt happens after we marked it
240  * disabled. If an interrupt happens, then the interrupt flow
241  * handler masks the line at the hardware level and marks it
242  * pending.
243  *
244  * If the interrupt chip does not implement the irq_disable callback,
245  * a driver can disable the lazy approach for a particular irq line by
246  * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
247  * be used for devices which cannot disable the interrupt at the
248  * device level under certain circumstances and have to use
249  * disable_irq[_nosync] instead.
250  */
251 void irq_disable(struct irq_desc *desc)
252 {
253 	irq_state_set_disabled(desc);
254 	if (desc->irq_data.chip->irq_disable) {
255 		desc->irq_data.chip->irq_disable(&desc->irq_data);
256 		irq_state_set_masked(desc);
257 	} else if (irq_settings_disable_unlazy(desc)) {
258 		mask_irq(desc);
259 	}
260 }
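
/*
 * A minimal driver-side sketch (hypothetical irq number, assuming the
 * chip has no irq_disable callback): opt out of the lazy disable
 * optimization described above, so that disable_irq() masks the line
 * at the chip right away:
 *
 *	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 *	...
 *	disable_irq(irq);
 */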
261 
262 void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
263 {
264 	if (desc->irq_data.chip->irq_enable)
265 		desc->irq_data.chip->irq_enable(&desc->irq_data);
266 	else
267 		desc->irq_data.chip->irq_unmask(&desc->irq_data);
268 	cpumask_set_cpu(cpu, desc->percpu_enabled);
269 }
270 
271 void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
272 {
273 	if (desc->irq_data.chip->irq_disable)
274 		desc->irq_data.chip->irq_disable(&desc->irq_data);
275 	else
276 		desc->irq_data.chip->irq_mask(&desc->irq_data);
277 	cpumask_clear_cpu(cpu, desc->percpu_enabled);
278 }
279 
280 static inline void mask_ack_irq(struct irq_desc *desc)
281 {
282 	if (desc->irq_data.chip->irq_mask_ack)
283 		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
284 	else {
285 		desc->irq_data.chip->irq_mask(&desc->irq_data);
286 		if (desc->irq_data.chip->irq_ack)
287 			desc->irq_data.chip->irq_ack(&desc->irq_data);
288 	}
289 	irq_state_set_masked(desc);
290 }
291 
292 void mask_irq(struct irq_desc *desc)
293 {
294 	if (desc->irq_data.chip->irq_mask) {
295 		desc->irq_data.chip->irq_mask(&desc->irq_data);
296 		irq_state_set_masked(desc);
297 	}
298 }
299 
300 void unmask_irq(struct irq_desc *desc)
301 {
302 	if (desc->irq_data.chip->irq_unmask) {
303 		desc->irq_data.chip->irq_unmask(&desc->irq_data);
304 		irq_state_clr_masked(desc);
305 	}
306 }
307 
308 void unmask_threaded_irq(struct irq_desc *desc)
309 {
310 	struct irq_chip *chip = desc->irq_data.chip;
311 
312 	if (chip->flags & IRQCHIP_EOI_THREADED)
313 		chip->irq_eoi(&desc->irq_data);
314 
315 	if (chip->irq_unmask) {
316 		chip->irq_unmask(&desc->irq_data);
317 		irq_state_clr_masked(desc);
318 	}
319 }
320 
321 /*
322  *	handle_nested_irq - Handle a nested irq from an irq thread
323  *	@irq:	the interrupt number
324  *
325  *	Handle interrupts which are nested into a threaded interrupt
326  *	handler. The handler function is called inside the calling
327  *	thread's context.
328  */
329 void handle_nested_irq(unsigned int irq)
330 {
331 	struct irq_desc *desc = irq_to_desc(irq);
332 	struct irqaction *action;
333 	irqreturn_t action_ret;
334 
335 	might_sleep();
336 
337 	raw_spin_lock_irq(&desc->lock);
338 
339 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
340 
341 	action = desc->action;
342 	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
343 		desc->istate |= IRQS_PENDING;
344 		goto out_unlock;
345 	}
346 
347 	kstat_incr_irqs_this_cpu(desc);
348 	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
349 	raw_spin_unlock_irq(&desc->lock);
350 
351 	action_ret = IRQ_NONE;
352 	for_each_action_of_desc(desc, action)
353 		action_ret |= action->thread_fn(action->irq, action->dev_id);
354 
355 	if (!noirqdebug)
356 		note_interrupt(desc, action_ret);
357 
358 	raw_spin_lock_irq(&desc->lock);
359 	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
360 
361 out_unlock:
362 	raw_spin_unlock_irq(&desc->lock);
363 }
364 EXPORT_SYMBOL_GPL(handle_nested_irq);
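
/*
 * A usage sketch (hypothetical demultiplexer driver): nested interrupts
 * are typically dispatched from the threaded handler of a parent
 * interrupt, e.g. a slow-bus irq chip, after the child interrupts have
 * been set up with irq_set_nested_thread():
 *
 *	static irqreturn_t expander_irq_thread(int irq, void *dev_id)
 *	{
 *		struct expander *chip = dev_id;
 *		unsigned long pending = expander_read_pending(chip);
 *		unsigned int bit;
 *
 *		for_each_set_bit(bit, &pending, chip->nr_irqs)
 *			handle_nested_irq(irq_find_mapping(chip->domain, bit));
 *		return IRQ_HANDLED;
 *	}
 */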
365 
366 static bool irq_check_poll(struct irq_desc *desc)
367 {
368 	if (!(desc->istate & IRQS_POLL_INPROGRESS))
369 		return false;
370 	return irq_wait_for_poll(desc);
371 }
372 
373 static bool irq_may_run(struct irq_desc *desc)
374 {
375 	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
376 
377 	/*
378 	 * If the interrupt is not in progress and is not an armed
379 	 * wakeup interrupt, proceed.
380 	 */
381 	if (!irqd_has_set(&desc->irq_data, mask))
382 		return true;
383 
384 	/*
385 	 * If the interrupt is an armed wakeup source, mark it pending
386 	 * and suspended, disable it and notify the pm core about the
387 	 * event.
388 	 */
389 	if (irq_pm_check_wakeup(desc))
390 		return false;
391 
392 	/*
393 	 * Handle a potential concurrent poll on a different core.
394 	 */
395 	return irq_check_poll(desc);
396 }
397 
398 /**
399  *	handle_simple_irq - Simple and software-decoded IRQs.
400  *	@desc:	the interrupt description structure for this irq
401  *
402  *	Simple interrupts are either sent from a demultiplexing interrupt
403  *	handler or come from hardware, where no interrupt hardware control
404  *	is necessary.
405  *
406  *	Note: The caller is expected to handle the ack, clear, mask and
407  *	unmask issues if necessary.
408  */
409 void handle_simple_irq(struct irq_desc *desc)
410 {
411 	raw_spin_lock(&desc->lock);
412 
413 	if (!irq_may_run(desc))
414 		goto out_unlock;
415 
416 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
417 
418 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
419 		desc->istate |= IRQS_PENDING;
420 		goto out_unlock;
421 	}
422 
423 	kstat_incr_irqs_this_cpu(desc);
424 	handle_irq_event(desc);
425 
426 out_unlock:
427 	raw_spin_unlock(&desc->lock);
428 }
429 EXPORT_SYMBOL_GPL(handle_simple_irq);
430 
431 /**
432  *	handle_untracked_irq - Simple and software-decoded IRQs.
433  *	@desc:	the interrupt description structure for this irq
434  *
435  *	Untracked interrupts are sent from a demultiplexing interrupt
436  *	handler when the demultiplexer does not know which device in its
437  *	multiplexed irq domain generated the interrupt. IRQs handled
438  *	through here are not subject to stats tracking, randomness, or
439  *	spurious interrupt detection.
440  *
441  *	Note: Like handle_simple_irq, the caller is expected to handle
442  *	the ack, clear, mask and unmask issues if necessary.
443  */
444 void handle_untracked_irq(struct irq_desc *desc)
445 {
446 	unsigned int flags = 0;
447 
448 	raw_spin_lock(&desc->lock);
449 
450 	if (!irq_may_run(desc))
451 		goto out_unlock;
452 
453 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
454 
455 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
456 		desc->istate |= IRQS_PENDING;
457 		goto out_unlock;
458 	}
459 
460 	desc->istate &= ~IRQS_PENDING;
461 	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
462 	raw_spin_unlock(&desc->lock);
463 
464 	__handle_irq_event_percpu(desc, &flags);
465 
466 	raw_spin_lock(&desc->lock);
467 	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
468 
469 out_unlock:
470 	raw_spin_unlock(&desc->lock);
471 }
472 EXPORT_SYMBOL_GPL(handle_untracked_irq);
473 
474 /*
475  * Called unconditionally from handle_level_irq() and only for oneshot
476  * interrupts from handle_fasteoi_irq()
477  */
478 static void cond_unmask_irq(struct irq_desc *desc)
479 {
480 	/*
481 	 * We need to unmask in the following cases:
482 	 * - Standard level irq (IRQF_ONESHOT is not set)
483 	 * - Oneshot irq which did not wake the thread (caused by a
484 	 *   spurious interrupt or a primary handler handling it
485 	 *   completely).
486 	 */
487 	if (!irqd_irq_disabled(&desc->irq_data) &&
488 	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
489 		unmask_irq(desc);
490 }
491 
492 /**
493  *	handle_level_irq - Level type irq handler
494  *	@desc:	the interrupt description structure for this irq
495  *
496  *	Level type interrupts are active as long as the hardware line has
497  *	the active level. This may require masking the interrupt and unmasking
498  *	it after the associated handler has acknowledged the device, so that
499  *	the interrupt line is back to inactive.
500  */
501 void handle_level_irq(struct irq_desc *desc)
502 {
503 	raw_spin_lock(&desc->lock);
504 	mask_ack_irq(desc);
505 
506 	if (!irq_may_run(desc))
507 		goto out_unlock;
508 
509 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
510 
511 	/*
512 	 * If it's disabled or no action is available,
513 	 * keep it masked and get out of here
514 	 */
515 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
516 		desc->istate |= IRQS_PENDING;
517 		goto out_unlock;
518 	}
519 
520 	kstat_incr_irqs_this_cpu(desc);
521 	handle_irq_event(desc);
522 
523 	cond_unmask_irq(desc);
524 
525 out_unlock:
526 	raw_spin_unlock(&desc->lock);
527 }
528 EXPORT_SYMBOL_GPL(handle_level_irq);
529 
530 #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
531 static inline void preflow_handler(struct irq_desc *desc)
532 {
533 	if (desc->preflow_handler)
534 		desc->preflow_handler(&desc->irq_data);
535 }
536 #else
537 static inline void preflow_handler(struct irq_desc *desc) { }
538 #endif
539 
540 static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
541 {
542 	if (!(desc->istate & IRQS_ONESHOT)) {
543 		chip->irq_eoi(&desc->irq_data);
544 		return;
545 	}
546 	/*
547 	 * We need to unmask in the following cases:
548 	 * - Oneshot irq which did not wake the thread (caused by a
549 	 *   spurious interrupt or a primary handler handling it
550 	 *   completely).
551 	 */
552 	if (!irqd_irq_disabled(&desc->irq_data) &&
553 	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
554 		chip->irq_eoi(&desc->irq_data);
555 		unmask_irq(desc);
556 	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
557 		chip->irq_eoi(&desc->irq_data);
558 	}
559 }
560 
561 /**
562  *	handle_fasteoi_irq - irq handler for transparent controllers
563  *	@desc:	the interrupt description structure for this irq
564  *
565  *	Only a single callback will be issued to the chip: an ->eoi()
566  *	call when the interrupt has been serviced. This enables support
567  *	for modern forms of interrupt handlers, which handle the flow
568  *	details in hardware, transparently.
569  */
570 void handle_fasteoi_irq(struct irq_desc *desc)
571 {
572 	struct irq_chip *chip = desc->irq_data.chip;
573 
574 	raw_spin_lock(&desc->lock);
575 
576 	if (!irq_may_run(desc))
577 		goto out;
578 
579 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
580 
581 	/*
582 	 * If it's disabled or no action is available
583 	 * then mask it and get out of here:
584 	 */
585 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
586 		desc->istate |= IRQS_PENDING;
587 		mask_irq(desc);
588 		goto out;
589 	}
590 
591 	kstat_incr_irqs_this_cpu(desc);
592 	if (desc->istate & IRQS_ONESHOT)
593 		mask_irq(desc);
594 
595 	preflow_handler(desc);
596 	handle_irq_event(desc);
597 
598 	cond_unmask_eoi_irq(desc, chip);
599 
600 	raw_spin_unlock(&desc->lock);
601 	return;
602 out:
603 	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
604 		chip->irq_eoi(&desc->irq_data);
605 	raw_spin_unlock(&desc->lock);
606 }
607 EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
608 
609 /**
610  *	handle_edge_irq - edge type IRQ handler
611  *	@desc:	the interrupt description structure for this irq
612  *
613  *	The interrupt occurs on the falling and/or rising edge of a hardware
614  *	signal. The occurrence is latched into the irq controller hardware
615  *	and must be acked in order to be re-enabled. After the ack another
616  *	interrupt can happen on the same source even before the first one
617  *	is handled by the associated event handler. If this happens it
618  *	might be necessary to disable (mask) the interrupt depending on the
619  *	controller hardware. This requires re-enabling the interrupt inside
620  *	the loop which handles the interrupts that arrived while the
621  *	handler was running. If all pending interrupts are handled, the
622  *	loop is left.
623  */
624 void handle_edge_irq(struct irq_desc *desc)
625 {
626 	raw_spin_lock(&desc->lock);
627 
628 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
629 
630 	if (!irq_may_run(desc)) {
631 		desc->istate |= IRQS_PENDING;
632 		mask_ack_irq(desc);
633 		goto out_unlock;
634 	}
635 
636 	/*
637 	 * If it's disabled or no action is available then mask it and get
638 	 * out of here.
639 	 */
640 	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
641 		desc->istate |= IRQS_PENDING;
642 		mask_ack_irq(desc);
643 		goto out_unlock;
644 	}
645 
646 	kstat_incr_irqs_this_cpu(desc);
647 
648 	/* Start handling the irq */
649 	desc->irq_data.chip->irq_ack(&desc->irq_data);
650 
651 	do {
652 		if (unlikely(!desc->action)) {
653 			mask_irq(desc);
654 			goto out_unlock;
655 		}
656 
657 		/*
658 		 * When another irq arrived while we were handling
659 		 * one, we could have masked the irq.
660 		 * Re-enable it, if it was not disabled in the meantime.
661 		 */
662 		if (unlikely(desc->istate & IRQS_PENDING)) {
663 			if (!irqd_irq_disabled(&desc->irq_data) &&
664 			    irqd_irq_masked(&desc->irq_data))
665 				unmask_irq(desc);
666 		}
667 
668 		handle_irq_event(desc);
669 
670 	} while ((desc->istate & IRQS_PENDING) &&
671 		 !irqd_irq_disabled(&desc->irq_data));
672 
673 out_unlock:
674 	raw_spin_unlock(&desc->lock);
675 }
676 EXPORT_SYMBOL(handle_edge_irq);
677 
678 #ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
679 /**
680  *	handle_edge_eoi_irq - edge eoi type IRQ handler
681  *	@desc:	the interrupt description structure for this irq
682  *
683  * Similar to handle_edge_irq above, but using eoi and without the
684  * mask/unmask logic.
685  */
686 void handle_edge_eoi_irq(struct irq_desc *desc)
687 {
688 	struct irq_chip *chip = irq_desc_get_chip(desc);
689 
690 	raw_spin_lock(&desc->lock);
691 
692 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
693 
694 	if (!irq_may_run(desc)) {
695 		desc->istate |= IRQS_PENDING;
696 		goto out_eoi;
697 	}
698 
699 	/*
700 	 * If it's disabled or no action is available, mark it pending and
701 	 * get out of here.
702 	 */
703 	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
704 		desc->istate |= IRQS_PENDING;
705 		goto out_eoi;
706 	}
707 
708 	kstat_incr_irqs_this_cpu(desc);
709 
710 	do {
711 		if (unlikely(!desc->action))
712 			goto out_eoi;
713 
714 		handle_irq_event(desc);
715 
716 	} while ((desc->istate & IRQS_PENDING) &&
717 		 !irqd_irq_disabled(&desc->irq_data));
718 
719 out_eoi:
720 	chip->irq_eoi(&desc->irq_data);
721 	raw_spin_unlock(&desc->lock);
722 }
723 #endif
724 
725 /**
726  *	handle_percpu_irq - Per CPU local irq handler
727  *	@desc:	the interrupt description structure for this irq
728  *
729  *	Per CPU interrupts on SMP machines without locking requirements
730  */
731 void handle_percpu_irq(struct irq_desc *desc)
732 {
733 	struct irq_chip *chip = irq_desc_get_chip(desc);
734 
735 	kstat_incr_irqs_this_cpu(desc);
736 
737 	if (chip->irq_ack)
738 		chip->irq_ack(&desc->irq_data);
739 
740 	handle_irq_event_percpu(desc);
741 
742 	if (chip->irq_eoi)
743 		chip->irq_eoi(&desc->irq_data);
744 }
745 
746 /**
747  * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
748  * @desc:	the interrupt description structure for this irq
749  *
750  * Per CPU interrupts on SMP machines without locking requirements. Same as
751  * handle_percpu_irq() above but with the following extras:
752  *
753  * action->percpu_dev_id is a pointer to percpu variables which
754  * contain the real device id for the cpu on which this handler is
755  * called.
756  */
757 void handle_percpu_devid_irq(struct irq_desc *desc)
758 {
759 	struct irq_chip *chip = irq_desc_get_chip(desc);
760 	struct irqaction *action = desc->action;
761 	unsigned int irq = irq_desc_get_irq(desc);
762 	irqreturn_t res;
763 
764 	kstat_incr_irqs_this_cpu(desc);
765 
766 	if (chip->irq_ack)
767 		chip->irq_ack(&desc->irq_data);
768 
769 	if (likely(action)) {
770 		trace_irq_handler_entry(irq, action);
771 		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
772 		trace_irq_handler_exit(irq, action, res);
773 	} else {
774 		unsigned int cpu = smp_processor_id();
775 		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
776 
777 		if (enabled)
778 			irq_percpu_disable(desc, cpu);
779 
780 		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
781 			    enabled ? " and unmasked" : "", irq, cpu);
782 	}
783 
784 	if (chip->irq_eoi)
785 		chip->irq_eoi(&desc->irq_data);
786 }
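
/*
 * The matching request-side sketch (hypothetical per cpu device): a
 * handler installed via request_percpu_irq() ends up being called from
 * handle_percpu_devid_irq() above with the per cpu instance of the
 * device id as dev_id:
 *
 *	static DEFINE_PER_CPU(struct my_pcpu_dev, my_pcpu_dev);
 *
 *	err = request_percpu_irq(irq, my_pcpu_handler, "my-pcpu-dev",
 *				 &my_pcpu_dev);
 */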
787 
788 static void
789 __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
790 		     int is_chained, const char *name)
791 {
792 	if (!handle) {
793 		handle = handle_bad_irq;
794 	} else {
795 		struct irq_data *irq_data = &desc->irq_data;
796 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
797 		/*
798 		 * With hierarchical domains we might run into a
799 		 * situation where the outermost chip is not yet set
800 		 * up, but the inner chips are there.  Instead of
801 		 * bailing we install the handler, but obviously we
802 		 * cannot enable/startup the interrupt at this point.
803 		 */
804 		while (irq_data) {
805 			if (irq_data->chip != &no_irq_chip)
806 				break;
807 			/*
808 			 * Bail out if the outer chip is not set up
809 			 * and the interrupt is supposed to be started
810 			 * right away.
811 			 */
812 			if (WARN_ON(is_chained))
813 				return;
814 			/* Try the parent */
815 			irq_data = irq_data->parent_data;
816 		}
817 #endif
818 		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
819 			return;
820 	}
821 
822 	/* Uninstall? */
823 	if (handle == handle_bad_irq) {
824 		if (desc->irq_data.chip != &no_irq_chip)
825 			mask_ack_irq(desc);
826 		irq_state_set_disabled(desc);
827 		if (is_chained)
828 			desc->action = NULL;
829 		desc->depth = 1;
830 	}
831 	desc->handle_irq = handle;
832 	desc->name = name;
833 
834 	if (handle != handle_bad_irq && is_chained) {
835 		unsigned int type = irqd_get_trigger_type(&desc->irq_data);
836 
837 		/*
838 		 * We're about to start this interrupt immediately,
839 		 * hence the need to set the trigger configuration.
840 		 * But the .set_type callback may have overridden the
841 		 * flow handler, ignoring that we're dealing with a
842 		 * chained interrupt. Reset it immediately because we
843 		 * do know better.
844 		 */
845 		if (type != IRQ_TYPE_NONE) {
846 			__irq_set_trigger(desc, type);
847 			desc->handle_irq = handle;
848 		}
849 
850 		irq_settings_set_noprobe(desc);
851 		irq_settings_set_norequest(desc);
852 		irq_settings_set_nothread(desc);
853 		desc->action = &chained_action;
854 		irq_startup(desc, true);
855 	}
856 }
857 
858 void
859 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
860 		  const char *name)
861 {
862 	unsigned long flags;
863 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
864 
865 	if (!desc)
866 		return;
867 
868 	__irq_do_set_handler(desc, handle, is_chained, name);
869 	irq_put_desc_busunlock(desc, flags);
870 }
871 EXPORT_SYMBOL_GPL(__irq_set_handler);
872 
873 void
874 irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
875 				 void *data)
876 {
877 	unsigned long flags;
878 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
879 
880 	if (!desc)
881 		return;
882 
883 	__irq_do_set_handler(desc, handle, 1, NULL);
884 	desc->irq_common_data.handler_data = data;
885 
886 	irq_put_desc_busunlock(desc, flags);
887 }
888 EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
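
/*
 * A typical use (hypothetical gpio-style demultiplexer): install a
 * chained flow handler on the parent interrupt and fan out to the
 * child domain. Chained handlers run in hard interrupt context and
 * never have an action of their own, see bad_chained_irq() above:
 *
 *	static void my_demux_handler(struct irq_desc *desc)
 *	{
 *		struct my_gc *gc = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending;
 *		unsigned int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = my_gc_read_pending(gc);
 *		for_each_set_bit(bit, &pending, gc->nr_irqs)
 *			generic_handle_irq(irq_find_mapping(gc->domain, bit));
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	irq_set_chained_handler_and_data(parent_irq, my_demux_handler, gc);
 */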
889 
890 void
891 irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
892 			      irq_flow_handler_t handle, const char *name)
893 {
894 	irq_set_chip(irq, chip);
895 	__irq_set_handler(irq, handle, 0, name);
896 }
897 EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
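
/*
 * A mapping-time sketch (hypothetical driver): the usual pattern is to
 * pick the flow handler matching the trigger semantics and attach the
 * chip and its data from the domain's map/alloc callback:
 *
 *	irq_set_chip_and_handler(virq, &my_irq_chip, handle_level_irq);
 *	irq_set_chip_data(virq, priv);
 *
 * irq_set_chip_and_handler() is the <linux/irq.h> wrapper which calls
 * this function with a NULL name.
 */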
898 
899 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
900 {
901 	unsigned long flags;
902 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
903 
904 	if (!desc)
905 		return;
906 	irq_settings_clr_and_set(desc, clr, set);
907 
908 	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
909 		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
910 	if (irq_settings_has_no_balance_set(desc))
911 		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
912 	if (irq_settings_is_per_cpu(desc))
913 		irqd_set(&desc->irq_data, IRQD_PER_CPU);
914 	if (irq_settings_can_move_pcntxt(desc))
915 		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
916 	if (irq_settings_is_level(desc))
917 		irqd_set(&desc->irq_data, IRQD_LEVEL);
918 
919 	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
920 
921 	irq_put_desc_unlock(desc, flags);
922 }
923 EXPORT_SYMBOL_GPL(irq_modify_status);
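
/*
 * Most callers use the thin wrappers from <linux/irq.h> instead of
 * calling this directly, e.g.:
 *
 *	irq_set_status_flags(irq, IRQ_NOAUTOEN);
 *	irq_clear_status_flags(irq, IRQ_NOAUTOEN);
 *	irq_set_noprobe(irq);
 *
 * which expand to irq_modify_status() with the corresponding clear and
 * set masks.
 */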
924 
925 /**
926  *	irq_cpu_online - Invoke all irq_cpu_online functions.
927  *
928  *	Iterate through all irqs and invoke the chip.irq_cpu_online()
929  *	for each.
930  */
931 void irq_cpu_online(void)
932 {
933 	struct irq_desc *desc;
934 	struct irq_chip *chip;
935 	unsigned long flags;
936 	unsigned int irq;
937 
938 	for_each_active_irq(irq) {
939 		desc = irq_to_desc(irq);
940 		if (!desc)
941 			continue;
942 
943 		raw_spin_lock_irqsave(&desc->lock, flags);
944 
945 		chip = irq_data_get_irq_chip(&desc->irq_data);
946 		if (chip && chip->irq_cpu_online &&
947 		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
948 		     !irqd_irq_disabled(&desc->irq_data)))
949 			chip->irq_cpu_online(&desc->irq_data);
950 
951 		raw_spin_unlock_irqrestore(&desc->lock, flags);
952 	}
953 }
954 
955 /**
956  *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
957  *
958  *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
959  *	for each.
960  */
961 void irq_cpu_offline(void)
962 {
963 	struct irq_desc *desc;
964 	struct irq_chip *chip;
965 	unsigned long flags;
966 	unsigned int irq;
967 
968 	for_each_active_irq(irq) {
969 		desc = irq_to_desc(irq);
970 		if (!desc)
971 			continue;
972 
973 		raw_spin_lock_irqsave(&desc->lock, flags);
974 
975 		chip = irq_data_get_irq_chip(&desc->irq_data);
976 		if (chip && chip->irq_cpu_offline &&
977 		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
978 		     !irqd_irq_disabled(&desc->irq_data)))
979 			chip->irq_cpu_offline(&desc->irq_data);
980 
981 		raw_spin_unlock_irqrestore(&desc->lock, flags);
982 	}
983 }
984 
985 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
986 /**
987  * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
988  * NULL)
989  * @data:	Pointer to interrupt specific data
990  */
991 void irq_chip_enable_parent(struct irq_data *data)
992 {
993 	data = data->parent_data;
994 	if (data->chip->irq_enable)
995 		data->chip->irq_enable(data);
996 	else
997 		data->chip->irq_unmask(data);
998 }
999 
1000 /**
1001  * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
1002  * NULL)
1003  * @data:	Pointer to interrupt specific data
1004  */
1005 void irq_chip_disable_parent(struct irq_data *data)
1006 {
1007 	data = data->parent_data;
1008 	if (data->chip->irq_disable)
1009 		data->chip->irq_disable(data);
1010 	else
1011 		data->chip->irq_mask(data);
1012 }
1013 
1014 /**
1015  * irq_chip_ack_parent - Acknowledge the parent interrupt
1016  * @data:	Pointer to interrupt specific data
1017  */
1018 void irq_chip_ack_parent(struct irq_data *data)
1019 {
1020 	data = data->parent_data;
1021 	data->chip->irq_ack(data);
1022 }
1023 EXPORT_SYMBOL_GPL(irq_chip_ack_parent);
1024 
1025 /**
1026  * irq_chip_mask_parent - Mask the parent interrupt
1027  * @data:	Pointer to interrupt specific data
1028  */
1029 void irq_chip_mask_parent(struct irq_data *data)
1030 {
1031 	data = data->parent_data;
1032 	data->chip->irq_mask(data);
1033 }
1034 EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
1035 
1036 /**
1037  * irq_chip_unmask_parent - Unmask the parent interrupt
1038  * @data:	Pointer to interrupt specific data
1039  */
1040 void irq_chip_unmask_parent(struct irq_data *data)
1041 {
1042 	data = data->parent_data;
1043 	data->chip->irq_unmask(data);
1044 }
1045 EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);
1046 
1047 /**
1048  * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
1049  * @data:	Pointer to interrupt specific data
1050  */
1051 void irq_chip_eoi_parent(struct irq_data *data)
1052 {
1053 	data = data->parent_data;
1054 	data->chip->irq_eoi(data);
1055 }
1056 EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
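
/*
 * In a hierarchical irq domain the child chip can delegate the basic
 * operations to its parent by plugging these helpers in as callbacks.
 * A minimal sketch (hypothetical chip, assuming the parent implements
 * the corresponding operations):
 *
 *	static struct irq_chip my_child_chip = {
 *		.name			= "my-child",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */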
1057 
1058 /**
1059  * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
1060  * @data:	Pointer to interrupt specific data
1061  * @dest:	The affinity mask to set
1062  * @force:	Flag to enforce setting (disable online checks)
1063  *
1064  * Conditional, as the underlying parent chip might not implement it.
1065  */
1066 int irq_chip_set_affinity_parent(struct irq_data *data,
1067 				 const struct cpumask *dest, bool force)
1068 {
1069 	data = data->parent_data;
1070 	if (data->chip->irq_set_affinity)
1071 		return data->chip->irq_set_affinity(data, dest, force);
1072 
1073 	return -ENOSYS;
1074 }
1075 
1076 /**
1077  * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
1078  * @data:	Pointer to interrupt specific data
1079  * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
1080  *
1081  * Conditional, as the underlying parent chip might not implement it.
1082  */
1083 int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
1084 {
1085 	data = data->parent_data;
1086 
1087 	if (data->chip->irq_set_type)
1088 		return data->chip->irq_set_type(data, type);
1089 
1090 	return -ENOSYS;
1091 }
1092 EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);
1093 
1094 /**
1095  * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
1096  * @data:	Pointer to interrupt specific data
1097  *
1098  * Iterate through the domain hierarchy of the interrupt and check
1099  * whether a hw retrigger function exists. If yes, invoke it.
1100  */
1101 int irq_chip_retrigger_hierarchy(struct irq_data *data)
1102 {
1103 	for (data = data->parent_data; data; data = data->parent_data)
1104 		if (data->chip && data->chip->irq_retrigger)
1105 			return data->chip->irq_retrigger(data);
1106 
1107 	return 0;
1108 }
1109 
1110 /**
1111  * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
1112  * @data:	Pointer to interrupt specific data
1113  * @vcpu_info:	The vcpu affinity information
1114  */
1115 int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
1116 {
1117 	data = data->parent_data;
1118 	if (data->chip->irq_set_vcpu_affinity)
1119 		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);
1120 
1121 	return -ENOSYS;
1122 }
1123 
1124 /**
1125  * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
1126  * @data:	Pointer to interrupt specific data
1127  * @on:		Whether to set or reset the wake-up capability of this irq
1128  *
1129  * Conditional, as the underlying parent chip might not implement it.
1130  */
1131 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
1132 {
1133 	data = data->parent_data;
1134 	if (data->chip->irq_set_wake)
1135 		return data->chip->irq_set_wake(data, on);
1136 
1137 	return -ENOSYS;
1138 }
1139 #endif
1140 
1141 /**
1142  * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
1143  * @data:	Pointer to interrupt specific data
1144  * @msg:	Pointer to the MSI message
1145  *
1146  * For hierarchical domains we find the first chip in the hierarchy
1147  * which implements the irq_compose_msi_msg callback. For non-hierarchical
1148  * domains we use the top level chip.
1149  */
1150 int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1151 {
1152 	struct irq_data *pos = NULL;
1153 
1154 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
1155 	for (; data; data = data->parent_data)
1156 #endif
1157 		if (data->chip && data->chip->irq_compose_msi_msg)
1158 			pos = data;
1159 	if (!pos)
1160 		return -ENOSYS;
1161 
1162 	pos->chip->irq_compose_msi_msg(pos, msg);
1163 
1164 	return 0;
1165 }
1166 
1167 /**
1168  * irq_chip_pm_get - Enable power for an IRQ chip
1169  * @data:	Pointer to interrupt specific data
1170  *
1171  * Enable the power to the IRQ chip referenced by the interrupt data
1172  * structure.
1173  */
1174 int irq_chip_pm_get(struct irq_data *data)
1175 {
1176 	int retval;
1177 
1178 	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
1179 		retval = pm_runtime_get_sync(data->chip->parent_device);
1180 		if (retval < 0) {
1181 			pm_runtime_put_noidle(data->chip->parent_device);
1182 			return retval;
1183 		}
1184 	}
1185 
1186 	return 0;
1187 }
1188 
1189 /**
1190  * irq_chip_pm_put - Disable power for an IRQ chip
1191  * @data:	Pointer to interrupt specific data
1192  *
1193  * Disable the power to the IRQ chip referenced by the interrupt data
1194  * structure. Note that power will only be disabled once this
1195  * function has been called for all IRQs that have called irq_chip_pm_get().
1196  */
1197 int irq_chip_pm_put(struct irq_data *data)
1198 {
1199 	int retval = 0;
1200 
1201 	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
1202 		retval = pm_runtime_put(data->chip->parent_device);
1203 
1204 	return (retval < 0) ? retval : 0;
1205 }
1206