/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call an action on their IRQ. This default
 * action will emit a warning if such a thing happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the irq handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *	@irq_base:	Interrupt number base
 *	@irq_offset:	Interrupt number offset
 *	@entry:		Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	irq_domain_activate_irq(&desc->irq_data);
	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_domain_deactivate_irq(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	} else if (irq_settings_disable_unlazy(desc)) {
		mask_irq(desc);
	}
}
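
/*
 * Editor's sketch of the opt-out described above, as it could appear in a
 * driver's setup path (the irq number is an assumption of the example):
 *
 *	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 *
 * With the flag set, irq_disable() masks the line in hardware right away
 * instead of relying on the lazy approach.
 */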

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (chip->irq_unmask) {
		chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/*
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
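
/*
 * Editor's sketch: a slow-bus irq_chip (e.g. an I2C GPIO expander)
 * demultiplexing child interrupts from its own threaded handler, for
 * children previously marked with irq_set_nested_thread(). The demo_*
 * names and the register-read helper are hypothetical;
 * irq_find_mapping() and handle_nested_irq() are the real APIs:
 *
 *	static irqreturn_t demo_expander_irq_thread(int irq, void *dev_id)
 *	{
 *		struct demo_expander *exp = dev_id;
 *		unsigned long pending = demo_read_pending(exp);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, exp->nlines)
 *			handle_nested_irq(irq_find_mapping(exp->domain, bit));
 *		return IRQ_HANDLED;
 *	}
 */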

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/**
 *	handle_untracked_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Untracked interrupts are sent from a demultiplexing interrupt
 *	handler when the demultiplexer does not know which device in its
 *	multiplexed irq domain generated the interrupt. IRQs handled
 *	through here are not subjected to stats tracking, randomness, or
 *	spurious interrupt detection.
 *
 *	Note: Like handle_simple_irq, the caller is expected to handle
 *	the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 *	handle_level_irq - Level type irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require masking the interrupt and
 *	unmasking it after the associated handler has acknowledged the
 *	device, so the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupts occur on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires re-enabling the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available, then mask it and
	 * get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
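
/*
 * Editor's sketch: an irq_chip's ->irq_set_type() callback switching
 * between the level and edge flow handlers above, as drivers for
 * controllers supporting both trigger modes commonly do (the hardware
 * programming step is omitted; irq_set_handler_locked() is the real API):
 *
 *	static int demo_irq_set_type(struct irq_data *d, unsigned int type)
 *	{
 *		if (type & IRQ_TYPE_LEVEL_MASK)
 *			irq_set_handler_locked(d, handle_level_irq);
 *		else
 *			irq_set_handler_locked(d, handle_edge_irq);
 *		return 0;
 *	}
 */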

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available, then mark it
	 * pending and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
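
/*
 * Editor's sketch: wiring a per-CPU interrupt to this flow.
 * irq_set_percpu_devid(), irq_set_chip_and_handler() and
 * request_percpu_irq() are the real APIs; the chip, handler and per-CPU
 * data names are assumptions of the example:
 *
 *	static DEFINE_PER_CPU(struct demo_data, demo_percpu_data);
 *
 *	irq_set_percpu_devid(irq);
 *	irq_set_chip_and_handler(irq, &demo_chip, handle_percpu_devid_irq);
 *	err = request_percpu_irq(irq, demo_timer_handler, "demo-timer",
 *				 &demo_percpu_data);
 */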

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there.  Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_startup(desc, true);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, 1, NULL);
	desc->irq_common_data.handler_data = data;

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
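
/*
 * Editor's sketch: a GPIO controller demultiplexing its lines from a
 * chained handler. chained_irq_enter()/chained_irq_exit() (from
 * <linux/irqchip/chained_irq.h>), generic_handle_irq() and
 * irq_find_mapping() are the real APIs; the demo_* names and the pending
 * register are hypothetical:
 *
 *	static void demo_gpio_irq_handler(struct irq_desc *desc)
 *	{
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		struct demo_gpio *g = irq_desc_get_handler_data(desc);
 *		unsigned long pending;
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = readl(g->base + DEMO_PENDING);
 *		for_each_set_bit(bit, &pending, g->ngpio)
 *			generic_handle_irq(irq_find_mapping(g->domain, bit));
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	irq_set_chained_handler_and_data(parent_irq, demo_gpio_irq_handler, g);
 */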

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
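
/*
 * Editor's sketch of the typical call site: an irq_domain ->map()
 * callback installing chip, flow handler and chip data for a freshly
 * mapped virq (the demo_* chip and the level-triggered choice are
 * assumptions of the example):
 *
 *	static int demo_irq_domain_map(struct irq_domain *d, unsigned int virq,
 *				       irq_hw_number_t hw)
 *	{
 *		irq_set_chip_and_handler(virq, &demo_irq_chip, handle_level_irq);
 *		irq_set_chip_data(virq, d->host_data);
 *		return 0;
 *	}
 */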

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
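
/*
 * Editor's sketch of typical uses: marking a line per-CPU and exempt
 * from balancing, or re-allowing request_irq()/autoprobing on a line
 * previously marked NOREQUEST/NOPROBE (the irq number is an assumption):
 *
 *	irq_modify_status(irq, 0, IRQ_PER_CPU | IRQ_NO_BALANCING);
 *	irq_modify_status(irq, IRQ_NOREQUEST | IRQ_NOPROBE, 0);
 */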

/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @dest:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:	Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @vcpu_info:	The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @on:		Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;
	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
#endif
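
/*
 * Editor's sketch: a child irq_chip in a hierarchical domain that simply
 * forwards the basic operations to its parent via the helpers above (the
 * chip name is an assumption; real chips typically add their own
 * ->irq_compose_msi_msg or ->irq_set_affinity on top):
 *
 *	static struct irq_chip demo_child_chip = {
 *		.name			= "demo-child",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */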

/**
 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
 * @data:	Pointer to interrupt specific data
 * @msg:	Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non-hierarchical
 * domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	int retval;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
		retval = pm_runtime_get_sync(data->chip->parent_device);
		if (retval < 0) {
			pm_runtime_put_noidle(data->chip->parent_device);
			return retval;
		}
	}

	return 0;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this function
 * has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
		retval = pm_runtime_put(data->chip->parent_device);

	return (retval < 0) ? retval : 0;
}
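
/*
 * Editor's sketch: a chip whose registers are only reachable while its
 * parent device is runtime-resumed points parent_device at that device,
 * e.g. in probe (pdev is an assumed platform device); the core then
 * calls irq_chip_pm_get()/irq_chip_pm_put() around request and release
 * of each irq:
 *
 *	demo_chip.parent_device = &pdev->dev;
 */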
1203