1 /*
2  * linux/kernel/irq/chip.c
3  *
4  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5  * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
6  *
7  * This file contains the core interrupt handling code, for irq-chip
8  * based architectures.
9  *
10  * Detailed information is available in Documentation/DocBook/genericirq
11  */
12 
13 #include <linux/irq.h>
14 #include <linux/msi.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/kernel_stat.h>
18 
19 #include "internals.h"
20 
21 /**
22  *	dynamic_irq_init - initialize a dynamically allocated irq
23  *	@irq:	irq number to initialize
24  */
25 void dynamic_irq_init(unsigned int irq)
26 {
27 	struct irq_desc *desc;
28 	unsigned long flags;
29 
30 	if (irq >= NR_IRQS) {
31 		WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
32 		return;
33 	}
34 
35 	/* Ensure we don't have left over values from a previous use of this irq */
36 	desc = irq_desc + irq;
37 	spin_lock_irqsave(&desc->lock, flags);
38 	desc->status = IRQ_DISABLED;
39 	desc->chip = &no_irq_chip;
40 	desc->handle_irq = handle_bad_irq;
41 	desc->depth = 1;
42 	desc->msi_desc = NULL;
43 	desc->handler_data = NULL;
44 	desc->chip_data = NULL;
45 	desc->action = NULL;
46 	desc->irq_count = 0;
47 	desc->irqs_unhandled = 0;
48 #ifdef CONFIG_SMP
49 	cpus_setall(desc->affinity);
50 #endif
51 	spin_unlock_irqrestore(&desc->lock, flags);
52 }
53 
54 /**
55  *	dynamic_irq_cleanup - cleanup a dynamically allocated irq
56  *	@irq:	irq number to clean up
57  */
58 void dynamic_irq_cleanup(unsigned int irq)
59 {
60 	struct irq_desc *desc;
61 	unsigned long flags;
62 
63 	if (irq >= NR_IRQS) {
64 		WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
65 		return;
66 	}
67 
68 	desc = irq_desc + irq;
69 	spin_lock_irqsave(&desc->lock, flags);
70 	if (desc->action) {
71 		spin_unlock_irqrestore(&desc->lock, flags);
72 		WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
73 			irq);
74 		return;
75 	}
76 	desc->msi_desc = NULL;
77 	desc->handler_data = NULL;
78 	desc->chip_data = NULL;
79 	desc->handle_irq = handle_bad_irq;
80 	desc->chip = &no_irq_chip;
81 	spin_unlock_irqrestore(&desc->lock, flags);
82 }
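
/*
 * Illustrative sketch (not part of the original file): the dynamic irq
 * lifecycle built on the two helpers above.  The irq number is assumed to
 * come from an arch specific allocator, and all example_* identifiers are
 * hypothetical.
 */
static void example_dynamic_irq_lifecycle(unsigned int irq)
{
	/* reset the descriptor to a known, disabled state */
	dynamic_irq_init(irq);

	/* bind a chip and flow handler before the irq is requested */
	set_irq_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);

	/* ... request_irq()/free_irq() would happen here ... */

	/* return the descriptor to the "unused" state */
	dynamic_irq_cleanup(irq);
}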
83 
84 
85 /**
86  *	set_irq_chip - set the irq chip for an irq
87  *	@irq:	irq number
88  *	@chip:	pointer to irq chip description structure
89  */
90 int set_irq_chip(unsigned int irq, struct irq_chip *chip)
91 {
92 	struct irq_desc *desc;
93 	unsigned long flags;
94 
95 	if (irq >= NR_IRQS) {
96 		WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
97 		return -EINVAL;
98 	}
99 
100 	if (!chip)
101 		chip = &no_irq_chip;
102 
103 	desc = irq_desc + irq;
104 	spin_lock_irqsave(&desc->lock, flags);
105 	irq_chip_set_defaults(chip);
106 	desc->chip = chip;
107 	spin_unlock_irqrestore(&desc->lock, flags);
108 
109 	return 0;
110 }
111 EXPORT_SYMBOL(set_irq_chip);
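
/*
 * Illustrative sketch (not part of the original file): registering a
 * hypothetical controller with set_irq_chip().  The example_pic_* callbacks
 * stand in for real register accesses; callbacks a chip does not provide
 * are filled in by irq_chip_set_defaults() further down.
 */
static void example_pic_ack(unsigned int irq)    { /* clear latch in hw */ }
static void example_pic_mask(unsigned int irq)   { /* set mask bit in hw */ }
static void example_pic_unmask(unsigned int irq) { /* clear mask bit in hw */ }

static struct irq_chip example_pic_chip = {
	.name	= "EXAMPLE-PIC",
	.ack	= example_pic_ack,
	.mask	= example_pic_mask,
	.unmask	= example_pic_unmask,
};

/* typically called from the platform's interrupt init code */
static void example_pic_register(unsigned int irq)
{
	set_irq_chip(irq, &example_pic_chip);
}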
112 
113 /**
114  *	set_irq_type - set the irq type for an irq
115  *	@irq:	irq number
116  *	@type:	interrupt type - see include/linux/interrupt.h
117  */
118 int set_irq_type(unsigned int irq, unsigned int type)
119 {
120 	struct irq_desc *desc;
121 	unsigned long flags;
122 	int ret = -ENXIO;
123 
124 	if (irq >= NR_IRQS) {
125 		printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
126 		return -ENODEV;
127 	}
128 
129 	desc = irq_desc + irq;
130 	if (desc->chip->set_type) {
131 		spin_lock_irqsave(&desc->lock, flags);
132 		ret = desc->chip->set_type(irq, type);
133 		spin_unlock_irqrestore(&desc->lock, flags);
134 	}
135 	return ret;
136 }
137 EXPORT_SYMBOL(set_irq_type);
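
/*
 * Illustrative sketch: selecting the trigger type before the line is
 * requested.  IRQ_TYPE_EDGE_RISING comes from <linux/irq.h>; the call only
 * succeeds if the installed chip provides a ->set_type() callback.
 */
static int example_configure_trigger(unsigned int irq)
{
	return set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
}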
138 
139 /**
140  *	set_irq_data - set irq handler data for an irq
141  *	@irq:	Interrupt number
142  *	@data:	Pointer to interrupt specific data
143  *
144  *	Set the hardware irq controller data for an irq
145  */
146 int set_irq_data(unsigned int irq, void *data)
147 {
148 	struct irq_desc *desc;
149 	unsigned long flags;
150 
151 	if (irq >= NR_IRQS) {
152 		printk(KERN_ERR
153 		       "Trying to install controller data for IRQ%d\n", irq);
154 		return -EINVAL;
155 	}
156 
157 	desc = irq_desc + irq;
158 	spin_lock_irqsave(&desc->lock, flags);
159 	desc->handler_data = data;
160 	spin_unlock_irqrestore(&desc->lock, flags);
161 	return 0;
162 }
163 EXPORT_SYMBOL(set_irq_data);
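
/*
 * Illustrative sketch: attaching per-irq handler data.  A demultiplexing
 * driver might store a pointer to its state here and read it back later
 * with get_irq_data(); struct example_demux_state is hypothetical.
 */
struct example_demux_state {
	void __iomem *regs;
};

static int example_attach_state(unsigned int irq, struct example_demux_state *st)
{
	return set_irq_data(irq, st);
}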
164 
165 /**
166  *	set_irq_msi - set MSI descriptor data for an irq
167  *	@irq:	Interrupt number
168  *	@entry:	Pointer to MSI descriptor data
169  *
170  *	Set the MSI descriptor entry for an irq
171  */
172 int set_irq_msi(unsigned int irq, struct msi_desc *entry)
173 {
174 	struct irq_desc *desc;
175 	unsigned long flags;
176 
177 	if (irq >= NR_IRQS) {
178 		printk(KERN_ERR
179 		       "Trying to install msi data for IRQ%d\n", irq);
180 		return -EINVAL;
181 	}
182 	desc = irq_desc + irq;
183 	spin_lock_irqsave(&desc->lock, flags);
184 	desc->msi_desc = entry;
185 	if (entry)
186 		entry->irq = irq;
187 	spin_unlock_irqrestore(&desc->lock, flags);
188 	return 0;
189 }
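
/*
 * Illustrative sketch: how an architecture's MSI setup path might bind a
 * descriptor to a freshly initialized irq.  example_arch_setup_msi() and
 * the way the irq number is chosen are hypothetical; the real callers are
 * the per-architecture arch_setup_msi_irq() implementations.
 */
static int example_arch_setup_msi(unsigned int irq, struct msi_desc *desc)
{
	dynamic_irq_init(irq);
	return set_irq_msi(irq, desc);	/* also stores irq in desc->irq */
}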
190 
191 /**
192  *	set_irq_chip_data - set irq chip data for an irq
193  *	@irq:	Interrupt number
194  *	@data:	Pointer to chip specific data
195  *
196  *	Set the hardware irq chip data for an irq
197  */
198 int set_irq_chip_data(unsigned int irq, void *data)
199 {
200 	struct irq_desc *desc = irq_desc + irq;
201 	unsigned long flags;
202 
203 	if (irq >= NR_IRQS || !desc->chip) {
204 		printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
205 		return -EINVAL;
206 	}
207 
208 	spin_lock_irqsave(&desc->lock, flags);
209 	desc->chip_data = data;
210 	spin_unlock_irqrestore(&desc->lock, flags);
211 
212 	return 0;
213 }
214 EXPORT_SYMBOL(set_irq_chip_data);
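
/*
 * Illustrative sketch: a driver handling several controller instances can
 * hang its per-instance state off the descriptor and fetch it again from
 * its chip callbacks via get_irq_chip_data().  The example_gpio_* names
 * are hypothetical.
 */
struct example_gpio_bank {
	void __iomem *base;
};

static int example_bind_bank(unsigned int irq, struct example_gpio_bank *bank)
{
	return set_irq_chip_data(irq, bank);
}

static void example_gpio_mask(unsigned int irq)
{
	struct example_gpio_bank *bank = get_irq_chip_data(irq);

	/* ... write the mask register at bank->base ... */
	(void)bank;
}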
215 
216 /*
217  * default enable function
218  */
219 static void default_enable(unsigned int irq)
220 {
221 	struct irq_desc *desc = irq_desc + irq;
222 
223 	desc->chip->unmask(irq);
224 	desc->status &= ~IRQ_MASKED;
225 }
226 
227 /*
228  * default disable function
229  */
230 static void default_disable(unsigned int irq)
231 {
232 }
233 
234 /*
235  * default startup function
236  */
237 static unsigned int default_startup(unsigned int irq)
238 {
239 	irq_desc[irq].chip->enable(irq);
240 
241 	return 0;
242 }
243 
244 /*
245  * default shutdown function
246  */
247 static void default_shutdown(unsigned int irq)
248 {
249 	struct irq_desc *desc = irq_desc + irq;
250 
251 	desc->chip->mask(irq);
252 	desc->status |= IRQ_MASKED;
253 }
254 
255 /*
256  * Fixup enable/disable function pointers
257  */
258 void irq_chip_set_defaults(struct irq_chip *chip)
259 {
260 	if (!chip->enable)
261 		chip->enable = default_enable;
262 	if (!chip->disable)
263 		chip->disable = default_disable;
264 	if (!chip->startup)
265 		chip->startup = default_startup;
266 	/*
267 	 * Use chip->disable as the shutdown function when the chip
268 	 * provides its own disable. If chip->disable is the no-op
269 	 * default_disable, fall back to default_shutdown, otherwise
270 	 * the irq line would not be masked on free_irq():
271 	 */
272 	if (!chip->shutdown)
273 		chip->shutdown = chip->disable != default_disable ?
274 			chip->disable : default_shutdown;
275 	if (!chip->name)
276 		chip->name = chip->typename;
277 	if (!chip->end)
278 		chip->end = dummy_irq_chip.end;
279 }
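
/*
 * Illustrative sketch: a chip that only supplies ->disable.  Because that
 * is not default_disable, the fallback above reuses it as ->shutdown, so
 * the line really gets masked on free_irq().  The example_quiet_*
 * identifiers are hypothetical.
 */
static void example_quiet_disable(unsigned int irq) { /* mask in hw */ }
static void example_quiet_ack(unsigned int irq)     { /* ack in hw */ }
static void example_quiet_unmask(unsigned int irq)  { /* unmask in hw */ }

static struct irq_chip example_quiet_chip = {
	.name		= "EXAMPLE-QUIET",
	.disable	= example_quiet_disable,
	.ack		= example_quiet_ack,
	.unmask		= example_quiet_unmask,
	/* .enable, .startup and .shutdown are filled in by the core */
};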
280 
281 static inline void mask_ack_irq(struct irq_desc *desc, int irq)
282 {
283 	if (desc->chip->mask_ack)
284 		desc->chip->mask_ack(irq);
285 	else {
286 		desc->chip->mask(irq);
287 		desc->chip->ack(irq);
288 	}
289 }
290 
291 /**
292  *	handle_simple_irq - Simple and software-decoded IRQs.
293  *	@irq:	the interrupt number
294  *	@desc:	the interrupt description structure for this irq
295  *
296  *	Simple interrupts are either sent from a demultiplexing interrupt
297  *	handler or come from hardware for which no interrupt hardware control
298  *	is necessary.
299  *
300  *	Note: The caller is expected to handle the ack, clear, mask and
301  *	unmask issues if necessary.
302  */
303 void
304 handle_simple_irq(unsigned int irq, struct irq_desc *desc)
305 {
306 	struct irqaction *action;
307 	irqreturn_t action_ret;
308 	const unsigned int cpu = smp_processor_id();
309 
310 	spin_lock(&desc->lock);
311 
312 	if (unlikely(desc->status & IRQ_INPROGRESS))
313 		goto out_unlock;
314 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
315 	kstat_cpu(cpu).irqs[irq]++;
316 
317 	action = desc->action;
318 	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
319 		goto out_unlock;
320 
321 	desc->status |= IRQ_INPROGRESS;
322 	spin_unlock(&desc->lock);
323 
324 	action_ret = handle_IRQ_event(irq, action);
325 	if (!noirqdebug)
326 		note_interrupt(irq, desc, action_ret);
327 
328 	spin_lock(&desc->lock);
329 	desc->status &= ~IRQ_INPROGRESS;
330 out_unlock:
331 	spin_unlock(&desc->lock);
332 }
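
/*
 * Illustrative sketch: software-decoded sub-interrupts of an expander are
 * a typical user of handle_simple_irq, since the parent handler already
 * did the hardware ack/mask work.  The child irq range and the use of
 * dummy_irq_chip are a hypothetical arrangement.
 */
static void example_setup_soft_irqs(unsigned int first, unsigned int count)
{
	unsigned int i;

	for (i = first; i < first + count; i++)
		set_irq_chip_and_handler(i, &dummy_irq_chip, handle_simple_irq);
}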
333 
334 /**
335  *	handle_level_irq - Level type irq handler
336  *	@irq:	the interrupt number
337  *	@desc:	the interrupt description structure for this irq
338  *
339  *	Level type interrupts are active as long as the hardware line has
340  *	the active level. This may require masking the interrupt and
341  *	unmasking it after the associated handler has acknowledged the
342  *	device, so that the interrupt line is back to inactive.
343  */
344 void
345 handle_level_irq(unsigned int irq, struct irq_desc *desc)
346 {
347 	unsigned int cpu = smp_processor_id();
348 	struct irqaction *action;
349 	irqreturn_t action_ret;
350 
351 	spin_lock(&desc->lock);
352 	mask_ack_irq(desc, irq);
353 
354 	if (unlikely(desc->status & IRQ_INPROGRESS))
355 		goto out_unlock;
356 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
357 	kstat_cpu(cpu).irqs[irq]++;
358 
359 	/*
360 	 * If it's disabled or no action is available,
361 	 * keep it masked and get out of here
362 	 */
363 	action = desc->action;
364 	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
365 		goto out_unlock;
366 
367 	desc->status |= IRQ_INPROGRESS;
368 	spin_unlock(&desc->lock);
369 
370 	action_ret = handle_IRQ_event(irq, action);
371 	if (!noirqdebug)
372 		note_interrupt(irq, desc, action_ret);
373 
374 	spin_lock(&desc->lock);
375 	desc->status &= ~IRQ_INPROGRESS;
376 	if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
377 		desc->chip->unmask(irq);
378 out_unlock:
379 	spin_unlock(&desc->lock);
380 }
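
/*
 * Illustrative sketch: wiring a level triggered line.  The chip must
 * provide mask/unmask (and ack or mask_ack); example_level_chip is assumed
 * to be such a chip defined elsewhere, and the trigger type is only an
 * example.
 */
extern struct irq_chip example_level_chip;	/* hypothetical */

static void example_setup_level_irq(unsigned int irq)
{
	set_irq_chip_and_handler_name(irq, &example_level_chip,
				      handle_level_irq, "level");
	set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
}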
381 
382 /**
383  *	handle_fasteoi_irq - irq handler for transparent controllers
384  *	@irq:	the interrupt number
385  *	@desc:	the interrupt description structure for this irq
386  *
387  *	Only a single callback will be issued to the chip: an ->eoi()
388  *	call when the interrupt has been serviced. This enables support
389  *	for modern forms of interrupt handlers, which handle the flow
390  *	details in hardware, transparently.
391  */
392 void
393 handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
394 {
395 	unsigned int cpu = smp_processor_id();
396 	struct irqaction *action;
397 	irqreturn_t action_ret;
398 
399 	spin_lock(&desc->lock);
400 
401 	if (unlikely(desc->status & IRQ_INPROGRESS))
402 		goto out;
403 
404 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
405 	kstat_cpu(cpu).irqs[irq]++;
406 
407 	/*
408 	 * If it's disabled or no action is available,
409 	 * then mask it and get out of here:
410 	 */
411 	action = desc->action;
412 	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
413 		desc->status |= IRQ_PENDING;
414 		if (desc->chip->mask)
415 			desc->chip->mask(irq);
416 		goto out;
417 	}
418 
419 	desc->status |= IRQ_INPROGRESS;
420 	desc->status &= ~IRQ_PENDING;
421 	spin_unlock(&desc->lock);
422 
423 	action_ret = handle_IRQ_event(irq, action);
424 	if (!noirqdebug)
425 		note_interrupt(irq, desc, action_ret);
426 
427 	spin_lock(&desc->lock);
428 	desc->status &= ~IRQ_INPROGRESS;
429 out:
430 	desc->chip->eoi(irq);
431 
432 	spin_unlock(&desc->lock);
433 }
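
/*
 * Illustrative sketch: a controller in the style of an IO-APIC only needs
 * an ->eoi() callback in the normal flow when driven by handle_fasteoi_irq;
 * mask/unmask are still useful for the disabled case.  The example_eoi_*
 * identifiers are hypothetical.
 */
static void example_eoi(unsigned int irq)        { /* write EOI register */ }
static void example_eoi_mask(unsigned int irq)   { /* mask in hw */ }
static void example_eoi_unmask(unsigned int irq) { /* unmask in hw */ }

static struct irq_chip example_eoi_chip = {
	.name	= "EXAMPLE-EOI",
	.eoi	= example_eoi,
	.mask	= example_eoi_mask,
	.unmask	= example_eoi_unmask,
};

static void example_setup_fasteoi_irq(unsigned int irq)
{
	set_irq_chip_and_handler(irq, &example_eoi_chip, handle_fasteoi_irq);
}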
434 
435 /**
436  *	handle_edge_irq - edge type IRQ handler
437  *	@irq:	the interrupt number
438  *	@desc:	the interrupt description structure for this irq
439  *
440  *	The interrupt occurs on the falling and/or rising edge of a hardware
441  *	signal. The occurrence is latched into the irq controller hardware
442  *	and must be acked in order to be reenabled. After the ack another
443  *	interrupt can happen on the same source even before the first one
444  *	is handled by the associated event handler. If this happens it
445  *	might be necessary to disable (mask) the interrupt depending on the
446  *	controller hardware. This requires re-enabling the interrupt inside
447  *	of the loop which handles the interrupts which arrived while the
448  *	handler was running. When all pending interrupts are handled, the
449  *	loop is left.
450  */
451 void
452 handle_edge_irq(unsigned int irq, struct irq_desc *desc)
453 {
454 	const unsigned int cpu = smp_processor_id();
455 
456 	spin_lock(&desc->lock);
457 
458 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
459 
460 	/*
461 	 * If we're currently running this IRQ, or it's disabled,
462 	 * we shouldn't process the IRQ. Mark it pending, handle
463 	 * the necessary masking and go out
464 	 */
465 	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
466 		    !desc->action)) {
467 		desc->status |= (IRQ_PENDING | IRQ_MASKED);
468 		mask_ack_irq(desc, irq);
469 		goto out_unlock;
470 	}
471 
472 	kstat_cpu(cpu).irqs[irq]++;
473 
474 	/* Start handling the irq */
475 	desc->chip->ack(irq);
476 
477 	/* Mark the IRQ currently in progress. */
478 	desc->status |= IRQ_INPROGRESS;
479 
480 	do {
481 		struct irqaction *action = desc->action;
482 		irqreturn_t action_ret;
483 
484 		if (unlikely(!action)) {
485 			desc->chip->mask(irq);
486 			goto out_unlock;
487 		}
488 
489 		/*
490 		 * When another irq arrived while we were handling
491 		 * one, we could have masked the irq.
492 		 * Re-enable it, if it was not disabled in the meantime.
493 		 */
494 		if (unlikely((desc->status &
495 			       (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
496 			      (IRQ_PENDING | IRQ_MASKED))) {
497 			desc->chip->unmask(irq);
498 			desc->status &= ~IRQ_MASKED;
499 		}
500 
501 		desc->status &= ~IRQ_PENDING;
502 		spin_unlock(&desc->lock);
503 		action_ret = handle_IRQ_event(irq, action);
504 		if (!noirqdebug)
505 			note_interrupt(irq, desc, action_ret);
506 		spin_lock(&desc->lock);
507 
508 	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
509 
510 	desc->status &= ~IRQ_INPROGRESS;
511 out_unlock:
512 	spin_unlock(&desc->lock);
513 }
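
/*
 * Illustrative sketch: an edge triggered line needs ack, mask and unmask
 * callbacks, because the flow handler above acks up front and may mask and
 * unmask while draining pending edges.  example_edge_chip is assumed to be
 * such a chip defined elsewhere.
 */
extern struct irq_chip example_edge_chip;	/* hypothetical */

static void example_setup_edge_irq(unsigned int irq)
{
	set_irq_chip_and_handler_name(irq, &example_edge_chip,
				      handle_edge_irq, "edge");
	set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
}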
514 
515 /**
516  *	handle_percpu_irq - Per CPU local irq handler
517  *	@irq:	the interrupt number
518  *	@desc:	the interrupt description structure for this irq
519  *
520  *	Per CPU interrupts on SMP machines without locking requirements
521  */
522 void
523 handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
524 {
525 	irqreturn_t action_ret;
526 
527 	kstat_this_cpu.irqs[irq]++;
528 
529 	if (desc->chip->ack)
530 		desc->chip->ack(irq);
531 
532 	action_ret = handle_IRQ_event(irq, desc->action);
533 	if (!noirqdebug)
534 		note_interrupt(irq, desc, action_ret);
535 
536 	if (desc->chip->eoi)
537 		desc->chip->eoi(irq);
538 }
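
/*
 * Illustrative sketch: per-CPU interrupts such as a local timer are
 * typically registered like this from architecture code; the irq number
 * and example_percpu_chip are hypothetical.
 */
extern struct irq_chip example_percpu_chip;	/* hypothetical */

static void example_setup_local_timer_irq(unsigned int irq)
{
	set_irq_chip_and_handler(irq, &example_percpu_chip, handle_percpu_irq);
}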
539 
540 void
541 __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
542 		  const char *name)
543 {
544 	struct irq_desc *desc;
545 	unsigned long flags;
546 
547 	if (irq >= NR_IRQS) {
548 		printk(KERN_ERR
549 		       "Trying to install type control for IRQ%d\n", irq);
550 		return;
551 	}
552 
553 	desc = irq_desc + irq;
554 
555 	if (!handle)
556 		handle = handle_bad_irq;
557 	else if (desc->chip == &no_irq_chip) {
558 		printk(KERN_WARNING "Trying to install %sinterrupt handler "
559 		       "for IRQ%d\n", is_chained ? "chained " : "", irq);
560 		/*
561 		 * Some ARM implementations install a handler for really dumb
562 		 * interrupt hardware without setting an irq_chip. This worked
563 		 * with the ARM no_irq_chip but the check in setup_irq would
564 		 * prevent us from setting up the interrupt at all. Switch it to
565 		 * dummy_irq_chip for easy transition.
566 		 */
567 		desc->chip = &dummy_irq_chip;
568 	}
569 
570 	spin_lock_irqsave(&desc->lock, flags);
571 
572 	/* Uninstall? */
573 	if (handle == handle_bad_irq) {
574 		if (desc->chip != &no_irq_chip)
575 			mask_ack_irq(desc, irq);
576 		desc->status |= IRQ_DISABLED;
577 		desc->depth = 1;
578 	}
579 	desc->handle_irq = handle;
580 	desc->name = name;
581 
582 	if (handle != handle_bad_irq && is_chained) {
583 		desc->status &= ~IRQ_DISABLED;
584 		desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
585 		desc->depth = 0;
586 		desc->chip->unmask(irq);
587 	}
588 	spin_unlock_irqrestore(&desc->lock, flags);
589 }
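
/*
 * Illustrative sketch: a chained handler for a cascaded controller.  It is
 * installed with set_irq_chained_handler() (the is_chained=1 wrapper from
 * <linux/irq.h>) and forwards each decoded child irq through
 * generic_handle_irq().  The example_demux_* decoding, the child irq base
 * and the 16 bit status word are hypothetical.
 */
#define EXAMPLE_CHILD_IRQ_BASE	64	/* hypothetical */

static unsigned int example_demux_pending(void)
{
	return 0;	/* would read the expander's status register */
}

static void example_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned int pending = example_demux_pending();
	unsigned int child;

	mask_ack_irq(desc, irq);
	for (child = 0; child < 16; child++)
		if (pending & (1u << child))
			generic_handle_irq(EXAMPLE_CHILD_IRQ_BASE + child);
	desc->chip->unmask(irq);
}

static void example_install_demux(unsigned int parent_irq)
{
	set_irq_chained_handler(parent_irq, example_demux_handler);
}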
590 
591 void
592 set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
593 			 irq_flow_handler_t handle)
594 {
595 	set_irq_chip(irq, chip);
596 	__set_irq_handler(irq, handle, 0, NULL);
597 }
598 
599 void
600 set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
601 			      irq_flow_handler_t handle, const char *name)
602 {
603 	set_irq_chip(irq, chip);
604 	__set_irq_handler(irq, handle, 0, name);
605 }
606 
607 void __init set_irq_noprobe(unsigned int irq)
608 {
609 	struct irq_desc *desc;
610 	unsigned long flags;
611 
612 	if (irq >= NR_IRQS) {
613 		printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq);
614 
615 		return;
616 	}
617 
618 	desc = irq_desc + irq;
619 
620 	spin_lock_irqsave(&desc->lock, flags);
621 	desc->status |= IRQ_NOPROBE;
622 	spin_unlock_irqrestore(&desc->lock, flags);
623 }
624 
625 void __init set_irq_probe(unsigned int irq)
626 {
627 	struct irq_desc *desc;
628 	unsigned long flags;
629 
630 	if (irq >= NR_IRQS) {
631 		printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq);
632 
633 		return;
634 	}
635 
636 	desc = irq_desc + irq;
637 
638 	spin_lock_irqsave(&desc->lock, flags);
639 	desc->status &= ~IRQ_NOPROBE;
640 	spin_unlock_irqrestore(&desc->lock, flags);
641 }
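
/*
 * Illustrative sketch: architecture init code can use the two helpers above
 * to exclude legacy lines from (or include them in) autoprobing; the irq
 * range below is hypothetical.
 */
static void __init example_mark_fixed_irqs(void)
{
	unsigned int irq;

	for (irq = 1; irq < 16; irq++)
		set_irq_noprobe(irq);
}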
642