xref: /openbmc/linux/kernel/irq/handle.c (revision 7dd65feb)
/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <linux/bootmem.h>
#include <trace/events/irq.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:       the interrupt number
 * @desc:      description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(irq, desc);
	ack_bad_irq(irq);
}
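
/*
 * Note: handle_bad_irq is also the default ->handle_irq in the
 * descriptor templates below, so an interrupt that fires before a real
 * flow handler is installed is still counted and acked rather than
 * silently lost.
 */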

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif
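
/*
 * irq_default_affinity is the default mask applied when an interrupt
 * is first set up (see kernel/irq/manage.c); starting from "all CPUs"
 * means a new irq may be routed to any online CPU until an explicit
 * affinity is set.
 */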

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
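
/*
 * NR_IRQS is the compile-time upper bound; nr_irqs is the value the
 * rest of the kernel should use at runtime. With CONFIG_SPARSE_IRQ,
 * arch_probe_nr_irqs() below may adjust nr_irqs based on nr_cpu_ids.
 */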

#ifdef CONFIG_SPARSE_IRQ

static struct irq_desc irq_desc_init = {
	.irq	    = -1,
	.status	    = IRQ_DISABLED,
	.chip	    = &no_irq_chip,
	.handle_irq = handle_bad_irq,
	.depth      = 1,
	.lock       = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};

void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
	void *ptr;

	if (slab_is_available())
		ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
				   GFP_ATOMIC, node);
	else
		ptr = alloc_bootmem_node(NODE_DATA(node),
				nr * sizeof(*desc->kstat_irqs));

	/*
	 * Don't overwrite if we could not get a new one;
	 * init_copy_kstat_irqs() could still use the old one.
	 */
	if (ptr) {
		printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
		desc->kstat_irqs = ptr;
	}
}

static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	raw_spin_lock_init(&desc->lock);
	desc->irq = irq;
#ifdef CONFIG_SMP
	desc->node = node;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, node, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	if (!alloc_desc_masks(desc, node, false)) {
		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
		BUG_ON(1);
	}
	init_desc_masks(desc);
	arch_init_chip_data(desc, node);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_RAW_SPINLOCK(sparse_irq_lock);

struct irq_desc **irq_desc_ptrs __read_mostly;

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.irq	    = -1,
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
	}
};
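
/*
 * The legacy descriptors are static so the first NR_IRQS_LEGACY
 * interrupts (e.g. the ISA range on x86) have usable descriptors
 * before any allocator is up; early_irq_init() below wires them into
 * irq_desc_ptrs[].
 */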

static unsigned int *kstat_irqs_legacy;

int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int node;
	int i;

	init_irq_default_affinity();

	/* initialize nr_irqs based on nr_cpu_ids */
	arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);
	node = first_online_node;

	/* allocate irq_desc_ptrs array based on nr_irqs */
	irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);

	/* allocate based on nr_cpu_ids */
	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
					  sizeof(int), GFP_NOWAIT, node);

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq = i;
#ifdef CONFIG_SMP
		desc[i].node = node;
#endif
		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		alloc_desc_masks(&desc[i], node, true);
		init_desc_masks(&desc[i]);
		irq_desc_ptrs[i] = desc + i;
	}

	for (i = legacy_count; i < nr_irqs; i++)
		irq_desc_ptrs[i] = NULL;

	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	if (irq_desc_ptrs && irq < nr_irqs)
		return irq_desc_ptrs[irq];

	return NULL;
}
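
/*
 * Note: in the sparse case a NULL return means either that irq is out
 * of range or that its descriptor simply has not been allocated yet,
 * so callers must check the result.
 */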

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	desc = irq_desc_ptrs[irq];
	if (desc)
		return desc;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);

	/* Check again under the lock: another CPU may have installed it */
	desc = irq_desc_ptrs[irq];
	if (desc)
		goto out_unlock;

	if (slab_is_available())
		desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
	else
		desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));

	printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, node);

	irq_desc_ptrs[irq] = desc;

out_unlock:
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}
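
/*
 * The unlocked check, lock, re-check sequence above is the usual
 * pattern for a lazily allocated table. A caller-side sketch (error
 * handling is illustrative, not from this file):
 *
 *	desc = irq_to_desc_alloc_node(irq, node);
 *	if (!desc)
 *		return -ENOMEM;
 */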

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status = IRQ_DISABLED,
		.chip = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth = 1,
		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int count;
	int i;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq = i;
		alloc_desc_masks(&desc[i], 0, true);
		init_desc_masks(&desc[i]);
		desc[i].kstat_irqs = kstat_irqs_all[i];
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */

void clear_kstat_irqs(struct irq_desc *desc)
{
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this itself.
 */
static void ack_bad(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	print_irq_desc(irq, desc);
	ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
	return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= ack_bad,
	.end		= noop,
};

/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
	.name		= "dummy",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= noop,
	.mask		= noop,
	.unmask		= noop,
	.end		= noop,
};
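
/*
 * The practical difference between the two chips above: no_irq_chip
 * reports a stray ->ack() via ack_bad(), while dummy_irq_chip ignores
 * everything silently. A sketch of wiring a line that needs no
 * hardware ack (assuming the generic chip/handler helpers):
 *
 *	set_irq_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
 */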

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}
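
/*
 * no_action is useful for irqaction entries that exist only to keep a
 * line requested, e.g. cascade interrupts that need no device-level
 * handling.
 */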

static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
	if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
		return;

	printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
	       "but no thread function available.", irq, action->name);
}

/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		trace_irq_handler_entry(irq, action);
		ret = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, ret);

		switch (ret) {
		case IRQ_WAKE_THREAD:
			/*
			 * Set result to handled so the spurious check
			 * does not trigger.
			 */
			ret = IRQ_HANDLED;

			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			/*
			 * Wake up the handler thread for this
			 * action. In case the thread crashed and was
			 * killed we just pretend that we handled the
			 * interrupt. The hardirq handler above has
			 * disabled the device interrupt, so no irq
			 * storm is lurking.
			 */
			if (likely(!test_bit(IRQTF_DIED,
					     &action->thread_flags))) {
				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
				wake_up_process(action->thread);
			}

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			status |= action->flags;
			break;

		default:
			break;
		}

		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}
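
/*
 * A driver exercising the IRQ_WAKE_THREAD path above registers both a
 * primary handler and a thread function. A minimal sketch; the foo_*
 * names are illustrative, not from this file:
 *
 *	static irqreturn_t foo_primary(int irq, void *dev)
 *	{
 *		if (!foo_irq_is_ours(dev))
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;	(real work happens in the thread)
 *	}
 *
 *	err = request_threaded_irq(irq, foo_primary, foo_thread_fn,
 *				   IRQF_SHARED, "foo", dev);
 */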

#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ

#ifdef CONFIG_ENABLE_WARN_DEPRECATED
# warning __do_IRQ is deprecated. Please convert to proper flow handlers
#endif

/**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned int status;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack)
			desc->chip->ack(irq);
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

	raw_spin_lock(&desc->lock);
	if (desc->chip->ack)
		desc->chip->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		raw_spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		raw_spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	raw_spin_unlock(&desc->lock);

	return 1;
}
#endif
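
/*
 * Code converting away from __do_IRQ installs a flow handler per
 * interrupt instead, typically along the lines of:
 *
 *	set_irq_chip_and_handler(irq, chip, handle_level_irq);
 *
 * after which desc->handle_irq() replaces this all-in-one path.
 */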

void early_init_irq_lock_class(void)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	}
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}
EXPORT_SYMBOL(kstat_irqs_cpu);
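
/*
 * kstat_irqs_cpu() is the accessor used by reporting code such as
 * /proc/interrupts to show per-CPU interrupt counts.
 */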
562