/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <linux/radix-tree.h>
#include <trace/events/irq.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:       the interrupt number
 * @desc:      description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(irq, desc);
	ack_bad_irq(irq);
}
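/*
 * Note that handle_bad_irq() is also the default ->handle_irq of the
 * descriptor templates in this file (irq_desc_init, irq_desc_legacy and
 * the static irq_desc[] array), so any interrupt that fires before a
 * real flow handler has been installed ends up here.
 */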

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

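/*
 * Descriptor storage comes in two flavours below: with CONFIG_SPARSE_IRQ
 * the descriptors live in a radix tree and are allocated on demand via
 * irq_to_desc_alloc_node(); without it, a static irq_desc[NR_IRQS] array
 * is used and irq_to_desc() is a simple bounds-checked array lookup.
 */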
#ifdef CONFIG_SPARSE_IRQ

static struct irq_desc irq_desc_init = {
	.irq	    = -1,
	.status	    = IRQ_DISABLED,
	.chip	    = &no_irq_chip,
	.handle_irq = handle_bad_irq,
	.depth      = 1,
	.lock       = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};

void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
	void *ptr;

	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
			   GFP_ATOMIC, node);

	/*
	 * Don't overwrite the old array if we could not get a new one;
	 * init_copy_kstat_irqs() could still use the old one.
	 */
	if (ptr) {
		printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
		desc->kstat_irqs = ptr;
	}
}
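/*
 * The allocation above uses GFP_ATOMIC because this function can be
 * reached from irq_to_desc_alloc_node() (via init_one_irq_desc()) with
 * the raw sparse_irq_lock held and interrupts disabled, i.e. from a
 * context that must not sleep.
 */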

static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	raw_spin_lock_init(&desc->lock);
	desc->irq = irq;
#ifdef CONFIG_SMP
	desc->node = node;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, node, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	if (!alloc_desc_masks(desc, node, false)) {
		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
		BUG_ON(1);
	}
	init_desc_masks(desc);
	arch_init_chip_data(desc, node);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_RAW_SPINLOCK(sparse_irq_lock);

static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);

static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}

void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	void **ptr;

	ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
	if (ptr)
		radix_tree_replace_slot(ptr, desc);
}
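/*
 * Lookups via irq_to_desc() are done without taking sparse_irq_lock;
 * insertion and replacement are expected to be serialized by the caller,
 * as irq_to_desc_alloc_node() does by taking sparse_irq_lock before
 * calling set_irq_desc().
 */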

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.irq	    = -1,
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
	}
};

static unsigned int *kstat_irqs_legacy;

int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int node;
	int i;

	init_irq_default_affinity();

	/* initialize nr_irqs based on nr_cpu_ids */
	arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);
	node = first_online_node;

	/* allocate based on nr_cpu_ids */
	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
					  sizeof(int), GFP_NOWAIT, node);

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq = i;
#ifdef CONFIG_SMP
		desc[i].node = node;
#endif
		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		alloc_desc_masks(&desc[i], node, true);
		init_desc_masks(&desc[i]);
		set_irq_desc(i, &desc[i]);
	}

	return arch_early_irq_init();
}

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	desc = irq_to_desc(irq);
	if (desc)
		return desc;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_to_desc(irq);
	if (desc)
		goto out_unlock;

	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);

	printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, node);

	set_irq_desc(irq, desc);

out_unlock:
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}
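/*
 * Illustrative only (not part of this file): a typical caller is
 * architecture or platform setup code that makes sure a descriptor
 * exists before wiring up the line, along the lines of:
 *
 *	desc = irq_to_desc_alloc_node(irq, node);
 *	if (!desc)
 *		return -ENOMEM;
 *	set_irq_chip_and_handler(irq, &my_chip, handle_level_irq);
 *
 * where my_chip is a made-up irq_chip used purely for the example.
 */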

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status = IRQ_DISABLED,
		.chip = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth = 1,
		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int count;
	int i;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq = i;
		alloc_desc_masks(&desc[i], 0, true);
		init_desc_masks(&desc[i]);
		desc[i].kstat_irqs = kstat_irqs_all[i];
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */

void clear_kstat_irqs(struct irq_desc *desc)
{
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this itself.
 */
static void ack_bad(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	print_irq_desc(irq, desc);
	ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
	return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= ack_bad,
	.end		= noop,
};

/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
	.name		= "dummy",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= noop,
	.mask		= noop,
	.unmask		= noop,
	.end		= noop,
};
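/*
 * dummy_irq_chip is typically installed on lines that need no hardware
 * ack/mask at all, e.g. demultiplexed or purely virtual interrupts, so
 * the flow handlers can invoke the chip callbacks unconditionally.
 */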

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}
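/*
 * no_action() is meant for interrupt lines that are claimed only for
 * bookkeeping, e.g. cascade lines, where the flow handler or a
 * demultiplexer does all the real work.
 */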

static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
	if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
		return;

	printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
	       "but no thread function available.\n", irq, action->name);
}

/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	do {
		trace_irq_handler_entry(irq, action);
		ret = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, ret);

		switch (ret) {
		case IRQ_WAKE_THREAD:
			/*
			 * Set result to handled so the spurious check
			 * does not trigger.
			 */
			ret = IRQ_HANDLED;

			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			/*
			 * Wake up the handler thread for this
			 * action. In case the thread crashed and was
			 * killed we just pretend that we handled the
			 * interrupt. The hardirq handler above has
			 * disabled the device interrupt, so no irq
			 * storm is lurking.
			 */
			if (likely(!test_bit(IRQTF_DIED,
					     &action->thread_flags))) {
				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
				wake_up_process(action->thread);
			}

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			status |= action->flags;
			break;

		default:
			break;
		}

		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}
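/*
 * Illustrative sketch (not part of this file) of a driver using the
 * IRQ_WAKE_THREAD path handled above; all names are made up:
 *
 *	static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *	{
 *		if (!foo_irq_is_ours(dev_id))
 *			return IRQ_NONE;
 *		foo_mask_device_irq(dev_id);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		foo_do_slow_work(dev_id);
 *		foo_unmask_device_irq(dev_id);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
 *				   IRQF_SHARED, "foo", dev);
 *
 * The hardirq part returns IRQ_WAKE_THREAD; handle_IRQ_event() then sets
 * IRQTF_RUNTHREAD and wakes action->thread, which runs foo_thread_fn().
 */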

#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ

#ifdef CONFIG_ENABLE_WARN_DEPRECATED
# warning __do_IRQ is deprecated. Please convert to proper flow handlers
#endif

/**
 * __do_IRQ - original all-in-one high-level IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned int status;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack)
			desc->chip->ack(irq);
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

	raw_spin_lock(&desc->lock);
	if (desc->chip->ack)
		desc->chip->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
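	/*
	 * Concretely: if a second edge arrives and __do_IRQ() is entered
	 * again (typically on another CPU) while this one still runs the
	 * handler, that invocation acks the irq, sets IRQ_PENDING and
	 * bails out because IRQ_INPROGRESS is set; the loop below then
	 * notices IRQ_PENDING and runs the handler once more before
	 * clearing IRQ_INPROGRESS.
	 */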
	for (;;) {
		irqreturn_t action_ret;

		raw_spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		raw_spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	raw_spin_unlock(&desc->lock);

	return 1;
}
#endif

void early_init_irq_lock_class(void)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	}
}

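/*
 * kstat_irqs_cpu() reports the per-CPU count kept in desc->kstat_irqs;
 * consumers such as the /proc/interrupts code use it to show per-CPU
 * interrupt statistics.
 */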
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}
EXPORT_SYMBOL(kstat_irqs_cpu);
557