xref: /openbmc/linux/kernel/irq/irqdesc.c (revision c2732114)
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
		struct module *owner)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
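
/*
 * Example (illustrative sketch, not part of the original file): a chained
 * flow handler for a hypothetical cascaded interrupt controller typically
 * reads a status register, maps each pending bit back to a Linux irq
 * number and feeds it to generic_handle_irq(). The my_* identifiers and
 * the register layout below are invented for illustration only.
 *
 *	static void my_cascade_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long status;
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		status = readl(my_base + MY_STATUS_REG);
 *		for_each_set_bit(bit, &status, MY_NR_HWIRQS)
 *			generic_handle_irq(irq_find_mapping(my_domain, bit));
 *		chained_irq_exit(chip, desc);
 *	}
 */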

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @lookup:	Whether to perform the domain lookup or not
 * @regs:	Register file coming from the low-level handling code
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = hwirq;
	int ret = 0;

	irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
#endif

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(!irq || irq >= nr_irqs)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}
#endif
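
/*
 * Example (sketch): architectures normally reach this through the
 * handle_domain_irq() wrapper from <linux/irqdesc.h>, which passes
 * lookup = true. A root irqchip driver's low-level entry could look
 * like the following; my_base, MY_PENDING_REG and my_domain are made
 * up for illustration, and __exception_irq_entry is the ARM flavour
 * of the entry annotation.
 *
 *	static void __exception_irq_entry my_handle_irq(struct pt_regs *regs)
 *	{
 *		u32 pending;
 *
 *		while ((pending = readl_relaxed(my_base + MY_PENDING_REG)))
 *			handle_domain_irq(my_domain, __ffs(pending), regs);
 *	}
 */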

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
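
/*
 * Example (sketch): most callers use the irq_alloc_descs() and
 * irq_alloc_descs_from() macros from <linux/irq.h>, which pass
 * THIS_MODULE as @owner. A driver needing eight dynamically numbered
 * interrupts could do:
 *
 *	int base = irq_alloc_descs(-1, 0, 8, numa_node_id());
 *
 *	if (base < 0)
 *		return base;
 *	...
 *	irq_free_descs(base, 8);
 */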

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:	number of interrupts to allocate
 * @node:	node on which to allocate
 *
 * Returns an interrupt number > 0 or 0, if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);

	if (irq < 0)
		return 0;

	for (i = irq; cnt > 0; i++, cnt--) {
		if (arch_setup_hwirq(i, node))
			goto err;
		irq_clear_status_flags(i, _IRQ_NOREQUEST);
	}
	return irq;

err:
	for (i--; i >= irq; i--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(irq, cnt);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:	Free from irq number
 * @cnt:	number of interrupts to free
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
	int i, j;

	for (i = from, j = cnt; j > 0; i++, j--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
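
/*
 * Example (sketch, only meaningful on architectures that provide
 * arch_setup_hwirq()): the hwirq helpers pair descriptor allocation
 * with the arch hook, so a caller wanting two vectors could do:
 *
 *	unsigned int irq = irq_alloc_hwirqs(2, numa_node_id());
 *
 *	if (!irq)
 *		return -ENOMEM;
 *	...
 *	irq_free_hwirqs(irq, 2);
 */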
#endif

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
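
/*
 * Example (sketch): this lets core code such as the /proc/interrupts
 * seq_file walk only the allocated irq numbers instead of probing every
 * slot up to nr_irqs; use() stands in for the per-irq work:
 *
 *	unsigned int irq;
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1))
 *		use(irq_to_desc(irq));
 */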

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}
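
/*
 * Example (sketch): the irq_set_*() helpers use this pair through the
 * irq_get_desc_buslock()/irq_put_desc_busunlock() wrappers defined in
 * internals.h, following this pattern:
 *
 *	unsigned long flags;
 *	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
 *
 *	if (!desc)
 *		return -EINVAL;
 *	... modify the descriptor under desc->lock ...
 *	irq_put_desc_busunlock(desc, flags);
 */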

int irq_set_percpu_devid(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	irq_set_percpu_devid_flags(irq);
	return 0;
}
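
/*
 * Example (sketch): per-CPU interrupts such as ARM PPIs are marked this
 * way by irqchip/driver code and then requested with the per-CPU API
 * instead of request_irq(); my_handler and my_percpu_dev are placeholders:
 *
 *	irq_set_percpu_devid(irq);
 *	err = request_percpu_irq(irq, my_handler, "my-timer", my_percpu_dev);
 *
 * and on each CPU that should receive it:
 *
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */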

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	unsigned int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}
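
/*
 * Example (sketch): show_interrupts()-style consumers print the per-CPU
 * view with kstat_irqs_cpu() rather than the summed total:
 *
 *	for_each_online_cpu(cpu)
 *		seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
 */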
597