/* xref: /openbmc/linux/kernel/irq/irqdesc.c (revision 1ab142d4) */
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif
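
/*
 * Note: irq_default_affinity starts out as "all CPUs". It can be
 * narrowed at runtime through /proc/irq/default_smp_affinity; newly
 * allocated descriptors inherit the current value in desc_smp_init()
 * below.
 */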

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
		struct module *owner)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
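
/*
 * allocated_irqs is sized by IRQ_BITMAP_BITS (see internals.h), which
 * under CONFIG_SPARSE_IRQ leaves headroom above NR_IRQS so that
 * irq_expand_nr_irqs() can grow nr_irqs at runtime.
 */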

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}
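
/*
 * Note: on failure alloc_descs() unwinds the descriptors it already
 * inserted and clears the bitmap reservation taken by its caller
 * (__irq_alloc_descs() below), so no partial allocation leaks out.
 */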

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};
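
/*
 * Without CONFIG_SPARSE_IRQ all NR_IRQS descriptors live in the static
 * array above, and irq_to_desc() below degenerates to bounds-checked
 * indexing instead of a radix tree lookup.
 */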

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
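
/*
 * Usage sketch (hypothetical demultiplexing handler, not part of this
 * file): a chained flow handler typically translates a pending hardware
 * status bit into a Linux irq number and hands it to generic_handle_irq().
 * demo_read_pending() and demo_irq_base are assumptions for illustration:
 *
 *	static void demo_mux_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		unsigned long pending = demo_read_pending();
 *
 *		while (pending) {
 *			int bit = __ffs(pending);
 *
 *			generic_handle_irq(demo_irq_base + bit);
 *			pending &= ~BIT(bit);
 *		}
 *	}
 */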

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
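
/*
 * Usage sketch (hypothetical caller, not part of this file): most users
 * go through the irq_alloc_descs() wrapper macro from <linux/irq.h>,
 * which fills in THIS_MODULE as @owner, and pair it with irq_free_descs():
 *
 *	int base = irq_alloc_descs(-1, 0, 16, numa_node_id());
 *
 *	if (base < 0)
 *		return base;
 *	...
 *	irq_free_descs(base, 16);
 *
 * A negative return is -EINVAL, -EEXIST or -ENOMEM from the checks above.
 */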

/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns the next allocated irq number at or above @offset, or nr_irqs
 * if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
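
/*
 * This is the building block of the for_each_active_irq() iterator,
 * which walks every allocated irq number in the system.
 */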

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}
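
/*
 * Sketch of the intended lock/unlock pairing (hypothetical caller, not
 * part of this file); the irq_get_desc_buslock()/irq_put_desc_busunlock()
 * wrappers in internals.h pass bus=true to the two helpers above:
 *
 *	unsigned long flags;
 *	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
 *
 *	if (!desc)
 *		return -EINVAL;
 *	... modify the descriptor under desc->lock ...
 *	irq_put_desc_busunlock(desc, flags);
 */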

int irq_set_percpu_devid(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	irq_set_percpu_devid_flags(irq);
	return 0;
}
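
/*
 * Usage sketch (hypothetical, not part of this file): per-CPU interrupts
 * such as local timer irqs are marked once with irq_set_percpu_devid()
 * and then requested with the per-CPU variant of request_irq().
 * demo_handler and demo_percpu_dev are assumptions for illustration:
 *
 *	irq_set_percpu_devid(irq);
 *	err = request_percpu_irq(irq, demo_handler, "demo-timer",
 *				 &demo_percpu_dev);
 */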

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}
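
/*
 * Note: kstat_irqs_cpu() feeds the per-CPU columns of /proc/interrupts,
 * while kstat_irqs() sums the per-CPU counters for a total. Neither takes
 * desc->lock, so the result is a snapshot rather than an exact count.
 */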