xref: /openbmc/linux/kernel/irq/irqdesc.c (revision 7fe2f639)
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif
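
/*
 * Illustrative note (not part of the original file): irq_default_affinity
 * is the mask that desc_smp_init() below copies into every new descriptor.
 * At runtime it can be changed through procfs, e.g. from a shell:
 *
 *	# echo f > /proc/irq/default_smp_affinity
 */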

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}
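
/*
 * Illustrative sketch (not part of the original file): with SPARSE_IRQ the
 * radix tree is the only map from irq number to descriptor, so a lookup
 * for a number that was never allocated simply returns NULL. The number
 * below is an arbitrary example:
 *
 *	struct irq_desc *desc = irq_to_desc(12345);
 *
 *	if (!desc)
 *		;	// no descriptor was ever allocated for irq 12345
 */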

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}
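
/*
 * Illustrative note (not part of the original file): early_irq_init() runs
 * early from start_kernel(), before init_IRQ() lets the architecture set
 * up its interrupt controllers. The boot ordering, sketched from
 * init/main.c, is roughly:
 *
 *	start_kernel()
 *		-> early_irq_init()	// this file, either variant
 *		-> init_IRQ()		// arch code installs chips/handlers
 */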

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
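
/*
 * Illustrative sketch (not part of the original file): a chained demux
 * handler for a hypothetical interrupt controller bank would map each
 * pending hardware bit to its Linux irq number and feed it back through
 * generic_handle_irq(). demo_read_pending() and demo_irq_base are
 * invented names:
 *
 *	static void demo_bank_demux_handler(unsigned int irq,
 *					    struct irq_desc *desc)
 *	{
 *		unsigned long pending = demo_read_pending();
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, 32)
 *			generic_handle_irq(demo_irq_base + bit);
 *	}
 */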

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate
 * @node:	Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or an error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(irq_alloc_descs);
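
/*
 * Illustrative sketch (not part of the original file): a caller that needs
 * a block of four dynamically numbered interrupts, released again on its
 * teardown path. Passing irq = -1 means "any free range at or above @from";
 * numa_node_id() is just one plausible node argument:
 *
 *	int virq = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (virq < 0)
 *		return virq;	// a negative error code
 *	...
 *	irq_free_descs(virq, 4);
 */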

/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
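
/*
 * Illustrative sketch (not part of the original file): architecture setup
 * code that wants the low legacy irq numbers kept out of the dynamic
 * allocator could mark them allocated up front:
 *
 *	if (irq_reserve_irqs(0, 16))
 *		pr_warn("legacy irqs already claimed\n");
 */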

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns the next allocated irq number at or after @offset, or nr_irqs
 * if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
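
/*
 * Illustrative sketch (not part of the original file): walking every
 * allocated irq number, the pattern the for_each_active_irq() helper in
 * internals.h is built on:
 *
 *	unsigned int irq;
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1))
 *		;	// irq is an allocated interrupt number here
 */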

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}
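
/*
 * Illustrative sketch (not part of the original file): the two helpers
 * above are meant to be used as a strict pair, with matching @bus
 * arguments, by the core irq code in this directory:
 *
 *	unsigned long flags;
 *	struct irq_desc *desc = __irq_get_desc_lock(irq, &flags, false);
 *
 *	if (!desc)
 *		return -EINVAL;
 *	// ... operate on desc with desc->lock held ...
 *	__irq_put_desc_unlock(desc, flags, false);
 */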

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
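
/*
 * Illustrative sketch (not part of the original file): code retiring a
 * dynamically created interrupt (an MSI vector, say) would reset the
 * descriptor to its defaults before the number is recycled:
 *
 *	dynamic_irq_cleanup(irq);
 *	irq_free_descs(irq, 1);
 */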

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}
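
/*
 * Illustrative sketch (not part of the original file): /proc/interrupts
 * style accounting sums the per-cpu counters; a debug printout for one
 * interrupt line could look like:
 *
 *	unsigned int total = kstat_irqs(irq);
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		printk(KERN_DEBUG "irq %u on cpu %d: %u\n",
 *		       irq, cpu, kstat_irqs_cpu(irq, cpu));
 *	printk(KERN_DEBUG "irq %u total: %u\n", irq, total);
 */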
467