/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	desc->status = IRQ_DEFAULT_INIT_FLAGS;
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, NR_IRQS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
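
/*
 * Usage sketch (hypothetical caller, not part of this file): under
 * CONFIG_SPARSE_IRQ the radix tree only holds descriptors that were
 * actually allocated, so irq_to_desc() can return NULL and callers
 * must check the result:
 *
 *	struct irq_desc *desc = irq_to_desc(irq);
 *
 *	if (!desc)
 *		return -EINVAL;
 */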

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs),
					 gfp, node);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node);

	return desc;

err_kstat:
	kfree(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	kfree(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	int res = irq_alloc_descs(irq, irq, 1, node);

	if (res == -EEXIST || res == irq)
		return irq_to_desc(irq);
	return NULL;
}
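
/*
 * Usage sketch (hypothetical arch/platform code, not part of this
 * file): look a descriptor up and create it on demand when the irq
 * number has not been allocated yet; NULL means the slot was taken
 * by someone else or the allocation failed:
 *
 *	struct irq_desc *desc = irq_to_desc_alloc_node(irq, node);
 *
 *	if (!desc)
 *		return -ENOMEM;
 */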

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status		= IRQ_DEFAULT_INIT_FLAGS,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
		desc[i].kstat_irqs = kstat_irqs_all[i];
		alloc_masks(desc + i, GFP_KERNEL, node);
		desc_smp_init(desc + i, node);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	return start;
}
#endif /* !CONFIG_SPARSE_IRQ */

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	ret = -ENOMEM;
	if (start >= nr_irqs)
		goto err;

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
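
/*
 * Usage sketch (hypothetical driver code, not part of this file):
 * grab three consecutive irq numbers anywhere at or above 64, then
 * release them again with irq_free_descs() above.  Passing irq = -1
 * requests no fixed number, so only the search start matters:
 *
 *	int virq = irq_alloc_descs(-1, 64, 3, numa_node_id());
 *
 *	if (virq < 0)
 *		return virq;
 *	...
 *	irq_free_descs(virq, 3);
 */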

/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
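
/*
 * Usage sketch (hypothetical arch code, not part of this file): a
 * platform with a legacy PIC might mark the 16 ISA vectors as
 * occupied so the allocator above never hands them out dynamically:
 *
 *	if (irq_reserve_irqs(0, 16))
 *		pr_warn("ISA irqs already in use\n");
 */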

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
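
/*
 * Usage sketch (hypothetical, not part of this file): iterate over
 * all allocated irq numbers.  find_next_bit() returns nr_irqs when
 * no further bit is set, which terminates the loop; process_one() is
 * a made-up helper:
 *
 *	unsigned int irq;
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1))
 *		process_one(irq_to_desc(irq));
 */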

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
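
/*
 * Usage sketch (not part of this file): the !CONFIG_SPARSE_IRQ
 * free_desc() above relies on this to reset a descriptor to its
 * defaults before the irq number is handed out again:
 *
 *	dynamic_irq_cleanup(irq);
 */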

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}

#ifdef CONFIG_GENERIC_HARDIRQS
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc)
		return 0;
	for_each_possible_cpu(cpu)
		sum += desc->kstat_irqs[cpu];
	return sum;
}
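
/*
 * Usage sketch (hypothetical, not part of this file): these counters
 * back displays such as /proc/interrupts, either per cpu or summed
 * over all possible cpus:
 *
 *	unsigned int on_cpu0 = kstat_irqs_cpu(irq, 0);
 *	unsigned int total   = kstat_irqs(irq);
 */
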
#endif /* CONFIG_GENERIC_HARDIRQS */