/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

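/*
 * Per-descriptor cpumask handling: on SMP the affinity mask (and, with
 * CONFIG_GENERIC_PENDING_IRQ, the pending mask) hang off the descriptor
 * and are allocated on the descriptor's node; on UP the helpers below
 * collapse into no-ops.
 */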
#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

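/*
 * desc_set_defaults() brings a descriptor back to its power-on state:
 * no chip, no handler data, handle_bad_irq() as flow handler and a
 * disable depth of 1. Callers either hold desc->lock or have exclusive
 * access to a freshly allocated descriptor.
 */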
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	desc->status = IRQ_DEFAULT_INIT_FLAGS;
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, NR_IRQS);

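/*
 * With CONFIG_SPARSE_IRQ the descriptors are allocated on demand and
 * kept in a radix tree; insertions and deletions are serialized by
 * sparse_irq_lock, lookups run lockless. Without it a static NR_IRQS
 * sized array is used instead (see further below).
 */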
#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

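/*
 * Allocate a descriptor on @node together with its per-cpu kstat
 * counters and cpumasks, unwinding in reverse order on failure.
 */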
static struct irq_desc *alloc_desc(int irq, int node)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

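/*
 * Undo alloc_desc(): unpublish the descriptor from /proc and from the
 * radix tree before the memory is freed, so new lookups cannot find it.
 */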
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

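/*
 * Legacy interface: return the descriptor for @irq, allocating it on
 * @node if necessary. -EEXIST from irq_alloc_descs() means the number
 * was already reserved, in which case the existing descriptor is fine.
 */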
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	int res = irq_alloc_descs(irq, irq, 1, node);

	if (res == -EEXIST || res == irq)
		return irq_to_desc(irq);
	return NULL;
}

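/*
 * Early boot runs single threaded, so the arch's preallocated
 * descriptors can be inserted without taking sparse_irq_lock.
 */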
int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

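/*
 * Without CONFIG_SPARSE_IRQ all NR_IRQS descriptors live in one static
 * array and irq_to_desc() degenerates to a bounds-checked index.
 */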
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status		= IRQ_DEFAULT_INIT_FLAGS,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
		/* TODO : do this allocation on-demand ... */
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(desc + i, GFP_KERNEL, node);
		desc_smp_init(desc + i, node);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
#if defined(CONFIG_KSTAT_IRQS_ONDEMAND)
	struct irq_desc *desc;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		desc = irq_to_desc(start + i);
		if (desc && !desc->kstat_irqs) {
			unsigned int __percpu *stats = alloc_percpu(unsigned int);

			if (!stats)
				return -ENOMEM;
			if (cmpxchg(&desc->kstat_irqs, NULL, stats) != NULL)
				free_percpu(stats);
		}
	}
#endif
	return start;
}
#endif /* !CONFIG_SPARSE_IRQ */

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	ret = -ENOMEM;
	if (start >= nr_irqs)
		goto err;

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
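
/*
 * Example (a sketch, not taken from an in-tree user): a driver that
 * needs four consecutive interrupt numbers, anywhere at or above 16,
 * could do:
 *
 *	int irq = irq_alloc_descs(-1, 16, 4, numa_node_id());
 *
 *	if (irq < 0)
 *		return irq;
 *	...
 *	irq_free_descs(irq, 4);
 */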

/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
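
/*
 * Typical use (a sketch): arch code claiming the 16 legacy irqs so that
 * the dynamic allocator never hands them out:
 *
 *	if (irq_reserve_irqs(0, 16))
 *		pr_err("legacy irqs already in use\n");
 */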

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
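
/*
 * A caller can walk all allocated irqs with something like (sketch):
 *
 *	unsigned int irq;
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1))
 *		...
 */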

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to cleanup
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

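/*
 * Per-cpu interrupt statistics: kstat_irqs_cpu() reads the count for
 * one CPU, kstat_irqs() sums it over all possible CPUs.
 */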
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

#ifdef CONFIG_GENERIC_HARDIRQS
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}
#endif /* CONFIG_GENERIC_HARDIRQS */
431