/* kernel/irq/irqdesc.c (revision f6723b56) */
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
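/*
 * Allocate the affinity mask (and, with GENERIC_PENDING_IRQ, the
 * pending mask) for a descriptor on the given node.
 */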
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

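/* Initialize the SMP related fields of a descriptor to their defaults */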
static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

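/*
 * Reset a descriptor to its default state: no chip, handler set to
 * handle_bad_irq, interrupt disabled and per cpu statistics cleared.
 */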
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
		struct module *owner)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

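/* Serializes updates to the allocated_irqs bitmap and descriptor storage */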
static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

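/*
 * With SPARSE_IRQ the descriptors are stored in a radix tree indexed
 * by interrupt number, instead of a static array.
 */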
static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

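/* Free the cpumasks allocated by alloc_masks() */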
#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

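/*
 * Allocate and initialize a single descriptor: storage, per cpu
 * statistics, cpumasks, lock and default settings.
 */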
static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

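/*
 * Tear down a descriptor: remove the /proc entry, take it out of the
 * radix tree and free all associated memory.
 */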
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

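/*
 * Allocate a consecutive range of descriptors and insert them into the
 * radix tree. On failure all already allocated descriptors are freed
 * and the range is released in the allocation bitmap.
 */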
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

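/* Grow nr_irqs up to the bitmap limit when more interrupts are needed */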
static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

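/*
 * Early boot setup: establish the default affinity and preallocate the
 * descriptors requested by the architecture.
 */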
int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

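/*
 * Without SPARSE_IRQ the descriptors live in a statically sized,
 * cacheline aligned array indexed directly by interrupt number.
 */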
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);

/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}

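/*
 * Look up a descriptor, optionally verify its per cpu devid setting,
 * and return it with desc->lock held (and the bus lock taken when
 * requested). Paired with __irq_put_desc_unlock().
 */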
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

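/* Counterpart to __irq_get_desc_lock(): drop desc->lock and the bus lock */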
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

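/*
 * Mark an interrupt as per cpu devid based and allocate the cpumask
 * which tracks on which CPUs it is enabled.
 */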
int irq_set_percpu_devid(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

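/* Return the interrupt count of @irq on the given CPU */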
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

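/* Sum the interrupt counts of @irq over all possible CPUs */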
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}