xref: /openbmc/linux/kernel/irq/irqdesc.c (revision 5e630aa8)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4  * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
5  *
6  * This file contains the interrupt descriptor management code. Detailed
7  * information is available in Documentation/core-api/genericirq.rst
8  *
9  */
10 #include <linux/irq.h>
11 #include <linux/slab.h>
12 #include <linux/export.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel_stat.h>
15 #include <linux/radix-tree.h>
16 #include <linux/bitmap.h>
17 #include <linux/irqdomain.h>
18 #include <linux/sysfs.h>
19 
20 #include "internals.h"
21 
22 /*
23  * lockdep: we want to handle all irq_desc locks as a single lock-class:
24  */
25 static struct lock_class_key irq_desc_lock_class;
26 
27 #if defined(CONFIG_SMP)
28 static int __init irq_affinity_setup(char *str)
29 {
30 	alloc_bootmem_cpumask_var(&irq_default_affinity);
31 	cpulist_parse(str, irq_default_affinity);
32 	/*
33 	 * Set at least the boot CPU. We don't want to end up with
34 	 * bug reports caused by random command line masks.
35 	 */
36 	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
37 	return 1;
38 }
39 __setup("irqaffinity=", irq_affinity_setup);
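/*
 * Illustrative note (not part of the original file): the parameter above is
 * parsed with cpulist_parse(), so a boot command line containing e.g.
 *
 *	irqaffinity=0-3
 *
 * limits the default affinity of newly set up interrupts to CPUs 0-3, with
 * the boot CPU forced back in by the cpumask_set_cpu() call above.
 */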
40 
41 static void __init init_irq_default_affinity(void)
42 {
43 	if (!cpumask_available(irq_default_affinity))
44 		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
45 	if (cpumask_empty(irq_default_affinity))
46 		cpumask_setall(irq_default_affinity);
47 }
48 #else
49 static void __init init_irq_default_affinity(void)
50 {
51 }
52 #endif
53 
54 #ifdef CONFIG_SMP
55 static int alloc_masks(struct irq_desc *desc, int node)
56 {
57 	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
58 				     GFP_KERNEL, node))
59 		return -ENOMEM;
60 
61 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
62 	if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
63 				     GFP_KERNEL, node)) {
64 		free_cpumask_var(desc->irq_common_data.affinity);
65 		return -ENOMEM;
66 	}
67 #endif
68 
69 #ifdef CONFIG_GENERIC_PENDING_IRQ
70 	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
71 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
72 		free_cpumask_var(desc->irq_common_data.effective_affinity);
73 #endif
74 		free_cpumask_var(desc->irq_common_data.affinity);
75 		return -ENOMEM;
76 	}
77 #endif
78 	return 0;
79 }
80 
81 static void desc_smp_init(struct irq_desc *desc, int node,
82 			  const struct cpumask *affinity)
83 {
84 	if (!affinity)
85 		affinity = irq_default_affinity;
86 	cpumask_copy(desc->irq_common_data.affinity, affinity);
87 
88 #ifdef CONFIG_GENERIC_PENDING_IRQ
89 	cpumask_clear(desc->pending_mask);
90 #endif
91 #ifdef CONFIG_NUMA
92 	desc->irq_common_data.node = node;
93 #endif
94 }
95 
96 #else
97 static inline int
98 alloc_masks(struct irq_desc *desc, int node) { return 0; }
99 static inline void
100 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
101 #endif
102 
103 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
104 			      const struct cpumask *affinity, struct module *owner)
105 {
106 	int cpu;
107 
108 	desc->irq_common_data.handler_data = NULL;
109 	desc->irq_common_data.msi_desc = NULL;
110 
111 	desc->irq_data.common = &desc->irq_common_data;
112 	desc->irq_data.irq = irq;
113 	desc->irq_data.chip = &no_irq_chip;
114 	desc->irq_data.chip_data = NULL;
115 	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
116 	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
117 	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
118 	desc->handle_irq = handle_bad_irq;
119 	desc->depth = 1;
120 	desc->irq_count = 0;
121 	desc->irqs_unhandled = 0;
122 	desc->tot_count = 0;
123 	desc->name = NULL;
124 	desc->owner = owner;
125 	for_each_possible_cpu(cpu)
126 		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
127 	desc_smp_init(desc, node, affinity);
128 }
129 
130 int nr_irqs = NR_IRQS;
131 EXPORT_SYMBOL_GPL(nr_irqs);
132 
133 static DEFINE_MUTEX(sparse_irq_lock);
134 static DECLARE_BITMAP(allocated_irqs, MAX_SPARSE_IRQS);
135 
136 static int irq_find_free_area(unsigned int from, unsigned int cnt)
137 {
138 	return bitmap_find_next_zero_area(allocated_irqs, MAX_SPARSE_IRQS,
139 					  from, cnt, 0);
140 }
141 
142 static unsigned int irq_find_at_or_after(unsigned int offset)
143 {
144 	return find_next_bit(allocated_irqs, nr_irqs, offset);
145 }
146 
147 #ifdef CONFIG_SPARSE_IRQ
148 
149 static void irq_kobj_release(struct kobject *kobj);
150 
151 #ifdef CONFIG_SYSFS
152 static struct kobject *irq_kobj_base;
153 
154 #define IRQ_ATTR_RO(_name) \
155 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
156 
157 static ssize_t per_cpu_count_show(struct kobject *kobj,
158 				  struct kobj_attribute *attr, char *buf)
159 {
160 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
161 	ssize_t ret = 0;
162 	char *p = "";
163 	int cpu;
164 
165 	for_each_possible_cpu(cpu) {
166 		unsigned int c = irq_desc_kstat_cpu(desc, cpu);
167 
168 		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
169 		p = ",";
170 	}
171 
172 	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
173 	return ret;
174 }
175 IRQ_ATTR_RO(per_cpu_count);
176 
177 static ssize_t chip_name_show(struct kobject *kobj,
178 			      struct kobj_attribute *attr, char *buf)
179 {
180 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
181 	ssize_t ret = 0;
182 
183 	raw_spin_lock_irq(&desc->lock);
184 	if (desc->irq_data.chip && desc->irq_data.chip->name) {
185 		ret = scnprintf(buf, PAGE_SIZE, "%s\n",
186 				desc->irq_data.chip->name);
187 	}
188 	raw_spin_unlock_irq(&desc->lock);
189 
190 	return ret;
191 }
192 IRQ_ATTR_RO(chip_name);
193 
194 static ssize_t hwirq_show(struct kobject *kobj,
195 			  struct kobj_attribute *attr, char *buf)
196 {
197 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
198 	ssize_t ret = 0;
199 
200 	raw_spin_lock_irq(&desc->lock);
201 	if (desc->irq_data.domain)
202 		ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq);
203 	raw_spin_unlock_irq(&desc->lock);
204 
205 	return ret;
206 }
207 IRQ_ATTR_RO(hwirq);
208 
209 static ssize_t type_show(struct kobject *kobj,
210 			 struct kobj_attribute *attr, char *buf)
211 {
212 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
213 	ssize_t ret = 0;
214 
215 	raw_spin_lock_irq(&desc->lock);
216 	ret = sprintf(buf, "%s\n",
217 		      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
218 	raw_spin_unlock_irq(&desc->lock);
219 
220 	return ret;
221 
222 }
223 IRQ_ATTR_RO(type);
224 
225 static ssize_t wakeup_show(struct kobject *kobj,
226 			   struct kobj_attribute *attr, char *buf)
227 {
228 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
229 	ssize_t ret = 0;
230 
231 	raw_spin_lock_irq(&desc->lock);
232 	ret = sprintf(buf, "%s\n",
233 		      irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
234 	raw_spin_unlock_irq(&desc->lock);
235 
236 	return ret;
237 
238 }
239 IRQ_ATTR_RO(wakeup);
240 
241 static ssize_t name_show(struct kobject *kobj,
242 			 struct kobj_attribute *attr, char *buf)
243 {
244 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
245 	ssize_t ret = 0;
246 
247 	raw_spin_lock_irq(&desc->lock);
248 	if (desc->name)
249 		ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
250 	raw_spin_unlock_irq(&desc->lock);
251 
252 	return ret;
253 }
254 IRQ_ATTR_RO(name);
255 
256 static ssize_t actions_show(struct kobject *kobj,
257 			    struct kobj_attribute *attr, char *buf)
258 {
259 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
260 	struct irqaction *action;
261 	ssize_t ret = 0;
262 	char *p = "";
263 
264 	raw_spin_lock_irq(&desc->lock);
265 	for_each_action_of_desc(desc, action) {
266 		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
267 				 p, action->name);
268 		p = ",";
269 	}
270 	raw_spin_unlock_irq(&desc->lock);
271 
272 	if (ret)
273 		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
274 
275 	return ret;
276 }
277 IRQ_ATTR_RO(actions);
278 
279 static struct attribute *irq_attrs[] = {
280 	&per_cpu_count_attr.attr,
281 	&chip_name_attr.attr,
282 	&hwirq_attr.attr,
283 	&type_attr.attr,
284 	&wakeup_attr.attr,
285 	&name_attr.attr,
286 	&actions_attr.attr,
287 	NULL
288 };
289 ATTRIBUTE_GROUPS(irq);
290 
291 static const struct kobj_type irq_kobj_type = {
292 	.release	= irq_kobj_release,
293 	.sysfs_ops	= &kobj_sysfs_ops,
294 	.default_groups = irq_groups,
295 };
296 
297 static void irq_sysfs_add(int irq, struct irq_desc *desc)
298 {
299 	if (irq_kobj_base) {
300 		/*
301 		 * Continue even in case of failure as this is nothing
302 		 * crucial and failures in the late irq_sysfs_init()
303 		 * cannot be rolled back.
304 		 */
305 		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
306 			pr_warn("Failed to add kobject for irq %d\n", irq);
307 		else
308 			desc->istate |= IRQS_SYSFS;
309 	}
310 }
311 
312 static void irq_sysfs_del(struct irq_desc *desc)
313 {
314 	/*
315 	 * Only invoke kobject_del() when kobject_add() was successfully
316 	 * invoked for the descriptor. This covers both early boot, where
317 	 * sysfs is not initialized yet, and the case of a failed
318 	 * kobject_add() invocation.
319 	 */
320 	if (desc->istate & IRQS_SYSFS)
321 		kobject_del(&desc->kobj);
322 }
323 
324 static int __init irq_sysfs_init(void)
325 {
326 	struct irq_desc *desc;
327 	int irq;
328 
329 	/* Prevent concurrent irq alloc/free */
330 	irq_lock_sparse();
331 
332 	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
333 	if (!irq_kobj_base) {
334 		irq_unlock_sparse();
335 		return -ENOMEM;
336 	}
337 
338 	/* Add the already allocated interrupts */
339 	for_each_irq_desc(irq, desc)
340 		irq_sysfs_add(irq, desc);
341 	irq_unlock_sparse();
342 
343 	return 0;
344 }
345 postcore_initcall(irq_sysfs_init);
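/*
 * Illustrative note (not part of the original file): with CONFIG_SYSFS the
 * code above creates one directory per allocated interrupt under
 * /sys/kernel/irq/<irq>/, exposing the read-only attributes defined above:
 * per_cpu_count, chip_name, hwirq, type, wakeup, name and actions.
 */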
346 
347 #else /* !CONFIG_SYSFS */
348 
349 static const struct kobj_type irq_kobj_type = {
350 	.release	= irq_kobj_release,
351 };
352 
353 static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
354 static void irq_sysfs_del(struct irq_desc *desc) {}
355 
356 #endif /* CONFIG_SYSFS */
357 
358 static RADIX_TREE(irq_desc_tree, GFP_KERNEL);
359 
360 static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
361 {
362 	radix_tree_insert(&irq_desc_tree, irq, desc);
363 }
364 
365 struct irq_desc *irq_to_desc(unsigned int irq)
366 {
367 	return radix_tree_lookup(&irq_desc_tree, irq);
368 }
369 #ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE
370 EXPORT_SYMBOL_GPL(irq_to_desc);
371 #endif
372 
373 static void delete_irq_desc(unsigned int irq)
374 {
375 	radix_tree_delete(&irq_desc_tree, irq);
376 }
377 
378 #ifdef CONFIG_SMP
379 static void free_masks(struct irq_desc *desc)
380 {
381 #ifdef CONFIG_GENERIC_PENDING_IRQ
382 	free_cpumask_var(desc->pending_mask);
383 #endif
384 	free_cpumask_var(desc->irq_common_data.affinity);
385 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
386 	free_cpumask_var(desc->irq_common_data.effective_affinity);
387 #endif
388 }
389 #else
390 static inline void free_masks(struct irq_desc *desc) { }
391 #endif
392 
393 void irq_lock_sparse(void)
394 {
395 	mutex_lock(&sparse_irq_lock);
396 }
397 
398 void irq_unlock_sparse(void)
399 {
400 	mutex_unlock(&sparse_irq_lock);
401 }
402 
403 static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
404 				   const struct cpumask *affinity,
405 				   struct module *owner)
406 {
407 	struct irq_desc *desc;
408 
409 	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
410 	if (!desc)
411 		return NULL;
412 	/* allocate based on nr_cpu_ids */
413 	desc->kstat_irqs = alloc_percpu(unsigned int);
414 	if (!desc->kstat_irqs)
415 		goto err_desc;
416 
417 	if (alloc_masks(desc, node))
418 		goto err_kstat;
419 
420 	raw_spin_lock_init(&desc->lock);
421 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
422 	mutex_init(&desc->request_mutex);
423 	init_rcu_head(&desc->rcu);
424 	init_waitqueue_head(&desc->wait_for_threads);
425 
426 	desc_set_defaults(irq, desc, node, affinity, owner);
427 	irqd_set(&desc->irq_data, flags);
428 	kobject_init(&desc->kobj, &irq_kobj_type);
429 	irq_resend_init(desc);
430 
431 	return desc;
432 
433 err_kstat:
434 	free_percpu(desc->kstat_irqs);
435 err_desc:
436 	kfree(desc);
437 	return NULL;
438 }
439 
440 static void irq_kobj_release(struct kobject *kobj)
441 {
442 	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
443 
444 	free_masks(desc);
445 	free_percpu(desc->kstat_irqs);
446 	kfree(desc);
447 }
448 
449 static void delayed_free_desc(struct rcu_head *rhp)
450 {
451 	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);
452 
453 	kobject_put(&desc->kobj);
454 }
455 
456 static void free_desc(unsigned int irq)
457 {
458 	struct irq_desc *desc = irq_to_desc(irq);
459 
460 	irq_remove_debugfs_entry(desc);
461 	unregister_irq_proc(irq, desc);
462 
463 	/*
464 	 * sparse_irq_lock also protects show_interrupts() and
465 	 * kstat_irqs_usr(). Once we have deleted the descriptor from the
466 	 * sparse tree we can free it. Accesses from proc will then fail
467 	 * to look up the descriptor.
468 	 *
469 	 * The sysfs entry must be serialized against a concurrent
470 	 * irq_sysfs_init() as well.
471 	 */
472 	irq_sysfs_del(desc);
473 	delete_irq_desc(irq);
474 
475 	/*
476 	 * We free the descriptor, masks and stat fields via RCU. That
477 	 * allows demultiplexing interrupts to do RCU based management of
478 	 * the child interrupts.
479 	 * This also allows us to use RCU in kstat_irqs_usr().
480 	 */
481 	call_rcu(&desc->rcu, delayed_free_desc);
482 }
483 
484 static int alloc_descs(unsigned int start, unsigned int cnt, int node,
485 		       const struct irq_affinity_desc *affinity,
486 		       struct module *owner)
487 {
488 	struct irq_desc *desc;
489 	int i;
490 
491 	/* Validate affinity mask(s) */
492 	if (affinity) {
493 		for (i = 0; i < cnt; i++) {
494 			if (cpumask_empty(&affinity[i].mask))
495 				return -EINVAL;
496 		}
497 	}
498 
499 	for (i = 0; i < cnt; i++) {
500 		const struct cpumask *mask = NULL;
501 		unsigned int flags = 0;
502 
503 		if (affinity) {
504 			if (affinity->is_managed) {
505 				flags = IRQD_AFFINITY_MANAGED |
506 					IRQD_MANAGED_SHUTDOWN;
507 			}
508 			mask = &affinity->mask;
509 			node = cpu_to_node(cpumask_first(mask));
510 			affinity++;
511 		}
512 
513 		desc = alloc_desc(start + i, node, flags, mask, owner);
514 		if (!desc)
515 			goto err;
516 		irq_insert_desc(start + i, desc);
517 		irq_sysfs_add(start + i, desc);
518 		irq_add_debugfs_entry(start + i, desc);
519 	}
520 	bitmap_set(allocated_irqs, start, cnt);
521 	return start;
522 
523 err:
524 	for (i--; i >= 0; i--)
525 		free_desc(start + i);
526 	return -ENOMEM;
527 }
528 
529 static int irq_expand_nr_irqs(unsigned int nr)
530 {
531 	if (nr > MAX_SPARSE_IRQS)
532 		return -ENOMEM;
533 	nr_irqs = nr;
534 	return 0;
535 }
536 
537 int __init early_irq_init(void)
538 {
539 	int i, initcnt, node = first_online_node;
540 	struct irq_desc *desc;
541 
542 	init_irq_default_affinity();
543 
544 	/* Let arch update nr_irqs and return the nr of preallocated irqs */
545 	initcnt = arch_probe_nr_irqs();
546 	printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
547 	       NR_IRQS, nr_irqs, initcnt);
548 
549 	if (WARN_ON(nr_irqs > MAX_SPARSE_IRQS))
550 		nr_irqs = MAX_SPARSE_IRQS;
551 
552 	if (WARN_ON(initcnt > MAX_SPARSE_IRQS))
553 		initcnt = MAX_SPARSE_IRQS;
554 
555 	if (initcnt > nr_irqs)
556 		nr_irqs = initcnt;
557 
558 	for (i = 0; i < initcnt; i++) {
559 		desc = alloc_desc(i, node, 0, NULL, NULL);
560 		set_bit(i, allocated_irqs);
561 		irq_insert_desc(i, desc);
562 	}
563 	return arch_early_irq_init();
564 }
565 
566 #else /* !CONFIG_SPARSE_IRQ */
567 
568 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
569 	[0 ... NR_IRQS-1] = {
570 		.handle_irq	= handle_bad_irq,
571 		.depth		= 1,
572 		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
573 	}
574 };
575 
576 int __init early_irq_init(void)
577 {
578 	int count, i, node = first_online_node;
579 	struct irq_desc *desc;
580 
581 	init_irq_default_affinity();
582 
583 	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);
584 
585 	desc = irq_desc;
586 	count = ARRAY_SIZE(irq_desc);
587 
588 	for (i = 0; i < count; i++) {
589 		desc[i].kstat_irqs = alloc_percpu(unsigned int);
590 		alloc_masks(&desc[i], node);
591 		raw_spin_lock_init(&desc[i].lock);
592 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
593 		mutex_init(&desc[i].request_mutex);
594 		init_waitqueue_head(&desc[i].wait_for_threads);
595 		desc_set_defaults(i, &desc[i], node, NULL, NULL);
596 		irq_resend_init(&desc[i]);
597 	}
598 	return arch_early_irq_init();
599 }
600 
601 struct irq_desc *irq_to_desc(unsigned int irq)
602 {
603 	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
604 }
605 EXPORT_SYMBOL(irq_to_desc);
606 
607 static void free_desc(unsigned int irq)
608 {
609 	struct irq_desc *desc = irq_to_desc(irq);
610 	unsigned long flags;
611 
612 	raw_spin_lock_irqsave(&desc->lock, flags);
613 	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
614 	raw_spin_unlock_irqrestore(&desc->lock, flags);
615 }
616 
617 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
618 			      const struct irq_affinity_desc *affinity,
619 			      struct module *owner)
620 {
621 	u32 i;
622 
623 	for (i = 0; i < cnt; i++) {
624 		struct irq_desc *desc = irq_to_desc(start + i);
625 
626 		desc->owner = owner;
627 	}
628 	bitmap_set(allocated_irqs, start, cnt);
629 	return start;
630 }
631 
632 static int irq_expand_nr_irqs(unsigned int nr)
633 {
634 	return -ENOMEM;
635 }
636 
637 void irq_mark_irq(unsigned int irq)
638 {
639 	mutex_lock(&sparse_irq_lock);
640 	bitmap_set(allocated_irqs, irq, 1);
641 	mutex_unlock(&sparse_irq_lock);
642 }
643 
644 #ifdef CONFIG_GENERIC_IRQ_LEGACY
645 void irq_init_desc(unsigned int irq)
646 {
647 	free_desc(irq);
648 }
649 #endif
650 
651 #endif /* !CONFIG_SPARSE_IRQ */
652 
653 int handle_irq_desc(struct irq_desc *desc)
654 {
655 	struct irq_data *data;
656 
657 	if (!desc)
658 		return -EINVAL;
659 
660 	data = irq_desc_get_irq_data(desc);
661 	if (WARN_ON_ONCE(!in_hardirq() && handle_enforce_irqctx(data)))
662 		return -EPERM;
663 
664 	generic_handle_irq_desc(desc);
665 	return 0;
666 }
667 
668 /**
669  * generic_handle_irq - Invoke the handler for a particular irq
670  * @irq:	The irq number to handle
671  *
672  * Returns:	0 on success, or -EINVAL if conversion has failed
673  *
674  * 		This function must be called from an IRQ context with irq regs
675  * 		initialized.
676  */
677 int generic_handle_irq(unsigned int irq)
678 {
679 	return handle_irq_desc(irq_to_desc(irq));
680 }
681 EXPORT_SYMBOL_GPL(generic_handle_irq);
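/*
 * Illustrative sketch (not part of the original file): a demultiplexing
 * driver would call generic_handle_irq() for each pending child from its
 * parent hard interrupt handler, e.g.:
 *
 *	static irqreturn_t foo_demux_handler(int irq, void *data)
 *	{
 *		struct foo_chip *chip = data;
 *		int bit = foo_pending_child(chip);
 *
 *		if (bit < 0)
 *			return IRQ_NONE;
 *		generic_handle_irq(chip->irq_base + bit);
 *		return IRQ_HANDLED;
 *	}
 *
 * struct foo_chip, foo_pending_child() and irq_base are hypothetical; only
 * generic_handle_irq() is taken from this file.
 */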
682 
683 /**
684  * generic_handle_irq_safe - Invoke the handler for a particular irq from any
685  *			     context.
686  * @irq:	The irq number to handle
687  *
688  * Returns:	0 on success, a negative value on error.
689  *
690  * This function can be called from any context (IRQ or process context). It
691  * will report an error if not invoked from IRQ context and the irq has been
692  * marked to enforce IRQ-context only.
693  */
694 int generic_handle_irq_safe(unsigned int irq)
695 {
696 	unsigned long flags;
697 	int ret;
698 
699 	local_irq_save(flags);
700 	ret = handle_irq_desc(irq_to_desc(irq));
701 	local_irq_restore(flags);
702 	return ret;
703 }
704 EXPORT_SYMBOL_GPL(generic_handle_irq_safe);
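/*
 * Illustrative sketch (not part of the original file): unlike
 * generic_handle_irq(), this variant may be used from a threaded handler,
 * e.g. by an I2C GPIO expander that demultiplexes its children outside of
 * hard interrupt context:
 *
 *	static irqreturn_t foo_i2c_thread_fn(int irq, void *data)
 *	{
 *		struct foo_expander *chip = data;
 *		unsigned int child = foo_read_pending(chip);
 *
 *		generic_handle_irq_safe(chip->irq_base + child);
 *		return IRQ_HANDLED;
 *	}
 *
 * struct foo_expander, foo_read_pending() and irq_base are hypothetical.
 */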
705 
706 #ifdef CONFIG_IRQ_DOMAIN
707 /**
708  * generic_handle_domain_irq - Invoke the handler for a HW irq belonging
709  *                             to a domain.
710  * @domain:	The domain where to perform the lookup
711  * @hwirq:	The HW irq number to convert to a logical one
712  *
713  * Returns:	0 on success, or -EINVAL if conversion has failed
714  *
715  * 		This function must be called from an IRQ context with irq regs
716  * 		initialized.
717  */
718 int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq)
719 {
720 	return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
721 }
722 EXPORT_SYMBOL_GPL(generic_handle_domain_irq);
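/*
 * Illustrative sketch (not part of the original file): an irqchip driver
 * typically calls this from its chained handler with the hardware irq
 * number read from a status register, letting the core resolve the Linux
 * irq through the domain:
 *
 *	static void foo_chained_handler(struct irq_desc *desc)
 *	{
 *		struct foo_intc *intc = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending, hwirq;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = readl(intc->base + FOO_STATUS);
 *		for_each_set_bit(hwirq, &pending, 32)
 *			generic_handle_domain_irq(intc->domain, hwirq);
 *		chained_irq_exit(chip, desc);
 *	}
 *
 * struct foo_intc, FOO_STATUS and the register layout are hypothetical; the
 * chained_irq_*() helpers and the irq_desc accessors are real kernel APIs.
 */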
723 
724 /**
725  * generic_handle_domain_irq_safe - Invoke the handler for a HW irq belonging
726  *				      to a domain from any context.
727  * @domain:	The domain where to perform the lookup
728  * @hwirq:	The HW irq number to convert to a logical one
729  *
730  * Returns:	0 on success, a negative value on error.
731  *
732  * This function can be called from any context (IRQ or process
733  * context). If the interrupt is marked as 'enforce IRQ-context only' then
734  * the function must be invoked from hard interrupt context.
735  */
736 int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq)
737 {
738 	unsigned long flags;
739 	int ret;
740 
741 	local_irq_save(flags);
742 	ret = handle_irq_desc(irq_resolve_mapping(domain, hwirq));
743 	local_irq_restore(flags);
744 	return ret;
745 }
746 EXPORT_SYMBOL_GPL(generic_handle_domain_irq_safe);
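/*
 * Illustrative sketch (not part of the original file): the lookup is the
 * same as in generic_handle_domain_irq(), but the call may come from a
 * threaded handler or other process context, e.g.:
 *
 *	ret = generic_handle_domain_irq_safe(chip->domain, hwirq);
 *
 * where chip->domain is a hypothetical driver private irq_domain pointer.
 */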
747 
748 /**
749  * generic_handle_domain_nmi - Invoke the handler for a HW nmi belonging
750  *                             to a domain.
751  * @domain:	The domain where to perform the lookup
752  * @hwirq:	The HW irq number to convert to a logical one
753  *
754  * Returns:	0 on success, or -EINVAL if conversion has failed
755  *
756  * 		This function must be called from an NMI context with irq regs
757  * 		initialized.
758  */
759 int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq)
760 {
761 	WARN_ON_ONCE(!in_nmi());
762 	return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
763 }
764 #endif
765 
766 /* Dynamic interrupt handling */
767 
768 /**
769  * irq_free_descs - free irq descriptors
770  * @from:	Start of descriptor range
771  * @cnt:	Number of consecutive irqs to free
772  */
773 void irq_free_descs(unsigned int from, unsigned int cnt)
774 {
775 	int i;
776 
777 	if (from >= nr_irqs || (from + cnt) > nr_irqs)
778 		return;
779 
780 	mutex_lock(&sparse_irq_lock);
781 	for (i = 0; i < cnt; i++)
782 		free_desc(from + i);
783 
784 	bitmap_clear(allocated_irqs, from, cnt);
785 	mutex_unlock(&sparse_irq_lock);
786 }
787 EXPORT_SYMBOL_GPL(irq_free_descs);
788 
789 /**
790  * __irq_alloc_descs - allocate and initialize a range of irq descriptors
791  * @irq:	Allocate for specific irq number if irq >= 0
792  * @from:	Start the search from this irq number
793  * @cnt:	Number of consecutive irqs to allocate.
794  * @node:	Preferred node on which the irq descriptor should be allocated
795  * @owner:	Owning module (can be NULL)
796  * @affinity:	Optional pointer to an affinity mask array of size @cnt which
797  *		hints where the irq descriptors should be allocated and which
798  *		default affinities to use
799  *
800  * Returns the first irq number or error code
801  */
802 int __ref
803 __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
804 		  struct module *owner, const struct irq_affinity_desc *affinity)
805 {
806 	int start, ret;
807 
808 	if (!cnt)
809 		return -EINVAL;
810 
811 	if (irq >= 0) {
812 		if (from > irq)
813 			return -EINVAL;
814 		from = irq;
815 	} else {
816 		/*
817 		 * For interrupts which are freely allocated the
818 		 * architecture can force a lower bound to the @from
819 		 * argument. x86 uses this to exclude the GSI space.
820 		 */
821 		from = arch_dynirq_lower_bound(from);
822 	}
823 
824 	mutex_lock(&sparse_irq_lock);
825 
826 	start = irq_find_free_area(from, cnt);
827 	ret = -EEXIST;
828 	if (irq >= 0 && start != irq)
829 		goto unlock;
830 
831 	if (start + cnt > nr_irqs) {
832 		ret = irq_expand_nr_irqs(start + cnt);
833 		if (ret)
834 			goto unlock;
835 	}
836 	ret = alloc_descs(start, cnt, node, affinity, owner);
837 unlock:
838 	mutex_unlock(&sparse_irq_lock);
839 	return ret;
840 }
841 EXPORT_SYMBOL_GPL(__irq_alloc_descs);
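/*
 * Illustrative sketch (not part of the original file): a caller that needs a
 * block of dynamically numbered interrupts could allocate and later release
 * it like this, searching anywhere at or above irq 0 for four consecutive
 * free descriptors (error handling trimmed):
 *
 *	int virq;
 *
 *	virq = __irq_alloc_descs(-1, 0, 4, NUMA_NO_NODE, THIS_MODULE, NULL);
 *	if (virq < 0)
 *		return virq;
 *	...
 *	irq_free_descs(virq, 4);
 *
 * Most code reaches this through the irq_alloc_descs()/irq_alloc_desc()
 * wrappers or, more commonly, through an irq domain rather than directly.
 */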
842 
843 /**
844  * irq_get_next_irq - get next allocated irq number
845  * @offset:	where to start the search
846  *
847  * Returns next irq number at or after offset or nr_irqs if none is found.
848  */
849 unsigned int irq_get_next_irq(unsigned int offset)
850 {
851 	return irq_find_at_or_after(offset);
852 }
853 
854 struct irq_desc *
855 __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
856 		    unsigned int check)
857 {
858 	struct irq_desc *desc = irq_to_desc(irq);
859 
860 	if (desc) {
861 		if (check & _IRQ_DESC_CHECK) {
862 			if ((check & _IRQ_DESC_PERCPU) &&
863 			    !irq_settings_is_per_cpu_devid(desc))
864 				return NULL;
865 
866 			if (!(check & _IRQ_DESC_PERCPU) &&
867 			    irq_settings_is_per_cpu_devid(desc))
868 				return NULL;
869 		}
870 
871 		if (bus)
872 			chip_bus_lock(desc);
873 		raw_spin_lock_irqsave(&desc->lock, *flags);
874 	}
875 	return desc;
876 }
877 
878 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
879 	__releases(&desc->lock)
880 {
881 	raw_spin_unlock_irqrestore(&desc->lock, flags);
882 	if (bus)
883 		chip_bus_sync_unlock(desc);
884 }
885 
886 int irq_set_percpu_devid_partition(unsigned int irq,
887 				   const struct cpumask *affinity)
888 {
889 	struct irq_desc *desc = irq_to_desc(irq);
890 
891 	if (!desc)
892 		return -EINVAL;
893 
894 	if (desc->percpu_enabled)
895 		return -EINVAL;
896 
897 	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
898 
899 	if (!desc->percpu_enabled)
900 		return -ENOMEM;
901 
902 	if (affinity)
903 		desc->percpu_affinity = affinity;
904 	else
905 		desc->percpu_affinity = cpu_possible_mask;
906 
907 	irq_set_percpu_devid_flags(irq);
908 	return 0;
909 }
910 
911 int irq_set_percpu_devid(unsigned int irq)
912 {
913 	return irq_set_percpu_devid_partition(irq, NULL);
914 }
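/*
 * Illustrative sketch (not part of the original file): per-CPU interrupts
 * (timers, IPIs, PMUs) are marked with irq_set_percpu_devid() and then
 * requested with the per-CPU API instead of request_irq():
 *
 *	irq_set_percpu_devid(virq);
 *	err = request_percpu_irq(virq, foo_percpu_handler, "foo-timer",
 *				 &foo_percpu_dev);
 *	if (!err)
 *		enable_percpu_irq(virq, IRQ_TYPE_NONE);
 *
 * foo_percpu_handler and foo_percpu_dev (a __percpu variable) are
 * hypothetical; request_percpu_irq() and enable_percpu_irq() are real.
 */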
915 
916 int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
917 {
918 	struct irq_desc *desc = irq_to_desc(irq);
919 
920 	if (!desc || !desc->percpu_enabled)
921 		return -EINVAL;
922 
923 	if (affinity)
924 		cpumask_copy(affinity, desc->percpu_affinity);
925 
926 	return 0;
927 }
928 EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);
929 
930 void kstat_incr_irq_this_cpu(unsigned int irq)
931 {
932 	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
933 }
934 
935 /**
936  * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
937  * @irq:	The interrupt number
938  * @cpu:	The cpu number
939  *
940  * Returns the sum of interrupt counts on @cpu since boot for
941  * @irq. The caller must ensure that the interrupt is not removed
942  * concurrently.
943  */
944 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
945 {
946 	struct irq_desc *desc = irq_to_desc(irq);
947 
948 	return desc && desc->kstat_irqs ?
949 			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
950 }
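/*
 * Illustrative sketch (not part of the original file): summing the per-CPU
 * counts the way /proc/interrupts does, assuming the caller already keeps
 * the interrupt from being removed concurrently:
 *
 *	unsigned int cpu, total = 0;
 *
 *	for_each_online_cpu(cpu)
 *		total += kstat_irqs_cpu(irq, cpu);
 */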
951 
952 static bool irq_is_nmi(struct irq_desc *desc)
953 {
954 	return desc->istate & IRQS_NMI;
955 }
956 
957 static unsigned int kstat_irqs(unsigned int irq)
958 {
959 	struct irq_desc *desc = irq_to_desc(irq);
960 	unsigned int sum = 0;
961 	int cpu;
962 
963 	if (!desc || !desc->kstat_irqs)
964 		return 0;
965 	if (!irq_settings_is_per_cpu_devid(desc) &&
966 	    !irq_settings_is_per_cpu(desc) &&
967 	    !irq_is_nmi(desc))
968 		return data_race(desc->tot_count);
969 
970 	for_each_possible_cpu(cpu)
971 		sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu));
972 	return sum;
973 }
974 
975 /**
976  * kstat_irqs_usr - Get the statistics for an interrupt from thread context
977  * @irq:	The interrupt number
978  *
979  * Returns the sum of interrupt counts on all cpus since boot for @irq.
980  *
981  * It uses RCU to protect the access since a concurrent removal of an
982  * interrupt descriptor observes an RCU grace period before
983  * delayed_free_desc()/irq_kobj_release() are invoked.
984  */
985 unsigned int kstat_irqs_usr(unsigned int irq)
986 {
987 	unsigned int sum;
988 
989 	rcu_read_lock();
990 	sum = kstat_irqs(irq);
991 	rcu_read_unlock();
992 	return sum;
993 }
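/*
 * Illustrative sketch (not part of the original file): from plain process
 * context, e.g. a driver's sysfs show callback, the total count can simply
 * be read as
 *
 *	unsigned int count = kstat_irqs_usr(irq);
 *
 * since the RCU read-side section above protects against a concurrent
 * removal of the descriptor.
 */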
994 
995 #ifdef CONFIG_LOCKDEP
996 void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
997 			     struct lock_class_key *request_class)
998 {
999 	struct irq_desc *desc = irq_to_desc(irq);
1000 
1001 	if (desc) {
1002 		lockdep_set_class(&desc->lock, lock_class);
1003 		lockdep_set_class(&desc->request_mutex, request_class);
1004 	}
1005 }
1006 EXPORT_SYMBOL_GPL(__irq_set_lockdep_class);
1007 #endif
1008