// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code. Detailed
 * information is available in Documentation/core-api/genericirq.rst
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/maple_tree.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Set at least the boot cpu. We don't want to end up with
	 * bug reports caused by random command line masks.
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
__setup("irqaffinity=", irq_affinity_setup);
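
/*
 * Example (illustrative): booting with
 *
 *	irqaffinity=0-3
 *
 * on the kernel command line restricts the default affinity of newly
 * allocated interrupts to CPUs 0-3. The boot CPU is added back above,
 * so a stray command line mask cannot leave the mask unusable.
 */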

static void __init init_irq_default_affinity(void)
{
	if (!cpumask_available(irq_default_affinity))
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
				     GFP_KERNEL, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
				     GFP_KERNEL, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node,
			  const struct cpumask *affinity)
{
	if (!affinity)
		affinity = irq_default_affinity;
	cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
	desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      const struct cpumask *affinity, struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;

	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->tot_count = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node, affinity);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
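
/*
 * The sparse_irqs maple tree maps Linux interrupt numbers to their
 * irq_desc instances. Writers are serialized by sparse_irq_lock (hence
 * MT_FLAGS_LOCK_EXTERN), while MT_FLAGS_USE_RCU permits the lockless
 * lookups done by irq_to_desc().
 */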
static struct maple_tree sparse_irqs = MTREE_INIT_EXT(sparse_irqs,
					MT_FLAGS_ALLOC_RANGE |
					MT_FLAGS_LOCK_EXTERN |
					MT_FLAGS_USE_RCU,
					sparse_irq_lock);

static int irq_find_free_area(unsigned int from, unsigned int cnt)
{
	MA_STATE(mas, &sparse_irqs, 0, 0);

	if (mas_empty_area(&mas, from, MAX_SPARSE_IRQS, cnt))
		return -ENOSPC;
	return mas.index;
}

static unsigned int irq_find_at_or_after(unsigned int offset)
{
	unsigned long index = offset;
	struct irq_desc *desc;

	guard(rcu)();
	desc = mt_find(&sparse_irqs, &index, nr_irqs);

	return desc ? irq_desc_get_irq(desc) : nr_irqs;
}

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	MA_STATE(mas, &sparse_irqs, irq, irq);
	WARN_ON(mas_store_gfp(&mas, desc, GFP_KERNEL) != 0);
}

static void delete_irq_desc(unsigned int irq)
{
	MA_STATE(mas, &sparse_irqs, irq, irq);
	mas_erase(&mas);
}

#ifdef CONFIG_SPARSE_IRQ

static void irq_kobj_release(struct kobject *kobj);

#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;

#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

static ssize_t per_cpu_count_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;
	char *p = "";
	int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int c = irq_desc_kstat_cpu(desc, cpu);

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
		p = ",";
	}

	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	return ret;
}
IRQ_ATTR_RO(per_cpu_count);

static ssize_t chip_name_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.chip && desc->irq_data.chip->name) {
		ret = scnprintf(buf, PAGE_SIZE, "%s\n",
				desc->irq_data.chip->name);
	}
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(chip_name);

static ssize_t hwirq_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.domain)
		ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(hwirq);

static ssize_t type_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(type);

static ssize_t wakeup_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(wakeup);

static ssize_t name_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->name)
		ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(name);

static ssize_t actions_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	struct irqaction *action;
	ssize_t ret = 0;
	char *p = "";

	raw_spin_lock_irq(&desc->lock);
	for_each_action_of_desc(desc, action) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
				 p, action->name);
		p = ",";
	}
	raw_spin_unlock_irq(&desc->lock);

	if (ret)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

	return ret;
}
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
	&per_cpu_count_attr.attr,
	&chip_name_attr.attr,
	&hwirq_attr.attr,
	&type_attr.attr,
	&wakeup_attr.attr,
	&name_attr.attr,
	&actions_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(irq);

static const struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups = irq_groups,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
	if (irq_kobj_base) {
		/*
		 * Continue even in case of failure as this is nothing
		 * crucial and failures in the late irq_sysfs_init()
		 * cannot be rolled back.
		 */
		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
			pr_warn("Failed to add kobject for irq %d\n", irq);
		else
			desc->istate |= IRQS_SYSFS;
	}
}

static void irq_sysfs_del(struct irq_desc *desc)
{
	/*
	 * Only invoke kobject_del() when kobject_add() was successfully
	 * invoked for the descriptor. This covers both early boot, where
	 * sysfs is not initialized yet, and the case of a failed
	 * kobject_add() invocation.
	 */
	if (desc->istate & IRQS_SYSFS)
		kobject_del(&desc->kobj);
}

static int __init irq_sysfs_init(void)
{
	struct irq_desc *desc;
	int irq;

	/* Prevent concurrent irq alloc/free */
	irq_lock_sparse();

	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
	if (!irq_kobj_base) {
		irq_unlock_sparse();
		return -ENOMEM;
	}

	/* Add the already allocated interrupts */
	for_each_irq_desc(irq, desc)
		irq_sysfs_add(irq, desc);
	irq_unlock_sparse();

	return 0;
}
postcore_initcall(irq_sysfs_init);
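
/*
 * Example (illustrative): once irq_sysfs_init() has run, the attributes
 * defined above appear under /sys/kernel/irq/<irq>/, e.g. from a shell:
 *
 *	$ cat /sys/kernel/irq/9/chip_name
 *	IO-APIC
 *	$ cat /sys/kernel/irq/9/actions
 *	acpi
 *
 * The irq number and values shown are hypothetical and system dependent;
 * the set of files matches the irq_attrs[] array above.
 */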

#else /* !CONFIG_SYSFS */

static const struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
static void irq_sysfs_del(struct irq_desc *desc) {}

#endif /* CONFIG_SYSFS */

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return mtree_load(&sparse_irqs, irq);
}
#ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE
EXPORT_SYMBOL_GPL(irq_to_desc);
#endif

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
				   const struct cpumask *affinity,
				   struct module *owner)
{
	struct irq_desc *desc;

	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	mutex_init(&desc->request_mutex);
	init_rcu_head(&desc->rcu);
	init_waitqueue_head(&desc->wait_for_threads);

	desc_set_defaults(irq, desc, node, affinity, owner);
	irqd_set(&desc->irq_data, flags);
	kobject_init(&desc->kobj, &irq_kobj_type);
	irq_resend_init(desc);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void irq_kobj_release(struct kobject *kobj)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

	kobject_put(&desc->kobj);
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	irq_remove_debugfs_entry(desc);
	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock also protects show_interrupts() and
	 * kstat_irq_usr(). Once the descriptor is deleted from the
	 * sparse tree it can be freed; subsequent lookups from proc
	 * will fail.
	 *
	 * The sysfs entry must be serialized against a concurrent
	 * irq_sysfs_init() as well.
	 */
	irq_sysfs_del(desc);
	delete_irq_desc(irq);

	/*
	 * We free the descriptor, masks and stat fields via RCU. That
	 * allows demultiplex interrupts to do rcu based management of
	 * the child interrupts.
	 * This also allows us to use rcu in kstat_irqs_usr().
	 */
	call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       const struct irq_affinity_desc *affinity,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	/* Validate affinity mask(s) */
	if (affinity) {
		for (i = 0; i < cnt; i++) {
			if (cpumask_empty(&affinity[i].mask))
				return -EINVAL;
		}
	}

	for (i = 0; i < cnt; i++) {
		const struct cpumask *mask = NULL;
		unsigned int flags = 0;

		if (affinity) {
			if (affinity->is_managed) {
				flags = IRQD_AFFINITY_MANAGED |
					IRQD_MANAGED_SHUTDOWN;
			}
			flags |= IRQD_AFFINITY_SET;
			mask = &affinity->mask;
			node = cpu_to_node(cpumask_first(mask));
			affinity++;
		}

		desc = alloc_desc(start + i, node, flags, mask, owner);
		if (!desc)
			goto err;
		irq_insert_desc(start + i, desc);
		irq_sysfs_add(start + i, desc);
		irq_add_debugfs_entry(start + i, desc);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > MAX_SPARSE_IRQS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
	       NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > MAX_SPARSE_IRQS))
		nr_irqs = MAX_SPARSE_IRQS;

	if (WARN_ON(initcnt > MAX_SPARSE_IRQS))
		initcnt = MAX_SPARSE_IRQS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, 0, NULL, NULL);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		mutex_init(&desc[i].request_mutex);
		init_waitqueue_head(&desc[i].wait_for_threads);
		desc_set_defaults(i, &desc[i], node, NULL, NULL);
		irq_resend_init(&desc[i]);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	delete_irq_desc(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      const struct irq_affinity_desc *affinity,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
		irq_insert_desc(start + i, desc);
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	irq_insert_desc(irq, irq_desc + irq);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

int handle_irq_desc(struct irq_desc *desc)
{
	struct irq_data *data;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	if (WARN_ON_ONCE(!in_hardirq() && handle_enforce_irqctx(data)))
		return -EPERM;

	generic_handle_irq_desc(desc);
	return 0;
}

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 *
 * 		This function must be called from an IRQ context with irq regs
 * 		initialized.
 */
int generic_handle_irq(unsigned int irq)
{
	return handle_irq_desc(irq_to_desc(irq));
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
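
/*
 * Example (illustrative sketch, hypothetical register and field names): a
 * common caller is a chained demultiplex handler which reads the pending
 * register of a secondary interrupt controller and invokes the handler
 * of each pending child interrupt:
 *
 *	static void foo_demux_handler(struct irq_desc *desc)
 *	{
 *		struct foo_chip *fc = irq_desc_get_handler_data(desc);
 *		unsigned long pending = readl(fc->base + FOO_PENDING);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, 32)
 *			generic_handle_irq(fc->irq_base + bit);
 *	}
 */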

/**
 * generic_handle_irq_safe - Invoke the handler for a particular irq from any
 *			     context.
 * @irq:	The irq number to handle
 *
 * Returns:	0 on success, a negative value on error.
 *
 * This function can be called from any context (IRQ or process context). It
 * will report an error if not invoked from IRQ context and the irq has been
 * marked to enforce IRQ-context only.
 */
int generic_handle_irq_safe(unsigned int irq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = handle_irq_desc(irq_to_desc(irq));
	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL_GPL(generic_handle_irq_safe);
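
/*
 * Example (illustrative): generic_handle_irq_safe() suits drivers which
 * may have to demultiplex from process context as well, e.g. a threaded
 * handler for an expander chip behind a slow bus. The local_irq_save()
 * above makes the call valid in either context.
 */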

#ifdef CONFIG_IRQ_DOMAIN
/**
 * generic_handle_domain_irq - Invoke the handler for a HW irq belonging
 *                             to a domain.
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 *
 * 		This function must be called from an IRQ context with irq regs
 * 		initialized.
 */
int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq)
{
	return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
}
EXPORT_SYMBOL_GPL(generic_handle_domain_irq);
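
/*
 * Example (illustrative sketch, hypothetical names): a root interrupt
 * controller driver typically calls this from its low level entry
 * handler after reading the hardware interrupt number from a claim
 * register:
 *
 *	static void foo_handle_irq(struct pt_regs *regs)
 *	{
 *		u32 hwirq;
 *
 *		while ((hwirq = readl(foo_base + FOO_CLAIM)))
 *			generic_handle_domain_irq(foo_domain, hwirq);
 *	}
 */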

/**
 * generic_handle_domain_irq_safe - Invoke the handler for a HW irq belonging
 *				    to a domain from any context.
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 *
 * Returns:	0 on success, a negative value on error.
 *
 * This function can be called from any context (IRQ or process
 * context). If the interrupt is marked as 'enforce IRQ-context only' then
 * the function must be invoked from hard interrupt context.
 */
int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = handle_irq_desc(irq_resolve_mapping(domain, hwirq));
	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL_GPL(generic_handle_domain_irq_safe);

/**
 * generic_handle_domain_nmi - Invoke the handler for a HW nmi belonging
 *                             to a domain.
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 *
 * 		This function must be called from an NMI context with irq regs
 * 		initialized.
 */
int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq)
{
	WARN_ON_ONCE(!in_nmi());
	return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
}
#endif

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	mutex_lock(&sparse_irq_lock);
	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 * @affinity:	Optional pointer to an affinity mask array of size @cnt which
 *		hints where the irq descriptors should be allocated and which
 *		default affinities to use
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner, const struct irq_affinity_desc *affinity)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = irq_find_free_area(from, cnt);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto unlock;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto unlock;
	}
	ret = alloc_descs(start, cnt, node, affinity, owner);
unlock:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
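
/*
 * Example (illustrative): most callers use the wrapper macros from
 * <linux/irq.h> instead of calling this directly, e.g.
 *
 *	int virq = irq_alloc_descs(-1, 0, 4, NUMA_NO_NODE);
 *
 * which allocates four consecutive descriptors at the first free
 * position and returns the first irq number; the range is released
 * again with irq_free_descs(virq, 4).
 */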

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return irq_find_at_or_after(offset);
}

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
	__releases(&desc->lock)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

int irq_set_percpu_devid_partition(unsigned int irq,
				   const struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	if (affinity)
		desc->percpu_affinity = affinity;
	else
		desc->percpu_affinity = cpu_possible_mask;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
	return irq_set_percpu_devid_partition(irq, NULL);
}
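
/*
 * Example (illustrative sketch, hypothetical names): a per-CPU devid
 * interrupt, e.g. a per-CPU timer, is marked here and then requested
 * with the per-CPU API:
 *
 *	irq_set_percpu_devid(virq);
 *	err = request_percpu_irq(virq, foo_timer_handler, "foo_timer",
 *				 &foo_percpu_data);
 *
 * where foo_percpu_data is a __percpu pointer; each CPU then enables
 * its own copy via enable_percpu_irq().
 */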

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !desc->percpu_enabled)
		return -EINVAL;

	if (affinity)
		cpumask_copy(affinity, desc->percpu_affinity);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:	The interrupt number
 * @cpu:	The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

static bool irq_is_nmi(struct irq_desc *desc)
{
	return desc->istate & IRQS_NMI;
}

static unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;
	if (!irq_settings_is_per_cpu_devid(desc) &&
	    !irq_settings_is_per_cpu(desc) &&
	    !irq_is_nmi(desc))
		return data_race(desc->tot_count);

	for_each_possible_cpu(cpu)
		sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu));
	return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt from thread context
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for @irq.
 *
 * It uses rcu to protect the access since a concurrent removal of an
 * interrupt descriptor is observing an rcu grace period before
 * delayed_free_desc()/irq_kobj_release().
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int sum;

	rcu_read_lock();
	sum = kstat_irqs(irq);
	rcu_read_unlock();
	return sum;
}
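
/*
 * Example (illustrative): thread context readers such as /proc/stat sum
 * the counts through this helper:
 *
 *	unsigned int total = 0;
 *	int irq;
 *
 *	for_each_irq_nr(irq)
 *		total += kstat_irqs_usr(irq);
 *
 * The rcu read side taken above keeps the descriptor alive while its
 * per-CPU counters are summed.
 */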

#ifdef CONFIG_LOCKDEP
void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
			     struct lock_class_key *request_class)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		lockdep_set_class(&desc->lock, lock_class);
		lockdep_set_class(&desc->request_mutex, request_class);
	}
}
EXPORT_SYMBOL_GPL(__irq_set_lockdep_class);
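
/*
 * Example (illustrative, hypothetical key names): nested irqchip drivers
 * such as gpio expanders give their child interrupts a distinct lockdep
 * class via the irq_set_lockdep_class() wrapper from <linux/irq.h>:
 *
 *	static struct lock_class_key foo_lock_class;
 *	static struct lock_class_key foo_request_class;
 *
 *	irq_set_lockdep_class(virq, &foo_lock_class, &foo_request_class);
 */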
#endif