/*
 *  linux/arch/arm/kernel/irq.c
 *
 *  Copyright (C) 1992 Linus Torvalds
 *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 *  Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
 *  Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
 *  Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains the code used by various IRQ handling routines:
 *  asking for different IRQs should be done through these routines
 *  instead of just grabbing them. Thus setups with different IRQ numbers
 *  shouldn't result in any weird surprises, and installing new handlers
 *  should be easier.
 *
 *  IRQs are in fact implemented a bit like signal handlers for the kernel.
 *  Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
#include <linux/export.h>

#include <asm/exception.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

unsigned long irq_err_count;

/*
 * Architecture hook for /proc/interrupts: append the FIQ and IPI lists
 * (where configured) and the spurious-interrupt error count.
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_FIQ
	show_fiq_list(p, prec);
#endif
#ifdef CONFIG_SMP
	show_ipi_list(p, prec);
#endif
	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
	return 0;
}
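
/*
 * For illustration only (not from this file): with this hook wired into
 * the generic show_interrupts(), the tail of /proc/interrupts on an SMP
 * kernel ends with the per-IPI rows followed by the error count, roughly:
 *
 *	IPI0:         0        0  Timer broadcast interrupts
 *	IPI1:      1024     2048  Rescheduling interrupts
 *	Err:          0
 *
 * The IPI labels and counts above are made up for the example.
 */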

/*
 * handle_IRQ handles all hardware IRQs.  Decoded IRQs should
 * not come via this function.  Instead, they should provide their
 * own 'handler'.  Used by platform code implementing C-based
 * first-level decoding.
 */
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(irq >= nr_irqs)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
}
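
/*
 * Example (illustrative sketch, not part of this file): a platform doing
 * C-based first-level decoding might read its interrupt controller's
 * pending register and feed each asserted source to handle_IRQ(),
 * assuming a 1:1 hardware-to-Linux IRQ mapping.  The base address,
 * register offset and function names below are hypothetical.
 *
 *	static void my_soc_handle_irq(struct pt_regs *regs)
 *	{
 *		u32 pending;
 *
 *		while ((pending = readl_relaxed(my_soc_intc_base + 0x10))) {
 *			unsigned int irq = ffs(pending) - 1;
 *
 *			handle_IRQ(irq, regs);
 *		}
 *	}
 */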

/*
 * asm_do_IRQ is the interface to be used from assembly code.
 */
asmlinkage void __exception_irq_entry
asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	handle_IRQ(irq, regs);
}

/*
 * Translate the legacy ARM IRQF_VALID/IRQF_PROBE/IRQF_NOAUTOEN flags
 * into the generic IRQ_NOREQUEST/IRQ_NOPROBE/IRQ_NOAUTOEN status bits.
 */
void set_irq_flags(unsigned int irq, unsigned int iflags)
{
	unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;

	if (irq >= nr_irqs) {
		printk(KERN_ERR "Trying to set irq flags for IRQ%u\n", irq);
		return;
	}

	if (iflags & IRQF_VALID)
		clr |= IRQ_NOREQUEST;
	if (iflags & IRQF_PROBE)
		clr |= IRQ_NOPROBE;
	if (!(iflags & IRQF_NOAUTOEN))
		clr |= IRQ_NOAUTOEN;
	/* Order is clear bits in "clr" then set bits in "set" */
	irq_modify_status(irq, clr, set & ~clr);
}
EXPORT_SYMBOL_GPL(set_irq_flags);
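
/*
 * Typical use (sketch): ARM platform interrupt-controller setup marks a
 * freshly initialised IRQ as requestable and probeable after installing
 * its chip and flow handler.  my_chip below is a placeholder for the
 * platform's struct irq_chip:
 *
 *	irq_set_chip_and_handler(irq, &my_chip, handle_level_irq);
 *	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
 */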

void __init init_IRQ(void)
{
	machine_desc->init_irq();
}

#ifdef CONFIG_MULTI_IRQ_HANDLER
/*
 * Install the top-level IRQ entry point.  Only the first caller takes
 * effect; later calls are silently ignored.
 */
void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
	if (handle_arch_irq)
		return;

	handle_arch_irq = handle_irq;
}
#endif
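
/*
 * Example (sketch): on CONFIG_MULTI_IRQ_HANDLER kernels the primary
 * irqchip driver registers its entry point during interrupt-controller
 * init; the GIC driver, for instance, effectively does:
 *
 *	set_handle_irq(gic_handle_irq);
 *
 * Since only the first registration wins, the root controller must
 * register before any secondary one.
 */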

#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
	nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
	return nr_irqs;
}
#endif
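
/*
 * Example (sketch): a board file can size the IRQ space by filling in
 * .nr_irqs on its machine descriptor; otherwise the NR_IRQS default is
 * used.  MYBOARD_NR_IRQS and myboard_init_irq are placeholders.
 *
 *	MACHINE_START(MYBOARD, "My Board")
 *		.nr_irqs	= MYBOARD_NR_IRQS,
 *		.init_irq	= myboard_init_irq,
 *	MACHINE_END
 */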

#ifdef CONFIG_HOTPLUG_CPU

static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->affinity;
	struct irq_chip *c;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity)
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
		cpumask_copy(d->affinity, affinity);

	return ret;
}

/*
 * The current CPU has been marked offline.  Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void migrate_irqs(void)
{
	unsigned int i;
	struct irq_desc *desc;
	unsigned long flags;

	local_irq_save(flags);

	for_each_irq_desc(i, desc) {
		bool affinity_broken;

		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken && printk_ratelimit())
			pr_warn("IRQ%u no longer affine to CPU%u\n", i,
				smp_processor_id());
	}

	local_irq_restore(flags);
}
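
/*
 * For context (not defined here): in this kernel tree migrate_irqs() is
 * invoked from the CPU hotplug path, arch/arm/kernel/smp.c:__cpu_disable(),
 * after the dying CPU has been removed from the online mask, so that no
 * interrupt is left targeting an offline CPU.
 */
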
#endif /* CONFIG_HOTPLUG_CPU */
199