xref: /openbmc/linux/arch/x86/kernel/irqinit.c (revision 75f25bd3)
1 #include <linux/linkage.h>
2 #include <linux/errno.h>
3 #include <linux/signal.h>
4 #include <linux/sched.h>
5 #include <linux/ioport.h>
6 #include <linux/interrupt.h>
7 #include <linux/timex.h>
8 #include <linux/random.h>
9 #include <linux/kprobes.h>
10 #include <linux/init.h>
11 #include <linux/kernel_stat.h>
12 #include <linux/sysdev.h>
13 #include <linux/bitops.h>
14 #include <linux/acpi.h>
15 #include <linux/io.h>
16 #include <linux/delay.h>
17 
18 #include <linux/atomic.h>
19 #include <asm/system.h>
20 #include <asm/timer.h>
21 #include <asm/hw_irq.h>
22 #include <asm/pgtable.h>
23 #include <asm/desc.h>
24 #include <asm/apic.h>
25 #include <asm/setup.h>
26 #include <asm/i8259.h>
27 #include <asm/traps.h>
28 #include <asm/prom.h>
29 
30 /*
31  * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
32  * (these are usually mapped to vectors 0x30-0x3f)
33  */
34 
35 /*
36  * The IO-APIC gives us many more interrupt sources. Most of these
37  * are unused but an SMP system is supposed to have enough memory ...
38  * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
39  * across the spectrum, so we really want to be prepared to get all
40  * of these. Plus, more powerful systems might have more than 64
41  * IO-APIC registers.
42  *
43  * (these are usually mapped into the 0x30-0xff vector range)
44  */
45 
46 #ifdef CONFIG_X86_32
47 /*
48  * Note that on a 486, we don't want to do a SIGFPE on an irq13
49  * as the irq is unreliable, and exception 16 works correctly
50  * (ie as explained in the intel literature). On a 386, you
51  * can't use exception 16 due to bad IBM design, so we have to
52  * rely on the less exact irq13.
53  *
54  * Careful.. Not only is IRQ13 unreliable, but it is also
55  * leads to races. IBM designers who came up with it should
56  * be shot.
57  */
58 
/*
 * IRQ13 handler for external-FPU error reporting (the legacy FERR#
 * coupling described in the comment above).
 *
 * The write of 0 to port 0xF0 (the "ignore coprocessor error" port)
 * clears the latched FPU error condition; this must happen even when
 * the interrupt is subsequently ignored, which is why it precedes the
 * checks below.
 *
 * Returns IRQ_NONE when FPU IRQ handling is disabled (ignore_fpu_irq)
 * or no hardware FPU is present; otherwise reports the error via
 * math_error() (trap nr 16 — coprocessor error) and returns
 * IRQ_HANDLED.
 */
static irqreturn_t math_error_irq(int cpl, void *dev_id)
{
	outb(0, 0xF0);
	if (ignore_fpu_irq || !boot_cpu_data.hard_math)
		return IRQ_NONE;
	math_error(get_irq_regs(), 0, 16);
	return IRQ_HANDLED;
}
67 
68 /*
69  * New motherboards sometimes make IRQ 13 be a PCI interrupt,
70  * so allow interrupt sharing.
71  */
static struct irqaction fpu_irq = {
	.handler = math_error_irq,
	.name = "fpu",			/* shown in /proc/interrupts */
	.flags = IRQF_NO_THREAD,	/* never force-thread this handler */
};
77 #endif
78 
79 /*
80  * IRQ2 is cascade interrupt to second interrupt controller
81  */
static struct irqaction irq2 = {
	.handler = no_action,		/* cascade line carries no real events */
	.name = "cascade",		/* shown in /proc/interrupts */
	.flags = IRQF_NO_THREAD,	/* never force-thread this handler */
};
87 
/*
 * Per-CPU vector-to-irq translation table. A slot value of -1 means
 * the vector currently has no IRQ assigned on that CPU.
 */
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
	[0 ... NR_VECTORS - 1] = -1,
};
91 
92 int vector_used_by_percpu_irq(unsigned int vector)
93 {
94 	int cpu;
95 
96 	for_each_online_cpu(cpu) {
97 		if (per_cpu(vector_irq, cpu)[vector] != -1)
98 			return 1;
99 	}
100 
101 	return 0;
102 }
103 
104 void __init init_ISA_irqs(void)
105 {
106 	struct irq_chip *chip = legacy_pic->chip;
107 	const char *name = chip->name;
108 	int i;
109 
110 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
111 	init_bsp_APIC();
112 #endif
113 	legacy_pic->init(0);
114 
115 	for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
116 		irq_set_chip_and_handler_name(i, chip, handle_level_irq, name);
117 }
118 
/*
 * Boot-time IRQ setup: register the irq domains, seed the boot CPU's
 * vector_irq table for the legacy PIC range, then hand off to the
 * platform's interrupt-init hook.
 */
void __init init_IRQ(void)
{
	int i;

	/*
	 * We probably need a better place for this, but it works for
	 * now ...
	 */
	x86_add_irq_domains();

	/*
	 * On cpu 0, assign IRQ0_VECTOR..IRQ15_VECTOR to IRQ 0..15.
	 * If these IRQs are handled by a legacy interrupt controller
	 * like the PIC, this configuration will likely be static after
	 * boot. If they are handled by a more modern controller like
	 * the IO-APIC, this vector space can be freed and re-used
	 * dynamically as the irqs migrate etc.
	 */
	for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
		per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;

	/* Platform-specific interrupt setup hook. */
	x86_init.irqs.intr_init();
}
142 
143 /*
144  * Setup the vector to irq mappings.
145  */
146 void setup_vector_irq(int cpu)
147 {
148 #ifndef CONFIG_X86_IO_APIC
149 	int irq;
150 
151 	/*
152 	 * On most of the platforms, legacy PIC delivers the interrupts on the
153 	 * boot cpu. But there are certain platforms where PIC interrupts are
154 	 * delivered to multiple cpu's. If the legacy IRQ is handled by the
155 	 * legacy PIC, for the new cpu that is coming online, setup the static
156 	 * legacy vector to irq mapping:
157 	 */
158 	for (irq = 0; irq < legacy_pic->nr_legacy_irqs; irq++)
159 		per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
160 #endif
161 
162 	__setup_vector_irq(cpu);
163 }
164 
/*
 * Install the interrupt gates for the SMP inter-processor interrupts:
 * reschedule, TLB invalidation, function-call, irq-move cleanup and
 * reboot IPIs. Compiles to nothing on !CONFIG_SMP kernels.
 */
static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPIs for invalidation */
#define ALLOC_INVTLB_VEC(NR) \
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+NR, \
		invalidate_interrupt##NR)

	/*
	 * Deliberate fall-through switch (Duff's-device style): the
	 * case matching NUM_INVALIDATE_TLB_VECTORS is entered and
	 * every case below it also executes, so exactly that many
	 * invalidate_interruptNN gates get allocated. The numbered
	 * handlers are distinct symbols, hence the token-pasting
	 * macro rather than a plain loop. Do NOT add break statements.
	 */
	switch (NUM_INVALIDATE_TLB_VECTORS) {
	default:
		ALLOC_INVTLB_VEC(31);
	case 31:
		ALLOC_INVTLB_VEC(30);
	case 30:
		ALLOC_INVTLB_VEC(29);
	case 29:
		ALLOC_INVTLB_VEC(28);
	case 28:
		ALLOC_INVTLB_VEC(27);
	case 27:
		ALLOC_INVTLB_VEC(26);
	case 26:
		ALLOC_INVTLB_VEC(25);
	case 25:
		ALLOC_INVTLB_VEC(24);
	case 24:
		ALLOC_INVTLB_VEC(23);
	case 23:
		ALLOC_INVTLB_VEC(22);
	case 22:
		ALLOC_INVTLB_VEC(21);
	case 21:
		ALLOC_INVTLB_VEC(20);
	case 20:
		ALLOC_INVTLB_VEC(19);
	case 19:
		ALLOC_INVTLB_VEC(18);
	case 18:
		ALLOC_INVTLB_VEC(17);
	case 17:
		ALLOC_INVTLB_VEC(16);
	case 16:
		ALLOC_INVTLB_VEC(15);
	case 15:
		ALLOC_INVTLB_VEC(14);
	case 14:
		ALLOC_INVTLB_VEC(13);
	case 13:
		ALLOC_INVTLB_VEC(12);
	case 12:
		ALLOC_INVTLB_VEC(11);
	case 11:
		ALLOC_INVTLB_VEC(10);
	case 10:
		ALLOC_INVTLB_VEC(9);
	case 9:
		ALLOC_INVTLB_VEC(8);
	case 8:
		ALLOC_INVTLB_VEC(7);
	case 7:
		ALLOC_INVTLB_VEC(6);
	case 6:
		ALLOC_INVTLB_VEC(5);
	case 5:
		ALLOC_INVTLB_VEC(4);
	case 4:
		ALLOC_INVTLB_VEC(3);
	case 3:
		ALLOC_INVTLB_VEC(2);
	case 2:
		ALLOC_INVTLB_VEC(1);
	case 1:
		ALLOC_INVTLB_VEC(0);
		break;
	}

	/* IPI for generic function call */
	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);

	/* IPI for generic single function call */
	alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
			call_function_single_interrupt);

	/* Low priority IPI to cleanup after moving an irq */
	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
	set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);

	/* IPI used for rebooting/stopping */
	alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt);
#endif
#endif /* CONFIG_SMP */
}
264 
/*
 * Install the interrupt gates for the local APIC and related
 * machinery. SMP IPIs first, then the thermal/threshold vectors,
 * then the per-APIC timer, platform IPI, spurious/error and irq-work
 * vectors — each only when the matching config option is enabled.
 */
static void __init apic_intr_init(void)
{
	smp_intr_init();

#ifdef CONFIG_X86_THERMAL_VECTOR
	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#endif

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
	/* self generated IPI for local APIC timer */
	alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);

	/* IPI for X86 platform specific use */
	alloc_intr_gate(X86_PLATFORM_IPI_VECTOR, x86_platform_ipi);

	/* IPI vectors for APIC spurious and error interrupts */
	alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);

	/* IRQ work interrupts: */
# ifdef CONFIG_IRQ_WORK
	alloc_intr_gate(IRQ_WORK_VECTOR, irq_work_interrupt);
# endif

#endif
}
294 
295 void __init native_init_IRQ(void)
296 {
297 	int i;
298 
299 	/* Execute any quirks before the call gates are initialised: */
300 	x86_init.irqs.pre_vector_init();
301 
302 	apic_intr_init();
303 
304 	/*
305 	 * Cover the whole vector space, no vector can escape
306 	 * us. (some of these will be overridden and become
307 	 * 'special' SMP interrupts)
308 	 */
309 	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
310 		/* IA32_SYSCALL_VECTOR could be used in trap_init already. */
311 		if (!test_bit(i, used_vectors))
312 			set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
313 	}
314 
315 	if (!acpi_ioapic && !of_ioapic)
316 		setup_irq(2, &irq2);
317 
318 #ifdef CONFIG_X86_32
319 	/*
320 	 * External FPU? Set up irq13 if so, for
321 	 * original braindamaged IBM FERR coupling.
322 	 */
323 	if (boot_cpu_data.hard_math && !cpu_has_fpu)
324 		setup_irq(FPU_IRQ, &fpu_irq);
325 
326 	irq_ctx_init(smp_processor_id());
327 #endif
328 }
329