#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/kprobes.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/timer.h>
#include <asm/hw_irq.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/setup.h>
#include <asm/i8259.h>
#include <asm/traps.h>

/*
 * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
 * (these are usually mapped to vectors 0x30-0x3f)
 */

/*
 * The IO-APIC gives us many more interrupt sources. Most of these
 * are unused but an SMP system is supposed to have enough memory ...
 * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
 * across the spectrum, so we really want to be prepared to get all
 * of these. Plus, more powerful systems might have more than 64
 * IO-APIC registers.
 *
 * (these are usually mapped into the 0x30-0xff vector range)
 */

#ifdef CONFIG_X86_32
/*
 * Note that on a 486, we don't want to do a SIGFPE on an irq13
 * as the irq is unreliable, and exception 16 works correctly
 * (ie as explained in the intel literature). On a 386, you
 * can't use exception 16 due to bad IBM design, so we have to
 * rely on the less exact irq13.
 *
 * Careful.. Not only is IRQ13 unreliable, it also leads to
 * races. The IBM designers who came up with it should be shot.
 */

static irqreturn_t math_error_irq(int cpl, void *dev_id)
{
        /* Writing to port 0xF0 clears the latched FERR (IRQ13) signal. */
        outb(0, 0xF0);
        if (ignore_fpu_irq || !boot_cpu_data.hard_math)
                return IRQ_NONE;
        math_error(get_irq_regs(), 0, 16);
        return IRQ_HANDLED;
}

/*
 * New motherboards sometimes make IRQ 13 a PCI interrupt,
 * so allow interrupt sharing.
 */
static struct irqaction fpu_irq = {
        .handler = math_error_irq,
        .name = "fpu",
};
#endif

/*
 * IRQ2 is the cascade interrupt to the second interrupt controller.
 */
static struct irqaction irq2 = {
        .handler = no_action,
        .name = "cascade",
};

DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
        [0 ... NR_VECTORS - 1] = -1,
};

/* Return 1 if @vector is used as a per-cpu irq on any online CPU. */
int vector_used_by_percpu_irq(unsigned int vector)
{
        int cpu;

        for_each_online_cpu(cpu) {
                if (per_cpu(vector_irq, cpu)[vector] != -1)
                        return 1;
        }

        return 0;
}

void __init init_ISA_irqs(void)
{
        struct irq_chip *chip = legacy_pic->chip;
        const char *name = chip->name;
        int i;

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
        init_bsp_APIC();
#endif
        legacy_pic->init(0);

        for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
                set_irq_chip_and_handler_name(i, chip, handle_level_irq, name);
}

void __init init_IRQ(void)
{
        int i;

        /*
         * On CPU 0, assign IRQ0_VECTOR..IRQ15_VECTOR to IRQs 0..15.
         * If these IRQs are handled by legacy interrupt controllers like
         * the PIC, this configuration will likely remain static after boot.
         * If they are handled by more modern controllers like the IO-APIC,
         * this vector space can be freed and reused dynamically as the
         * IRQs migrate etc.
         */
        for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
                per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;

        x86_init.irqs.intr_init();
}

/*
 * Set up the vector-to-irq mappings.
 */
void setup_vector_irq(int cpu)
{
#ifndef CONFIG_X86_IO_APIC
        int irq;

        /*
         * On most platforms the legacy PIC delivers interrupts to the
         * boot cpu only, but on some platforms PIC interrupts are
         * delivered to multiple cpus. If the legacy IRQs are handled by
         * the legacy PIC, set up the static legacy vector-to-irq mapping
         * for the new cpu that is coming online:
         */
        for (irq = 0; irq < legacy_pic->nr_legacy_irqs; irq++)
                per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
#endif

        __setup_vector_irq(cpu);
}

static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
        /*
         * The reschedule interrupt is a CPU-to-CPU reschedule-helper
         * IPI, driven by wakeup.
         */
        alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

        /* IPIs for TLB invalidation */
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);

        /* IPI for generic function call */
        alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);

        /* IPI for generic single function call */
        alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
                        call_function_single_interrupt);

        /* Low priority IPI to clean up after moving an irq */
        set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
        set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);

        /* IPI used for rebooting/stopping */
        alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt);
#endif
#endif /* CONFIG_SMP */
}

static void __init apic_intr_init(void)
{
        smp_intr_init();

#ifdef CONFIG_X86_THERMAL_VECTOR
        alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#endif
#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_LOCAL_APIC)
        alloc_intr_gate(MCE_SELF_VECTOR, mce_self_interrupt);
#endif

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
        /* self generated IPI for local APIC timer */
        alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);

        /* IPI for x86 platform specific use */
        alloc_intr_gate(X86_PLATFORM_IPI_VECTOR, x86_platform_ipi);

        /* IPI vectors for APIC spurious and error interrupts */
        alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
        alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);

        /* IRQ work interrupts: */
# ifdef CONFIG_IRQ_WORK
        alloc_intr_gate(IRQ_WORK_VECTOR, irq_work_interrupt);
# endif

#endif
}

void __init native_init_IRQ(void)
{
        int i;

        /* Execute any quirks before the call gates are initialised: */
        x86_init.irqs.pre_vector_init();

        apic_intr_init();

        /*
         * Cover the whole vector space, no vector can escape
         * us. (some of these will be overridden and become
         * 'special' SMP interrupts)
         */
        for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
                /* IA32_SYSCALL_VECTOR could be used in trap_init already. */
                if (!test_bit(i, used_vectors))
                        set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
        }

        if (!acpi_ioapic)
                setup_irq(2, &irq2);

#ifdef CONFIG_X86_32
        /*
         * External FPU? Set up irq13 if so, for
         * original braindamaged IBM FERR coupling.
         */
        if (boot_cpu_data.hard_math && !cpu_has_fpu)
                setup_irq(FPU_IRQ, &fpu_irq);

        irq_ctx_init(smp_processor_id());
#endif
}