// SPDX-License-Identifier: GPL-2.0-only
/*
 * Interrupt descriptor table related code
 */
#include <linux/interrupt.h>

#include <asm/cpu_entry_area.h>
#include <asm/set_memory.h>
#include <asm/traps.h>
#include <asm/proto.h>
#include <asm/desc.h>
#include <asm/hw_irq.h>

#define DPL0		0x0
#define DPL3		0x3

#define DEFAULT_STACK	0

#define G(_vector, _addr, _ist, _type, _dpl, _segment)	\
	{						\
		.vector		= _vector,		\
		.bits.ist	= _ist,			\
		.bits.type	= _type,		\
		.bits.dpl	= _dpl,			\
		.bits.p		= 1,			\
		.addr		= _addr,		\
		.segment	= _segment,		\
	}

/* Interrupt gate */
#define INTG(_vector, _addr)				\
	G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL0, __KERNEL_CS)

/* System interrupt gate */
#define SYSG(_vector, _addr)				\
	G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL3, __KERNEL_CS)

#ifdef CONFIG_X86_64
/*
 * Interrupt gate with interrupt stack. The _ist index is the index in
 * the tss.ist[] array, but for the descriptor it needs to start at 1.
 */
#define ISTG(_vector, _addr, _ist)			\
	G(_vector, _addr, _ist + 1, GATE_INTERRUPT, DPL0, __KERNEL_CS)
#else
#define ISTG(_vector, _addr, _ist)	INTG(_vector, _addr)
#endif

/* Task gate */
#define TSKG(_vector, _gdt)				\
	G(_vector, NULL, DEFAULT_STACK, GATE_TASK, DPL0, _gdt << 3)

#define IDT_TABLE_SIZE		(IDT_ENTRIES * sizeof(gate_desc))

static bool idt_setup_done __initdata;

/*
 * Early traps running on the DEFAULT_STACK because the other interrupt
 * stacks work only after cpu_init().
 */
static const __initconst struct idt_data early_idts[] = {
	INTG(X86_TRAP_DB,		asm_exc_debug),
	SYSG(X86_TRAP_BP,		asm_exc_int3),

#ifdef CONFIG_X86_32
	/*
	 * Not possible on 64-bit. See idt_setup_early_pf() for details.
	 */
	INTG(X86_TRAP_PF,		asm_exc_page_fault),
#endif
};
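
/*
 * For illustration only (not part of the build): an entry such as
 * INTG(X86_TRAP_DB, asm_exc_debug) above expands via G() to the
 * following struct idt_data initializer:
 *
 *	{
 *		.vector		= X86_TRAP_DB,
 *		.bits.ist	= DEFAULT_STACK,
 *		.bits.type	= GATE_INTERRUPT,
 *		.bits.dpl	= DPL0,
 *		.bits.p		= 1,
 *		.addr		= asm_exc_debug,
 *		.segment	= __KERNEL_CS,
 *	}
 */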

/*
 * The default IDT entries which are set up in trap_init() before
 * cpu_init() is invoked. Interrupt stacks cannot be used at that point and
 * the traps which use them are reinitialized with IST after cpu_init() has
 * set up TSS.
 */
static const __initconst struct idt_data def_idts[] = {
	INTG(X86_TRAP_DE,		asm_exc_divide_error),
	ISTG(X86_TRAP_NMI,		asm_exc_nmi, IST_INDEX_NMI),
	INTG(X86_TRAP_BR,		asm_exc_bounds),
	INTG(X86_TRAP_UD,		asm_exc_invalid_op),
	INTG(X86_TRAP_NM,		asm_exc_device_not_available),
	INTG(X86_TRAP_OLD_MF,		asm_exc_coproc_segment_overrun),
	INTG(X86_TRAP_TS,		asm_exc_invalid_tss),
	INTG(X86_TRAP_NP,		asm_exc_segment_not_present),
	INTG(X86_TRAP_SS,		asm_exc_stack_segment),
	INTG(X86_TRAP_GP,		asm_exc_general_protection),
	INTG(X86_TRAP_SPURIOUS,		asm_exc_spurious_interrupt_bug),
	INTG(X86_TRAP_MF,		asm_exc_coprocessor_error),
	INTG(X86_TRAP_AC,		asm_exc_alignment_check),
	INTG(X86_TRAP_XF,		asm_exc_simd_coprocessor_error),

#ifdef CONFIG_X86_32
	TSKG(X86_TRAP_DF,		GDT_ENTRY_DOUBLEFAULT_TSS),
#else
	ISTG(X86_TRAP_DF,		asm_exc_double_fault, IST_INDEX_DF),
#endif
	ISTG(X86_TRAP_DB,		asm_exc_debug, IST_INDEX_DB),

#ifdef CONFIG_X86_MCE
	ISTG(X86_TRAP_MC,		asm_exc_machine_check, IST_INDEX_MCE),
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
	ISTG(X86_TRAP_VC,		asm_exc_vmm_communication, IST_INDEX_VC),
#endif

	SYSG(X86_TRAP_OF,		asm_exc_overflow),
#if defined(CONFIG_IA32_EMULATION)
	SYSG(IA32_SYSCALL_VECTOR,	entry_INT80_compat),
#elif defined(CONFIG_X86_32)
	SYSG(IA32_SYSCALL_VECTOR,	entry_INT80_32),
#endif
};

/*
 * The APIC and SMP idt entries
 */
static const __initconst struct idt_data apic_idts[] = {
#ifdef CONFIG_SMP
	INTG(RESCHEDULE_VECTOR,			asm_sysvec_reschedule_ipi),
	INTG(CALL_FUNCTION_VECTOR,		asm_sysvec_call_function),
	INTG(CALL_FUNCTION_SINGLE_VECTOR,	asm_sysvec_call_function_single),
	INTG(IRQ_MOVE_CLEANUP_VECTOR,		asm_sysvec_irq_move_cleanup),
	INTG(REBOOT_VECTOR,			asm_sysvec_reboot),
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
	INTG(THERMAL_APIC_VECTOR,		asm_sysvec_thermal),
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
	INTG(THRESHOLD_APIC_VECTOR,		asm_sysvec_threshold),
#endif

#ifdef CONFIG_X86_MCE_AMD
	INTG(DEFERRED_ERROR_VECTOR,		asm_sysvec_deferred_error),
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	INTG(LOCAL_TIMER_VECTOR,		asm_sysvec_apic_timer_interrupt),
	INTG(X86_PLATFORM_IPI_VECTOR,		asm_sysvec_x86_platform_ipi),
# ifdef CONFIG_HAVE_KVM
	INTG(POSTED_INTR_VECTOR,		asm_sysvec_kvm_posted_intr_ipi),
	INTG(POSTED_INTR_WAKEUP_VECTOR,		asm_sysvec_kvm_posted_intr_wakeup_ipi),
	INTG(POSTED_INTR_NESTED_VECTOR,		asm_sysvec_kvm_posted_intr_nested_ipi),
# endif
# ifdef CONFIG_IRQ_WORK
	INTG(IRQ_WORK_VECTOR,			asm_sysvec_irq_work),
# endif
	INTG(SPURIOUS_APIC_VECTOR,		asm_sysvec_spurious_apic_interrupt),
	INTG(ERROR_APIC_VECTOR,			asm_sysvec_error_interrupt),
#endif
};

/* Must be page-aligned because the real IDT is used in the cpu entry area */
static gate_desc idt_table[IDT_ENTRIES] __page_aligned_bss;

static struct desc_ptr idt_descr __ro_after_init = {
	.size		= IDT_TABLE_SIZE - 1,
	.address	= (unsigned long) idt_table,
};

void load_current_idt(void)
{
	lockdep_assert_irqs_disabled();
	load_idt(&idt_descr);
}
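
/*
 * Pentium F00F erratum handling (descriptive note): with the IDT mapped
 * read-only, the bogus "F0 0F" opcode sequence results in a page fault on
 * the IDT instead of a hard CPU hang.  The page fault code uses the helper
 * below to recognize that case: gate descriptors are 8 bytes on 32-bit, so
 * shifting the offset into the IDT right by 3 yields the vector number,
 * and vector 6 is #UD (X86_TRAP_UD), the gate which the erratum hits.
 */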

#ifdef CONFIG_X86_F00F_BUG
bool idt_is_f00f_address(unsigned long address)
{
	return ((address - idt_descr.address) >> 3) == 6;
}
#endif

static __init void
idt_setup_from_table(gate_desc *idt, const struct idt_data *t, int size, bool sys)
{
	gate_desc desc;

	for (; size > 0; t++, size--) {
		idt_init_desc(&desc, t);
		write_idt_entry(idt, t->vector, &desc);
		if (sys)
			set_bit(t->vector, system_vectors);
	}
}

static __init void set_intr_gate(unsigned int n, const void *addr)
{
	struct idt_data data;

	init_idt_data(&data, n, addr);

	idt_setup_from_table(idt_table, &data, 1, false);
}

/**
 * idt_setup_early_traps - Initialize the idt table with early traps
 *
 * On X86_64 these traps do not use interrupt stacks as they can't work
 * before cpu_init() is invoked and sets up TSS. The IST variants are
 * installed after that.
 */
void __init idt_setup_early_traps(void)
{
	idt_setup_from_table(idt_table, early_idts, ARRAY_SIZE(early_idts),
			     true);
	load_idt(&idt_descr);
}

/**
 * idt_setup_traps - Initialize the idt table with default traps
 */
void __init idt_setup_traps(void)
{
	idt_setup_from_table(idt_table, def_idts, ARRAY_SIZE(def_idts), true);
}

#ifdef CONFIG_X86_64
/*
 * Early traps running on the DEFAULT_STACK because the other interrupt
 * stacks work only after cpu_init().
 */
static const __initconst struct idt_data early_pf_idts[] = {
	INTG(X86_TRAP_PF,		asm_exc_page_fault),
};

/**
 * idt_setup_early_pf - Initialize the idt table with early pagefault handler
 *
 * On X86_64 this does not use interrupt stacks as they can't work before
 * cpu_init() is invoked and sets up TSS. The IST variant is installed
 * after that.
 *
 * Note that X86_64 cannot install the real #PF handler in
 * idt_setup_early_traps() because the memory initialization needs the #PF
 * handler from the early_idt_handler_array to initialize the early page
 * tables.
 */
void __init idt_setup_early_pf(void)
{
	idt_setup_from_table(idt_table, early_pf_idts,
			     ARRAY_SIZE(early_pf_idts), true);
}
#endif

static void __init idt_map_in_cea(void)
{
	/*
	 * Set the IDT descriptor to a fixed read-only location in the cpu
	 * entry area, so that the "sidt" instruction will not leak the
	 * location of the kernel, and to defend the IDT against arbitrary
	 * memory write vulnerabilities.
	 */
	cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
		    PAGE_KERNEL_RO);
	idt_descr.address = CPU_ENTRY_AREA_RO_IDT;
}

/**
 * idt_setup_apic_and_irq_gates - Setup APIC/SMP and normal interrupt gates
 */
void __init idt_setup_apic_and_irq_gates(void)
{
	int i = FIRST_EXTERNAL_VECTOR;
	void *entry;

	idt_setup_from_table(idt_table, apic_idts, ARRAY_SIZE(apic_idts), true);

	for_each_clear_bit_from(i, system_vectors, FIRST_SYSTEM_VECTOR) {
		entry = irq_entries_start + 8 * (i - FIRST_EXTERNAL_VECTOR);
		set_intr_gate(i, entry);
	}

#ifdef CONFIG_X86_LOCAL_APIC
	for_each_clear_bit_from(i, system_vectors, NR_VECTORS) {
		/*
		 * Don't set the non-assigned system vectors in the
		 * system_vectors bitmap. Otherwise they show up in
		 * /proc/interrupts.
		 */
		entry = spurious_entries_start + 8 * (i - FIRST_SYSTEM_VECTOR);
		set_intr_gate(i, entry);
	}
#endif
	/* Map IDT into CPU entry area and reload it */
	idt_map_in_cea();
	load_idt(&idt_descr);

	/* Make the IDT table read only */
	set_memory_ro((unsigned long)&idt_table, 1);

	idt_setup_done = true;
}

/**
 * idt_setup_early_handler - Initializes the idt table with early handlers
 */
void __init idt_setup_early_handler(void)
{
	int i;

	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
		set_intr_gate(i, early_idt_handler_array[i]);
#ifdef CONFIG_X86_32
	for ( ; i < NR_VECTORS; i++)
		set_intr_gate(i, early_ignore_irq);
#endif
	load_idt(&idt_descr);
}

/**
 * idt_invalidate - Invalidate interrupt descriptor table
 */
void idt_invalidate(void)
{
	static const struct desc_ptr idt = { .address = 0, .size = 0 };

	load_idt(&idt);
}
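
/*
 * Usage sketch (illustrative only, not taken from this file): vectors at
 * or above FIRST_SYSTEM_VECTOR are handed out with alloc_intr_gate() during
 * early boot, before idt_setup_apic_and_irq_gates() has set idt_setup_done.
 * A hypothetical caller would look like:
 *
 *	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_my_callback);
 *
 * where asm_sysvec_my_callback is a made-up name standing in for an entry
 * stub declared via DECLARE_IDTENTRY_SYSVEC().
 */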

void __init alloc_intr_gate(unsigned int n, const void *addr)
{
	if (WARN_ON(n < FIRST_SYSTEM_VECTOR))
		return;

	if (WARN_ON(idt_setup_done))
		return;

	if (!WARN_ON(test_and_set_bit(n, system_vectors)))
		set_intr_gate(n, addr);
}