// SPDX-License-Identifier: GPL-2.0-only
/*
 * Interrupt descriptor table related code
 */
#include <linux/interrupt.h>

#include <asm/cpu_entry_area.h>
#include <asm/set_memory.h>
#include <asm/traps.h>
#include <asm/proto.h>
#include <asm/desc.h>
#include <asm/hw_irq.h>
#include <asm/idtentry.h>

#define DPL0		0x0
#define DPL3		0x3

#define DEFAULT_STACK	0

#define G(_vector, _addr, _ist, _type, _dpl, _segment)	\
	{						\
		.vector		= _vector,		\
		.bits.ist	= _ist,			\
		.bits.type	= _type,		\
		.bits.dpl	= _dpl,			\
		.bits.p		= 1,			\
		.addr		= _addr,		\
		.segment	= _segment,		\
	}

/* Interrupt gate */
#define INTG(_vector, _addr)				\
	G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL0, __KERNEL_CS)

/* System interrupt gate */
#define SYSG(_vector, _addr)				\
	G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL3, __KERNEL_CS)

#ifdef CONFIG_X86_64
/*
 * Interrupt gate with interrupt stack. The _ist index is the index in
 * the tss.ist[] array, but for the descriptor it needs to start at 1.
 */
#define ISTG(_vector, _addr, _ist)			\
	G(_vector, _addr, _ist + 1, GATE_INTERRUPT, DPL0, __KERNEL_CS)
#else
#define ISTG(_vector, _addr, _ist)	INTG(_vector, _addr)
#endif

/* Task gate */
#define TSKG(_vector, _gdt)				\
	G(_vector, NULL, DEFAULT_STACK, GATE_TASK, DPL0, _gdt << 3)

#define IDT_TABLE_SIZE		(IDT_ENTRIES * sizeof(gate_desc))
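/*
 * Illustrative note: INTG() builds a DPL0 interrupt gate which runs on the
 * current kernel stack and cannot be raised from user space with the INT
 * instruction. SYSG() differs only in using DPL3, which is required for the
 * gates user space invokes directly (#BP, #OF, int 0x80). ISTG() additionally
 * selects one of the per-CPU IST stacks on 64-bit.
 */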
static bool idt_setup_done __initdata;

/*
 * Early traps running on the DEFAULT_STACK because the other interrupt
 * stacks work only after cpu_init().
 */
static const __initconst struct idt_data early_idts[] = {
	INTG(X86_TRAP_DB,		asm_exc_debug),
	SYSG(X86_TRAP_BP,		asm_exc_int3),

#ifdef CONFIG_X86_32
	/*
	 * Not possible on 64-bit. See idt_setup_early_pf() for details.
	 */
	INTG(X86_TRAP_PF,		asm_exc_page_fault),
#endif
#ifdef CONFIG_INTEL_TDX_GUEST
	INTG(X86_TRAP_VE,		asm_exc_virtualization_exception),
#endif
};

/*
 * The default IDT entries which are set up in trap_init() before
 * cpu_init() is invoked. Interrupt stacks cannot be used at that point and
 * the traps which use them are reinitialized with IST after cpu_init() has
 * set up TSS.
 */
static const __initconst struct idt_data def_idts[] = {
	INTG(X86_TRAP_DE,		asm_exc_divide_error),
	ISTG(X86_TRAP_NMI,		asm_exc_nmi, IST_INDEX_NMI),
	INTG(X86_TRAP_BR,		asm_exc_bounds),
	INTG(X86_TRAP_UD,		asm_exc_invalid_op),
	INTG(X86_TRAP_NM,		asm_exc_device_not_available),
	INTG(X86_TRAP_OLD_MF,		asm_exc_coproc_segment_overrun),
	INTG(X86_TRAP_TS,		asm_exc_invalid_tss),
	INTG(X86_TRAP_NP,		asm_exc_segment_not_present),
	INTG(X86_TRAP_SS,		asm_exc_stack_segment),
	INTG(X86_TRAP_GP,		asm_exc_general_protection),
	INTG(X86_TRAP_SPURIOUS,		asm_exc_spurious_interrupt_bug),
	INTG(X86_TRAP_MF,		asm_exc_coprocessor_error),
	INTG(X86_TRAP_AC,		asm_exc_alignment_check),
	INTG(X86_TRAP_XF,		asm_exc_simd_coprocessor_error),

#ifdef CONFIG_X86_32
	TSKG(X86_TRAP_DF,		GDT_ENTRY_DOUBLEFAULT_TSS),
#else
	ISTG(X86_TRAP_DF,		asm_exc_double_fault, IST_INDEX_DF),
#endif
	ISTG(X86_TRAP_DB,		asm_exc_debug, IST_INDEX_DB),

#ifdef CONFIG_X86_MCE
	ISTG(X86_TRAP_MC,		asm_exc_machine_check, IST_INDEX_MCE),
#endif

#ifdef CONFIG_X86_CET
	INTG(X86_TRAP_CP,		asm_exc_control_protection),
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
	ISTG(X86_TRAP_VC,		asm_exc_vmm_communication, IST_INDEX_VC),
#endif

	SYSG(X86_TRAP_OF,		asm_exc_overflow),
#if defined(CONFIG_IA32_EMULATION)
	SYSG(IA32_SYSCALL_VECTOR,	asm_int80_emulation),
#elif defined(CONFIG_X86_32)
	SYSG(IA32_SYSCALL_VECTOR,	entry_INT80_32),
#endif
};

/*
 * The APIC and SMP idt entries
 */
static const __initconst struct idt_data apic_idts[] = {
#ifdef CONFIG_SMP
	INTG(RESCHEDULE_VECTOR,			asm_sysvec_reschedule_ipi),
	INTG(CALL_FUNCTION_VECTOR,		asm_sysvec_call_function),
	INTG(CALL_FUNCTION_SINGLE_VECTOR,	asm_sysvec_call_function_single),
	INTG(REBOOT_VECTOR,			asm_sysvec_reboot),
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
	INTG(THERMAL_APIC_VECTOR,		asm_sysvec_thermal),
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
	INTG(THRESHOLD_APIC_VECTOR,		asm_sysvec_threshold),
#endif

#ifdef CONFIG_X86_MCE_AMD
	INTG(DEFERRED_ERROR_VECTOR,		asm_sysvec_deferred_error),
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	INTG(LOCAL_TIMER_VECTOR,		asm_sysvec_apic_timer_interrupt),
	INTG(X86_PLATFORM_IPI_VECTOR,		asm_sysvec_x86_platform_ipi),
# ifdef CONFIG_HAVE_KVM
	INTG(POSTED_INTR_VECTOR,		asm_sysvec_kvm_posted_intr_ipi),
	INTG(POSTED_INTR_WAKEUP_VECTOR,		asm_sysvec_kvm_posted_intr_wakeup_ipi),
	INTG(POSTED_INTR_NESTED_VECTOR,		asm_sysvec_kvm_posted_intr_nested_ipi),
# endif
# ifdef CONFIG_IRQ_WORK
	INTG(IRQ_WORK_VECTOR,			asm_sysvec_irq_work),
# endif
	INTG(SPURIOUS_APIC_VECTOR,		asm_sysvec_spurious_apic_interrupt),
	INTG(ERROR_APIC_VECTOR,			asm_sysvec_error_interrupt),
#endif
};

/* Must be page-aligned because the real IDT is used in the cpu entry area */
static gate_desc idt_table[IDT_ENTRIES] __page_aligned_bss;

static struct desc_ptr idt_descr __ro_after_init = {
	.size		= IDT_TABLE_SIZE - 1,
	.address	= (unsigned long) idt_table,
};

void load_current_idt(void)
{
	lockdep_assert_irqs_disabled();
	load_idt(&idt_descr);
}

#ifdef CONFIG_X86_F00F_BUG
bool idt_is_f00f_address(unsigned long address)
{
	return ((address - idt_descr.address) >> 3) == 6;
}
#endif
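/*
 * Write each entry of an idt_data table into the IDT. When @sys is true,
 * the vectors are also marked in the system_vectors bitmap so the vector
 * allocator never hands them out as device interrupt vectors.
 */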
static __init void
idt_setup_from_table(gate_desc *idt, const struct idt_data *t, int size, bool sys)
{
	gate_desc desc;

	for (; size > 0; t++, size--) {
		idt_init_desc(&desc, t);
		write_idt_entry(idt, t->vector, &desc);
		if (sys)
			set_bit(t->vector, system_vectors);
	}
}

static __init void set_intr_gate(unsigned int n, const void *addr)
{
	struct idt_data data;

	init_idt_data(&data, n, addr);

	idt_setup_from_table(idt_table, &data, 1, false);
}

/**
 * idt_setup_early_traps - Initialize the idt table with early traps
 *
 * On X86_64 these traps do not use interrupt stacks as they can't work
 * before cpu_init() is invoked and sets up TSS. The IST variants are
 * installed after that.
 */
void __init idt_setup_early_traps(void)
{
	idt_setup_from_table(idt_table, early_idts, ARRAY_SIZE(early_idts),
			     true);
	load_idt(&idt_descr);
}

/**
 * idt_setup_traps - Initialize the idt table with default traps
 */
void __init idt_setup_traps(void)
{
	idt_setup_from_table(idt_table, def_idts, ARRAY_SIZE(def_idts), true);
}

#ifdef CONFIG_X86_64
/*
 * Early traps running on the DEFAULT_STACK because the other interrupt
 * stacks work only after cpu_init().
 */
static const __initconst struct idt_data early_pf_idts[] = {
	INTG(X86_TRAP_PF,		asm_exc_page_fault),
};

/**
 * idt_setup_early_pf - Initialize the idt table with early pagefault handler
 *
 * On X86_64 this does not use interrupt stacks as they can't work before
 * cpu_init() is invoked and sets up TSS. The IST variant is installed
 * after that.
 *
 * Note that X86_64 cannot install the real #PF handler in
 * idt_setup_early_traps() because the memory initialization needs the #PF
 * handler from the early_idt_handler_array to initialize the early page
 * tables.
 */
void __init idt_setup_early_pf(void)
{
	idt_setup_from_table(idt_table, early_pf_idts,
			     ARRAY_SIZE(early_pf_idts), true);
}
#endif

static void __init idt_map_in_cea(void)
{
	/*
	 * Set the IDT descriptor to a fixed read-only location in the cpu
	 * entry area, so that the "sidt" instruction will not leak the
	 * location of the kernel, and to defend the IDT against arbitrary
	 * memory write vulnerabilities.
	 */
	cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
		    PAGE_KERNEL_RO);
	idt_descr.address = CPU_ENTRY_AREA_RO_IDT;
}
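/*
 * The low-level stubs emitted in irq_entries_start and spurious_entries_start
 * are IDT_ALIGN bytes apart, so the stub for a given vector is found at the
 * respective base plus IDT_ALIGN times the vector offset within the block.
 */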
/**
 * idt_setup_apic_and_irq_gates - Setup APIC/SMP and normal interrupt gates
 */
void __init idt_setup_apic_and_irq_gates(void)
{
	int i = FIRST_EXTERNAL_VECTOR;
	void *entry;

	idt_setup_from_table(idt_table, apic_idts, ARRAY_SIZE(apic_idts), true);

	for_each_clear_bit_from(i, system_vectors, FIRST_SYSTEM_VECTOR) {
		entry = irq_entries_start + IDT_ALIGN * (i - FIRST_EXTERNAL_VECTOR);
		set_intr_gate(i, entry);
	}

#ifdef CONFIG_X86_LOCAL_APIC
	for_each_clear_bit_from(i, system_vectors, NR_VECTORS) {
		/*
		 * Don't set the unassigned system vectors in the
		 * system_vectors bitmap. Otherwise they show up in
		 * /proc/interrupts.
		 */
		entry = spurious_entries_start + IDT_ALIGN * (i - FIRST_SYSTEM_VECTOR);
		set_intr_gate(i, entry);
	}
#endif
	/* Map IDT into CPU entry area and reload it. */
	idt_map_in_cea();
	load_idt(&idt_descr);

	/* Make the IDT table read only */
	set_memory_ro((unsigned long)&idt_table, 1);

	idt_setup_done = true;
}

/**
 * idt_setup_early_handler - Initialize the idt table with early handlers
 */
void __init idt_setup_early_handler(void)
{
	int i;

	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
		set_intr_gate(i, early_idt_handler_array[i]);
#ifdef CONFIG_X86_32
	for ( ; i < NR_VECTORS; i++)
		set_intr_gate(i, early_ignore_irq);
#endif
	load_idt(&idt_descr);
}

/**
 * idt_invalidate - Invalidate interrupt descriptor table
 */
void idt_invalidate(void)
{
	static const struct desc_ptr idt = { .address = 0, .size = 0 };

	load_idt(&idt);
}

void __init alloc_intr_gate(unsigned int n, const void *addr)
{
	if (WARN_ON(n < FIRST_SYSTEM_VECTOR))
		return;

	if (WARN_ON(idt_setup_done))
		return;

	if (!WARN_ON(test_and_set_bit(n, system_vectors)))
		set_intr_gate(n, addr);
}