// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

atomic_t irq_err_count;

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * Each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}

#define irq_stats(x)	(&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_puts(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_puts(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_puts(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_puts(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_puts(p, "  APIC ICR read retries\n");
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_puts(p, "  Platform interrupts\n");
	}
#endif
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_puts(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
	seq_printf(p, "%*s: ", prec, "DFR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
	seq_puts(p, "  Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_puts(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_puts(p, "  Machine check polls\n");
#endif
#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
	if (test_bit(HYPERVISOR_CALLBACK_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HYP");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->irq_hv_callback_count);
		seq_puts(p, "  Hypervisor callback interrupts\n");
	}
#endif
#if IS_ENABLED(CONFIG_HYPERV)
	if (test_bit(HYPERV_REENLIGHTENMENT_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HRE");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->irq_hv_reenlightenment_count);
		seq_puts(p, "  Hyper-V reenlightenment interrupts\n");
	}
	if (test_bit(HYPERV_STIMER0_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HVS");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->hyperv_stimer0_count);
		seq_puts(p, "  Hyper-V stimer0 interrupts\n");
	}
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
	seq_printf(p, "%*s: ", prec, "PIN");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
	seq_puts(p, "  Posted-interrupt notification event\n");

	seq_printf(p, "%*s: ", prec, "NPI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_nested_ipis);
	seq_puts(p, "  Nested posted-interrupt event\n");

	seq_printf(p, "%*s: ", prec, "PIW");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_wakeup_ipis);
	seq_puts(p, "  Posted-interrupt wakeup event\n");
#endif
	return 0;
}

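/*
 * Illustrative excerpt of what arch_show_interrupts() emits on a
 * two-CPU system (the counts are made up; 'prec' pads the label
 * column, and ERR prints a single system-wide total):
 *
 *	NMI:          0          0   Non-maskable interrupts
 *	LOC:     123456     234567   Local timer interrupts
 *	RES:       1024       2048   Rescheduling interrupts
 *	ERR:          0
 */
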
/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#endif
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);
	return sum;
}

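/*
 * Note: the two helpers above feed the "intr" line of /proc/stat; the
 * generic proc code adds arch_irq_stat_cpu() for each CPU plus
 * arch_irq_stat() on top of the per-IRQ genirq counters.
 */
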
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible void __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;
	/* high bit used in ret_from_ code */
	unsigned int vector = ~regs->orig_ax;

	entering_irq();

	/* entering_irq() tells RCU that we're not quiescent. Check it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

	desc = __this_cpu_read(vector_irq[vector]);
	if (likely(!IS_ERR_OR_NULL(desc))) {
		if (IS_ENABLED(CONFIG_X86_32))
			handle_irq(desc, regs);
		else
			generic_handle_irq_desc(desc);
	} else {
		ack_APIC_irq();

		if (desc == VECTOR_UNUSED) {
			pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
					     __func__, smp_processor_id(),
					     vector);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		}
	}

	exiting_irq();

	set_irq_regs(old_regs);
}

#ifdef CONFIG_X86_LOCAL_APIC
/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	inc_irq_stat(x86_platform_ipis);
	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	exiting_irq();
	set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{
	if (handler)
		kvm_posted_intr_wakeup_handler = handler;
	else
		kvm_posted_intr_wakeup_handler = dummy_handler;
}
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);

/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_ipis);
	exiting_irq();
	set_irq_regs(old_regs);
}

/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
__visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_wakeup_ipis);
	kvm_posted_intr_wakeup_handler();
	exiting_irq();
	set_irq_regs(old_regs);
}

/*
 * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
 */
__visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_nested_ipis);
	exiting_irq();
	set_irq_regs(old_regs);
}
#endif

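/*
 * Usage sketch for the setter above (not the exact KVM call sites):
 * the KVM module installs its wakeup handler while loaded and clears
 * it again on unload, which restores dummy_handler:
 *
 *	kvm_set_posted_intr_wakeup_handler(wakeup_handler);
 *	...
 *	kvm_set_posted_intr_wakeup_handler(NULL);
 */
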
#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irr, vector;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	irq_migrate_all_off_this_cpu();

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	/*
	 * We can walk the vector array of this cpu without holding
	 * vector_lock because the cpu is already marked !online, so
	 * nothing else will touch it.
	 */
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
			continue;

		/*
		 * The IRR is 256 bits wide, spread over eight 32-bit APIC
		 * registers spaced 0x10 apart, so pick the register and
		 * bit that correspond to this vector.
		 */
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			desc = __this_cpu_read(vector_irq[vector]);

			raw_spin_lock(&desc->lock);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
			}
			raw_spin_unlock(&desc->lock);
		}
		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
	}
}
#endif

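/*
 * Note: fixup_irqs() runs on the outgoing CPU from the hot-unplug path
 * (cpu_disable_common() at the time of writing), after the CPU has
 * been cleared from cpu_online_mask - that ordering is what makes the
 * lockless vector_irq walk above safe.
 */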