1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Common interrupt code for 32 and 64 bit 4 */ 5 #include <linux/cpu.h> 6 #include <linux/interrupt.h> 7 #include <linux/kernel_stat.h> 8 #include <linux/of.h> 9 #include <linux/seq_file.h> 10 #include <linux/smp.h> 11 #include <linux/ftrace.h> 12 #include <linux/delay.h> 13 #include <linux/export.h> 14 #include <linux/irq.h> 15 16 #include <asm/irq_stack.h> 17 #include <asm/apic.h> 18 #include <asm/io_apic.h> 19 #include <asm/irq.h> 20 #include <asm/mce.h> 21 #include <asm/hw_irq.h> 22 #include <asm/desc.h> 23 #include <asm/traps.h> 24 25 #define CREATE_TRACE_POINTS 26 #include <asm/trace/irq_vectors.h> 27 28 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); 29 EXPORT_PER_CPU_SYMBOL(irq_stat); 30 31 atomic_t irq_err_count; 32 33 /* 34 * 'what should we do if we get a hw irq event on an illegal vector'. 35 * each architecture has to answer this themselves. 36 */ 37 void ack_bad_irq(unsigned int irq) 38 { 39 if (printk_ratelimit()) 40 pr_err("unexpected IRQ trap at vector %02x\n", irq); 41 42 /* 43 * Currently unexpected vectors happen only on SMP and APIC. 44 * We _must_ ack these because every local APIC has only N 45 * irq slots per priority level, and a 'hanging, unacked' IRQ 46 * holds up an irq slot - in excessive cases (when multiple 47 * unexpected vectors occur) that might lock up the APIC 48 * completely. 49 * But only ack when the APIC is enabled -AK 50 */ 51 ack_APIC_irq(); 52 } 53 54 #define irq_stats(x) (&per_cpu(irq_stat, x)) 55 /* 56 * /proc/interrupts printing for arch specific interrupts 57 */ 58 int arch_show_interrupts(struct seq_file *p, int prec) 59 { 60 int j; 61 62 seq_printf(p, "%*s: ", prec, "NMI"); 63 for_each_online_cpu(j) 64 seq_printf(p, "%10u ", irq_stats(j)->__nmi_count); 65 seq_puts(p, " Non-maskable interrupts\n"); 66 #ifdef CONFIG_X86_LOCAL_APIC 67 seq_printf(p, "%*s: ", prec, "LOC"); 68 for_each_online_cpu(j) 69 seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs); 70 seq_puts(p, " Local timer interrupts\n"); 71 72 seq_printf(p, "%*s: ", prec, "SPU"); 73 for_each_online_cpu(j) 74 seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count); 75 seq_puts(p, " Spurious interrupts\n"); 76 seq_printf(p, "%*s: ", prec, "PMI"); 77 for_each_online_cpu(j) 78 seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs); 79 seq_puts(p, " Performance monitoring interrupts\n"); 80 seq_printf(p, "%*s: ", prec, "IWI"); 81 for_each_online_cpu(j) 82 seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs); 83 seq_puts(p, " IRQ work interrupts\n"); 84 seq_printf(p, "%*s: ", prec, "RTR"); 85 for_each_online_cpu(j) 86 seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count); 87 seq_puts(p, " APIC ICR read retries\n"); 88 if (x86_platform_ipi_callback) { 89 seq_printf(p, "%*s: ", prec, "PLT"); 90 for_each_online_cpu(j) 91 seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis); 92 seq_puts(p, " Platform interrupts\n"); 93 } 94 #endif 95 #ifdef CONFIG_SMP 96 seq_printf(p, "%*s: ", prec, "RES"); 97 for_each_online_cpu(j) 98 seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); 99 seq_puts(p, " Rescheduling interrupts\n"); 100 seq_printf(p, "%*s: ", prec, "CAL"); 101 for_each_online_cpu(j) 102 seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); 103 seq_puts(p, " Function call interrupts\n"); 104 seq_printf(p, "%*s: ", prec, "TLB"); 105 for_each_online_cpu(j) 106 seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); 107 seq_puts(p, " TLB shootdowns\n"); 108 #endif 109 #ifdef CONFIG_X86_THERMAL_VECTOR 110 seq_printf(p, "%*s: ", prec, "TRM"); 111 for_each_online_cpu(j) 112 seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count); 113 seq_puts(p, " Thermal event interrupts\n"); 114 #endif 115 #ifdef CONFIG_X86_MCE_THRESHOLD 116 seq_printf(p, "%*s: ", prec, "THR"); 117 for_each_online_cpu(j) 118 seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count); 119 seq_puts(p, " Threshold APIC interrupts\n"); 120 #endif 121 #ifdef CONFIG_X86_MCE_AMD 122 seq_printf(p, "%*s: ", prec, "DFR"); 123 for_each_online_cpu(j) 124 seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count); 125 seq_puts(p, " Deferred Error APIC interrupts\n"); 126 #endif 127 #ifdef CONFIG_X86_MCE 128 seq_printf(p, "%*s: ", prec, "MCE"); 129 for_each_online_cpu(j) 130 seq_printf(p, "%10u ", per_cpu(mce_exception_count, j)); 131 seq_puts(p, " Machine check exceptions\n"); 132 seq_printf(p, "%*s: ", prec, "MCP"); 133 for_each_online_cpu(j) 134 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j)); 135 seq_puts(p, " Machine check polls\n"); 136 #endif 137 #ifdef CONFIG_X86_HV_CALLBACK_VECTOR 138 if (test_bit(HYPERVISOR_CALLBACK_VECTOR, system_vectors)) { 139 seq_printf(p, "%*s: ", prec, "HYP"); 140 for_each_online_cpu(j) 141 seq_printf(p, "%10u ", 142 irq_stats(j)->irq_hv_callback_count); 143 seq_puts(p, " Hypervisor callback interrupts\n"); 144 } 145 #endif 146 #if IS_ENABLED(CONFIG_HYPERV) 147 if (test_bit(HYPERV_REENLIGHTENMENT_VECTOR, system_vectors)) { 148 seq_printf(p, "%*s: ", prec, "HRE"); 149 for_each_online_cpu(j) 150 seq_printf(p, "%10u ", 151 irq_stats(j)->irq_hv_reenlightenment_count); 152 seq_puts(p, " Hyper-V reenlightenment interrupts\n"); 153 } 154 if (test_bit(HYPERV_STIMER0_VECTOR, system_vectors)) { 155 seq_printf(p, "%*s: ", prec, "HVS"); 156 for_each_online_cpu(j) 157 seq_printf(p, "%10u ", 158 irq_stats(j)->hyperv_stimer0_count); 159 seq_puts(p, " Hyper-V stimer0 interrupts\n"); 160 } 161 #endif 162 seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); 163 #if defined(CONFIG_X86_IO_APIC) 164 seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count)); 165 #endif 166 #ifdef CONFIG_HAVE_KVM 167 seq_printf(p, "%*s: ", prec, "PIN"); 168 for_each_online_cpu(j) 169 seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis); 170 seq_puts(p, " Posted-interrupt notification event\n"); 171 172 seq_printf(p, "%*s: ", prec, "NPI"); 173 for_each_online_cpu(j) 174 seq_printf(p, "%10u ", 175 irq_stats(j)->kvm_posted_intr_nested_ipis); 176 seq_puts(p, " Nested posted-interrupt event\n"); 177 178 seq_printf(p, "%*s: ", prec, "PIW"); 179 for_each_online_cpu(j) 180 seq_printf(p, "%10u ", 181 irq_stats(j)->kvm_posted_intr_wakeup_ipis); 182 seq_puts(p, " Posted-interrupt wakeup event\n"); 183 #endif 184 return 0; 185 } 186 187 /* 188 * /proc/stat helpers 189 */ 190 u64 arch_irq_stat_cpu(unsigned int cpu) 191 { 192 u64 sum = irq_stats(cpu)->__nmi_count; 193 194 #ifdef CONFIG_X86_LOCAL_APIC 195 sum += irq_stats(cpu)->apic_timer_irqs; 196 sum += irq_stats(cpu)->irq_spurious_count; 197 sum += irq_stats(cpu)->apic_perf_irqs; 198 sum += irq_stats(cpu)->apic_irq_work_irqs; 199 sum += irq_stats(cpu)->icr_read_retry_count; 200 if (x86_platform_ipi_callback) 201 sum += irq_stats(cpu)->x86_platform_ipis; 202 #endif 203 #ifdef CONFIG_SMP 204 sum += irq_stats(cpu)->irq_resched_count; 205 sum += irq_stats(cpu)->irq_call_count; 206 #endif 207 #ifdef CONFIG_X86_THERMAL_VECTOR 208 sum += irq_stats(cpu)->irq_thermal_count; 209 #endif 210 #ifdef CONFIG_X86_MCE_THRESHOLD 211 sum += irq_stats(cpu)->irq_threshold_count; 212 #endif 213 #ifdef CONFIG_X86_MCE 214 sum += per_cpu(mce_exception_count, cpu); 215 sum += per_cpu(mce_poll_count, cpu); 216 #endif 217 return sum; 218 } 219 220 u64 arch_irq_stat(void) 221 { 222 u64 sum = atomic_read(&irq_err_count); 223 return sum; 224 } 225 226 static __always_inline void handle_irq(struct irq_desc *desc, 227 struct pt_regs *regs) 228 { 229 if (IS_ENABLED(CONFIG_X86_64)) 230 run_on_irqstack_cond(desc->handle_irq, desc, regs); 231 else 232 __handle_irq(desc, regs); 233 } 234 235 /* 236 * common_interrupt() handles all normal device IRQ's (the special SMP 237 * cross-CPU interrupts have their own entry points). 238 */ 239 DEFINE_IDTENTRY_IRQ(common_interrupt) 240 { 241 struct pt_regs *old_regs = set_irq_regs(regs); 242 struct irq_desc *desc; 243 244 /* entry code tells RCU that we're not quiescent. Check it. */ 245 RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU"); 246 247 desc = __this_cpu_read(vector_irq[vector]); 248 if (likely(!IS_ERR_OR_NULL(desc))) { 249 handle_irq(desc, regs); 250 } else { 251 ack_APIC_irq(); 252 253 if (desc == VECTOR_UNUSED) { 254 pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n", 255 __func__, smp_processor_id(), 256 vector); 257 } else { 258 __this_cpu_write(vector_irq[vector], VECTOR_UNUSED); 259 } 260 } 261 262 set_irq_regs(old_regs); 263 } 264 265 #ifdef CONFIG_X86_LOCAL_APIC 266 /* Function pointer for generic interrupt vector handling */ 267 void (*x86_platform_ipi_callback)(void) = NULL; 268 /* 269 * Handler for X86_PLATFORM_IPI_VECTOR. 270 */ 271 DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi) 272 { 273 struct pt_regs *old_regs = set_irq_regs(regs); 274 275 ack_APIC_irq(); 276 trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR); 277 inc_irq_stat(x86_platform_ipis); 278 if (x86_platform_ipi_callback) 279 x86_platform_ipi_callback(); 280 trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR); 281 set_irq_regs(old_regs); 282 } 283 #endif 284 285 #ifdef CONFIG_HAVE_KVM 286 static void dummy_handler(void) {} 287 static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler; 288 289 void kvm_set_posted_intr_wakeup_handler(void (*handler)(void)) 290 { 291 if (handler) 292 kvm_posted_intr_wakeup_handler = handler; 293 else 294 kvm_posted_intr_wakeup_handler = dummy_handler; 295 } 296 EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler); 297 298 /* 299 * Handler for POSTED_INTERRUPT_VECTOR. 300 */ 301 DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_ipi) 302 { 303 ack_APIC_irq(); 304 inc_irq_stat(kvm_posted_intr_ipis); 305 } 306 307 /* 308 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR. 309 */ 310 DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted_intr_wakeup_ipi) 311 { 312 ack_APIC_irq(); 313 inc_irq_stat(kvm_posted_intr_wakeup_ipis); 314 kvm_posted_intr_wakeup_handler(); 315 } 316 317 /* 318 * Handler for POSTED_INTERRUPT_NESTED_VECTOR. 319 */ 320 DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi) 321 { 322 ack_APIC_irq(); 323 inc_irq_stat(kvm_posted_intr_nested_ipis); 324 } 325 #endif 326 327 328 #ifdef CONFIG_HOTPLUG_CPU 329 /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ 330 void fixup_irqs(void) 331 { 332 unsigned int irr, vector; 333 struct irq_desc *desc; 334 struct irq_data *data; 335 struct irq_chip *chip; 336 337 irq_migrate_all_off_this_cpu(); 338 339 /* 340 * We can remove mdelay() and then send spuriuous interrupts to 341 * new cpu targets for all the irqs that were handled previously by 342 * this cpu. While it works, I have seen spurious interrupt messages 343 * (nothing wrong but still...). 344 * 345 * So for now, retain mdelay(1) and check the IRR and then send those 346 * interrupts to new targets as this cpu is already offlined... 347 */ 348 mdelay(1); 349 350 /* 351 * We can walk the vector array of this cpu without holding 352 * vector_lock because the cpu is already marked !online, so 353 * nothing else will touch it. 354 */ 355 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { 356 if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector]))) 357 continue; 358 359 irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); 360 if (irr & (1 << (vector % 32))) { 361 desc = __this_cpu_read(vector_irq[vector]); 362 363 raw_spin_lock(&desc->lock); 364 data = irq_desc_get_irq_data(desc); 365 chip = irq_data_get_irq_chip(data); 366 if (chip->irq_retrigger) { 367 chip->irq_retrigger(data); 368 __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED); 369 } 370 raw_spin_unlock(&desc->lock); 371 } 372 if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED) 373 __this_cpu_write(vector_irq[vector], VECTOR_UNUSED); 374 } 375 } 376 #endif 377