/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

atomic_t irq_err_count;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
        if (printk_ratelimit())
                pr_err("unexpected IRQ trap at vector %02x\n", irq);

        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But only ack when the APIC is enabled -AK
         */
        ack_APIC_irq();
}

#define irq_stats(x)            (&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
        seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
        seq_puts(p, "  Local timer interrupts\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
        seq_puts(p, "  Spurious interrupts\n");
        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
        seq_puts(p, "  Performance monitoring interrupts\n");
        seq_printf(p, "%*s: ", prec, "IWI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
        seq_puts(p, "  IRQ work interrupts\n");
        seq_printf(p, "%*s: ", prec, "RTR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
        seq_puts(p, "  APIC ICR read retries\n");
        if (x86_platform_ipi_callback) {
                seq_printf(p, "%*s: ", prec, "PLT");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
                seq_puts(p, "  Platform interrupts\n");
        }
#endif
#ifdef CONFIG_SMP
        seq_printf(p, "%*s: ", prec, "RES");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
        seq_puts(p, "  Rescheduling interrupts\n");
        seq_printf(p, "%*s: ", prec, "CAL");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
        seq_puts(p, "  Function call interrupts\n");
        seq_printf(p, "%*s: ", prec, "TLB");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
        seq_puts(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
        seq_printf(p, "%*s: ", prec, "TRM");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
        seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        seq_printf(p, "%*s: ", prec, "THR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
        seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
        seq_printf(p, "%*s: ", prec, "DFR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
        seq_puts(p, "  Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
        seq_puts(p, "  Machine check exceptions\n");
        seq_printf(p, "%*s: ", prec, "MCP");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
        seq_puts(p, "  Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
        if (test_bit(HYPERVISOR_CALLBACK_VECTOR, system_vectors)) {
                seq_printf(p, "%*s: ", prec, "HYP");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                   irq_stats(j)->irq_hv_callback_count);
                seq_puts(p, "  Hypervisor callback interrupts\n");
        }
#endif
#if IS_ENABLED(CONFIG_HYPERV)
        if (test_bit(HYPERV_REENLIGHTENMENT_VECTOR, system_vectors)) {
                seq_printf(p, "%*s: ", prec, "HRE");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                   irq_stats(j)->irq_hv_reenlightenment_count);
                seq_puts(p, "  Hyper-V reenlightenment interrupts\n");
        }
#endif
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
        seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
        seq_printf(p, "%*s: ", prec, "PIN");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
        seq_puts(p, "  Posted-interrupt notification event\n");

        seq_printf(p, "%*s: ", prec, "NPI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ",
                           irq_stats(j)->kvm_posted_intr_nested_ipis);
        seq_puts(p, "  Nested posted-interrupt event\n");

        seq_printf(p, "%*s: ", prec, "PIW");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ",
                           irq_stats(j)->kvm_posted_intr_wakeup_ipis);
        seq_puts(p, "  Posted-interrupt wakeup event\n");
#endif
        return 0;
}
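
/*
 * For orientation: the function above produces the arch-specific rows
 * of /proc/interrupts.  On a hypothetical two-CPU box the output might
 * look roughly like this (the counts are made up for illustration):
 *
 *      NMI:          0          0   Non-maskable interrupts
 *      LOC:     123456      98765   Local timer interrupts
 *      RES:       4321       1234   Rescheduling interrupts
 *      ERR:          0
 *
 * 'prec' is the field width the generic /proc/interrupts code picked
 * for the IRQ-number column, so these labels line up with the numbered
 * per-device rows printed by the core code.
 */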

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
        sum += irq_stats(cpu)->apic_timer_irqs;
        sum += irq_stats(cpu)->irq_spurious_count;
        sum += irq_stats(cpu)->apic_perf_irqs;
        sum += irq_stats(cpu)->apic_irq_work_irqs;
        sum += irq_stats(cpu)->icr_read_retry_count;
        if (x86_platform_ipi_callback)
                sum += irq_stats(cpu)->x86_platform_ipis;
#endif
#ifdef CONFIG_SMP
        sum += irq_stats(cpu)->irq_resched_count;
        sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
        sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
        sum += per_cpu(mce_exception_count, cpu);
        sum += per_cpu(mce_poll_count, cpu);
#endif
        return sum;
}

u64 arch_irq_stat(void)
{
        u64 sum = atomic_read(&irq_err_count);
        return sum;
}
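
/*
 * These two helpers are consumed by the generic /proc/stat code, which
 * folds them into the aggregate "intr" line.  A rough sketch of the
 * caller's accounting (this is fs/proc/stat.c territory, not code in
 * this file):
 *
 *      u64 sum = 0;
 *      for_each_possible_cpu(cpu)
 *              sum += kstat_cpu_irqs_sum(cpu) + arch_irq_stat_cpu(cpu);
 *      sum += arch_irq_stat();
 *
 * So anything counted here shows up in the system-wide interrupt total
 * even though it never appears as a numbered IRQ.
 */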

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        struct irq_desc *desc;
        /*
         * The entry stub pushed ~vector into the orig_ax slot; the set
         * high bit is used in the ret_from_ code to tell interrupt
         * frames from syscall frames, and inverting it here recovers
         * the vector number.
         */
        unsigned vector = ~regs->orig_ax;

        entering_irq();

        /* entering_irq() tells RCU that we're not quiescent.  Check it. */
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

        desc = __this_cpu_read(vector_irq[vector]);

        if (!handle_irq(desc, regs)) {
                ack_APIC_irq();

                if (desc != VECTOR_RETRIGGERED) {
                        pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
                                             __func__, smp_processor_id(),
                                             vector);
                } else {
                        __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
                }
        }

        exiting_irq();

        set_irq_regs(old_regs);
        return 1;
}

#ifdef CONFIG_X86_LOCAL_APIC
/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
        inc_irq_stat(x86_platform_ipis);
        if (x86_platform_ipi_callback)
                x86_platform_ipi_callback();
        trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
        exiting_irq();
        set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{
        if (handler)
                kvm_posted_intr_wakeup_handler = handler;
        else
                kvm_posted_intr_wakeup_handler = dummy_handler;
}
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);

/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        inc_irq_stat(kvm_posted_intr_ipis);
        exiting_irq();
        set_irq_regs(old_regs);
}

/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
__visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        inc_irq_stat(kvm_posted_intr_wakeup_ipis);
        kvm_posted_intr_wakeup_handler();
        exiting_irq();
        set_irq_regs(old_regs);
}

/*
 * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
 */
__visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        inc_irq_stat(kvm_posted_intr_nested_ipis);
        exiting_irq();
        set_irq_regs(old_regs);
}
#endif
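
/*
 * A minimal sketch of how the wakeup hook above is meant to be used;
 * the handler name is made up for illustration, but KVM's VMX code
 * registers its real handler the same way when posted interrupts are
 * in use:
 *
 *      static void pi_wakeup_handler(void)
 *      {
 *              // walk the per-CPU list of vCPUs blocked on this CPU
 *              // and kick any whose posted-interrupt descriptor has a
 *              // pending notification
 *      }
 *
 *      kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler);
 *
 * Passing NULL restores dummy_handler(), so a caller can unregister on
 * teardown without leaving a stale function pointer behind.
 */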


#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irr, vector;
        struct irq_desc *desc;
        struct irq_data *data;
        struct irq_chip *chip;

        irq_migrate_all_off_this_cpu();

        /*
         * We can remove mdelay() and then send spurious interrupts to
         * new cpu targets for all the irqs that were handled previously by
         * this cpu.  While it works, I have seen spurious interrupt messages
         * (nothing wrong but still...).
         *
         * So for now, retain mdelay(1) and check the IRR and then send those
         * interrupts to new targets as this cpu is already offlined...
         */
        mdelay(1);

        /*
         * We can walk the vector array of this cpu without holding
         * vector_lock because the cpu is already marked !online, so
         * nothing else will touch it.
         */
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
                        continue;

                /*
                 * APIC_IRR is 256 bits wide, exposed as eight 32-bit
                 * registers spaced 0x10 apart; e.g. vector 0x41 lives
                 * in APIC_IRR + 0x20, bit 1.
                 */
                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                if (irr & (1 << (vector % 32))) {
                        desc = __this_cpu_read(vector_irq[vector]);

                        raw_spin_lock(&desc->lock);
                        data = irq_desc_get_irq_data(desc);
                        chip = irq_data_get_irq_chip(data);
                        if (chip->irq_retrigger) {
                                chip->irq_retrigger(data);
                                __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
                        }
                        raw_spin_unlock(&desc->lock);
                }
                if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
                        __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
        }
}
#endif
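
/*
 * Note how the two halves of this file cooperate: fixup_irqs() marks a
 * vector VECTOR_RETRIGGERED after resending its pending interrupt to a
 * new target.  If the stale copy still fires on this dying CPU, do_IRQ()
 * finds VECTOR_RETRIGGERED, acks it silently and resets the slot to
 * VECTOR_UNUSED instead of complaining about a missing handler.
 */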