/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>

atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}

#define irq_stats(x)		(&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_printf(p, " Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_printf(p, " Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_printf(p, " Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_printf(p, " Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_printf(p, " IRQ work interrupts\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_printf(p, " Platform interrupts\n");
	}
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_printf(p, " Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_printf(p, " Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_printf(p, " TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_printf(p, " Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_printf(p, " Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_printf(p, " Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_printf(p, " Machine check polls\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
	return 0;
}
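
/*
 * For reference, the table assembled above ends up in /proc/interrupts
 * looking roughly like the excerpt below. This is an illustrative
 * sketch for a hypothetical two-CPU machine; the counts are invented:
 *
 *	            CPU0       CPU1
 *	NMI:           0          0   Non-maskable interrupts
 *	LOC:      123456     654321   Local timer interrupts
 *	SPU:           0          0   Spurious interrupts
 *	PMI:         200        180   Performance monitoring interrupts
 *	RES:        1500       1400   Rescheduling interrupts
 *	ERR:           0
 */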

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
#endif
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
	sum += irq_stats(cpu)->irq_tlb_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);

#ifdef CONFIG_X86_IO_APIC
	sum += atomic_read(&irq_mis_count);
#endif
	return sum;
}


/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * The entry stubs push the complemented vector number into
	 * orig_ax (its high bit is used in the ret_from_ code to tell
	 * interrupts from syscalls); undo the complement to recover
	 * the vector.
	 */
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	exit_idle();
	irq_enter();

	irq = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(irq, regs)) {
		ack_APIC_irq();

		if (printk_ratelimit())
			pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
				__func__, smp_processor_id(), vector, irq);
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();

	exit_idle();

	irq_enter();

	inc_irq_stat(x86_platform_ipis);

	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();

	irq_exit();

	set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
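
/*
 * Usage sketch for the platform IPI hook (illustrative only; the
 * handler name below is hypothetical): a platform driver installs its
 * handler by assigning the x86_platform_ipi_callback pointer defined
 * at the top of this file. smp_x86_platform_ipi() invokes it between
 * irq_enter()/irq_exit(), i.e. in hard interrupt context, so the
 * callback must not sleep.
 *
 *	static void my_platform_event_handler(void)
 *	{
 *		pr_info("platform IPI received\n");
 *	}
 *
 *	x86_platform_ipi_callback = my_platform_event_handler;
 */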

#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = data->affinity;
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete the irq move. This cpu is going down, and in
		 * the non-interrupt-remapping case we can't wait till this
		 * interrupt arrives at this cpu before completing the irq
		 * move.
		 */
		irq_force_complete_move(irq);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_all_mask;
		}

		chip = irq_data_get_irq_chip(data);
		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, affinity, true);
		else if (!(warned++))
			set_affinity = 0;

		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_disabled(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			printk("Cannot set affinity for irq %i\n", irq);
	}

	/*
	 * We could remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irr;

		if (__this_cpu_read(vector_irq[vector]) < 0)
			continue;

		/* The 256-bit IRR is spread over eight 32-bit registers */
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			irq = __this_cpu_read(vector_irq[vector]);

			desc = irq_to_desc(irq);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			raw_spin_lock(&desc->lock);
			if (chip->irq_retrigger)
				chip->irq_retrigger(data);
			raw_spin_unlock(&desc->lock);
		}
	}
}
#endif
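
/*
 * Worked example for the IRR scan in fixup_irqs() above (numbers are
 * illustrative): the local APIC spreads the 256-bit Interrupt Request
 * Register across eight 32-bit registers spaced 0x10 apart, so for
 * vector 0x31 (decimal 49):
 *
 *	APIC_IRR + (49 / 32) * 0x10  ==  APIC_IRR + 0x10  (second word)
 *	1 << (49 % 32)               ==  1 << 17          (bit 17)
 *
 * i.e. the loop tests bit 17 of the second IRR word to decide whether
 * the vector is still pending and needs to be retriggered on its new
 * target.
 */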