/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
 * entry and irq statistics code. All the remaining irq logic is
 * done by the generic kernel/irq/ code and in the
 * x86_64-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/io_apic.h>
#include <asm/idle.h>
#include <asm/smp.h>

atomic_t irq_err_count;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But don't ack when the APIC is disabled. -AK
	 */
	if (!disable_apic)
		ack_APIC_irq();
}

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/*
 * Probabilistic stack overflow check:
 *
 * Only check the stack in process context, because everything else
 * runs on the big interrupt stacks. Checking reliably is too expensive,
 * so we just check from interrupts.
 */
static inline void stack_overflow_check(struct pt_regs *regs)
{
	u64 curbase = (u64)task_stack_page(current);
	static unsigned long warned = -60*HZ;

	if (regs->sp >= curbase && regs->sp <= curbase + THREAD_SIZE &&
	    regs->sp < curbase + sizeof(struct thread_info) + 128 &&
	    time_after(jiffies, warned + 60*HZ)) {
		printk("do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
		       current->comm, curbase, regs->sp);
		show_stack(NULL, NULL);
		warned = jiffies;
	}
}
#endif

/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, " ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		unsigned any_count = 0;

		spin_lock_irqsave(&irq_desc[i].lock, flags);
#ifndef CONFIG_SMP
		any_count = kstat_irqs(i);
#else
		for_each_online_cpu(j)
			any_count |= kstat_cpu(j).irqs[i];
#endif
		action = irq_desc[i].action;
		if (!action && !any_count)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %8s", irq_desc[i].chip->name);
		seq_printf(p, "-%-8s", irq_desc[i].name);

		if (action) {
			seq_printf(p, " %s", action->name);
			while ((action = action->next) != NULL)
				seq_printf(p, ", %s", action->name);
		}
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
		seq_printf(p, " Non-maskable interrupts\n");
		seq_printf(p, "LOC: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
		seq_printf(p, " Local timer interrupts\n");
#ifdef CONFIG_SMP
		seq_printf(p, "RES: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->irq_resched_count);
		seq_printf(p, " Rescheduling interrupts\n");
		seq_printf(p, "CAL: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count);
		seq_printf(p, " function call interrupts\n");
		seq_printf(p, "TLB: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count);
		seq_printf(p, " TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_MCE
		seq_printf(p, "TRM: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->irq_thermal_count);
		seq_printf(p, " Thermal event interrupts\n");
		seq_printf(p, "THR: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->irq_threshold_count);
		seq_printf(p, " Threshold APIC interrupts\n");
#endif
		seq_printf(p, "SPU: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->irq_spurious_count);
		seq_printf(p, " Spurious interrupts\n");
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	}
	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = cpu_pda(cpu)->__nmi_count;

	sum += cpu_pda(cpu)->apic_timer_irqs;
#ifdef CONFIG_SMP
	sum += cpu_pda(cpu)->irq_resched_count;
	sum += cpu_pda(cpu)->irq_call_count;
	sum += cpu_pda(cpu)->irq_tlb_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += cpu_pda(cpu)->irq_thermal_count;
	sum += cpu_pda(cpu)->irq_threshold_count;
#endif
	sum += cpu_pda(cpu)->irq_spurious_count;
	return sum;
}

u64 arch_irq_stat(void)
{
	return atomic_read(&irq_err_count);
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* high bit used in ret_from_ code */
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	exit_idle();
	irq_enter();
	irq = __get_cpu_var(vector_irq)[vector];

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	stack_overflow_check(regs);
#endif

	if (likely(irq < NR_IRQS))
		generic_handle_irq(irq);
	else {
		if (!disable_apic)
			ack_APIC_irq();

		if (printk_ratelimit())
			printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
				__func__, smp_processor_id(), vector);
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;

	for (irq = 0; irq < NR_IRQS; irq++) {
		cpumask_t mask;
		int break_affinity = 0;
		int set_affinity = 1;

		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		spin_lock(&irq_desc[irq].lock);

		if (!irq_has_action(irq) ||
		    cpus_equal(irq_desc[irq].affinity, map)) {
			spin_unlock(&irq_desc[irq].lock);
			continue;
		}

		cpus_and(mask, irq_desc[irq].affinity, map);
		if (cpus_empty(mask)) {
			break_affinity = 1;
			mask = map;
		}

		if (irq_desc[irq].chip->mask)
			irq_desc[irq].chip->mask(irq);

		if (irq_desc[irq].chip->set_affinity)
			irq_desc[irq].chip->set_affinity(irq, mask);
		else if (!(warned++))
			set_affinity = 0;

		if (irq_desc[irq].chip->unmask)
			irq_desc[irq].chip->unmask(irq);

		spin_unlock(&irq_desc[irq].lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			printk("Cannot set affinity for irq %i\n", irq);
	}

	/* That doesn't seem sufficient. Give it 1ms. */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

extern void call_softirq(void);

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	pending = local_softirq_pending();
	/* Switch to interrupt stack */
	if (pending) {
		call_softirq();
		WARN_ON_ONCE(softirq_count());
	}
	local_irq_restore(flags);
}
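
/*
 * Illustrative sketch only -- not part of the original irq_64.c. It shows the
 * driver-side counterpart of the dispatch above: the irq number that do_IRQ()
 * hands to generic_handle_irq() is the same number a driver registered a
 * handler for with request_irq(). All identifiers below (my_dev_isr,
 * my_dev_probe, my_dev_remove, MY_DEV_IRQ, my_dev_data) are hypothetical;
 * only request_irq()/free_irq(), irqreturn_t and IRQ_HANDLED are real
 * kernel interfaces of this era.
 */
#if 0
#include <linux/interrupt.h>

static int my_dev_data;			/* hypothetical per-device cookie */

/* Reached via do_IRQ() -> generic_handle_irq() -> the irq flow handler. */
static irqreturn_t my_dev_isr(int irq, void *dev_id)
{
	/* quiesce the device here, then report that the irq was handled */
	return IRQ_HANDLED;
}

static int my_dev_probe(void)
{
	int ret;

	/* MY_DEV_IRQ stands for whatever irq number the device was assigned */
	ret = request_irq(MY_DEV_IRQ, my_dev_isr, 0, "my_dev", &my_dev_data);
	if (ret)
		return ret;
	return 0;
}

static void my_dev_remove(void)
{
	free_irq(MY_DEV_IRQ, &my_dev_data);
}
#endif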