/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
 * entry and irq statistics code. All the remaining irq logic is
 * done by the generic kernel/irq/ code and in the
 * x86_64-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/io_apic.h>
#include <asm/idle.h>
#include <asm/smp.h>

atomic_t irq_err_count;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
        printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But don't ack when the APIC is disabled. -AK
         */
        if (!disable_apic)
                ack_APIC_irq();
}

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/*
 * Probabilistic stack overflow check:
 *
 * Only check the stack in process context, because everything else
 * runs on the big interrupt stacks. Checking reliably is too expensive,
 * so we just check from interrupts.
 */
static inline void stack_overflow_check(struct pt_regs *regs)
{
        u64 curbase = (u64)task_stack_page(current);
        static unsigned long warned = -60*HZ;

        if (regs->sp >= curbase && regs->sp <= curbase + THREAD_SIZE &&
            regs->sp <  curbase + sizeof(struct thread_info) + 128 &&
            time_after(jiffies, warned + 60*HZ)) {
                printk("do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
                       current->comm, curbase, regs->sp);
                show_stack(NULL, NULL);
                warned = jiffies;
        }
}
#endif

/*
 * Generic, controller-independent functions:
 */

/*
 * show_interrupts() is the seq_file show method behind /proc/interrupts:
 * one line per IRQ, plus the arch-specific summary counters at the end.
 */
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction *action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, " ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                unsigned any_count = 0;

                spin_lock_irqsave(&irq_desc[i].lock, flags);
#ifndef CONFIG_SMP
                any_count = kstat_irqs(i);
#else
                for_each_online_cpu(j)
                        any_count |= kstat_cpu(j).irqs[i];
#endif
                action = irq_desc[i].action;
                if (!action && !any_count)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
                seq_printf(p, " %8s", irq_desc[i].chip->name);
                seq_printf(p, "-%-8s", irq_desc[i].name);

                if (action) {
                        seq_printf(p, " %s", action->name);
                        while ((action = action->next) != NULL)
                                seq_printf(p, ", %s", action->name);
                }
                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == NR_IRQS) {
                seq_printf(p, "NMI: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
                seq_printf(p, " Non-maskable interrupts\n");
                seq_printf(p, "LOC: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
                seq_printf(p, " Local timer interrupts\n");
#ifdef CONFIG_SMP
                seq_printf(p, "RES: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", cpu_pda(j)->irq_resched_count);
                seq_printf(p, " Rescheduling interrupts\n");
                seq_printf(p, "CAL: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count);
                seq_printf(p, " function call interrupts\n");
                seq_printf(p, "TLB: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count);
                seq_printf(p, " TLB shootdowns\n");
#endif
                seq_printf(p, "TRM: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", cpu_pda(j)->irq_thermal_count);
                seq_printf(p, " Thermal event interrupts\n");
                seq_printf(p, "THR: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", cpu_pda(j)->irq_threshold_count);
                seq_printf(p, " Threshold APIC interrupts\n");
                seq_printf(p, "SPU: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", cpu_pda(j)->irq_spurious_count);
                seq_printf(p, " Spurious interrupts\n");
                seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
        }
        return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        /* high bit used in ret_from_ code */
        unsigned vector = ~regs->orig_ax;
        unsigned irq;

        exit_idle();
        irq_enter();
        irq = __get_cpu_var(vector_irq)[vector];

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        stack_overflow_check(regs);
#endif

        if (likely(irq < NR_IRQS))
                generic_handle_irq(irq);
        else {
                if (!disable_apic)
                        ack_APIC_irq();

                if (printk_ratelimit())
                        printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
                                __func__, smp_processor_id(), vector);
        }

        irq_exit();

        set_irq_regs(old_regs);
        return 1;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * A CPU is going offline: re-target every IRQ so that its affinity
 * only contains CPUs from 'map' (the remaining online CPUs).
 */
void fixup_irqs(cpumask_t map)
{
        unsigned int irq;
        static int warned;

        for (irq = 0; irq < NR_IRQS; irq++) {
                cpumask_t mask;
                int break_affinity = 0;
                int set_affinity = 1;

                if (irq == 2)
                        continue;

                /* interrupts are disabled at this point */
                spin_lock(&irq_desc[irq].lock);

                if (!irq_has_action(irq) ||
                    cpus_equal(irq_desc[irq].affinity, map)) {
                        spin_unlock(&irq_desc[irq].lock);
                        continue;
                }

                cpus_and(mask, irq_desc[irq].affinity, map);
                if (cpus_empty(mask)) {
                        break_affinity = 1;
                        mask = map;
                }

                if (irq_desc[irq].chip->mask)
                        irq_desc[irq].chip->mask(irq);

                if (irq_desc[irq].chip->set_affinity)
                        irq_desc[irq].chip->set_affinity(irq, mask);
                else if (!(warned++))
                        set_affinity = 0;

                if (irq_desc[irq].chip->unmask)
                        irq_desc[irq].chip->unmask(irq);

                spin_unlock(&irq_desc[irq].lock);

                if (break_affinity && set_affinity)
                        printk("Broke affinity for irq %i\n", irq);
                else if (!set_affinity)
                        printk("Cannot set affinity for irq %i\n", irq);
        }

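        /*
         * Interrupts are re-enabled for a moment below so that any IRQs
         * already in flight towards this CPU can still be delivered
         * before it is taken offline.
         */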
        /* That doesn't seem sufficient. Give it 1ms. */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif

extern void call_softirq(void);

asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);
        pending = local_softirq_pending();
        /* Switch to interrupt stack */
        if (pending) {
                call_softirq();
                WARN_ON_ONCE(softirq_count());
        }
        local_irq_restore(flags);
}