/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>

#include <asm/apic.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
} __attribute__((aligned(PAGE_SIZE)));

static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);

static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, hardirq_stack);
static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, softirq_stack);

/*
 * Switch to the supplied stack, call func() there, then switch back.
 * %ebx carries the caller's stack pointer across the call (the "=b"/"0"
 * constraints tie it to 'stack'), and %edi holds the function pointer.
 */
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=b" (stack)
		     : "0" (stack),
		       "D"(func)
		     : "memory", "cc", "edx", "ecx", "eax");
}

static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
	union irq_ctx *curctx, *irqctx;
	u32 *isp, arg1, arg2;

	curctx = (union irq_ctx *) current_thread_info();
	irqctx = __get_cpu_var(hardirq_ctx);

	/*
	 * This is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the IRQ stack already after all).
	 */
	if (unlikely(curctx == irqctx))
		return 0;

	/* build the stack frame on the IRQ stack */
	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
	irqctx->tinfo.task = curctx->tinfo.task;
	irqctx->tinfo.previous_esp = current_stack_pointer;

	/*
	 * Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context.
	 */
	irqctx->tinfo.preempt_count =
		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);

	/*
	 * Call desc->handle_irq(irq, desc) on the IRQ stack; irq and desc
	 * are passed in %eax/%edx per the i386 regparm(3) convention.
	 */
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=a" (arg1), "=d" (arg2), "=b" (isp)
		     : "0" (irq), "1" (desc), "2" (isp),
		       "D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}
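
/*
 * For reference, a sketch of the irq_ctx layout that the frame setup
 * above relies on (this diagram is an illustration added here, not
 * part of the original source):
 *
 *   low address   ->  +----------------------+  <- irqctx
 *                     | struct thread_info   |  (task, previous_esp,
 *                     +----------------------+   preempt_count, ...)
 *                     |                      |
 *                     |   stack grows down   |
 *                     |          |           |
 *                     |          v           |
 *   high address  ->  +----------------------+  <- isp = irqctx + sizeof(*irqctx)
 *
 * x86 stacks grow toward lower addresses, so the initial stack pointer
 * handed to call_on_stack() is the very end of the union; pushes move
 * downward and must never reach the thread_info at the bottom.
 */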

/*
 * Allocate per-CPU stacks for hardirq and for softirq processing.
 */
void __cpuinit irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (per_cpu(hardirq_ctx, cpu))
		return;

	irqctx = &per_cpu(hardirq_stack, cpu);
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	per_cpu(hardirq_ctx, cpu) = irqctx;

	irqctx = &per_cpu(softirq_stack, cpu);
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= 0;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	per_cpu(softirq_ctx, cpu) = irqctx;

	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}

void irq_ctx_exit(int cpu)
{
	per_cpu(hardirq_ctx, cpu) = NULL;
}

asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = __get_cpu_var(softirq_ctx);
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

		call_on_stack(__do_softirq, isp);
		/*
		 * Shouldn't happen; we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}

	local_irq_restore(flags);
}

#else
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
#endif

bool handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct irq_desc *desc;
	int overflow;

	overflow = check_stack_overflow();

	desc = irq_to_desc(irq);
	if (unlikely(!desc))
		return false;

	if (!execute_on_irq_stack(overflow, desc, irq)) {
		if (unlikely(overflow))
			print_stack_overflow();
		desc->handle_irq(irq, desc);
	}

	return true;
}
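
/*
 * For context, a minimal sketch (not compiled here) of the caller side
 * of handle_irq(), loosely modeled on do_IRQ() in arch/x86/kernel/irq.c
 * from the same era; details such as exit_idle() and APIC error
 * handling are abridged:
 */
#if 0
unsigned int do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	/* the low-level entry code pushes ~vector into orig_ax */
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	irq_enter();
	irq = __get_cpu_var(vector_irq)[vector];

	if (!handle_irq(irq, regs))
		printk(KERN_EMERG "%s: no irq handler for vector %u (irq %u)\n",
		       __func__, vector, irq);

	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}
#endif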