/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>

/* Per-CPU interrupt statistics, exported for the generic irq code. */
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

/* Per-CPU pointer to the pt_regs of the interrupt currently being handled. */
DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

#ifdef CONFIG_DEBUG_STACKOVERFLOW

/* Non-zero => panic (rather than just warn) when a low stack is detected. */
int sysctl_panic_on_stackoverflow __read_mostly;

/*
 * Debugging check for stack overflow: is there less than 1KB free?
 *
 * Masking %esp with (THREAD_SIZE - 1) yields the offset of the stack
 * pointer within the current thread stack; if that offset has dropped
 * into the thread_info area plus the STACK_WARN margin, we are close
 * to overflowing.  Returns non-zero when the stack is dangerously low.
 */
static int check_stack_overflow(void)
{
	long sp;

	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	return sp < (sizeof(struct thread_info) + STACK_WARN);
}

/*
 * Report a low-stack condition detected on irq entry.  Dumps the stack
 * for diagnosis and, if sysctl_panic_on_stackoverflow is set, panics.
 */
static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
}

#else
/* Stack-overflow debugging disabled: both checks compile away. */
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

/*
 * per-CPU IRQ handling contexts (thread information and stack)
 *
 * Laid out exactly like a normal thread stack: thread_info at the
 * bottom, the stack growing down from the top of the THREAD_SIZE area.
 * The THREAD_SIZE alignment lets current_thread_info() style masking
 * work on the irq stack too.
 */
union irq_ctx {
	struct thread_info      tinfo;
	u32                     stack[THREAD_SIZE/sizeof(u32)];
} __attribute__((aligned(THREAD_SIZE)));

/* Per-CPU dedicated stacks for hardirq and softirq processing. */
static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);

/*
 * Call func() with %esp switched to @stack, restoring the original
 * stack pointer afterwards.
 *
 * The xchgl swaps the caller's %esp with @stack (held in %ebx), the
 * indirect call through %edi runs func on the new stack, and the movl
 * restores the original %esp from %ebx.  func must take no arguments.
 */
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=b" (stack)
		     : "0" (stack),
		       "D"(func)
		     : "memory", "cc", "edx", "ecx", "eax");
}

/*
 * Run desc->handle_irq(irq, desc) on this CPU's dedicated hardirq
 * stack.  Returns 1 on success, 0 if we were already running on the
 * irq stack (nested hardirq) and the caller must handle the interrupt
 * on the current stack instead.
 */
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
	union irq_ctx *curctx, *irqctx;
	u32 *isp, arg1, arg2;

	curctx = (union irq_ctx *) current_thread_info();
	irqctx = __this_cpu_read(hardirq_ctx);

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (unlikely(curctx == irqctx))
		return 0;

	/* build the stack frame on the IRQ stack */
	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
	irqctx->tinfo.task = curctx->tinfo.task;
	/* Save the interrupted stack so unwinders can walk back to it. */
	irqctx->tinfo.previous_esp = current_stack_pointer;

	/* Copy the preempt_count so that the [soft]irq checks work. */
	irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;

	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);

	/*
	 * Same stack-switch trick as call_on_stack(), but the handler
	 * takes (irq, desc) in %eax/%edx, so those registers are passed
	 * as in/out operands instead of being listed as clobbers.
	 */
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=a" (arg1), "=d" (arg2), "=b" (isp)
		     :  "0" (irq),   "1" (desc),  "2" (isp),
			"D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 *
 * Idempotent per CPU: returns early if the hardirq stack has already
 * been set up.  Called during CPU bringup (__cpuinit).
 *
 * NOTE(review): the alloc_pages_node() results are not checked for
 * NULL before page_address()/memset — presumably boot-time allocation
 * failure is considered fatal here; confirm before reusing elsewhere.
 */
void __cpuinit irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (per_cpu(hardirq_ctx, cpu))
		return;

	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREADINFO_GFP,
					       THREAD_SIZE_ORDER));
	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
	irqctx->tinfo.cpu		= cpu;
	/* Mark the stack as "in hardirq" so irq checks see the context. */
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	per_cpu(hardirq_ctx, cpu) = irqctx;

	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREADINFO_GFP,
					       THREAD_SIZE_ORDER));
	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	per_cpu(softirq_ctx, cpu) = irqctx;

	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}

/*
 * Run pending softirqs on this CPU's dedicated softirq stack.
 *
 * Bails out immediately when called from interrupt context; otherwise
 * disables local interrupts, switches to the softirq stack and invokes
 * __do_softirq() there.
 */
asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = __this_cpu_read(softirq_ctx);
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

		call_on_stack(__do_softirq, isp);
		/*
		 * Shouldn't happen, we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}

	local_irq_restore(flags);
}

/*
 * Top-level C entry point for a hardware interrupt.
 *
 * Returns false if @irq has no descriptor (spurious/bogus vector),
 * true once the interrupt has been handled.  The handler runs on the
 * per-CPU hardirq stack unless the interrupt arrived from user mode
 * (current stack is nearly empty) or we are already on the irq stack,
 * in which case it runs on the current stack.
 */
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct irq_desc *desc;
	int overflow;

	overflow = check_stack_overflow();

	desc = irq_to_desc(irq);
	if (unlikely(!desc))
		return false;

	if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
		if (unlikely(overflow))
			print_stack_overflow();
		desc->handle_irq(irq, desc);
	}

	return true;
}