1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0 29a163ed8SThomas Gleixner /* 39a163ed8SThomas Gleixner * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar 49a163ed8SThomas Gleixner * 59a163ed8SThomas Gleixner * This file contains the lowest level x86-specific interrupt 69a163ed8SThomas Gleixner * entry, irq-stacks and irq statistics code. All the remaining 79a163ed8SThomas Gleixner * irq logic is done by the generic kernel/irq/ code and 89a163ed8SThomas Gleixner * by the x86-specific irq controller code. (e.g. i8259.c and 99a163ed8SThomas Gleixner * io_apic.c.) 109a163ed8SThomas Gleixner */ 119a163ed8SThomas Gleixner 129a163ed8SThomas Gleixner #include <linux/seq_file.h> 139a163ed8SThomas Gleixner #include <linux/interrupt.h> 14447ae316SNicolai Stange #include <linux/irq.h> 159a163ed8SThomas Gleixner #include <linux/kernel_stat.h> 169a163ed8SThomas Gleixner #include <linux/notifier.h> 179a163ed8SThomas Gleixner #include <linux/cpu.h> 189a163ed8SThomas Gleixner #include <linux/delay.h> 1972ade5f9SJaswinder Singh Rajput #include <linux/uaccess.h> 2042f8faecSLai Jiangshan #include <linux/percpu.h> 215c1eb089SEric Dumazet #include <linux/mm.h> 229a163ed8SThomas Gleixner 239a163ed8SThomas Gleixner #include <asm/apic.h> 247614e913SAndi Kleen #include <asm/nospec-branch.h> 25db1cc7aeSThomas Gleixner #include <asm/softirq_stack.h> 269a163ed8SThomas Gleixner 27de9b10afSThomas Gleixner #ifdef CONFIG_DEBUG_STACKOVERFLOW 2853b56502SIngo Molnar 2953b56502SIngo Molnar int sysctl_panic_on_stackoverflow __read_mostly; 3053b56502SIngo Molnar 31de9b10afSThomas Gleixner /* Debugging check for stack overflow: is there less than 1KB free? 
 */
static int check_stack_overflow(void)
{
	long sp;

	/*
	 * Mask ESP down to its offset within the THREAD_SIZE-aligned
	 * stack, i.e. how many bytes of stack are still unused below us.
	 */
	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	/* Overflowing when fewer than STACK_WARN bytes remain above thread_info */
	return sp < (sizeof(struct thread_info) + STACK_WARN);
}

/* Warn about the low stack and, if the sysctl demands it, panic. */
static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
}

#else
/* !CONFIG_DEBUG_STACKOVERFLOW: no-op stubs, the checks compile away. */
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

/* Per-CPU pointers to the dedicated hardirq and softirq stacks. */
DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
DEFINE_PER_CPU(struct irq_stack *, softirq_stack_ptr);

/*
 * Switch ESP to @stack, indirectly call @func through the
 * retpoline-safe CALL_NOSPEC thunk, then restore the original ESP.
 * EBX ("=b"/"0") carries the old stack pointer across the call;
 * EAX/ECX/EDX are listed as clobbered by the callee.
 */
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     CALL_NOSPEC
		     "movl	%%ebx,%%esp	\n"
		     : "=b" (stack)
		     : "0" (stack),
		       [thunk_target] "D"(func)
		     : "memory", "cc", "edx", "ecx", "eax");
}

/* Round the current stack pointer down to the base of the current stack. */
static inline void *current_stack(void)
{
	return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
}

/*
 * Run desc->handle_irq(desc) on this CPU's hardirq stack.
 *
 * Returns 1 when the handler was invoked on the irq stack, 0 when we
 * are already running on it (nested interrupt) and the caller must
 * handle the irq on the current stack instead.
 */
static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
{
	struct irq_stack *curstk, *irqstk;
	u32 *isp, *prev_esp, arg1;

	curstk = (struct irq_stack *) current_stack();
	irqstk = __this_cpu_read(hardirq_stack_ptr);

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (unlikely(curstk == irqstk))
		return 0;

	/* Start at the top (highest address) of the irq stack; it grows down. */
	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

	/* Save the next esp at the bottom of the stack */
	prev_esp = (u32 *)irqstk;
	*prev_esp = current_stack_pointer;

	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);

	/*
	 * Same stack-switch dance as call_on_stack(), but desc is
	 * preloaded into EAX ("=a"/"0") — the first argument under the
	 * 32-bit kernel's regparm calling convention.
	 */
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     CALL_NOSPEC
		     "movl	%%ebx,%%esp	\n"
		     : "=a" (arg1), "=b" (isp)
		     : "0" (desc),   "1" (isp),
		       [thunk_target] "D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}

/*
 * Allocate per-cpu stacks for hardirq and softirq processing
 */
int irq_init_percpu_irqstack(unsigned int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *ph, *ps;

	/* Already set up for this CPU (e.g. it was offlined and re-onlined). */
	if (per_cpu(hardirq_stack_ptr, cpu))
		return 0;

	/* One THREAD_SIZE stack each, allocated on the CPU's own NUMA node. */
	ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
	if (!ph)
		return -ENOMEM;
	ps = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
	if (!ps) {
		/* Don't leak the hardirq stack on partial failure. */
		__free_pages(ph, THREAD_SIZE_ORDER);
		return -ENOMEM;
	}

	per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
	per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
	return 0;
}

#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
/*
 * Run __do_softirq() on this CPU's dedicated softirq stack instead of
 * the current stack, mirroring the switch in execute_on_irq_stack().
 */
void do_softirq_own_stack(void)
{
	struct irq_stack *irqstk;
	u32 *isp, *prev_esp;

	irqstk = __this_cpu_read(softirq_stack_ptr);

	/* build the stack frame on the softirq stack */
	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

	/* Push the previous esp onto the stack */
	prev_esp = (u32 *)irqstk;
	*prev_esp = current_stack_pointer;

	call_on_stack(__do_softirq, isp);
}
#endif

/*
 * Handle one interrupt: try to run the handler on the hardirq stack.
 * Fall back to the current stack when we came from user mode or when
 * execute_on_irq_stack() declines (we are already on the irq stack).
 */
void __handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
	int overflow = check_stack_overflow();

	if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {
		if (unlikely(overflow))
			print_stack_overflow();
		generic_handle_irq_desc(desc);
	}
}