xref: /openbmc/linux/arch/x86/kernel/irq_32.c (revision d7b6d709)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
29a163ed8SThomas Gleixner /*
39a163ed8SThomas Gleixner  *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
49a163ed8SThomas Gleixner  *
59a163ed8SThomas Gleixner  * This file contains the lowest level x86-specific interrupt
69a163ed8SThomas Gleixner  * entry, irq-stacks and irq statistics code. All the remaining
79a163ed8SThomas Gleixner  * irq logic is done by the generic kernel/irq/ code and
89a163ed8SThomas Gleixner  * by the x86-specific irq controller code. (e.g. i8259.c and
99a163ed8SThomas Gleixner  * io_apic.c.)
109a163ed8SThomas Gleixner  */
119a163ed8SThomas Gleixner 
129a163ed8SThomas Gleixner #include <linux/seq_file.h>
139a163ed8SThomas Gleixner #include <linux/interrupt.h>
14447ae316SNicolai Stange #include <linux/irq.h>
159a163ed8SThomas Gleixner #include <linux/kernel_stat.h>
169a163ed8SThomas Gleixner #include <linux/notifier.h>
179a163ed8SThomas Gleixner #include <linux/cpu.h>
189a163ed8SThomas Gleixner #include <linux/delay.h>
1972ade5f9SJaswinder Singh Rajput #include <linux/uaccess.h>
2042f8faecSLai Jiangshan #include <linux/percpu.h>
215c1eb089SEric Dumazet #include <linux/mm.h>
229a163ed8SThomas Gleixner 
239a163ed8SThomas Gleixner #include <asm/apic.h>
247614e913SAndi Kleen #include <asm/nospec-branch.h>
25db1cc7aeSThomas Gleixner #include <asm/softirq_stack.h>
269a163ed8SThomas Gleixner 
27de9b10afSThomas Gleixner #ifdef CONFIG_DEBUG_STACKOVERFLOW
2853b56502SIngo Molnar 
2953b56502SIngo Molnar int sysctl_panic_on_stackoverflow __read_mostly;
3053b56502SIngo Molnar 
31de9b10afSThomas Gleixner /* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

	/*
	 * Mask %esp with THREAD_SIZE - 1: sp becomes the offset of the
	 * current stack pointer within the THREAD_SIZE-aligned stack,
	 * i.e. the number of bytes still free below it.
	 */
	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	/*
	 * Too close to the thread_info that lives at the bottom of the
	 * stack plus the STACK_WARN safety margin -> report overflow.
	 */
	return sp < (sizeof(struct thread_info) + STACK_WARN);
}
41de9b10afSThomas Gleixner 
/* Warn (and optionally panic) when check_stack_overflow() fired. */
static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
	/* sysctl knob lets admins turn the warning into a hard stop */
	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
}
49de9b10afSThomas Gleixner 
50de9b10afSThomas Gleixner #else
/* !CONFIG_DEBUG_STACKOVERFLOW: stubs that compile away entirely */
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif
54de9b10afSThomas Gleixner 
/*
 * Invoke func() with %esp switched to @stack, then switch back.
 *
 * xchgl swaps the caller's stack pointer into %ebx while installing the
 * new one; CALL_NOSPEC performs a retpoline-safe indirect call through
 * %edi (the [thunk_target] operand pinning @func there); the trailing
 * movl restores the original %esp from %ebx.  %edx/%ecx/%eax are
 * clobbered per the 32-bit calling convention.
 */
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     CALL_NOSPEC
		     "movl	%%ebx,%%esp	\n"
		     : "=b" (stack)
		     : "0" (stack),
		       [thunk_target] "D"(func)
		     : "memory", "cc", "edx", "ecx", "eax");
}
6504b361abSAndi Kleen 
current_stack(void)66198d208dSSteven Rostedt static inline void *current_stack(void)
67198d208dSSteven Rostedt {
68196bd485SAndrey Ryabinin 	return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
69198d208dSSteven Rostedt }
70198d208dSSteven Rostedt 
/*
 * Switch to this CPU's hardirq stack and run desc->handle_irq(desc)
 * there.  Returns 0 without calling the handler when we are already on
 * the hardirq stack (nested hardirq) — the caller must then run the
 * handler on the current stack; returns 1 once the handler has run.
 */
static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
{
	struct irq_stack *curstk, *irqstk;
	u32 *isp, *prev_esp, arg1;

	curstk = (struct irq_stack *) current_stack();
	irqstk = __this_cpu_read(pcpu_hot.hardirq_stack_ptr);

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (unlikely(curstk == irqstk))
		return 0;

	/* Start at the top (highest address) of the irq stack */
	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

	/* Save the next esp at the bottom of the stack */
	prev_esp = (u32 *)irqstk;
	*prev_esp = current_stack_pointer;

	/* Report the overflow from the irq stack, where there is room */
	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);

	/*
	 * Same trick as call_on_stack(): xchgl installs the irq stack
	 * while parking the old %esp in %ebx, CALL_NOSPEC indirectly
	 * calls the handler via %edi with desc as the argument in %eax,
	 * then %esp is restored from %ebx.
	 */
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     CALL_NOSPEC
		     "movl	%%ebx,%%esp	\n"
		     : "=a" (arg1), "=b" (isp)
		     :  "0" (desc),   "1" (isp),
			[thunk_target] "D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}
1069a163ed8SThomas Gleixner 
1079a163ed8SThomas Gleixner /*
10866c7ceb4SThomas Gleixner  * Allocate per-cpu stacks for hardirq and softirq processing
1099a163ed8SThomas Gleixner  */
irq_init_percpu_irqstack(unsigned int cpu)11066c7ceb4SThomas Gleixner int irq_init_percpu_irqstack(unsigned int cpu)
1119a163ed8SThomas Gleixner {
11266c7ceb4SThomas Gleixner 	int node = cpu_to_node(cpu);
11366c7ceb4SThomas Gleixner 	struct page *ph, *ps;
1149a163ed8SThomas Gleixner 
115*d7b6d709SThomas Gleixner 	if (per_cpu(pcpu_hot.hardirq_stack_ptr, cpu))
11666c7ceb4SThomas Gleixner 		return 0;
1179a163ed8SThomas Gleixner 
11866c7ceb4SThomas Gleixner 	ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
11966c7ceb4SThomas Gleixner 	if (!ph)
12066c7ceb4SThomas Gleixner 		return -ENOMEM;
12166c7ceb4SThomas Gleixner 	ps = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
12266c7ceb4SThomas Gleixner 	if (!ps) {
12366c7ceb4SThomas Gleixner 		__free_pages(ph, THREAD_SIZE_ORDER);
12466c7ceb4SThomas Gleixner 		return -ENOMEM;
12566c7ceb4SThomas Gleixner 	}
1269a163ed8SThomas Gleixner 
127*d7b6d709SThomas Gleixner 	per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = page_address(ph);
128*d7b6d709SThomas Gleixner 	per_cpu(pcpu_hot.softirq_stack_ptr, cpu) = page_address(ps);
12966c7ceb4SThomas Gleixner 	return 0;
1309a163ed8SThomas Gleixner }
1319a163ed8SThomas Gleixner 
1328cbb2b50SSebastian Andrzej Siewior #ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
/*
 * Run __do_softirq() on this CPU's dedicated softirq stack rather
 * than the stack we were interrupted on.
 */
void do_softirq_own_stack(void)
{
	struct irq_stack *irqstk;
	u32 *isp, *prev_esp;

	irqstk = __this_cpu_read(pcpu_hot.softirq_stack_ptr);

	/* build the stack frame on the softirq stack */
	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

	/* Push the previous esp onto the stack */
	prev_esp = (u32 *)irqstk;
	*prev_esp = current_stack_pointer;

	call_on_stack(__do_softirq, isp);
}
149441e9036SThomas Gleixner #endif
150403d8efcSThomas Gleixner 
/*
 * Dispatch one interrupt.  When the interrupt arrived from kernel mode
 * we try to switch to the per-CPU hardirq stack first; if that is not
 * possible (user-mode entry, or already nested on the irq stack), the
 * handler runs on the current stack and any detected stack overflow is
 * reported from here instead.
 */
void __handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
	int overflow = check_stack_overflow();

	/* Handler already ran on the irq stack — nothing left to do. */
	if (!user_mode(regs) && execute_on_irq_stack(overflow, desc))
		return;

	if (unlikely(overflow))
		print_stack_overflow();
	generic_handle_irq_desc(desc);
}
161