xref: /openbmc/linux/arch/x86/include/asm/switch_to.h (revision 65417d9f)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_X86_SWITCH_TO_H
3 #define _ASM_X86_SWITCH_TO_H
4 
5 #include <linux/sched/task_stack.h>
6 
struct task_struct; /* one of the stranger aspects of C forward declarations */

/*
 * Assembly context-switch primitive.  Saves @prev's register state in an
 * inactive_task_frame on its stack and restores @next's (the frame layout
 * is defined below; its field order must match the asm).  Its return value
 * ends up in the 'last' argument of switch_to().
 */
struct task_struct *__switch_to_asm(struct task_struct *prev,
				    struct task_struct *next);

/* C half of the context switch; the definition lives outside this header. */
__visible struct task_struct *__switch_to(struct task_struct *prev,
					  struct task_struct *next);
struct tss_struct;

/*
 * Extra per-switch work involving the TSS.  NOTE(review): the body is not
 * visible here — presumably the slow path (debug registers, I/O bitmap,
 * etc.); confirm against the definition before relying on this description.
 */
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss);
17 
/* This runs on the previous thread's stack. */
static inline void prepare_switch_to(struct task_struct *prev,
				     struct task_struct *next)
{
	/* @prev is unused here; kept for symmetry with switch_to(). */
#ifdef CONFIG_VMAP_STACK
	/*
	 * If we switch to a stack that has a top-level paging entry
	 * that is not present in the current mm, the resulting #PF
	 * will be promoted to a double-fault and we'll panic.  Probe
	 * the new stack now so that vmalloc_fault can fix up the page
	 * tables if needed.  This can only happen if we use a stack
	 * in vmap space.
	 *
	 * We assume that the stack is aligned so that it never spans
	 * more than one top-level paging entry.
	 *
	 * To minimize cache pollution, just follow the stack pointer.
	 */
	READ_ONCE(*(unsigned char *)next->thread.sp);
#endif
}
39 
/*
 * Asm entry point a newly forked task first runs at — presumably reached
 * via the ret_addr slot of its fork_frame (see below); confirm in entry asm.
 */
asmlinkage void ret_from_fork(void);
41 
/*
 * This is the structure pointed to by thread.sp for an inactive task.  The
 * order of the fields must match the code in __switch_to_asm().
 */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
	/* Callee-saved registers of the 64-bit SysV ABI (rbx/rbp below). */
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
#else
	/* 32-bit callee-saved registers (ebx/ebp below). */
	unsigned long si;
	unsigned long di;
#endif
	unsigned long bx;

	/*
	 * These two fields must be together.  They form a stack frame header,
	 * needed by get_frame_pointer().
	 */
	unsigned long bp;
	unsigned long ret_addr;	/* address __switch_to_asm() returns to */
};
65 
/*
 * Initial stack contents of a newly forked task: the switch frame above,
 * followed by the pt_regs the child starts with.
 */
struct fork_frame {
	struct inactive_task_frame frame;
	struct pt_regs regs;
};
70 
/*
 * Switch the CPU from @prev to @next.  prepare_switch_to() probes the new
 * stack first, then __switch_to_asm() performs the actual register/stack
 * switch.  Its return value is stored in @last: by the time @prev runs
 * again the switch may be returning from a different task, so the caller
 * is told which task it actually switched away from.
 */
#define switch_to(prev, next, last)					\
do {									\
	prepare_switch_to(prev, next);					\
									\
	((last) = __switch_to_asm((prev), (next)));			\
} while (0)
77 
78 #ifdef CONFIG_X86_32
/*
 * Keep MSR_IA32_SYSENTER_CS (and its cached copy in the per-CPU TSS, ss1)
 * in sync with @thread's sysenter_cs, skipping the MSR write when the
 * value is already loaded.
 */
static inline void refresh_sysenter_cs(struct thread_struct *thread)
{
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs))
		return;

	/* Update the cached copy first, then the MSR itself (high half 0). */
	this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs);
	wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
}
88 #endif
89 
/* Used when switching tasks and when entering/exiting vm86 mode. */
static inline void update_sp0(struct task_struct *task)
{
	/*
	 * Pick the entry-stack pointer for @task: 32-bit tracks it in
	 * thread.sp0, 64-bit simply uses the top of the task's stack.
	 */
#ifdef CONFIG_X86_32
	unsigned long stack_top = task->thread.sp0;
#else
	unsigned long stack_top = task_top_of_stack(task);
#endif

	load_sp0(stack_top);
}
99 
100 #endif /* _ASM_X86_SWITCH_TO_H */
101