/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SYNC_CORE_H
#define _ASM_X86_SYNC_CORE_H

#include <linux/preempt.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>

#ifdef CONFIG_X86_32
static inline void iret_to_self(void)
{
	asm volatile (
		"pushfl\n\t"		/* EFLAGS */
		"pushl %%cs\n\t"	/* CS */
		"pushl $1f\n\t"		/* EIP: label 1 below */
		"iret\n\t"		/* pops EIP, CS, EFLAGS; serializing */
		"1:"
		: ASM_CALL_CONSTRAINT : : "memory");
}
#else
static inline void iret_to_self(void)
{
	unsigned int tmp;

	asm volatile (
		"mov %%ss, %0\n\t"
		"pushq %q0\n\t"		/* SS */
		"pushq %%rsp\n\t"	/* RSP (currently original RSP - 8)... */
		"addq $8, (%%rsp)\n\t"	/* ...fixed up to the original RSP */
		"pushfq\n\t"		/* RFLAGS */
		"mov %%cs, %0\n\t"
		"pushq %q0\n\t"		/* CS */
		"pushq $1f\n\t"		/* RIP: label 1 below */
		"iretq\n\t"		/* consumes the 5-word frame; serializing */
		"1:"
		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
}
#endif /* CONFIG_X86_32 */

/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 *  a) Text was modified using one virtual address and is about to be executed
 *     from the same physical page at a different virtual address.
 *
 *  b) Text was modified on a different CPU, may subsequently be
 *     executed on this CPU, and you want to make sure the new version
 *     gets executed.  This generally means you're calling this from an
 *     IPI handler (see the sketch below sync_core()).
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 */
static inline void sync_core(void)
{
	/*
	 * There are quite a few ways to do this.  IRET-to-self is nice
	 * because it works on every CPU, at any CPL (so it's compatible
	 * with paravirtualization), and it never exits to a hypervisor.
	 * The only downsides are that it's a bit slow (it seems to be
	 * a bit more than 2x slower than the fastest options) and that
	 * it unmasks NMIs.  The "push %cs" is needed because, in
	 * paravirtual environments, __KERNEL_CS may not be a valid CS
	 * value when we do IRET directly.
	 *
	 * In case NMI unmasking or performance ever becomes a problem,
	 * the next best option appears to be MOV-to-CR2 and an
	 * unconditional jump.  That sequence also works on all CPUs,
	 * but it will fault at CPL3 (i.e. Xen PV).
	 *
	 * CPUID is the conventional way, but it's nasty: it doesn't
	 * exist on some 486-like CPUs, and it usually exits to a
	 * hypervisor.
	 *
	 * Like all of Linux's memory ordering operations, this is a
	 * compiler barrier as well.
	 */
	iret_to_self();
}

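/*
 * Illustration only, not part of the original header: a minimal sketch
 * of case (b) above, in the spirit of the text-patching sync in
 * arch/x86/kernel/alternatives.c.  After modifying kernel text, the
 * patching CPU kicks every CPU with an IPI so that each one executes
 * sync_core() before it can run the stale instruction bytes.  Needs
 * <linux/smp.h> for on_each_cpu(); the example_* names are
 * hypothetical.
 */
static void example_do_sync_core(void *info)
{
	sync_core();	/* runs on every CPU in IPI context: case (b) */
}

static inline void example_text_patch_sync(void)
{
	/* wait=1: don't return until every CPU has serialized. */
	on_each_cpu(example_do_sync_core, NULL, 1);
}
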
/*
 * Ensure that a core serializing instruction is issued before returning
 * to user-mode. x86 implements return to user-space through sysexit,
 * sysretl, and sysretq, which are not core serializing.
 */
static inline void sync_core_before_usermode(void)
{
	/* With PTI, we unconditionally serialize before running user code. */
	if (static_cpu_has(X86_FEATURE_PTI))
		return;
	/*
	 * Return from interrupt and NMI is done through iret, which is core
	 * serializing.
	 */
	if (in_irq() || in_nmi())
		return;
	sync_core();
}
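
/*
 * Illustration only: a sketch in the spirit of the IPI handler used by
 * membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE) in
 * kernel/sched/membarrier.c.  Because it runs in IPI context, the
 * in_irq() check above makes this a no-op, which is correct: the
 * return from the interrupt itself goes through IRET and is therefore
 * core serializing.  The example_* name is hypothetical.
 */
static void example_membarrier_ipi(void *info)
{
	/* Serialize before the interrupted task returns to usermode. */
	sync_core_before_usermode();
}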

#endif /* _ASM_X86_SYNC_CORE_H */