xref: /openbmc/linux/arch/x86/include/asm/sync_core.h (revision a493d1ca)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SYNC_CORE_H
#define _ASM_X86_SYNC_CORE_H

#include <linux/preempt.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

#ifdef CONFIG_X86_32
static inline void iret_to_self(void)
{
	/* IRET is serializing: build a frame (EFLAGS, CS, EIP) and IRET to the next insn. */
	asm volatile (
		"pushfl\n\t"
		"pushl %%cs\n\t"
		"pushl $1f\n\t"
		"iret\n\t"
		"1:"
		: ASM_CALL_CONSTRAINT : : "memory");
}
#else
static inline void iret_to_self(void)
{
	unsigned int tmp;

	/*
	 * Build the 64-bit IRET frame (SS, RSP, RFLAGS, CS, RIP) by hand
	 * and IRET to the label below.  The saved RSP slot is fixed up by
	 * 8 so that IRET restores the stack pointer to the value it had
	 * before this sequence started.
	 */
	asm volatile (
		"mov %%ss, %0\n\t"
		"pushq %q0\n\t"
		"pushq %%rsp\n\t"
		"addq $8, (%%rsp)\n\t"
		"pushfq\n\t"
		"mov %%cs, %0\n\t"
		"pushq %q0\n\t"
		"pushq $1f\n\t"
		"iretq\n\t"
		"1:"
		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
}
#endif /* CONFIG_X86_32 */
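
/*
 * Illustrative sketch (not part of this header): the IRET-to-self trick
 * also works from ring 3, which is why the comment below calls it
 * paravirt-friendly.  The stand-alone user-space program here is an
 * assumption-laden demo, not kernel code: it replaces "pushq $1f" with a
 * RIP-relative LEA so it links as a PIE binary, drops ASM_CALL_CONSTRAINT,
 * and should be built with -mno-red-zone so the pushes cannot clobber
 * red-zone data.
 */
#if 0
#include <stdio.h>

static void iret_to_self_demo(void)
{
	unsigned int tmp;

	asm volatile (
		"mov %%ss, %0\n\t"		/* SS */
		"pushq %q0\n\t"
		"pushq %%rsp\n\t"		/* RSP (fixed up by the addq) */
		"addq $8, (%%rsp)\n\t"
		"pushfq\n\t"			/* RFLAGS */
		"mov %%cs, %0\n\t"		/* CS */
		"pushq %q0\n\t"
		"leaq 1f(%%rip), %q0\n\t"	/* RIP = label 1 below */
		"pushq %q0\n\t"
		"iretq\n\t"
		"1:"
		: "=&r" (tmp) : : "cc", "memory");
}

int main(void)
{
	iret_to_self_demo();		/* serializes, then falls through to 1: */
	puts("survived IRET-to-self");
	return 0;
}
#endif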

/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 *  a) Text was modified using one virtual address and is about to be executed
 *     from the same physical page at a different virtual address.
 *
 *  b) Text was modified on a different CPU, may subsequently be
 *     executed on this CPU, and you want to make sure the new version
 *     gets executed.  This generally means you're calling this in an IPI.
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 *
 * Like all of Linux's memory ordering operations, this is a
 * compiler barrier as well.
 */
static inline void sync_core(void)
{
	/*
	 * The SERIALIZE instruction is the most straightforward way to
	 * do this, but it is not universally available.
	 */
	if (static_cpu_has(X86_FEATURE_SERIALIZE)) {
		serialize();
		return;
	}

	/*
	 * For all other processors, there are quite a few ways to do this.
	 * IRET-to-self is nice because it works on every CPU, at any CPL
	 * (so it's compatible with paravirtualization), and it never exits
	 * to a hypervisor.  The only downsides are that it's a bit slow
	 * (it seems to be a bit more than 2x slower than the fastest
	 * options) and that it unmasks NMIs.  The "push %cs" is needed,
	 * because in paravirtual environments __KERNEL_CS may not be a
	 * valid CS value when we do IRET directly.
	 *
	 * In case NMI unmasking or performance ever becomes a problem,
	 * the next best option appears to be MOV-to-CR2 and an
	 * unconditional jump.  That sequence also works on all CPUs,
	 * but it will fault at CPL3 (i.e. Xen PV).
	 *
	 * CPUID is the conventional way, but it's nasty: it doesn't
	 * exist on some 486-like CPUs, and it usually exits to a
	 * hypervisor.
	 */
	iret_to_self();
}

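/*
 * Illustrative sketch (not part of this header): case (b) above in
 * practice.  After patching kernel text, make every CPU execute
 * sync_core() via IPI before the new instructions can run.  The helper
 * names here are hypothetical; the real text-poking code in
 * arch/x86/kernel/alternative.c has its own machinery built on the same
 * idea.
 */
#if 0
#include <linux/smp.h>

static void example_do_sync_core(void *info)
{
	sync_core();
}

static void example_sync_all_cpus_after_text_poke(void)
{
	/* Run example_do_sync_core() on every CPU and wait for completion. */
	on_each_cpu(example_do_sync_core, NULL, 1);
}
#endif
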
/*
 * Ensure that a core serializing instruction is issued before returning
 * to user-mode. x86 implements return to user-space through sysexit,
 * sysretl, and sysretq, which are not core serializing.
 */
static inline void sync_core_before_usermode(void)
{
	/* With PTI, we unconditionally serialize before running user code. */
	if (static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * Even if we're in an interrupt, we might reschedule before returning,
	 * in which case we could switch to a different thread in the same mm
	 * and return using SYSRET or SYSEXIT.  Instead of trying to keep
	 * track of our need to sync the core, just sync right away.
	 */
	sync_core();
}

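/*
 * Illustrative sketch (not part of this header): the intended caller is
 * the return-to-user path, e.g. on behalf of
 * membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE).  The flag name
 * and the exit hook below are a hypothetical simplification of that idea,
 * not the actual kernel implementation.
 */
#if 0
#include <linux/sched.h>

static void example_exit_to_user_mode_prepare(struct task_struct *tsk)
{
	/*
	 * If something (e.g. a membarrier SYNC_CORE request) flagged this
	 * task as needing a core-serializing instruction before it runs
	 * user code again, issue one now; SYSRET/SYSEXIT won't do it.
	 * TIF_EXAMPLE_SYNC_CORE is a made-up flag for illustration.
	 */
	if (test_and_clear_tsk_thread_flag(tsk, TIF_EXAMPLE_SYNC_CORE))
		sync_core_before_usermode();
}
#endif
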
#endif /* _ASM_X86_SYNC_CORE_H */