/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SYNC_CORE_H
#define _ASM_X86_SYNC_CORE_H

#include <linux/preempt.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>

#ifdef CONFIG_X86_32
/*
 * IRET to the very next instruction: push an EFLAGS/CS/EIP frame that
 * points at the local label below and execute IRET, which is
 * architecturally serializing.
 */
static inline void iret_to_self(void)
{
	asm volatile (
		"pushfl\n\t"
		"pushl %%cs\n\t"
		"pushl $1f\n\t"
		"iret\n\t"
		"1:"
		: ASM_CALL_CONSTRAINT : : "memory");
}
#else
/*
 * On 64-bit, IRET always pops a full five-slot SS/RSP/RFLAGS/CS/RIP
 * frame, so build one by hand that resumes execution at the local
 * label below.
 */
static inline void iret_to_self(void)
{
	unsigned int tmp;

	asm volatile (
		"mov %%ss, %0\n\t"
		"pushq %q0\n\t"			/* SS */
		"pushq %%rsp\n\t"
		"addq $8, (%%rsp)\n\t"		/* RSP, fixed up to its pre-frame value */
		"pushfq\n\t"			/* RFLAGS */
		"mov %%cs, %0\n\t"
		"pushq %q0\n\t"			/* CS */
		"pushq $1f\n\t"			/* RIP = local label 1 */
		"iretq\n\t"
		"1:"
		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
}
#endif /* CONFIG_X86_32 */

/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 *  a) Text was modified using one virtual address and is about to be executed
 *     from the same physical page at a different virtual address.
 *
 *  b) Text was modified on a different CPU, may subsequently be
 *     executed on this CPU, and you want to make sure the new version
 *     gets executed.  This generally means you're calling this in an IPI
 *     (see the illustrative sketch below sync_core()).
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 */
static inline void sync_core(void)
{
	/*
	 * There are quite a few ways to do this.  IRET-to-self is nice
	 * because it works on every CPU, at any CPL (so it's compatible
	 * with paravirtualization), and it never exits to a hypervisor.
	 * The only down sides are that it's a bit slow (it seems to be
	 * a bit more than 2x slower than the fastest options) and that
	 * it unmasks NMIs.  The "push %cs" is needed because, in
	 * paravirtual environments, __KERNEL_CS may not be a valid CS
	 * value when we do IRET directly.
	 *
	 * In case NMI unmasking or performance ever becomes a problem,
	 * the next best option appears to be MOV-to-CR2 and an
	 * unconditional jump.  That sequence also works on all CPUs,
	 * but it will fault at CPL3 (i.e. Xen PV).
	 *
	 * CPUID is the conventional way, but it's nasty: it doesn't
	 * exist on some 486-like CPUs, and it usually exits to a
	 * hypervisor.
	 *
	 * Like all of Linux's memory ordering operations, this is a
	 * compiler barrier as well.
	 */
	iret_to_self();
}
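
/*
 * Illustrative sketch only (not part of this header), covering case (b)
 * above: after patching kernel text on one CPU, an IPI runs sync_core()
 * on every CPU so none of them keeps executing a stale prefetched copy
 * of the old instructions.  The helper names here are hypothetical; the
 * pattern mirrors how the kernel's text-patching code uses on_each_cpu().
 */
#if 0
#include <linux/smp.h>

static void example_do_sync_core(void *unused)
{
	sync_core();
}

static void example_after_text_patch(void)
{
	/* ... kernel text has just been modified ... */

	/* Run sync_core() on every online CPU and wait for completion. */
	on_each_cpu(example_do_sync_core, NULL, 1);
}
#endif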
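
/*
 * For reference, a minimal sketch (not used here) of the "conventional"
 * CPUID-based serialization mentioned in the comment above.  Unlike
 * IRET-to-self it typically exits to a hypervisor and is unusable on
 * CPUID-less 486-class parts; the helper name is hypothetical.
 */
#if 0
static inline void example_cpuid_serialize(void)
{
	unsigned int eax = 0, ebx, ecx = 0, edx;

	/* CPUID is architecturally serializing on CPUs that have it. */
	asm volatile("cpuid"
		     : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
		     : "0" (eax), "2" (ecx)
		     : "memory");
}
#endif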

/*
 * Ensure that a core serializing instruction is issued before returning
 * to user-mode. x86 implements return to user-space through sysexit,
 * sysretl, and sysretq, which are not core serializing.
 */
static inline void sync_core_before_usermode(void)
{
	/* With PTI, we unconditionally serialize before running user code. */
	if (static_cpu_has(X86_FEATURE_PTI))
		return;
	/*
	 * Return from interrupt and NMI is done through iret, which is core
	 * serializing.
	 */
	if (in_irq() || in_nmi())
		return;
	sync_core();
}
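
/*
 * Rough usage sketch (illustrative only, not part of this header): the
 * main consumer is membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE),
 * where generic code is expected to call sync_core_before_usermode() on
 * the return-to-user path for any mm that has requested core-serializing
 * membarriers.  The helper name below is hypothetical; the flag and field
 * follow the generic membarrier code.
 */
#if 0
static inline void example_membarrier_sync_core(struct mm_struct *mm)
{
	if (atomic_read(&mm->membarrier_state) &
	    MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)
		sync_core_before_usermode();
}
#endif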

#endif /* _ASM_X86_SYNC_CORE_H */