/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SYNC_CORE_H
#define _ASM_X86_SYNC_CORE_H

#include <linux/preempt.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>

/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 *  a) Text was modified using one virtual address and is about to be executed
 *     from the same physical page at a different virtual address.
 *
 *  b) Text was modified on a different CPU, may subsequently be
 *     executed on this CPU, and you want to make sure the new version
 *     gets executed.  This generally means you're calling this in an IPI;
 *     a cross-CPU sketch follows sync_core() below.
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 */
static inline void sync_core(void)
{
	/*
	 * There are quite a few ways to do this.  IRET-to-self is nice
	 * because it works on every CPU, at any CPL (so it's compatible
	 * with paravirtualization), and it never exits to a hypervisor.
	 * The only down sides are that it's a bit slow (it seems to be
	 * a bit more than 2x slower than the fastest options) and that
	 * it unmasks NMIs.  The "push %cs" is needed because, in
	 * paravirtual environments, __KERNEL_CS may not be a valid CS
	 * value when we do IRET directly.
	 *
	 * In case NMI unmasking or performance ever becomes a problem,
	 * the next best option appears to be MOV-to-CR2 and an
	 * unconditional jump.  That sequence also works on all CPUs,
	 * but it will fault at CPL3 (i.e. Xen PV).
	 *
	 * CPUID is the conventional way, but it's nasty: it doesn't
	 * exist on some 486-like CPUs, and it usually exits to a
	 * hypervisor.
	 *
	 * Like all of Linux's memory ordering operations, this is a
	 * compiler barrier as well.
	 */
#ifdef CONFIG_X86_32
	asm volatile (
		"pushfl\n\t"		/* EFLAGS */
		"pushl %%cs\n\t"	/* CS */
		"pushl $1f\n\t"		/* EIP: the label below */
		"iret\n\t"		/* serializing IRET-to-self */
		"1:"
		: ASM_CALL_CONSTRAINT : : "memory");
#else
	unsigned int tmp;

	asm volatile (
		"mov %%ss, %0\n\t"	/* grab SS */
		"pushq %q0\n\t"		/* SS */
		"pushq %%rsp\n\t"	/* RSP (currently points at the SS slot) */
		"addq $8, (%%rsp)\n\t"	/* fix the saved RSP to its pre-frame value */
		"pushfq\n\t"		/* RFLAGS */
		"mov %%cs, %0\n\t"	/* grab CS */
		"pushq %q0\n\t"		/* CS */
		"pushq $1f\n\t"		/* RIP: the label below */
		"iretq\n\t"		/* serializing IRET-to-self */
		"1:"
		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
#endif
}
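
/*
 * Illustrative sketch only, not part of the original header: how case (b)
 * above is typically handled.  After new instruction bytes have been
 * written on one CPU (e.g. via the text_poke() machinery), an IPI is
 * broadcast so that every CPU executes sync_core() before it can fetch
 * the modified text.  The example_* names are hypothetical; a real caller
 * would include <linux/smp.h> for on_each_cpu().
 *
 *	static void example_do_sync_core(void *info)
 *	{
 *		sync_core();
 *	}
 *
 *	static void example_patch_text_and_sync(void)
 *	{
 *		... write the new instruction bytes ...
 *
 *		on_each_cpu(example_do_sync_core, NULL, 1);
 *	}
 *
 * Passing wait == 1 makes the caller block until every CPU has run the
 * callback, so the new text is guaranteed to be in effect on return.
 */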

/*
 * Ensure that a core serializing instruction is issued before returning
 * to user-mode. x86 implements return to user-space through sysexit,
 * sysretl, and sysretq, which are not core serializing.
 */
static inline void sync_core_before_usermode(void)
{
	/* With PTI, we unconditionally serialize before running user code. */
	if (static_cpu_has(X86_FEATURE_PTI))
		return;
	/*
	 * Return from interrupt and NMI is done through iret, which is core
	 * serializing.
	 */
	if (in_irq() || in_nmi())
		return;
	sync_core();
}
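
/*
 * Illustrative sketch only, not part of the original header: a
 * hypothetical spot on the exit-to-usermode path that needs the helper
 * above.  SYSRET/SYSEXIT do not serialize, so a caller that must make
 * freshly written text visible before the task resumes in user space
 * (a membarrier-style guarantee) calls sync_core_before_usermode(),
 * which skips the expensive IRET-to-self when PTI or an IRET return
 * already serializes.  The function name below is made up.
 *
 *	static void example_prepare_return_to_user(void)
 *	{
 *		... remaining exit work ...
 *
 *		sync_core_before_usermode();
 *	}
 */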

#endif /* _ASM_X86_SYNC_CORE_H */