/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before a retpoline alternative.  It tells
 * objtool where the retpolines are so that it can make sense of the control
 * flow by just reading the original instruction(s) and ignoring the
 * alternatives.
 */
.macro ANNOTATE_NOSPEC_ALTERNATIVE
	.Lannotate_\@:
	.pushsection .discard.nospec
	.long .Lannotate_\@ - .
	.popsection
.endm

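/*
 * A usage sketch only (the real pairings are JMP_NOSPEC/CALL_NOSPEC
 * below): the annotation directly precedes the ALTERNATIVE that carries
 * the retpoline, e.g.
 *
 *	ANNOTATE_NOSPEC_ALTERNATIVE
 *	ALTERNATIVE_2 __stringify(jmp *\reg), ...
 */
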
/*
 * These are the bare retpoline primitives for indirect jmp and call.
 * Do not use these directly; they only exist to make the ALTERNATIVE
 * invocation below less ugly.
 */
.macro RETPOLINE_JMP reg:req
	call	.Ldo_rop_\@		/* push a return address the CPU will predict */
.Lspec_trap_\@:
	pause				/* speculation lands here and spins... */
	lfence
	jmp	.Lspec_trap_\@		/* ...until the ret below retires */
.Ldo_rop_\@:
	mov	\reg, (%_ASM_SP)	/* overwrite the return address with the target */
	ret				/* architecturally jump to \reg */
.endm

/*
 * This is a wrapper around RETPOLINE_JMP so the called function in reg
 * returns to the instruction after the macro.
 */
.macro RETPOLINE_CALL reg:req
	jmp	.Ldo_call_\@
.Ldo_retpoline_jmp_\@:
	RETPOLINE_JMP \reg
.Ldo_call_\@:
	call	.Ldo_retpoline_jmp_\@	/* pushes the post-macro return address */
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(jmp *\reg),				\
		__stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE,	\
		__stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	jmp	*\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(call *\reg),				\
		__stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
		__stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	call	*\reg
#endif
.endm

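/*
 * Example call sites, a sketch only (the target symbol is hypothetical;
 * real users pass whatever register already holds the function pointer):
 *
 *	mov	my_func_ptr, %_ASM_AX
 *	CALL_NOSPEC %_ASM_AX		# instead of: call *%_ASM_AX
 *
 *	JMP_NOSPEC %_ASM_AX		# instead of: jmp *%_ASM_AX
 */
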
/* This clobbers the BX register (the out-of-line __clear_rsb helper uses it) */
.macro FILL_RETURN_BUFFER nr:req ftr:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE "", "call __clear_rsb", \ftr
#endif
.endm

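/*
 * Hypothetical invocation (the count and feature flag are illustrative
 * only).  Note that in this revision \nr is unused: the RSB stuffing is
 * done entirely by the out-of-line __clear_rsb helper.
 *
 *	FILL_RETURN_BUFFER 16, X86_FEATURE_RETPOLINE
 */
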
#else /* __ASSEMBLY__ */

#define ANNOTATE_NOSPEC_ALTERNATIVE				\
	"999:\n\t"						\
	".pushsection .discard.nospec\n\t"			\
	".long 999b - .\n\t"					\
	".popsection\n\t"

#if defined(CONFIG_X86_64) && defined(RETPOLINE)

/*
 * Since the inline asm uses the %V modifier which only newer GCC
 * versions support, the 64-bit one is dependent on RETPOLINE not
 * CONFIG_RETPOLINE.
 */
# define CALL_NOSPEC						\
	ANNOTATE_NOSPEC_ALTERNATIVE				\
	ALTERNATIVE(						\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE)
# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n",	\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"	lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	addl   $4, %%esp;\n"				\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif

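/*
 * A hedged usage sketch (the function pointer and the clobber list are
 * hypothetical; a real call site must also name every register the
 * callee may clobber):
 *
 *	void (*fn)(void) = my_callback;
 *
 *	asm volatile(CALL_NOSPEC
 *		     : : THUNK_TARGET(fn)
 *		     : "memory");
 */
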
/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE_MINIMAL,
	SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
	SPECTRE_V2_RETPOLINE_GENERIC,
	SPECTRE_V2_RETPOLINE_AMD,
	SPECTRE_V2_IBRS,
};

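/*
 * These are chosen by the spectre_v2= boot parameter handling in
 * arch/x86/kernel/cpu/bugs.c; the MINIMAL variants cover kernels whose
 * compiler lacks retpoline support, so only the assembly thunks above
 * provide protection.  (This mapping is a reading of the mitigation
 * code, not something this header itself spells out.)
 */
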
extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

/*
 * On VMEXIT we must ensure that no RSB predictions learned in the guest
 * can be followed in the host, by overwriting the RSB completely. Both
 * retpoline and IBRS mitigations for Spectre v2 need this; only on future
 * CPUs with IBRS_ALL *might* it be avoided.
 */
static inline void vmexit_fill_RSB(void)
{
#ifdef CONFIG_RETPOLINE
	alternative_input("",
			  "call __fill_rsb",
			  X86_FEATURE_RETPOLINE,
			  ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory"));
#endif
}
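
/*
 * A call-site sketch (hypothetical placement): the hypervisor's exit
 * path would invoke this immediately after VMLAUNCH/VMRESUME returns,
 * before any ret or indirect branch can consume guest RSB entries:
 *
 *	vmexit_fill_RSB();
 */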

/* Issue an IBPB: flush all earlier indirect-branch-prediction state */
static inline void indirect_branch_prediction_barrier(void)
{
	asm volatile(ALTERNATIVE("",
				 "movl %[msr], %%ecx\n\t"
				 "movl %[val], %%eax\n\t"
				 "movl $0, %%edx\n\t"
				 "wrmsr",
				 X86_FEATURE_USE_IBPB)
		     : : [msr] "i" (MSR_IA32_PRED_CMD),
			 [val] "i" (PRED_CMD_IBPB)
		     : "eax", "ecx", "edx", "memory");
}
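
/*
 * Usage sketch (hypothetical call site): run the barrier when switching
 * to a context that must not be steered by branches the previous one
 * trained, e.g. on a context switch between mutually distrusting tasks:
 *
 *	indirect_branch_prediction_barrier();
 */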

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */