/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
#define RSB_FILL_LOOPS		16	/* To avoid underflow */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version: two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
	mov	$(nr/2), reg;			\
771:						\
	call	772f;				\
773:	/* speculation trap */			\
	pause;					\
	lfence;					\
	jmp	773b;				\
772:						\
	call	774f;				\
775:	/* speculation trap */			\
	pause;					\
	lfence;					\
	jmp	775b;				\
774:						\
	dec	reg;				\
	jnz	771b;				\
	add	$(BITS_PER_LONG/8) * nr, sp;
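
/*
 * Worked example (illustrative): with nr == RSB_CLEAR_LOOPS (32), the
 * counter in 'reg' starts at 16 and each iteration executes two calls,
 * so 32 return addresses are pushed in total. None of them is consumed
 * by a real 'ret', so the final 'add' rebalances the stack by
 * (BITS_PER_LONG/8) * 32 bytes, i.e. 256 bytes on 64-bit.
 */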

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before a retpoline alternative.  It tells
 * objtool where the retpolines are so that it can make sense of the control
 * flow by just reading the original instruction(s) and ignoring the
 * alternatives.
 */
.macro ANNOTATE_NOSPEC_ALTERNATIVE
	.Lannotate_\@:
	.pushsection .discard.nospec
	.long .Lannotate_\@ - .
	.popsection
.endm
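
/*
 * Illustrative placement (sketch, not a definition from this file): the
 * annotation goes directly before the ALTERNATIVE it describes, e.g.:
 *
 *	ANNOTATE_NOSPEC_ALTERNATIVE
 *	ALTERNATIVE "jmp .Lskip", __stringify(...), X86_FEATURE_RETPOLINE
 *
 * JMP_NOSPEC/CALL_NOSPEC below follow exactly this pattern.
 */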

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
	.Lannotate_\@:
	.pushsection .discard.retpoline_safe
	_ASM_PTR .Lannotate_\@
	.popsection
.endm
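
/*
 * Illustrative use (sketch): a deliberately unmitigated indirect call
 * that objtool should not warn about in retpoline builds would be
 * written as:
 *
 *	ANNOTATE_RETPOLINE_SAFE
 *	call	*%rax
 */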

/*
 * These are the bare retpoline primitives for indirect jmp and call.
 * Do not use these directly; they only exist to make the ALTERNATIVE
 * invocation below less ugly.
 */
.macro RETPOLINE_JMP reg:req
	call	.Ldo_rop_\@
.Lspec_trap_\@:
	pause
	lfence
	jmp	.Lspec_trap_\@
.Ldo_rop_\@:
	mov	\reg, (%_ASM_SP)
	ret
.endm
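
/*
 * How this redirects speculation (informal walk-through): the 'call'
 * pushes the address of .Lspec_trap_\@ and jumps to .Ldo_rop_\@, which
 * overwrites that return address on the stack with the real target in
 * \reg. The 'ret' then architecturally jumps to the target, while any
 * speculation based on the stale RSB entry lands in the pause/lfence
 * trap loop instead of an attacker-trained indirect branch predictor.
 */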

/*
 * This is a wrapper around RETPOLINE_JMP so the called function in reg
 * returns to the instruction after the macro.
 */
.macro RETPOLINE_CALL reg:req
	jmp	.Ldo_call_\@
.Ldo_retpoline_jmp_\@:
	RETPOLINE_JMP \reg
.Ldo_call_\@:
	call	.Ldo_retpoline_jmp_\@
.endm
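
/*
 * The forward 'jmp' exists only so the trailing 'call' can push the
 * address of the instruction following this macro as the return
 * address; the call target is the RETPOLINE_JMP above, which then
 * dispatches to \reg as described there.
 */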

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg),	\
		__stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE,	\
		__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	jmp	*\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg),	\
		__stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
		__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	call	*\reg
#endif
.endm
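
/*
 * Usage sketch (illustrative): an .S file replaces "call *%rdi" or
 * "jmp *%rdi" with
 *
 *	CALL_NOSPEC %rdi
 *	JMP_NOSPEC %rdi
 *
 * so that the appropriate variant is patched in at boot based on the
 * CPU feature flags.
 */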

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE "jmp .Lskip_rsb_\@",				\
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))	\
		\ftr
.Lskip_rsb_\@:
#endif
.endm
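
/*
 * Illustrative use (sketch; \reg is clobbered as the loop counter), as
 * a context-switch site might do:
 *
 *	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 */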

#else /* __ASSEMBLY__ */

#define ANNOTATE_NOSPEC_ALTERNATIVE				\
	"999:\n\t"						\
	".pushsection .discard.nospec\n\t"			\
	".long 999b - .\n\t"					\
	".popsection\n\t"

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	_ASM_PTR " 999b\n\t"					\
	".popsection\n\t"

#if defined(CONFIG_X86_64) && defined(RETPOLINE)

/*
 * Since the inline asm uses the %V modifier which is only in newer GCC,
 * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
 */
# define CALL_NOSPEC						\
	ANNOTATE_NOSPEC_ALTERNATIVE				\
	ALTERNATIVE(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE)
# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"       lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	addl   $4, %%esp;\n"				\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
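
/*
 * Illustrative C-side use (sketch): an indirect call through a function
 * pointer is written once and works with all three CALL_NOSPEC variants
 * above. Outputs and clobbers depend on the actual call site; here 'fn'
 * is a hypothetical void (*)(void):
 *
 *	asm volatile(CALL_NOSPEC
 *		     : ASM_CALL_CONSTRAINT
 *		     : THUNK_TARGET(fn)
 *		     : "memory");
 */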

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE_MINIMAL,
	SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
	SPECTRE_V2_RETPOLINE_GENERIC,
	SPECTRE_V2_RETPOLINE_AMD,
	SPECTRE_V2_IBRS_ENHANCED,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

/*
 * On VMEXIT we must ensure that no RSB predictions learned in the guest
 * can be followed in the host, by overwriting the RSB completely. Both
 * retpoline and IBRS mitigations for Spectre v2 need this; only on future
 * CPUs with IBRS_ALL *might* it be avoided.
 */
static inline void vmexit_fill_RSB(void)
{
#ifdef CONFIG_RETPOLINE
	unsigned long loops;

	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
		      ALTERNATIVE("jmp 910f",
				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
				  X86_FEATURE_RETPOLINE)
		      "910:"
		      : "=r" (loops), ASM_CALL_CONSTRAINT
		      : : "memory" );
#endif
}
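
/*
 * Illustrative call site (sketch): a hypervisor exit handler stuffs the
 * RSB before executing any host 'ret', roughly:
 *
 *	... hardware VMEXIT lands in host code ...
 *	vmexit_fill_RSB();
 *	... normal host processing, returns are now safe ...
 */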

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
	u64 val = PRED_CMD_IBPB;

	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	u64 val = x86_spec_ctrl_base;					\
									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)
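
/*
 * Illustrative pairing (sketch): wrap the actual firmware call, e.g. an
 * EFI runtime service, so IBRS is set only for its duration:
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = efi_call_runtime(...);		(hypothetical call site)
 *	firmware_restrict_branch_speculation_end();
 */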

#endif /* __ASSEMBLY__ */

/*
 * Below is used in the eBPF JIT compiler and emits the byte sequence
 * for the following assembly:
 *
 * With retpolines configured:
 *
 *    callq do_rop
 *  spec_trap:
 *    pause
 *    lfence
 *    jmp spec_trap
 *  do_rop:
 *    mov %rax,(%rsp) for x86_64
 *    mov %edx,(%esp) for x86_32
 *    retq
 *
 * Without retpolines configured:
 *
 *    jmp *%rax for x86_64
 *    jmp *%edx for x86_32
 */
#ifdef CONFIG_RETPOLINE
# ifdef CONFIG_X86_64
#  define RETPOLINE_RAX_BPF_JIT_SIZE	17
#  define RETPOLINE_RAX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* callq do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */	\
	EMIT1(0xC3);             /* retq */			\
} while (0)
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* call do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT3(0x89, 0x14, 0x24); /* mov %edx,(%esp) */		\
	EMIT1(0xC3);             /* ret */			\
} while (0)
# endif
#else /* !CONFIG_RETPOLINE */
# ifdef CONFIG_X86_64
#  define RETPOLINE_RAX_BPF_JIT_SIZE	2
#  define RETPOLINE_RAX_BPF_JIT()				\
	EMIT2(0xFF, 0xE0);       /* jmp *%rax */
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT()				\
	EMIT2(0xFF, 0xE2)        /* jmp *%edx */
# endif
#endif
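
/*
 * Byte-count check (worked example): the 'callq' is 5 bytes with a
 * rel32 of 7, which skips exactly the pause (2) + lfence (3) + jmp (2)
 * trap and lands on do_rop. Total x86_64 size:
 * 5 + 2 + 3 + 2 + 4 + 1 = 17 bytes, matching
 * RETPOLINE_RAX_BPF_JIT_SIZE. 'jmp spec_trap' is EB F9, a rel8 of -7
 * pointing back at the pause.
 */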

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */