/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
	mov	$(nr/2), reg;			\
771:						\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	772f;				\
773:	/* speculation trap */			\
	UNWIND_HINT_EMPTY;			\
	pause;					\
	lfence;					\
	jmp	773b;				\
772:						\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	774f;				\
775:	/* speculation trap */			\
	UNWIND_HINT_EMPTY;			\
	pause;					\
	lfence;					\
	jmp	775b;				\
774:						\
	add	$(BITS_PER_LONG/8) * 2, sp;	\
	dec	reg;				\
	jnz	771b;

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
	.Lannotate_\@:
	.pushsection .discard.retpoline_safe
	_ASM_PTR .Lannotate_\@
	.popsection
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
		      __stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_AMD
#else
	jmp	*%\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
		      __stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_AMD
#else
	call	*%\reg
#endif
.endm
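/*
 * Illustrative only: a sketch of how CALL_NOSPEC might be used from a .S
 * file. The pointer location and register choice below are made up for
 * the example:
 *
 *	movq	my_handler_ptr(%rip), %rax
 *	CALL_NOSPEC rax
 *
 * Depending on which alternative gets patched in, this ends up as a plain
 * "call *%rax", a call through __x86_indirect_thunk_rax, or
 * "lfence; call *%rax".
 */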
/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above by hand.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
	__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
.Lskip_rsb_\@:
#endif
.endm

#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	_ASM_PTR " 999b\n\t"					\
	".popsection\n\t"

#ifdef CONFIG_RETPOLINE
#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier, which is only available in newer GCC;
 * CONFIG_RETPOLINE guarantees such a compiler is in use.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_AMD)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:   call   903f;\n"					\
	"902:   pause;\n"					\
	"       lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:   lea    4(%%esp), %%esp;\n"			\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:   call   901b;\n",				\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_AMD)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
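/*
 * Illustrative only: a sketch of how CALL_NOSPEC and THUNK_TARGET() fit
 * together in inline asm from C. 'ret', 'fn' and 'arg', as well as the
 * extra operands and clobbers, are hypothetical; real callers supply
 * their own:
 *
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (ret)
 *		     : THUNK_TARGET(fn), "D" (arg)
 *		     : "memory");
 */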
/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE_GENERIC,
	SPECTRE_V2_RETPOLINE_AMD,
	SPECTRE_V2_IBRS_ENHANCED,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
	u64 val = PRED_CMD_IBPB;

	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	u64 val = x86_spec_ctrl_base;					\
									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)
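/*
 * Illustrative only: the intended bracketing around a firmware call.
 * The efi_call_something() name is a placeholder, not a real function:
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = efi_call_something(...);
 *	firmware_restrict_branch_speculation_end();
 *
 * _start() disables preemption and _end() re-enables it, so the code in
 * between must not sleep.
 */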
DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_user_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_user_clear))
		mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}

#endif /* __ASSEMBLY__ */

/*
 * Below is used in the eBPF JIT compiler and emits the byte sequence
 * for the following assembly:
 *
 * With retpolines configured:
 *
 *    callq do_rop
 *  spec_trap:
 *    pause
 *    lfence
 *    jmp spec_trap
 *  do_rop:
 *    mov %rcx,(%rsp) for x86_64
 *    mov %edx,(%esp) for x86_32
 *    retq
 *
 * Without retpolines configured:
 *
 *    jmp *%rcx for x86_64
 *    jmp *%edx for x86_32
 */
#ifdef CONFIG_RETPOLINE
# ifdef CONFIG_X86_64
#  define RETPOLINE_RCX_BPF_JIT_SIZE	17
#  define RETPOLINE_RCX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* callq do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT4(0x48, 0x89, 0x0C, 0x24); /* mov %rcx,(%rsp) */	\
	EMIT1(0xC3);             /* retq */			\
} while (0)
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* call do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT3(0x89, 0x14, 0x24); /* mov %edx,(%esp) */		\
	EMIT1(0xC3);             /* ret */			\
} while (0)
# endif
#else /* !CONFIG_RETPOLINE */
# ifdef CONFIG_X86_64
#  define RETPOLINE_RCX_BPF_JIT_SIZE	2
#  define RETPOLINE_RCX_BPF_JIT()				\
	EMIT2(0xFF, 0xE1);       /* jmp *%rcx */
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT()				\
	EMIT2(0xFF, 0xE2)        /* jmp *%edx */
# endif
#endif

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */