/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
#include <asm/current.h>

/*
 * Call depth tracking for Intel SKL CPUs to address the RSB underflow
 * issue in software.
 *
 * The tracking does not use a counter. It uses arithmetic shift
 * right on call entry and logical shift left on return.
 *
 * The depth tracking variable is initialized to 0x8000.... when the call
 * depth is zero. The arithmetic shift right sign extends the MSB and
 * saturates after the 12th call. The shift count is 5 for both directions
 * so the tracking covers 12 nested calls.
 *
 *  Call
 *   0: 0x8000000000000000	0x0000000000000000
 *   1: 0xfc00000000000000	0xf000000000000000
 * ...
 *  11: 0xfffffffffffffff8	0xfffffffffffffc00
 *  12: 0xffffffffffffffff	0xffffffffffffffe0
 *
 * After a return buffer fill the depth is credited with 12 calls before
 * the next stuffing has to take place.
 *
 * There is an inaccuracy for situations like this:
 *
 *	10 calls
 *	 5 returns
 *	 3 calls
 *	 4 returns
 *	 3 calls
 *	....
 *
 * The shift count might cause this to be off by one in either direction,
 * but there is still a cushion vs. the RSB depth. The algorithm does not
 * claim to be perfect and it can be speculated around by the CPU, but it
 * is considered that it obfuscates the problem enough to make exploitation
 * extremely difficult.
 */
#define RET_DEPTH_SHIFT			5
#define RSB_RET_STUFF_LOOPS		16
#define RET_DEPTH_INIT			0x8000000000000000ULL
#define RET_DEPTH_INIT_FROM_CALL	0xfc00000000000000ULL
#define RET_DEPTH_CREDIT		0xffffffffffffffffULL

#ifdef CONFIG_CALL_THUNKS_DEBUG
# define CALL_THUNKS_DEBUG_INC_CALLS				\
	incq	%gs:__x86_call_count;
# define CALL_THUNKS_DEBUG_INC_RETS				\
	incq	%gs:__x86_ret_count;
# define CALL_THUNKS_DEBUG_INC_STUFFS				\
	incq	%gs:__x86_stuffs_count;
# define CALL_THUNKS_DEBUG_INC_CTXSW				\
	incq	%gs:__x86_ctxsw_count;
#else
# define CALL_THUNKS_DEBUG_INC_CALLS
# define CALL_THUNKS_DEBUG_INC_RETS
# define CALL_THUNKS_DEBUG_INC_STUFFS
# define CALL_THUNKS_DEBUG_INC_CTXSW
#endif

#if defined(CONFIG_CALL_DEPTH_TRACKING) && !defined(COMPILE_OFFSETS)

#include <asm/asm-offsets.h>

#define CREDIT_CALL_DEPTH					\
	movq	$-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define ASM_CREDIT_CALL_DEPTH					\
	movq	$-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define RESET_CALL_DEPTH					\
	xor	%eax, %eax;					\
	bts	$63, %rax;					\
	movq	%rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define RESET_CALL_DEPTH_FROM_CALL				\
	movb	$0xfc, %al;					\
	shl	$56, %rax;					\
	movq	%rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);	\
	CALL_THUNKS_DEBUG_INC_CALLS

#define INCREMENT_CALL_DEPTH					\
	sarq	$5, %gs:pcpu_hot + X86_call_depth;		\
	CALL_THUNKS_DEBUG_INC_CALLS

#define ASM_INCREMENT_CALL_DEPTH				\
	sarq	$5, PER_CPU_VAR(pcpu_hot + X86_call_depth);	\
	CALL_THUNKS_DEBUG_INC_CALLS

#else
#define CREDIT_CALL_DEPTH
#define ASM_CREDIT_CALL_DEPTH
#define RESET_CALL_DEPTH
#define INCREMENT_CALL_DEPTH
#define ASM_INCREMENT_CALL_DEPTH
#define RESET_CALL_DEPTH_FROM_CALL
#endif
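
/*
 * Illustrative sketch of the tracking arithmetic described above (plain C,
 * not kernel code; the 'depth' variable name is made up for the example):
 *
 *	u64 depth = RET_DEPTH_INIT;
 *
 * On a call, arithmetic shift right, sign extending the MSB:
 *
 *	depth = (u64)((s64)depth >> RET_DEPTH_SHIFT);
 *
 * which yields 0xfc00000000000000 == RET_DEPTH_INIT_FROM_CALL. On a
 * return, logical shift left:
 *
 *	depth <<= RET_DEPTH_SHIFT;
 *
 * which brings the value back to 0x8000000000000000 == RET_DEPTH_INIT.
 * Twelve nested calls saturate the value to all ones (RET_DEPTH_CREDIT),
 * which is also what CREDIT_CALL_DEPTH stores after a full RSB stuff.
 */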

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RETPOLINE_THUNK_SIZE	32
#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */

/*
 * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
 */
#define __FILL_RETURN_SLOT			\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	772f;				\
	int3;					\
772:

/*
 * Stuff the entire RSB.
 *
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#ifdef CONFIG_X86_64
#define __FILL_RETURN_BUFFER(reg, nr)			\
	mov	$(nr/2), reg;				\
771:							\
	__FILL_RETURN_SLOT				\
	__FILL_RETURN_SLOT				\
	add	$(BITS_PER_LONG/8) * 2, %_ASM_SP;	\
	dec	reg;					\
	jnz	771b;					\
	/* barrier for jnz misprediction */		\
	lfence;						\
	ASM_CREDIT_CALL_DEPTH				\
	CALL_THUNKS_DEBUG_INC_CTXSW
#else
/*
 * i386 doesn't unconditionally have LFENCE, as such it can't
 * do a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr)			\
	.rept nr;					\
	__FILL_RETURN_SLOT;				\
	.endr;						\
	add	$(BITS_PER_LONG/8) * nr, %_ASM_SP;
#endif

/*
 * Stuff a single RSB slot.
 *
 * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
 * forced to retire before letting a RET instruction execute.
 *
 * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
 * before this point.
 */
#define __FILL_ONE_RETURN				\
	__FILL_RETURN_SLOT				\
	add	$(BITS_PER_LONG/8), %_ASM_SP;		\
	lfence;

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
.Lhere_\@:
	.pushsection .discard.retpoline_safe
	.long .Lhere_\@ - .
	.popsection
.endm

/*
 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
 * vs RETBleed validation.
 */
#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE

/*
 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
 * eventually turn into its own annotation.
 */
.macro VALIDATE_UNRET_END
#if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY)
	ANNOTATE_RETPOLINE_SAFE
	nop
#endif
.endm
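
/*
 * Illustrative only (not part of this header): ANNOTATE_RETPOLINE_SAFE
 * above is placed immediately before an indirect jump/call that is
 * deliberately left unretpolined, e.g. in a .S file:
 *
 *	ANNOTATE_RETPOLINE_SAFE
 *	jmp	*%rdi
 *
 * Without the annotation, objtool would flag the bare indirect jump in
 * retpoline builds.
 */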

/*
 * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
 * to the retpoline thunk with a CS prefix when the register requires
 * a REX prefix byte to encode. Also see apply_retpolines().
 */
.macro __CS_PREFIX reg:req
	.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
	.ifc \reg,\rs
	.byte 0x2e
	.endif
	.endr
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 *
 * NOTE: these do not take kCFI into account and are thus not comparable to C
 * indirect calls, take care when using. The target of these should be an ENDBR
 * instruction irrespective of kCFI.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	__CS_PREFIX \reg
	jmp	__x86_indirect_thunk_\reg
#else
	jmp	*%\reg
	int3
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	__CS_PREFIX \reg
	call	__x86_indirect_thunk_\reg
#else
	call	*%\reg
#endif
.endm

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
		__stringify(nop;nop;__FILL_ONE_RETURN), \ftr2

.Lskip_rsb_\@:
.endm

#ifdef CONFIG_CPU_UNRET_ENTRY
#define CALL_ZEN_UNTRAIN_RET	"call zen_untrain_ret"
#else
#define CALL_ZEN_UNTRAIN_RET	""
#endif
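
/*
 * Illustrative only (not part of this header): typical .S usage of the
 * macros above; the register choices and the feature bit are merely
 * examples.
 *
 *	CALL_NOSPEC	r11
 *
 * emits either a retpolined or a plain indirect call through %r11, and
 *
 *	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 *
 * overwrites all RSB entries when the given feature bit is set and is
 * skipped otherwise.
 */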

/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
 * return thunk isn't mapped into the userspace tables (then again, AMD
 * typically has NO_MELTDOWN).
 *
 * While zen_untrain_ret() doesn't clobber anything, it does require a stack;
 * entry_ibpb() will clobber AX, CX, DX.
 *
 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
 */
.macro UNTRAIN_RET
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
	defined(CONFIG_CALL_DEPTH_TRACKING)
	VALIDATE_UNRET_END
	ALTERNATIVE_3 "",						\
		      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB,	\
		      __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
#endif
.endm

.macro UNTRAIN_RET_FROM_CALL
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
	defined(CONFIG_CALL_DEPTH_TRACKING)
	VALIDATE_UNRET_END
	ALTERNATIVE_3 "",						\
		      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB,	\
		      __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH
#endif
.endm


.macro CALL_DEPTH_ACCOUNT
#ifdef CONFIG_CALL_DEPTH_TRACKING
	ALTERNATIVE "",							\
		    __stringify(ASM_INCREMENT_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
#endif
.endm

#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	".long 999b - .\n\t"					\
	".popsection\n\t"

typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
extern retpoline_thunk_t __x86_indirect_thunk_array[];
extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];

extern void __x86_return_thunk(void);
extern void zen_untrain_ret(void);
extern void entry_ibpb(void);

#ifdef CONFIG_CALL_THUNKS
extern void (*x86_return_thunk)(void);
#else
#define x86_return_thunk	(&__x86_return_thunk)
#endif

#ifdef CONFIG_CALL_DEPTH_TRACKING
extern void __x86_return_skl(void);

static inline void x86_set_skl_return_thunk(void)
{
	x86_return_thunk = &__x86_return_skl;
}

#define CALL_DEPTH_ACCOUNT					\
	ALTERNATIVE("",						\
		    __stringify(INCREMENT_CALL_DEPTH),		\
		    X86_FEATURE_CALL_DEPTH)

#ifdef CONFIG_CALL_THUNKS_DEBUG
DECLARE_PER_CPU(u64, __x86_call_count);
DECLARE_PER_CPU(u64, __x86_ret_count);
DECLARE_PER_CPU(u64, __x86_stuffs_count);
DECLARE_PER_CPU(u64, __x86_ctxsw_count);
#endif
#else
static inline void x86_set_skl_return_thunk(void) {}

#define CALL_DEPTH_ACCOUNT ""

#endif

#ifdef CONFIG_RETPOLINE

#define GEN(reg) \
	extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg)						\
	extern retpoline_thunk_t __x86_indirect_call_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg)						\
	extern retpoline_thunk_t __x86_indirect_jump_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier, which is only available in newer GCC;
 * that is ensured when CONFIG_RETPOLINE is defined.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
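
/*
 * Illustrative only (a sketch, not part of this header): from C, an
 * indirect call goes through CALL_NOSPEC/THUNK_TARGET inline asm, e.g.
 *
 *	void (*fn)(void) = some_callback;
 *
 *	asm volatile(CALL_NOSPEC : : THUNK_TARGET(fn) : "memory");
 *
 * where 'fn' and 'some_callback' are made-up names for the example.
 */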

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"	jmp	904f;\n"				\
	"	.align 16\n"					\
	"901:	call	903f;\n"				\
	"902:	pause;\n"					\
	"	lfence;\n"					\
	"	jmp	902b;\n"				\
	"	.align 16\n"					\
	"903:	lea	4(%%esp), %%esp;\n"			\
	"	pushl	%[thunk_target];\n"			\
	"	ret;\n"						\
	"	.align 16\n"					\
	"904:	call	901b;\n",				\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE,
	SPECTRE_V2_LFENCE,
	SPECTRE_V2_EIBRS,
	SPECTRE_V2_EIBRS_RETPOLINE,
	SPECTRE_V2_EIBRS_LFENCE,
	SPECTRE_V2_IBRS,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
	u64 val = PRED_CMD_IBPB;

	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}
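
/*
 * Illustrative note (not additional kernel code): alternative_msr_write()
 * only patches the WRMSR in when the feature bit is set, so e.g.
 *
 *	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
 *			      X86_FEATURE_USE_IBPB);
 *
 * is a NOP when X86_FEATURE_USE_IBPB is not set and issues an IBPB
 * otherwise, which is exactly what indirect_branch_prediction_barrier()
 * above does.
 */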

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
extern void update_spec_ctrl_cond(u64 val);
extern u64 spec_ctrl_current(void);

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
			      X86_FEATURE_USE_IBRS_FW);			\
	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,		\
			      X86_FEATURE_USE_IBPB_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current(),			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)

DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_user_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_user_clear))
		mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */