// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 * - Rafael R. Reilova (moved everything from head.S),
 *   <rreilova@ececs.uc.edu>
 * - Channing Corn (tests & fixes),
 * - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/api.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>

#include "cpu.h"

static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init mds_print_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

/*
 * Controls whether l1d flush based mitigations are enabled, based on hw
 * features and the admin setting via the boot parameter; defaults to false.
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

void __init check_bugs(void)
{
	identify_boot_cpu();

	/*
	 * identify_boot_cpu() initialized SMT support information, let the
	 * core code know.
	 */
	cpu_smt_check_topology();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
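	 * (That cached value is kept in x86_amd_ls_cfg_base, with the
	 *  family specific SSBD bit in x86_amd_ls_cfg_ssbd_mask, see above.)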
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();
	ssb_select_mitigation();
	l1tf_select_mitigation();
	mds_select_mitigation();
	taa_select_mitigation();
	srbds_select_mitigation();
	l1d_flush_select_mitigation();

	/*
	 * As MDS and TAA mitigations are inter-related, defer printing the
	 * MDS mitigation until after TAA mitigation selection is done.
	 */
	mds_print_mitigation();

	arch_smt_update();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for a i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages.
	 * There are typically fixed size MTRRs in there and overlapping
	 * MTRRs into large pages causes slow downs.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}

void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented ? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and or the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		    static_cpu_has(X86_FEATURE_AMD_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		/* Conditional STIBP enabled? */
		if (static_branch_unlikely(&switch_to_cond_stibp))
			hostval |= stibp_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
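	 * (ssbd_tif_to_spec_ctrl() simply translates the TIF_SSBD bit into
	 *  the SPEC_CTRL_SSBD bit position.)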
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};

static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;

		static_branch_enable(&mds_user_clear);

		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);
	}
}

static void __init mds_print_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
		return;

	pr_info("%s\n", mds_strings[mds_mitigation]);
}

static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"TAA: " fmt

enum taa_mitigations {
	TAA_MITIGATION_OFF,
	TAA_MITIGATION_UCODE_NEEDED,
	TAA_MITIGATION_VERW,
	TAA_MITIGATION_TSX_DISABLED,
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] = {
	[TAA_MITIGATION_OFF]		= "Vulnerable",
	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
};

static void __init taa_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/* TSX previously disabled by tsx=off */
	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
		goto out;
	}

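	/*
	 * Note: cpu_mitigations_off() below reflects "mitigations=off" on
	 * the kernel command line.
	 */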
	if (cpu_mitigations_off()) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/*
	 * TAA mitigation via VERW is turned off if both
	 * tsx_async_abort=off and mds=off are specified.
	 */
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    mds_mitigation == MDS_MITIGATION_OFF)
		goto out;

	if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
		taa_mitigation = TAA_MITIGATION_VERW;
	else
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
	 * A microcode update fixes this behavior to clear CPU buffers. It also
	 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
	 * ARCH_CAP_TSX_CTRL_MSR bit.
	 *
	 * On MDS_NO=1 CPUs, if ARCH_CAP_TSX_CTRL_MSR is not set, a microcode
	 * update is required.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ((ia32_cap & ARCH_CAP_MDS_NO) &&
	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * TSX is enabled, select alternate mitigation for TAA which is
	 * the same as MDS. Enable MDS static branch to clear CPU buffers.
	 *
	 * For guests that can't determine whether the correct microcode is
	 * present on the host, enable the mitigation for UCODE_NEEDED as well.
	 */
	static_branch_enable(&mds_user_clear);

	if (taa_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);

	/*
	 * Update MDS mitigation, if necessary, as the mds_user_clear is
	 * now enabled for TAA mitigation.
	 */
	if (mds_mitigation == MDS_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MDS)) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_select_mitigation();
	}
out:
	pr_info("%s\n", taa_strings[taa_mitigation]);
}

static int __init tsx_async_abort_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		taa_mitigation = TAA_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		taa_mitigation = TAA_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_nosmt = true;
	}

	return 0;
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"SRBDS: " fmt

enum srbds_mitigations {
	SRBDS_MITIGATION_OFF,
	SRBDS_MITIGATION_UCODE_NEEDED,
	SRBDS_MITIGATION_FULL,
	SRBDS_MITIGATION_TSX_OFF,
	SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;

static const char * const srbds_strings[] = {
	[SRBDS_MITIGATION_OFF]		= "Vulnerable",
	[SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
	[SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

static bool srbds_off;

void update_srbds_msr(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
		return;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	switch (srbds_mitigation) {
	case SRBDS_MITIGATION_OFF:
	case SRBDS_MITIGATION_TSX_OFF:
		mcu_ctrl |= RNGDS_MITG_DIS;
		break;
	case SRBDS_MITIGATION_FULL:
		mcu_ctrl &= ~RNGDS_MITG_DIS;
		break;
	default:
		break;
	}

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}

static void __init srbds_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	/*
	 * Check to see if this is one of the MDS_NO systems supporting
	 * TSX that are only exposed to SRBDS when TSX is enabled.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
	else if (cpu_mitigations_off() || srbds_off)
		srbds_mitigation = SRBDS_MITIGATION_OFF;

	update_srbds_msr();
	pr_info("%s\n", srbds_strings[srbds_mitigation]);
}

static int __init srbds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return 0;

	srbds_off = !strcmp(str, "off");
	return 0;
}
early_param("srbds", srbds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"L1D Flush : " fmt

enum l1d_flush_mitigations {
	L1D_FLUSH_OFF = 0,
	L1D_FLUSH_ON,
};

static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;

static void __init l1d_flush_select_mitigation(void)
{
	if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
		return;

	static_branch_enable(&switch_mm_cond_l1d_flush);
	pr_info("Conditional flush on switch_mm() enabled\n");
}

static int __init l1d_flush_parse_cmdline(char *str)
{
	if (!strcmp(str, "on"))
		l1d_flush_mitigation = L1D_FLUSH_ON;

	return 0;
}
early_param("l1d_flush", l1d_flush_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V1 : " fmt

enum spectre_v1_mitigation {
	SPECTRE_V1_MITIGATION_NONE,
	SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
	SPECTRE_V1_MITIGATION_AUTO;

static const char * const spectre_v1_strings[] = {
	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};

/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return false;

	/*
	 * On CPUs which are vulnerable to Meltdown, SMAP does not
	 * prevent speculative access to user data in the L1 cache.
	 * Consider SMAP to be non-functional as a mitigation on these
	 * CPUs.
	 */
	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
		return false;

	return true;
}

static void __init spectre_v1_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
		return;
	}

	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
		/*
		 * With Spectre v1, a user can speculatively control either
		 * path of a conditional swapgs with a user-controlled GS
		 * value. The mitigation is to add lfences to both code paths.
		 *
		 * If FSGSBASE is enabled, the user can put a kernel address in
		 * GS, in which case SMAP provides no protection.
		 *
		 * If FSGSBASE is disabled, the user can only put a user space
		 * address in GS. That makes an attack harder, but still
		 * possible if there's no SMAP protection.
		 */
		if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		    !smap_works_speculatively()) {
			/*
			 * Mitigation can be provided from SWAPGS itself or
			 * PTI as the CR3 write in the Meltdown mitigation
			 * is serializing.
			 *
			 * If neither is there, mitigate with an LFENCE to
			 * stop speculation through swapgs.
			 */
			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
			    !boot_cpu_has(X86_FEATURE_PTI))
				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

			/*
			 * Enable lfences in the kernel entry (non-swapgs)
			 * paths, to prevent user entry from speculatively
			 * skipping swapgs.
			 */
			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
		}
	}

	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}

static int __init nospectre_v1_cmdline(char *str)
{
	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
	return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
	SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
" - vulnerable module loaded" : ""; 649 } 650 #else 651 static inline const char *spectre_v2_module_string(void) { return ""; } 652 #endif 653 654 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n" 655 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" 656 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n" 657 658 #ifdef CONFIG_BPF_SYSCALL 659 void unpriv_ebpf_notify(int new_state) 660 { 661 if (new_state) 662 return; 663 664 /* Unprivileged eBPF is enabled */ 665 666 switch (spectre_v2_enabled) { 667 case SPECTRE_V2_EIBRS: 668 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); 669 break; 670 case SPECTRE_V2_EIBRS_LFENCE: 671 if (sched_smt_active()) 672 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); 673 break; 674 default: 675 break; 676 } 677 } 678 #endif 679 680 static inline bool match_option(const char *arg, int arglen, const char *opt) 681 { 682 int len = strlen(opt); 683 684 return len == arglen && !strncmp(arg, opt, len); 685 } 686 687 /* The kernel command line selection for spectre v2 */ 688 enum spectre_v2_mitigation_cmd { 689 SPECTRE_V2_CMD_NONE, 690 SPECTRE_V2_CMD_AUTO, 691 SPECTRE_V2_CMD_FORCE, 692 SPECTRE_V2_CMD_RETPOLINE, 693 SPECTRE_V2_CMD_RETPOLINE_GENERIC, 694 SPECTRE_V2_CMD_RETPOLINE_LFENCE, 695 SPECTRE_V2_CMD_EIBRS, 696 SPECTRE_V2_CMD_EIBRS_RETPOLINE, 697 SPECTRE_V2_CMD_EIBRS_LFENCE, 698 }; 699 700 enum spectre_v2_user_cmd { 701 SPECTRE_V2_USER_CMD_NONE, 702 SPECTRE_V2_USER_CMD_AUTO, 703 SPECTRE_V2_USER_CMD_FORCE, 704 SPECTRE_V2_USER_CMD_PRCTL, 705 SPECTRE_V2_USER_CMD_PRCTL_IBPB, 706 SPECTRE_V2_USER_CMD_SECCOMP, 707 SPECTRE_V2_USER_CMD_SECCOMP_IBPB, 708 }; 709 710 static const char * const spectre_v2_user_strings[] = { 711 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", 712 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", 713 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", 714 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", 715 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", 716 }; 717 718 static const struct { 719 const char *option; 720 enum spectre_v2_user_cmd cmd; 721 bool secure; 722 } v2_user_options[] __initconst = { 723 { "auto", SPECTRE_V2_USER_CMD_AUTO, false }, 724 { "off", SPECTRE_V2_USER_CMD_NONE, false }, 725 { "on", SPECTRE_V2_USER_CMD_FORCE, true }, 726 { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false }, 727 { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false }, 728 { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false }, 729 { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false }, 730 }; 731 732 static void __init spec_v2_user_print_cond(const char *reason, bool secure) 733 { 734 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) 735 pr_info("spectre_v2_user=%s forced on command line.\n", reason); 736 } 737 738 static enum spectre_v2_user_cmd __init 739 spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd) 740 { 741 char arg[20]; 742 int ret, i; 743 744 switch (v2_cmd) { 745 case SPECTRE_V2_CMD_NONE: 746 return SPECTRE_V2_USER_CMD_NONE; 747 case SPECTRE_V2_CMD_FORCE: 748 return SPECTRE_V2_USER_CMD_FORCE; 749 default: 750 break; 751 } 752 753 ret = cmdline_find_option(boot_command_line, "spectre_v2_user", 754 arg, sizeof(arg)); 755 if (ret < 0) 756 return 
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}

static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
{
	return (mode == SPECTRE_V2_EIBRS ||
		mode == SPECTRE_V2_EIBRS_RETPOLINE ||
		mode == SPECTRE_V2_EIBRS_LFENCE);
}

static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		spectre_v2_user_ibpb = mode;
		switch (cmd) {
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		default:
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/*
	 * If STIBP is not available, or if enhanced IBRS is enabled, or if
	 * SMT is not possible, STIBP is not required.
	 */
	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
	    !smt_possible ||
	    spectre_v2_in_eibrs_mode(spectre_v2_enabled))
		return;

	/*
	 * At this point, an STIBP mode other than "off" has been set.
	 * If STIBP support is not being forced, check if STIBP always-on
	 * is preferred.
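	 * (X86_FEATURE_AMD_STIBP_ALWAYS_ON indicates the CPU prefers STIBP
	 *  left on permanently rather than toggled per task.)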
	 */
	if (mode != SPECTRE_V2_USER_STRICT &&
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;

	spectre_v2_user_stibp = mode;

set_mode:
	pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced IBRS",
	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced IBRS + LFENCE",
	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced IBRS + Retpolines",
};

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] __initconst = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,lfence",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "eibrs",		SPECTRE_V2_CMD_EIBRS,		  false },
	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
};

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
	    cpu_mitigations_off())
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_EIBRS ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
		pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
	    !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
		pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
}

static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
{
	if (!IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("Kernel not compiled with retpoline; no mitigation available!");
		return SPECTRE_V2_NONE;
	}

	return SPECTRE_V2_RETPOLINE;
}

static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_EIBRS;
			break;
		}

		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
		pr_err(SPECTRE_V2_LFENCE_MSG);
		mode = SPECTRE_V2_LFENCE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		mode = SPECTRE_V2_RETPOLINE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE:
		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_EIBRS:
		mode = SPECTRE_V2_EIBRS;
		break;

	case SPECTRE_V2_CMD_EIBRS_LFENCE:
		mode = SPECTRE_V2_EIBRS_LFENCE;
		break;

	case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
		mode = SPECTRE_V2_EIBRS_RETPOLINE;
		break;
	}

	if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);

	if (spectre_v2_in_eibrs_mode(mode)) {
		/* Force it so VMEXIT will restore correctly */
		x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
	}

	switch (mode) {
	case SPECTRE_V2_NONE:
	case SPECTRE_V2_EIBRS:
		break;

	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_EIBRS_LFENCE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
		fallthrough;

	case SPECTRE_V2_RETPOLINE:
	case SPECTRE_V2_EIBRS_RETPOLINE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
		break;
	}

	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If Spectre v2 protection has been enabled, unconditionally fill
	 * RSB during a context switch; this protects against two independent
	 * issues:
	 *
	 *	- RSB underflow (and switch to BTB) on Skylake+
	 *	- SpectreRSB variant of Spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. Enhanced IBRS protects firmware too, so enable restricted
	 * speculation around firmware calls only when Enhanced IBRS isn't
	 * supported.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, the kernel might unintentionally not
	 * enable IBRS around firmware calls.
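	 * (X86_FEATURE_USE_IBRS_FW makes the firmware call wrappers raise
	 *  IBRS around the call and drop it again afterwards.)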
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_user_select_mitigation(cmd);
}

static void update_stibp_msr(void * __unused)
{
	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask == x86_spec_ctrl_base)
		return;

	pr_info("Update user space SMT mitigation: STIBP %s\n",
		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
	x86_spec_ctrl_base = mask;
	on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
	if (sched_smt_active())
		static_branch_enable(&switch_to_cond_stibp);
	else
		static_branch_disable(&switch_to_cond_stibp);
}

#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	/*
	 * Enable the idle clearing if SMT is active on CPUs which are
	 * affected only by MSBDS and not any other MDS variant.
	 *
	 * The other variants cannot be mitigated when SMT is enabled, so
	 * clearing the buffers on idle just to prevent the Store Buffer
	 * repartitioning leak would be a window dressing exercise.
	 */
	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
		return;

	if (sched_smt_active())
		static_branch_enable(&mds_idle_clear);
	else
		static_branch_disable(&mds_idle_clear);
}

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"

void cpu_bugs_smt_update(void)
{
	mutex_lock(&spec_ctrl_mutex);

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	switch (taa_mitigation) {
	case TAA_MITIGATION_VERW:
	case TAA_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(TAA_MSG_SMT);
		break;
	case TAA_MITIGATION_TSX_DISABLED:
	case TAA_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] __initconst = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off()) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
	 * bit in the mask to allow guests to use the mitigation even in the
	 * case where the host does not enable it.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
	    static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
	/* Force the update of the real TIF bits */
	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

	/*
	 * Immediately update the speculation control MSRs for the current
	 * task, but for a non-current task delay setting the CPU
	 * mitigation until it is scheduled next.
	 *
	 * This can only happen for SECCOMP mitigation. For PRCTL it's
	 * always the current task.
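	 * (A non-current task shows up here e.g. when seccomp applies the
	 *  mitigation to sibling threads via TSYNC.)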
	 */
	if (tsk == current)
		speculation_ctrl_update_current();
}

static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return -EPERM;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	case PR_SPEC_DISABLE:
		clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	default:
		return -ERANGE;
	}
}

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

static bool is_spec_ib_user_controlled(void)
{
	return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
	       spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
	       spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
	       spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
}

static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return 0;

		/*
		 * With strict mode for both IBPB and STIBP, the instruction
		 * code paths avoid checking this task flag and instead,
		 * unconditionally run the instruction. However, STIBP and IBPB
		 * are independent and either can be set to conditionally
		 * enabled regardless of the mode of the other.
		 *
		 * If either is set to conditional, allow the task flag to be
		 * updated, unless it was force-disabled by a previous prctl
		 * call. Currently, this is possible on an AMD CPU which has the
		 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
		 * kernel is booted with 'spectre_v2_user=seccomp', then
		 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
		 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
		 */
		if (!is_spec_ib_user_controlled() ||
		    task_spec_ib_force_disable(task))
			return -EPERM;

		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * Indirect branch speculation is always allowed when
		 * mitigation is force disabled.
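		 * (With the mitigation globally off there is nothing for this
		 *  prctl to disable, hence the -EPERM below.)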
		 */
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return -EPERM;

		if (!is_spec_ib_user_controlled())
			return 0;

		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_set(task, ctrl);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int l1d_flush_prctl_get(struct task_struct *task)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return PR_SPEC_FORCE_DISABLE;

	if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	else
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
}

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_noexec(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

static int ib_prctl_get(struct task_struct *task)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return PR_SPEC_NOT_AFFECTED;

	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
	    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
		return PR_SPEC_ENABLE;
	else if (is_spec_ib_user_controlled()) {
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
		return PR_SPEC_DISABLE;
	else
		return PR_SPEC_NOT_AFFECTED;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_get(task);
	default:
		return -ENODEV;
	}
}

void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

bool itlb_multihit_kvm_mitigation;
EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support 44 bits of physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the topmost address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the topmost
 * address bit due to memory holes, which has been observed on machines
 * which report 36 bits of physical address space and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44 bits internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44 bits if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL:
	case INTEL_FAM6_HASWELL_L:
	case INTEL_FAM6_HASWELL_G:
	case INTEL_FAM6_BROADWELL:
	case INTEL_FAM6_BROADWELL_G:
	case INTEL_FAM6_SKYLAKE_L:
	case INTEL_FAM6_SKYLAKE:
	case INTEL_FAM6_KABYLAKE_L:
	case INTEL_FAM6_KABYLAKE:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}

static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	if (cpu_mitigations_off())
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (cpu_mitigations_auto_nosmt())
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
	    e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
			half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}

static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
	     sched_smt_active())) {
		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
			       l1tf_vmx_states[l1tf_vmx_mitigation]);
	}

	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
		       l1tf_vmx_states[l1tf_vmx_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}

static ssize_t itlb_multihit_show_state(char *buf)
{
	if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
	    !boot_cpu_has(X86_FEATURE_VMX))
		return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
	else if (!(cr4_read_shadow() & X86_CR4_VMXE))
		return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
	else if (itlb_multihit_kvm_mitigation)
		return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
	else
		return sprintf(buf, "KVM: Vulnerable\n");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}

static ssize_t itlb_multihit_show_state(char *buf)
{
	return sprintf(buf, "Processor vulnerable\n");
}
#endif

static ssize_t mds_show_state(char *buf)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sprintf(buf, "%s; SMT Host state unknown\n",
			       mds_strings[mds_mitigation]);
	}

	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
		return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
			       (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
"mitigated" : "disabled")); 1756 } 1757 1758 return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], 1759 sched_smt_active() ? "vulnerable" : "disabled"); 1760 } 1761 1762 static ssize_t tsx_async_abort_show_state(char *buf) 1763 { 1764 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || 1765 (taa_mitigation == TAA_MITIGATION_OFF)) 1766 return sprintf(buf, "%s\n", taa_strings[taa_mitigation]); 1767 1768 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 1769 return sprintf(buf, "%s; SMT Host state unknown\n", 1770 taa_strings[taa_mitigation]); 1771 } 1772 1773 return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], 1774 sched_smt_active() ? "vulnerable" : "disabled"); 1775 } 1776 1777 static char *stibp_state(void) 1778 { 1779 if (spectre_v2_in_eibrs_mode(spectre_v2_enabled)) 1780 return ""; 1781 1782 switch (spectre_v2_user_stibp) { 1783 case SPECTRE_V2_USER_NONE: 1784 return ", STIBP: disabled"; 1785 case SPECTRE_V2_USER_STRICT: 1786 return ", STIBP: forced"; 1787 case SPECTRE_V2_USER_STRICT_PREFERRED: 1788 return ", STIBP: always-on"; 1789 case SPECTRE_V2_USER_PRCTL: 1790 case SPECTRE_V2_USER_SECCOMP: 1791 if (static_key_enabled(&switch_to_cond_stibp)) 1792 return ", STIBP: conditional"; 1793 } 1794 return ""; 1795 } 1796 1797 static char *ibpb_state(void) 1798 { 1799 if (boot_cpu_has(X86_FEATURE_IBPB)) { 1800 if (static_key_enabled(&switch_mm_always_ibpb)) 1801 return ", IBPB: always-on"; 1802 if (static_key_enabled(&switch_mm_cond_ibpb)) 1803 return ", IBPB: conditional"; 1804 return ", IBPB: disabled"; 1805 } 1806 return ""; 1807 } 1808 1809 static ssize_t spectre_v2_show_state(char *buf) 1810 { 1811 if (spectre_v2_enabled == SPECTRE_V2_LFENCE) 1812 return sprintf(buf, "Vulnerable: LFENCE\n"); 1813 1814 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) 1815 return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); 1816 1817 if (sched_smt_active() && unprivileged_ebpf_enabled() && 1818 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) 1819 return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); 1820 1821 return sprintf(buf, "%s%s%s%s%s%s\n", 1822 spectre_v2_strings[spectre_v2_enabled], 1823 ibpb_state(), 1824 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", 1825 stibp_state(), 1826 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? 
", RSB filling" : "", 1827 spectre_v2_module_string()); 1828 } 1829 1830 static ssize_t srbds_show_state(char *buf) 1831 { 1832 return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]); 1833 } 1834 1835 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, 1836 char *buf, unsigned int bug) 1837 { 1838 if (!boot_cpu_has_bug(bug)) 1839 return sprintf(buf, "Not affected\n"); 1840 1841 switch (bug) { 1842 case X86_BUG_CPU_MELTDOWN: 1843 if (boot_cpu_has(X86_FEATURE_PTI)) 1844 return sprintf(buf, "Mitigation: PTI\n"); 1845 1846 if (hypervisor_is_type(X86_HYPER_XEN_PV)) 1847 return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); 1848 1849 break; 1850 1851 case X86_BUG_SPECTRE_V1: 1852 return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); 1853 1854 case X86_BUG_SPECTRE_V2: 1855 return spectre_v2_show_state(buf); 1856 1857 case X86_BUG_SPEC_STORE_BYPASS: 1858 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); 1859 1860 case X86_BUG_L1TF: 1861 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) 1862 return l1tf_show_state(buf); 1863 break; 1864 1865 case X86_BUG_MDS: 1866 return mds_show_state(buf); 1867 1868 case X86_BUG_TAA: 1869 return tsx_async_abort_show_state(buf); 1870 1871 case X86_BUG_ITLB_MULTIHIT: 1872 return itlb_multihit_show_state(buf); 1873 1874 case X86_BUG_SRBDS: 1875 return srbds_show_state(buf); 1876 1877 default: 1878 break; 1879 } 1880 1881 return sprintf(buf, "Vulnerable\n"); 1882 } 1883 1884 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) 1885 { 1886 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); 1887 } 1888 1889 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) 1890 { 1891 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); 1892 } 1893 1894 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) 1895 { 1896 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); 1897 } 1898 1899 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) 1900 { 1901 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); 1902 } 1903 1904 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) 1905 { 1906 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); 1907 } 1908 1909 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) 1910 { 1911 return cpu_show_common(dev, attr, buf, X86_BUG_MDS); 1912 } 1913 1914 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf) 1915 { 1916 return cpu_show_common(dev, attr, buf, X86_BUG_TAA); 1917 } 1918 1919 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf) 1920 { 1921 return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); 1922 } 1923 1924 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) 1925 { 1926 return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); 1927 } 1928 #endif 1929