1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 1994 Linus Torvalds 4 * 5 * Cyrix stuff, June 1998 by: 6 * - Rafael R. Reilova (moved everything from head.S), 7 * <rreilova@ececs.uc.edu> 8 * - Channing Corn (tests & fixes), 9 * - Andrew D. Balsa (code cleanup). 10 */ 11 #include <linux/init.h> 12 #include <linux/utsname.h> 13 #include <linux/cpu.h> 14 #include <linux/module.h> 15 #include <linux/nospec.h> 16 #include <linux/prctl.h> 17 #include <linux/sched/smt.h> 18 #include <linux/pgtable.h> 19 #include <linux/bpf.h> 20 21 #include <asm/spec-ctrl.h> 22 #include <asm/cmdline.h> 23 #include <asm/bugs.h> 24 #include <asm/processor.h> 25 #include <asm/processor-flags.h> 26 #include <asm/fpu/api.h> 27 #include <asm/msr.h> 28 #include <asm/vmx.h> 29 #include <asm/paravirt.h> 30 #include <asm/alternative.h> 31 #include <asm/set_memory.h> 32 #include <asm/intel-family.h> 33 #include <asm/e820/api.h> 34 #include <asm/hypervisor.h> 35 #include <asm/tlbflush.h> 36 #include <asm/cpu.h> 37 38 #include "cpu.h" 39 40 static void __init spectre_v1_select_mitigation(void); 41 static void __init spectre_v2_select_mitigation(void); 42 static void __init retbleed_select_mitigation(void); 43 static void __init spectre_v2_user_select_mitigation(void); 44 static void __init ssb_select_mitigation(void); 45 static void __init l1tf_select_mitigation(void); 46 static void __init mds_select_mitigation(void); 47 static void __init md_clear_update_mitigation(void); 48 static void __init md_clear_select_mitigation(void); 49 static void __init taa_select_mitigation(void); 50 static void __init mmio_select_mitigation(void); 51 static void __init srbds_select_mitigation(void); 52 static void __init l1d_flush_select_mitigation(void); 53 54 /* The base value of the SPEC_CTRL MSR without task-specific bits set */ 55 u64 x86_spec_ctrl_base; 56 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); 57 58 /* The current value of the SPEC_CTRL MSR with task-specific bits set */ 59 DEFINE_PER_CPU(u64, x86_spec_ctrl_current); 60 EXPORT_SYMBOL_GPL(x86_spec_ctrl_current); 61 62 static DEFINE_MUTEX(spec_ctrl_mutex); 63 64 /* Update SPEC_CTRL MSR and its cached copy unconditionally */ 65 static void update_spec_ctrl(u64 val) 66 { 67 this_cpu_write(x86_spec_ctrl_current, val); 68 wrmsrl(MSR_IA32_SPEC_CTRL, val); 69 } 70 71 /* 72 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ 73 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update(). 74 */ 75 void update_spec_ctrl_cond(u64 val) 76 { 77 if (this_cpu_read(x86_spec_ctrl_current) == val) 78 return; 79 80 this_cpu_write(x86_spec_ctrl_current, val); 81 82 /* 83 * When KERNEL_IBRS this MSR is written on return-to-user, unless 84 * forced the update can be delayed until that time. 85 */ 86 if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS)) 87 wrmsrl(MSR_IA32_SPEC_CTRL, val); 88 } 89 90 noinstr u64 spec_ctrl_current(void) 91 { 92 return this_cpu_read(x86_spec_ctrl_current); 93 } 94 EXPORT_SYMBOL_GPL(spec_ctrl_current); 95 96 /* 97 * AMD specific MSR info for Speculative Store Bypass control. 98 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu(). 
99 */ 100 u64 __ro_after_init x86_amd_ls_cfg_base; 101 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; 102 103 /* Control conditional STIBP in switch_to() */ 104 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp); 105 /* Control conditional IBPB in switch_mm() */ 106 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); 107 /* Control unconditional IBPB in switch_mm() */ 108 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); 109 110 /* Control MDS CPU buffer clear before returning to user space */ 111 DEFINE_STATIC_KEY_FALSE(mds_user_clear); 112 EXPORT_SYMBOL_GPL(mds_user_clear); 113 /* Control MDS CPU buffer clear before idling (halt, mwait) */ 114 DEFINE_STATIC_KEY_FALSE(mds_idle_clear); 115 EXPORT_SYMBOL_GPL(mds_idle_clear); 116 117 /* 118 * Controls whether l1d flush based mitigations are enabled, 119 * based on hw features and admin setting via boot parameter 120 * defaults to false 121 */ 122 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); 123 124 /* Controls CPU Fill buffer clear before KVM guest MMIO accesses */ 125 DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear); 126 EXPORT_SYMBOL_GPL(mmio_stale_data_clear); 127 128 void __init check_bugs(void) 129 { 130 identify_boot_cpu(); 131 132 /* 133 * identify_boot_cpu() initialized SMT support information, let the 134 * core code know. 135 */ 136 cpu_smt_check_topology(); 137 138 if (!IS_ENABLED(CONFIG_SMP)) { 139 pr_info("CPU: "); 140 print_cpu_info(&boot_cpu_data); 141 } 142 143 /* 144 * Read the SPEC_CTRL MSR to account for reserved bits which may 145 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD 146 * init code as it is not enumerated and depends on the family. 147 */ 148 if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) { 149 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); 150 151 /* 152 * Previously running kernel (kexec), may have some controls 153 * turned ON. Clear them and let the mitigations setup below 154 * rediscover them based on configuration. 155 */ 156 x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK; 157 } 158 159 /* Select the proper CPU mitigations before patching alternatives: */ 160 spectre_v1_select_mitigation(); 161 spectre_v2_select_mitigation(); 162 /* 163 * retbleed_select_mitigation() relies on the state set by 164 * spectre_v2_select_mitigation(); specifically it wants to know about 165 * spectre_v2=ibrs. 166 */ 167 retbleed_select_mitigation(); 168 /* 169 * spectre_v2_user_select_mitigation() relies on the state set by 170 * retbleed_select_mitigation(); specifically the STIBP selection is 171 * forced for UNRET or IBPB. 172 */ 173 spectre_v2_user_select_mitigation(); 174 ssb_select_mitigation(); 175 l1tf_select_mitigation(); 176 md_clear_select_mitigation(); 177 srbds_select_mitigation(); 178 l1d_flush_select_mitigation(); 179 180 arch_smt_update(); 181 182 #ifdef CONFIG_X86_32 183 /* 184 * Check whether we are able to run this kernel safely on SMP. 185 * 186 * - i386 is no longer supported. 187 * - In order to run on anything without a TSC, we need to be 188 * compiled for a i486. 189 */ 190 if (boot_cpu_data.x86 < 4) 191 panic("Kernel requires i486+ for 'invlpg' and other features"); 192 193 init_utsname()->machine[1] = 194 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); 195 alternative_instructions(); 196 197 fpu__init_check_bugs(); 198 #else /* CONFIG_X86_64 */ 199 alternative_instructions(); 200 201 /* 202 * Make sure the first 2MB area is not mapped by huge pages 203 * There are typically fixed size MTRRs in there and overlapping 204 * MTRRs into large pages causes slow downs. 
205 * 206 * Right now we don't do that with gbpages because there seems 207 * very little benefit for that case. 208 */ 209 if (!direct_gbpages) 210 set_memory_4k((unsigned long)__va(0), 1); 211 #endif 212 } 213 214 /* 215 * NOTE: This function is *only* called for SVM, since Intel uses 216 * MSR_IA32_SPEC_CTRL for SSBD. 217 */ 218 void 219 x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest) 220 { 221 u64 guestval, hostval; 222 struct thread_info *ti = current_thread_info(); 223 224 /* 225 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update 226 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported. 227 */ 228 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) && 229 !static_cpu_has(X86_FEATURE_VIRT_SSBD)) 230 return; 231 232 /* 233 * If the host has SSBD mitigation enabled, force it in the host's 234 * virtual MSR value. If its not permanently enabled, evaluate 235 * current's TIF_SSBD thread flag. 236 */ 237 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE)) 238 hostval = SPEC_CTRL_SSBD; 239 else 240 hostval = ssbd_tif_to_spec_ctrl(ti->flags); 241 242 /* Sanitize the guest value */ 243 guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD; 244 245 if (hostval != guestval) { 246 unsigned long tif; 247 248 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) : 249 ssbd_spec_ctrl_to_tif(hostval); 250 251 speculation_ctrl_update(tif); 252 } 253 } 254 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); 255 256 static void x86_amd_ssb_disable(void) 257 { 258 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask; 259 260 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) 261 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); 262 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) 263 wrmsrl(MSR_AMD64_LS_CFG, msrval); 264 } 265 266 #undef pr_fmt 267 #define pr_fmt(fmt) "MDS: " fmt 268 269 /* Default mitigation for MDS-affected CPUs */ 270 static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL; 271 static bool mds_nosmt __ro_after_init = false; 272 273 static const char * const mds_strings[] = { 274 [MDS_MITIGATION_OFF] = "Vulnerable", 275 [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", 276 [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode", 277 }; 278 279 static void __init mds_select_mitigation(void) 280 { 281 if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) { 282 mds_mitigation = MDS_MITIGATION_OFF; 283 return; 284 } 285 286 if (mds_mitigation == MDS_MITIGATION_FULL) { 287 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) 288 mds_mitigation = MDS_MITIGATION_VMWERV; 289 290 static_branch_enable(&mds_user_clear); 291 292 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) && 293 (mds_nosmt || cpu_mitigations_auto_nosmt())) 294 cpu_smt_disable(false); 295 } 296 } 297 298 static int __init mds_cmdline(char *str) 299 { 300 if (!boot_cpu_has_bug(X86_BUG_MDS)) 301 return 0; 302 303 if (!str) 304 return -EINVAL; 305 306 if (!strcmp(str, "off")) 307 mds_mitigation = MDS_MITIGATION_OFF; 308 else if (!strcmp(str, "full")) 309 mds_mitigation = MDS_MITIGATION_FULL; 310 else if (!strcmp(str, "full,nosmt")) { 311 mds_mitigation = MDS_MITIGATION_FULL; 312 mds_nosmt = true; 313 } 314 315 return 0; 316 } 317 early_param("mds", mds_cmdline); 318 319 #undef pr_fmt 320 #define pr_fmt(fmt) "TAA: " fmt 321 322 enum taa_mitigations { 323 TAA_MITIGATION_OFF, 324 TAA_MITIGATION_UCODE_NEEDED, 325 TAA_MITIGATION_VERW, 326 TAA_MITIGATION_TSX_DISABLED, 327 }; 328 329 /* Default mitigation for TAA-affected CPUs */ 330 static enum taa_mitigations taa_mitigation __ro_after_init = 
TAA_MITIGATION_VERW; 331 static bool taa_nosmt __ro_after_init; 332 333 static const char * const taa_strings[] = { 334 [TAA_MITIGATION_OFF] = "Vulnerable", 335 [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", 336 [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", 337 [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled", 338 }; 339 340 static void __init taa_select_mitigation(void) 341 { 342 u64 ia32_cap; 343 344 if (!boot_cpu_has_bug(X86_BUG_TAA)) { 345 taa_mitigation = TAA_MITIGATION_OFF; 346 return; 347 } 348 349 /* TSX previously disabled by tsx=off */ 350 if (!boot_cpu_has(X86_FEATURE_RTM)) { 351 taa_mitigation = TAA_MITIGATION_TSX_DISABLED; 352 return; 353 } 354 355 if (cpu_mitigations_off()) { 356 taa_mitigation = TAA_MITIGATION_OFF; 357 return; 358 } 359 360 /* 361 * TAA mitigation via VERW is turned off if both 362 * tsx_async_abort=off and mds=off are specified. 363 */ 364 if (taa_mitigation == TAA_MITIGATION_OFF && 365 mds_mitigation == MDS_MITIGATION_OFF) 366 return; 367 368 if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) 369 taa_mitigation = TAA_MITIGATION_VERW; 370 else 371 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; 372 373 /* 374 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1. 375 * A microcode update fixes this behavior to clear CPU buffers. It also 376 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the 377 * ARCH_CAP_TSX_CTRL_MSR bit. 378 * 379 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode 380 * update is required. 381 */ 382 ia32_cap = x86_read_arch_cap_msr(); 383 if ( (ia32_cap & ARCH_CAP_MDS_NO) && 384 !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR)) 385 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; 386 387 /* 388 * TSX is enabled, select alternate mitigation for TAA which is 389 * the same as MDS. Enable MDS static branch to clear CPU buffers. 390 * 391 * For guests that can't determine whether the correct microcode is 392 * present on host, enable the mitigation for UCODE_NEEDED as well. 
393 */ 394 static_branch_enable(&mds_user_clear); 395 396 if (taa_nosmt || cpu_mitigations_auto_nosmt()) 397 cpu_smt_disable(false); 398 } 399 400 static int __init tsx_async_abort_parse_cmdline(char *str) 401 { 402 if (!boot_cpu_has_bug(X86_BUG_TAA)) 403 return 0; 404 405 if (!str) 406 return -EINVAL; 407 408 if (!strcmp(str, "off")) { 409 taa_mitigation = TAA_MITIGATION_OFF; 410 } else if (!strcmp(str, "full")) { 411 taa_mitigation = TAA_MITIGATION_VERW; 412 } else if (!strcmp(str, "full,nosmt")) { 413 taa_mitigation = TAA_MITIGATION_VERW; 414 taa_nosmt = true; 415 } 416 417 return 0; 418 } 419 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); 420 421 #undef pr_fmt 422 #define pr_fmt(fmt) "MMIO Stale Data: " fmt 423 424 enum mmio_mitigations { 425 MMIO_MITIGATION_OFF, 426 MMIO_MITIGATION_UCODE_NEEDED, 427 MMIO_MITIGATION_VERW, 428 }; 429 430 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */ 431 static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW; 432 static bool mmio_nosmt __ro_after_init = false; 433 434 static const char * const mmio_strings[] = { 435 [MMIO_MITIGATION_OFF] = "Vulnerable", 436 [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", 437 [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", 438 }; 439 440 static void __init mmio_select_mitigation(void) 441 { 442 u64 ia32_cap; 443 444 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || 445 boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) || 446 cpu_mitigations_off()) { 447 mmio_mitigation = MMIO_MITIGATION_OFF; 448 return; 449 } 450 451 if (mmio_mitigation == MMIO_MITIGATION_OFF) 452 return; 453 454 ia32_cap = x86_read_arch_cap_msr(); 455 456 /* 457 * Enable CPU buffer clear mitigation for host and VMM, if also affected 458 * by MDS or TAA. Otherwise, enable mitigation for VMM only. 459 */ 460 if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) && 461 boot_cpu_has(X86_FEATURE_RTM))) 462 static_branch_enable(&mds_user_clear); 463 else 464 static_branch_enable(&mmio_stale_data_clear); 465 466 /* 467 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can 468 * be propagated to uncore buffers, clearing the Fill buffers on idle 469 * is required irrespective of SMT state. 470 */ 471 if (!(ia32_cap & ARCH_CAP_FBSDP_NO)) 472 static_branch_enable(&mds_idle_clear); 473 474 /* 475 * Check if the system has the right microcode. 476 * 477 * CPU Fill buffer clear mitigation is enumerated by either an explicit 478 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS 479 * affected systems. 
480 */ 481 if ((ia32_cap & ARCH_CAP_FB_CLEAR) || 482 (boot_cpu_has(X86_FEATURE_MD_CLEAR) && 483 boot_cpu_has(X86_FEATURE_FLUSH_L1D) && 484 !(ia32_cap & ARCH_CAP_MDS_NO))) 485 mmio_mitigation = MMIO_MITIGATION_VERW; 486 else 487 mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED; 488 489 if (mmio_nosmt || cpu_mitigations_auto_nosmt()) 490 cpu_smt_disable(false); 491 } 492 493 static int __init mmio_stale_data_parse_cmdline(char *str) 494 { 495 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) 496 return 0; 497 498 if (!str) 499 return -EINVAL; 500 501 if (!strcmp(str, "off")) { 502 mmio_mitigation = MMIO_MITIGATION_OFF; 503 } else if (!strcmp(str, "full")) { 504 mmio_mitigation = MMIO_MITIGATION_VERW; 505 } else if (!strcmp(str, "full,nosmt")) { 506 mmio_mitigation = MMIO_MITIGATION_VERW; 507 mmio_nosmt = true; 508 } 509 510 return 0; 511 } 512 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline); 513 514 #undef pr_fmt 515 #define pr_fmt(fmt) "" fmt 516 517 static void __init md_clear_update_mitigation(void) 518 { 519 if (cpu_mitigations_off()) 520 return; 521 522 if (!static_key_enabled(&mds_user_clear)) 523 goto out; 524 525 /* 526 * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data 527 * mitigation, if necessary. 528 */ 529 if (mds_mitigation == MDS_MITIGATION_OFF && 530 boot_cpu_has_bug(X86_BUG_MDS)) { 531 mds_mitigation = MDS_MITIGATION_FULL; 532 mds_select_mitigation(); 533 } 534 if (taa_mitigation == TAA_MITIGATION_OFF && 535 boot_cpu_has_bug(X86_BUG_TAA)) { 536 taa_mitigation = TAA_MITIGATION_VERW; 537 taa_select_mitigation(); 538 } 539 if (mmio_mitigation == MMIO_MITIGATION_OFF && 540 boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) { 541 mmio_mitigation = MMIO_MITIGATION_VERW; 542 mmio_select_mitigation(); 543 } 544 out: 545 if (boot_cpu_has_bug(X86_BUG_MDS)) 546 pr_info("MDS: %s\n", mds_strings[mds_mitigation]); 547 if (boot_cpu_has_bug(X86_BUG_TAA)) 548 pr_info("TAA: %s\n", taa_strings[taa_mitigation]); 549 if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) 550 pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]); 551 else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) 552 pr_info("MMIO Stale Data: Unknown: No mitigations\n"); 553 } 554 555 static void __init md_clear_select_mitigation(void) 556 { 557 mds_select_mitigation(); 558 taa_select_mitigation(); 559 mmio_select_mitigation(); 560 561 /* 562 * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update 563 * and print their mitigation after MDS, TAA and MMIO Stale Data 564 * mitigation selection is done. 
565 */ 566 md_clear_update_mitigation(); 567 } 568 569 #undef pr_fmt 570 #define pr_fmt(fmt) "SRBDS: " fmt 571 572 enum srbds_mitigations { 573 SRBDS_MITIGATION_OFF, 574 SRBDS_MITIGATION_UCODE_NEEDED, 575 SRBDS_MITIGATION_FULL, 576 SRBDS_MITIGATION_TSX_OFF, 577 SRBDS_MITIGATION_HYPERVISOR, 578 }; 579 580 static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL; 581 582 static const char * const srbds_strings[] = { 583 [SRBDS_MITIGATION_OFF] = "Vulnerable", 584 [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", 585 [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode", 586 [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled", 587 [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", 588 }; 589 590 static bool srbds_off; 591 592 void update_srbds_msr(void) 593 { 594 u64 mcu_ctrl; 595 596 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) 597 return; 598 599 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 600 return; 601 602 if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED) 603 return; 604 605 /* 606 * A MDS_NO CPU for which SRBDS mitigation is not needed due to TSX 607 * being disabled and it hasn't received the SRBDS MSR microcode. 608 */ 609 if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) 610 return; 611 612 rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 613 614 switch (srbds_mitigation) { 615 case SRBDS_MITIGATION_OFF: 616 case SRBDS_MITIGATION_TSX_OFF: 617 mcu_ctrl |= RNGDS_MITG_DIS; 618 break; 619 case SRBDS_MITIGATION_FULL: 620 mcu_ctrl &= ~RNGDS_MITG_DIS; 621 break; 622 default: 623 break; 624 } 625 626 wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 627 } 628 629 static void __init srbds_select_mitigation(void) 630 { 631 u64 ia32_cap; 632 633 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) 634 return; 635 636 /* 637 * Check to see if this is one of the MDS_NO systems supporting TSX that 638 * are only exposed to SRBDS when TSX is enabled or when CPU is affected 639 * by Processor MMIO Stale Data vulnerability. 
640 */ 641 ia32_cap = x86_read_arch_cap_msr(); 642 if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) && 643 !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) 644 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; 645 else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 646 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; 647 else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) 648 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED; 649 else if (cpu_mitigations_off() || srbds_off) 650 srbds_mitigation = SRBDS_MITIGATION_OFF; 651 652 update_srbds_msr(); 653 pr_info("%s\n", srbds_strings[srbds_mitigation]); 654 } 655 656 static int __init srbds_parse_cmdline(char *str) 657 { 658 if (!str) 659 return -EINVAL; 660 661 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) 662 return 0; 663 664 srbds_off = !strcmp(str, "off"); 665 return 0; 666 } 667 early_param("srbds", srbds_parse_cmdline); 668 669 #undef pr_fmt 670 #define pr_fmt(fmt) "L1D Flush : " fmt 671 672 enum l1d_flush_mitigations { 673 L1D_FLUSH_OFF = 0, 674 L1D_FLUSH_ON, 675 }; 676 677 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF; 678 679 static void __init l1d_flush_select_mitigation(void) 680 { 681 if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) 682 return; 683 684 static_branch_enable(&switch_mm_cond_l1d_flush); 685 pr_info("Conditional flush on switch_mm() enabled\n"); 686 } 687 688 static int __init l1d_flush_parse_cmdline(char *str) 689 { 690 if (!strcmp(str, "on")) 691 l1d_flush_mitigation = L1D_FLUSH_ON; 692 693 return 0; 694 } 695 early_param("l1d_flush", l1d_flush_parse_cmdline); 696 697 #undef pr_fmt 698 #define pr_fmt(fmt) "Spectre V1 : " fmt 699 700 enum spectre_v1_mitigation { 701 SPECTRE_V1_MITIGATION_NONE, 702 SPECTRE_V1_MITIGATION_AUTO, 703 }; 704 705 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init = 706 SPECTRE_V1_MITIGATION_AUTO; 707 708 static const char * const spectre_v1_strings[] = { 709 [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers", 710 [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization", 711 }; 712 713 /* 714 * Does SMAP provide full mitigation against speculative kernel access to 715 * userspace? 716 */ 717 static bool smap_works_speculatively(void) 718 { 719 if (!boot_cpu_has(X86_FEATURE_SMAP)) 720 return false; 721 722 /* 723 * On CPUs which are vulnerable to Meltdown, SMAP does not 724 * prevent speculative access to user data in the L1 cache. 725 * Consider SMAP to be non-functional as a mitigation on these 726 * CPUs. 727 */ 728 if (boot_cpu_has(X86_BUG_CPU_MELTDOWN)) 729 return false; 730 731 return true; 732 } 733 734 static void __init spectre_v1_select_mitigation(void) 735 { 736 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) { 737 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; 738 return; 739 } 740 741 if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) { 742 /* 743 * With Spectre v1, a user can speculatively control either 744 * path of a conditional swapgs with a user-controlled GS 745 * value. The mitigation is to add lfences to both code paths. 746 * 747 * If FSGSBASE is enabled, the user can put a kernel address in 748 * GS, in which case SMAP provides no protection. 749 * 750 * If FSGSBASE is disabled, the user can only put a user space 751 * address in GS. That makes an attack harder, but still 752 * possible if there's no SMAP protection. 
753 */ 754 if (boot_cpu_has(X86_FEATURE_FSGSBASE) || 755 !smap_works_speculatively()) { 756 /* 757 * Mitigation can be provided from SWAPGS itself or 758 * PTI as the CR3 write in the Meltdown mitigation 759 * is serializing. 760 * 761 * If neither is there, mitigate with an LFENCE to 762 * stop speculation through swapgs. 763 */ 764 if (boot_cpu_has_bug(X86_BUG_SWAPGS) && 765 !boot_cpu_has(X86_FEATURE_PTI)) 766 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER); 767 768 /* 769 * Enable lfences in the kernel entry (non-swapgs) 770 * paths, to prevent user entry from speculatively 771 * skipping swapgs. 772 */ 773 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL); 774 } 775 } 776 777 pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]); 778 } 779 780 static int __init nospectre_v1_cmdline(char *str) 781 { 782 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; 783 return 0; 784 } 785 early_param("nospectre_v1", nospectre_v1_cmdline); 786 787 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = 788 SPECTRE_V2_NONE; 789 790 #undef pr_fmt 791 #define pr_fmt(fmt) "RETBleed: " fmt 792 793 enum retbleed_mitigation { 794 RETBLEED_MITIGATION_NONE, 795 RETBLEED_MITIGATION_UNRET, 796 RETBLEED_MITIGATION_IBPB, 797 RETBLEED_MITIGATION_IBRS, 798 RETBLEED_MITIGATION_EIBRS, 799 RETBLEED_MITIGATION_STUFF, 800 }; 801 802 enum retbleed_mitigation_cmd { 803 RETBLEED_CMD_OFF, 804 RETBLEED_CMD_AUTO, 805 RETBLEED_CMD_UNRET, 806 RETBLEED_CMD_IBPB, 807 RETBLEED_CMD_STUFF, 808 }; 809 810 static const char * const retbleed_strings[] = { 811 [RETBLEED_MITIGATION_NONE] = "Vulnerable", 812 [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk", 813 [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB", 814 [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS", 815 [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS", 816 [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing", 817 }; 818 819 static enum retbleed_mitigation retbleed_mitigation __ro_after_init = 820 RETBLEED_MITIGATION_NONE; 821 static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init = 822 RETBLEED_CMD_AUTO; 823 824 static int __ro_after_init retbleed_nosmt = false; 825 826 static int __init retbleed_parse_cmdline(char *str) 827 { 828 if (!str) 829 return -EINVAL; 830 831 while (str) { 832 char *next = strchr(str, ','); 833 if (next) { 834 *next = 0; 835 next++; 836 } 837 838 if (!strcmp(str, "off")) { 839 retbleed_cmd = RETBLEED_CMD_OFF; 840 } else if (!strcmp(str, "auto")) { 841 retbleed_cmd = RETBLEED_CMD_AUTO; 842 } else if (!strcmp(str, "unret")) { 843 retbleed_cmd = RETBLEED_CMD_UNRET; 844 } else if (!strcmp(str, "ibpb")) { 845 retbleed_cmd = RETBLEED_CMD_IBPB; 846 } else if (!strcmp(str, "stuff")) { 847 retbleed_cmd = RETBLEED_CMD_STUFF; 848 } else if (!strcmp(str, "nosmt")) { 849 retbleed_nosmt = true; 850 } else if (!strcmp(str, "force")) { 851 setup_force_cpu_bug(X86_BUG_RETBLEED); 852 } else { 853 pr_err("Ignoring unknown retbleed option (%s).", str); 854 } 855 856 str = next; 857 } 858 859 return 0; 860 } 861 early_param("retbleed", retbleed_parse_cmdline); 862 863 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n" 864 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n" 865 866 static void __init retbleed_select_mitigation(void) 867 { 868 bool mitigate_smt = false; 869 870 if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off()) 871 return; 872 873 switch 
(retbleed_cmd) { 874 case RETBLEED_CMD_OFF: 875 return; 876 877 case RETBLEED_CMD_UNRET: 878 if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) { 879 retbleed_mitigation = RETBLEED_MITIGATION_UNRET; 880 } else { 881 pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n"); 882 goto do_cmd_auto; 883 } 884 break; 885 886 case RETBLEED_CMD_IBPB: 887 if (!boot_cpu_has(X86_FEATURE_IBPB)) { 888 pr_err("WARNING: CPU does not support IBPB.\n"); 889 goto do_cmd_auto; 890 } else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) { 891 retbleed_mitigation = RETBLEED_MITIGATION_IBPB; 892 } else { 893 pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n"); 894 goto do_cmd_auto; 895 } 896 break; 897 898 case RETBLEED_CMD_STUFF: 899 if (IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING) && 900 spectre_v2_enabled == SPECTRE_V2_RETPOLINE) { 901 retbleed_mitigation = RETBLEED_MITIGATION_STUFF; 902 903 } else { 904 if (IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING)) 905 pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n"); 906 else 907 pr_err("WARNING: kernel not compiled with CALL_DEPTH_TRACKING.\n"); 908 909 goto do_cmd_auto; 910 } 911 break; 912 913 do_cmd_auto: 914 case RETBLEED_CMD_AUTO: 915 default: 916 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || 917 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { 918 if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) 919 retbleed_mitigation = RETBLEED_MITIGATION_UNRET; 920 else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB)) 921 retbleed_mitigation = RETBLEED_MITIGATION_IBPB; 922 } 923 924 /* 925 * The Intel mitigation (IBRS or eIBRS) was already selected in 926 * spectre_v2_select_mitigation(). 'retbleed_mitigation' will 927 * be set accordingly below. 928 */ 929 930 break; 931 } 932 933 switch (retbleed_mitigation) { 934 case RETBLEED_MITIGATION_UNRET: 935 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 936 setup_force_cpu_cap(X86_FEATURE_UNRET); 937 938 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && 939 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) 940 pr_err(RETBLEED_UNTRAIN_MSG); 941 942 mitigate_smt = true; 943 break; 944 945 case RETBLEED_MITIGATION_IBPB: 946 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); 947 mitigate_smt = true; 948 break; 949 950 case RETBLEED_MITIGATION_STUFF: 951 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 952 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH); 953 x86_set_skl_return_thunk(); 954 break; 955 956 default: 957 break; 958 } 959 960 if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) && 961 (retbleed_nosmt || cpu_mitigations_auto_nosmt())) 962 cpu_smt_disable(false); 963 964 /* 965 * Let IBRS trump all on Intel without affecting the effects of the 966 * retbleed= cmdline option except for call depth based stuffing 967 */ 968 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 969 switch (spectre_v2_enabled) { 970 case SPECTRE_V2_IBRS: 971 retbleed_mitigation = RETBLEED_MITIGATION_IBRS; 972 break; 973 case SPECTRE_V2_EIBRS: 974 case SPECTRE_V2_EIBRS_RETPOLINE: 975 case SPECTRE_V2_EIBRS_LFENCE: 976 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; 977 break; 978 default: 979 if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) 980 pr_err(RETBLEED_INTEL_MSG); 981 } 982 } 983 984 pr_info("%s\n", retbleed_strings[retbleed_mitigation]); 985 } 986 987 #undef pr_fmt 988 #define pr_fmt(fmt) "Spectre V2 : " fmt 989 990 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = 991 SPECTRE_V2_USER_NONE; 992 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = 993 SPECTRE_V2_USER_NONE; 994 995 #ifdef 
CONFIG_RETPOLINE 996 static bool spectre_v2_bad_module; 997 998 bool retpoline_module_ok(bool has_retpoline) 999 { 1000 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline) 1001 return true; 1002 1003 pr_err("System may be vulnerable to spectre v2\n"); 1004 spectre_v2_bad_module = true; 1005 return false; 1006 } 1007 1008 static inline const char *spectre_v2_module_string(void) 1009 { 1010 return spectre_v2_bad_module ? " - vulnerable module loaded" : ""; 1011 } 1012 #else 1013 static inline const char *spectre_v2_module_string(void) { return ""; } 1014 #endif 1015 1016 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n" 1017 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" 1018 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n" 1019 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n" 1020 1021 #ifdef CONFIG_BPF_SYSCALL 1022 void unpriv_ebpf_notify(int new_state) 1023 { 1024 if (new_state) 1025 return; 1026 1027 /* Unprivileged eBPF is enabled */ 1028 1029 switch (spectre_v2_enabled) { 1030 case SPECTRE_V2_EIBRS: 1031 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); 1032 break; 1033 case SPECTRE_V2_EIBRS_LFENCE: 1034 if (sched_smt_active()) 1035 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); 1036 break; 1037 default: 1038 break; 1039 } 1040 } 1041 #endif 1042 1043 static inline bool match_option(const char *arg, int arglen, const char *opt) 1044 { 1045 int len = strlen(opt); 1046 1047 return len == arglen && !strncmp(arg, opt, len); 1048 } 1049 1050 /* The kernel command line selection for spectre v2 */ 1051 enum spectre_v2_mitigation_cmd { 1052 SPECTRE_V2_CMD_NONE, 1053 SPECTRE_V2_CMD_AUTO, 1054 SPECTRE_V2_CMD_FORCE, 1055 SPECTRE_V2_CMD_RETPOLINE, 1056 SPECTRE_V2_CMD_RETPOLINE_GENERIC, 1057 SPECTRE_V2_CMD_RETPOLINE_LFENCE, 1058 SPECTRE_V2_CMD_EIBRS, 1059 SPECTRE_V2_CMD_EIBRS_RETPOLINE, 1060 SPECTRE_V2_CMD_EIBRS_LFENCE, 1061 SPECTRE_V2_CMD_IBRS, 1062 }; 1063 1064 enum spectre_v2_user_cmd { 1065 SPECTRE_V2_USER_CMD_NONE, 1066 SPECTRE_V2_USER_CMD_AUTO, 1067 SPECTRE_V2_USER_CMD_FORCE, 1068 SPECTRE_V2_USER_CMD_PRCTL, 1069 SPECTRE_V2_USER_CMD_PRCTL_IBPB, 1070 SPECTRE_V2_USER_CMD_SECCOMP, 1071 SPECTRE_V2_USER_CMD_SECCOMP_IBPB, 1072 }; 1073 1074 static const char * const spectre_v2_user_strings[] = { 1075 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", 1076 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", 1077 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", 1078 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", 1079 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", 1080 }; 1081 1082 static const struct { 1083 const char *option; 1084 enum spectre_v2_user_cmd cmd; 1085 bool secure; 1086 } v2_user_options[] __initconst = { 1087 { "auto", SPECTRE_V2_USER_CMD_AUTO, false }, 1088 { "off", SPECTRE_V2_USER_CMD_NONE, false }, 1089 { "on", SPECTRE_V2_USER_CMD_FORCE, true }, 1090 { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false }, 1091 { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false }, 1092 { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false }, 1093 { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false }, 1094 }; 1095 1096 static void __init 
spec_v2_user_print_cond(const char *reason, bool secure) 1097 { 1098 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) 1099 pr_info("spectre_v2_user=%s forced on command line.\n", reason); 1100 } 1101 1102 static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd; 1103 1104 static enum spectre_v2_user_cmd __init 1105 spectre_v2_parse_user_cmdline(void) 1106 { 1107 char arg[20]; 1108 int ret, i; 1109 1110 switch (spectre_v2_cmd) { 1111 case SPECTRE_V2_CMD_NONE: 1112 return SPECTRE_V2_USER_CMD_NONE; 1113 case SPECTRE_V2_CMD_FORCE: 1114 return SPECTRE_V2_USER_CMD_FORCE; 1115 default: 1116 break; 1117 } 1118 1119 ret = cmdline_find_option(boot_command_line, "spectre_v2_user", 1120 arg, sizeof(arg)); 1121 if (ret < 0) 1122 return SPECTRE_V2_USER_CMD_AUTO; 1123 1124 for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { 1125 if (match_option(arg, ret, v2_user_options[i].option)) { 1126 spec_v2_user_print_cond(v2_user_options[i].option, 1127 v2_user_options[i].secure); 1128 return v2_user_options[i].cmd; 1129 } 1130 } 1131 1132 pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg); 1133 return SPECTRE_V2_USER_CMD_AUTO; 1134 } 1135 1136 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) 1137 { 1138 return mode == SPECTRE_V2_IBRS || 1139 mode == SPECTRE_V2_EIBRS || 1140 mode == SPECTRE_V2_EIBRS_RETPOLINE || 1141 mode == SPECTRE_V2_EIBRS_LFENCE; 1142 } 1143 1144 static void __init 1145 spectre_v2_user_select_mitigation(void) 1146 { 1147 enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE; 1148 bool smt_possible = IS_ENABLED(CONFIG_SMP); 1149 enum spectre_v2_user_cmd cmd; 1150 1151 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) 1152 return; 1153 1154 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED || 1155 cpu_smt_control == CPU_SMT_NOT_SUPPORTED) 1156 smt_possible = false; 1157 1158 cmd = spectre_v2_parse_user_cmdline(); 1159 switch (cmd) { 1160 case SPECTRE_V2_USER_CMD_NONE: 1161 goto set_mode; 1162 case SPECTRE_V2_USER_CMD_FORCE: 1163 mode = SPECTRE_V2_USER_STRICT; 1164 break; 1165 case SPECTRE_V2_USER_CMD_AUTO: 1166 case SPECTRE_V2_USER_CMD_PRCTL: 1167 case SPECTRE_V2_USER_CMD_PRCTL_IBPB: 1168 mode = SPECTRE_V2_USER_PRCTL; 1169 break; 1170 case SPECTRE_V2_USER_CMD_SECCOMP: 1171 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: 1172 if (IS_ENABLED(CONFIG_SECCOMP)) 1173 mode = SPECTRE_V2_USER_SECCOMP; 1174 else 1175 mode = SPECTRE_V2_USER_PRCTL; 1176 break; 1177 } 1178 1179 /* Initialize Indirect Branch Prediction Barrier */ 1180 if (boot_cpu_has(X86_FEATURE_IBPB)) { 1181 setup_force_cpu_cap(X86_FEATURE_USE_IBPB); 1182 1183 spectre_v2_user_ibpb = mode; 1184 switch (cmd) { 1185 case SPECTRE_V2_USER_CMD_FORCE: 1186 case SPECTRE_V2_USER_CMD_PRCTL_IBPB: 1187 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: 1188 static_branch_enable(&switch_mm_always_ibpb); 1189 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; 1190 break; 1191 case SPECTRE_V2_USER_CMD_PRCTL: 1192 case SPECTRE_V2_USER_CMD_AUTO: 1193 case SPECTRE_V2_USER_CMD_SECCOMP: 1194 static_branch_enable(&switch_mm_cond_ibpb); 1195 break; 1196 default: 1197 break; 1198 } 1199 1200 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", 1201 static_key_enabled(&switch_mm_always_ibpb) ? 1202 "always-on" : "conditional"); 1203 } 1204 1205 /* 1206 * If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible, 1207 * STIBP is not required. 
1208 */ 1209 if (!boot_cpu_has(X86_FEATURE_STIBP) || 1210 !smt_possible || 1211 spectre_v2_in_ibrs_mode(spectre_v2_enabled)) 1212 return; 1213 1214 /* 1215 * At this point, an STIBP mode other than "off" has been set. 1216 * If STIBP support is not being forced, check if STIBP always-on 1217 * is preferred. 1218 */ 1219 if (mode != SPECTRE_V2_USER_STRICT && 1220 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) 1221 mode = SPECTRE_V2_USER_STRICT_PREFERRED; 1222 1223 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || 1224 retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { 1225 if (mode != SPECTRE_V2_USER_STRICT && 1226 mode != SPECTRE_V2_USER_STRICT_PREFERRED) 1227 pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n"); 1228 mode = SPECTRE_V2_USER_STRICT_PREFERRED; 1229 } 1230 1231 spectre_v2_user_stibp = mode; 1232 1233 set_mode: 1234 pr_info("%s\n", spectre_v2_user_strings[mode]); 1235 } 1236 1237 static const char * const spectre_v2_strings[] = { 1238 [SPECTRE_V2_NONE] = "Vulnerable", 1239 [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines", 1240 [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE", 1241 [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS", 1242 [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE", 1243 [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines", 1244 [SPECTRE_V2_IBRS] = "Mitigation: IBRS", 1245 }; 1246 1247 static const struct { 1248 const char *option; 1249 enum spectre_v2_mitigation_cmd cmd; 1250 bool secure; 1251 } mitigation_options[] __initconst = { 1252 { "off", SPECTRE_V2_CMD_NONE, false }, 1253 { "on", SPECTRE_V2_CMD_FORCE, true }, 1254 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, 1255 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, 1256 { "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, 1257 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, 1258 { "eibrs", SPECTRE_V2_CMD_EIBRS, false }, 1259 { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false }, 1260 { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false }, 1261 { "auto", SPECTRE_V2_CMD_AUTO, false }, 1262 { "ibrs", SPECTRE_V2_CMD_IBRS, false }, 1263 }; 1264 1265 static void __init spec_v2_print_cond(const char *reason, bool secure) 1266 { 1267 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) 1268 pr_info("%s selected on command line.\n", reason); 1269 } 1270 1271 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) 1272 { 1273 enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; 1274 char arg[20]; 1275 int ret, i; 1276 1277 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") || 1278 cpu_mitigations_off()) 1279 return SPECTRE_V2_CMD_NONE; 1280 1281 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); 1282 if (ret < 0) 1283 return SPECTRE_V2_CMD_AUTO; 1284 1285 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { 1286 if (!match_option(arg, ret, mitigation_options[i].option)) 1287 continue; 1288 cmd = mitigation_options[i].cmd; 1289 break; 1290 } 1291 1292 if (i >= ARRAY_SIZE(mitigation_options)) { 1293 pr_err("unknown option (%s). 
Switching to AUTO select\n", arg); 1294 return SPECTRE_V2_CMD_AUTO; 1295 } 1296 1297 if ((cmd == SPECTRE_V2_CMD_RETPOLINE || 1298 cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || 1299 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC || 1300 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || 1301 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && 1302 !IS_ENABLED(CONFIG_RETPOLINE)) { 1303 pr_err("%s selected but not compiled in. Switching to AUTO select\n", 1304 mitigation_options[i].option); 1305 return SPECTRE_V2_CMD_AUTO; 1306 } 1307 1308 if ((cmd == SPECTRE_V2_CMD_EIBRS || 1309 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || 1310 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && 1311 !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { 1312 pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n", 1313 mitigation_options[i].option); 1314 return SPECTRE_V2_CMD_AUTO; 1315 } 1316 1317 if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || 1318 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) && 1319 !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { 1320 pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n", 1321 mitigation_options[i].option); 1322 return SPECTRE_V2_CMD_AUTO; 1323 } 1324 1325 if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) { 1326 pr_err("%s selected but not compiled in. Switching to AUTO select\n", 1327 mitigation_options[i].option); 1328 return SPECTRE_V2_CMD_AUTO; 1329 } 1330 1331 if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { 1332 pr_err("%s selected but not Intel CPU. Switching to AUTO select\n", 1333 mitigation_options[i].option); 1334 return SPECTRE_V2_CMD_AUTO; 1335 } 1336 1337 if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) { 1338 pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n", 1339 mitigation_options[i].option); 1340 return SPECTRE_V2_CMD_AUTO; 1341 } 1342 1343 if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) { 1344 pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n", 1345 mitigation_options[i].option); 1346 return SPECTRE_V2_CMD_AUTO; 1347 } 1348 1349 spec_v2_print_cond(mitigation_options[i].option, 1350 mitigation_options[i].secure); 1351 return cmd; 1352 } 1353 1354 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void) 1355 { 1356 if (!IS_ENABLED(CONFIG_RETPOLINE)) { 1357 pr_err("Kernel not compiled with retpoline; no mitigation available!"); 1358 return SPECTRE_V2_NONE; 1359 } 1360 1361 return SPECTRE_V2_RETPOLINE; 1362 } 1363 1364 /* Disable in-kernel use of non-RSB RET predictors */ 1365 static void __init spec_ctrl_disable_kernel_rrsba(void) 1366 { 1367 u64 ia32_cap; 1368 1369 if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL)) 1370 return; 1371 1372 ia32_cap = x86_read_arch_cap_msr(); 1373 1374 if (ia32_cap & ARCH_CAP_RRSBA) { 1375 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S; 1376 update_spec_ctrl(x86_spec_ctrl_base); 1377 } 1378 } 1379 1380 static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode) 1381 { 1382 /* 1383 * Similar to context switches, there are two types of RSB attacks 1384 * after VM exit: 1385 * 1386 * 1) RSB underflow 1387 * 1388 * 2) Poisoned RSB entry 1389 * 1390 * When retpoline is enabled, both are mitigated by filling/clearing 1391 * the RSB. 1392 * 1393 * When IBRS is enabled, while #1 would be mitigated by the IBRS branch 1394 * prediction isolation protections, RSB still needs to be cleared 1395 * because of #2. 
Note that SMEP provides no protection here, unlike 1396 * user-space-poisoned RSB entries. 1397 * 1398 * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB 1399 * bug is present then a LITE version of RSB protection is required, 1400 * just a single call needs to retire before a RET is executed. 1401 */ 1402 switch (mode) { 1403 case SPECTRE_V2_NONE: 1404 return; 1405 1406 case SPECTRE_V2_EIBRS_LFENCE: 1407 case SPECTRE_V2_EIBRS: 1408 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { 1409 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); 1410 pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n"); 1411 } 1412 return; 1413 1414 case SPECTRE_V2_EIBRS_RETPOLINE: 1415 case SPECTRE_V2_RETPOLINE: 1416 case SPECTRE_V2_LFENCE: 1417 case SPECTRE_V2_IBRS: 1418 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT); 1419 pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n"); 1420 return; 1421 } 1422 1423 pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit"); 1424 dump_stack(); 1425 } 1426 1427 static void __init spectre_v2_select_mitigation(void) 1428 { 1429 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); 1430 enum spectre_v2_mitigation mode = SPECTRE_V2_NONE; 1431 1432 /* 1433 * If the CPU is not affected and the command line mode is NONE or AUTO 1434 * then nothing to do. 1435 */ 1436 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && 1437 (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO)) 1438 return; 1439 1440 switch (cmd) { 1441 case SPECTRE_V2_CMD_NONE: 1442 return; 1443 1444 case SPECTRE_V2_CMD_FORCE: 1445 case SPECTRE_V2_CMD_AUTO: 1446 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { 1447 mode = SPECTRE_V2_EIBRS; 1448 break; 1449 } 1450 1451 if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) && 1452 boot_cpu_has_bug(X86_BUG_RETBLEED) && 1453 retbleed_cmd != RETBLEED_CMD_OFF && 1454 retbleed_cmd != RETBLEED_CMD_STUFF && 1455 boot_cpu_has(X86_FEATURE_IBRS) && 1456 boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 1457 mode = SPECTRE_V2_IBRS; 1458 break; 1459 } 1460 1461 mode = spectre_v2_select_retpoline(); 1462 break; 1463 1464 case SPECTRE_V2_CMD_RETPOLINE_LFENCE: 1465 pr_err(SPECTRE_V2_LFENCE_MSG); 1466 mode = SPECTRE_V2_LFENCE; 1467 break; 1468 1469 case SPECTRE_V2_CMD_RETPOLINE_GENERIC: 1470 mode = SPECTRE_V2_RETPOLINE; 1471 break; 1472 1473 case SPECTRE_V2_CMD_RETPOLINE: 1474 mode = spectre_v2_select_retpoline(); 1475 break; 1476 1477 case SPECTRE_V2_CMD_IBRS: 1478 mode = SPECTRE_V2_IBRS; 1479 break; 1480 1481 case SPECTRE_V2_CMD_EIBRS: 1482 mode = SPECTRE_V2_EIBRS; 1483 break; 1484 1485 case SPECTRE_V2_CMD_EIBRS_LFENCE: 1486 mode = SPECTRE_V2_EIBRS_LFENCE; 1487 break; 1488 1489 case SPECTRE_V2_CMD_EIBRS_RETPOLINE: 1490 mode = SPECTRE_V2_EIBRS_RETPOLINE; 1491 break; 1492 } 1493 1494 if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) 1495 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); 1496 1497 if (spectre_v2_in_ibrs_mode(mode)) { 1498 if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) { 1499 msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); 1500 } else { 1501 x86_spec_ctrl_base |= SPEC_CTRL_IBRS; 1502 update_spec_ctrl(x86_spec_ctrl_base); 1503 } 1504 } 1505 1506 switch (mode) { 1507 case SPECTRE_V2_NONE: 1508 case SPECTRE_V2_EIBRS: 1509 break; 1510 1511 case SPECTRE_V2_IBRS: 1512 setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS); 1513 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) 1514 pr_warn(SPECTRE_V2_IBRS_PERF_MSG); 1515 break; 1516 1517 case SPECTRE_V2_LFENCE: 1518 case SPECTRE_V2_EIBRS_LFENCE: 1519 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE); 1520 fallthrough; 
1521 1522 case SPECTRE_V2_RETPOLINE: 1523 case SPECTRE_V2_EIBRS_RETPOLINE: 1524 setup_force_cpu_cap(X86_FEATURE_RETPOLINE); 1525 break; 1526 } 1527 1528 /* 1529 * Disable alternate RSB predictions in kernel when indirect CALLs and 1530 * JMPs gets protection against BHI and Intramode-BTI, but RET 1531 * prediction from a non-RSB predictor is still a risk. 1532 */ 1533 if (mode == SPECTRE_V2_EIBRS_LFENCE || 1534 mode == SPECTRE_V2_EIBRS_RETPOLINE || 1535 mode == SPECTRE_V2_RETPOLINE) 1536 spec_ctrl_disable_kernel_rrsba(); 1537 1538 spectre_v2_enabled = mode; 1539 pr_info("%s\n", spectre_v2_strings[mode]); 1540 1541 /* 1542 * If Spectre v2 protection has been enabled, fill the RSB during a 1543 * context switch. In general there are two types of RSB attacks 1544 * across context switches, for which the CALLs/RETs may be unbalanced. 1545 * 1546 * 1) RSB underflow 1547 * 1548 * Some Intel parts have "bottomless RSB". When the RSB is empty, 1549 * speculated return targets may come from the branch predictor, 1550 * which could have a user-poisoned BTB or BHB entry. 1551 * 1552 * AMD has it even worse: *all* returns are speculated from the BTB, 1553 * regardless of the state of the RSB. 1554 * 1555 * When IBRS or eIBRS is enabled, the "user -> kernel" attack 1556 * scenario is mitigated by the IBRS branch prediction isolation 1557 * properties, so the RSB buffer filling wouldn't be necessary to 1558 * protect against this type of attack. 1559 * 1560 * The "user -> user" attack scenario is mitigated by RSB filling. 1561 * 1562 * 2) Poisoned RSB entry 1563 * 1564 * If the 'next' in-kernel return stack is shorter than 'prev', 1565 * 'next' could be tricked into speculating with a user-poisoned RSB 1566 * entry. 1567 * 1568 * The "user -> kernel" attack scenario is mitigated by SMEP and 1569 * eIBRS. 1570 * 1571 * The "user -> user" scenario, also known as SpectreBHB, requires 1572 * RSB clearing. 1573 * 1574 * So to mitigate all cases, unconditionally fill RSB on context 1575 * switches. 1576 * 1577 * FIXME: Is this pointless for retbleed-affected AMD? 1578 */ 1579 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); 1580 pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); 1581 1582 spectre_v2_determine_rsb_fill_type_at_vmexit(mode); 1583 1584 /* 1585 * Retpoline protects the kernel, but doesn't protect firmware. IBRS 1586 * and Enhanced IBRS protect firmware too, so enable IBRS around 1587 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't 1588 * otherwise enabled. 1589 * 1590 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because 1591 * the user might select retpoline on the kernel command line and if 1592 * the CPU supports Enhanced IBRS, kernel might un-intentionally not 1593 * enable IBRS around firmware calls. 
1594 */ 1595 if (boot_cpu_has_bug(X86_BUG_RETBLEED) && 1596 boot_cpu_has(X86_FEATURE_IBPB) && 1597 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || 1598 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) { 1599 1600 if (retbleed_cmd != RETBLEED_CMD_IBPB) { 1601 setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW); 1602 pr_info("Enabling Speculation Barrier for firmware calls\n"); 1603 } 1604 1605 } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) { 1606 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); 1607 pr_info("Enabling Restricted Speculation for firmware calls\n"); 1608 } 1609 1610 /* Set up IBPB and STIBP depending on the general spectre V2 command */ 1611 spectre_v2_cmd = cmd; 1612 } 1613 1614 static void update_stibp_msr(void * __unused) 1615 { 1616 u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP); 1617 update_spec_ctrl(val); 1618 } 1619 1620 /* Update x86_spec_ctrl_base in case SMT state changed. */ 1621 static void update_stibp_strict(void) 1622 { 1623 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP; 1624 1625 if (sched_smt_active()) 1626 mask |= SPEC_CTRL_STIBP; 1627 1628 if (mask == x86_spec_ctrl_base) 1629 return; 1630 1631 pr_info("Update user space SMT mitigation: STIBP %s\n", 1632 mask & SPEC_CTRL_STIBP ? "always-on" : "off"); 1633 x86_spec_ctrl_base = mask; 1634 on_each_cpu(update_stibp_msr, NULL, 1); 1635 } 1636 1637 /* Update the static key controlling the evaluation of TIF_SPEC_IB */ 1638 static void update_indir_branch_cond(void) 1639 { 1640 if (sched_smt_active()) 1641 static_branch_enable(&switch_to_cond_stibp); 1642 else 1643 static_branch_disable(&switch_to_cond_stibp); 1644 } 1645 1646 #undef pr_fmt 1647 #define pr_fmt(fmt) fmt 1648 1649 /* Update the static key controlling the MDS CPU buffer clear in idle */ 1650 static void update_mds_branch_idle(void) 1651 { 1652 u64 ia32_cap = x86_read_arch_cap_msr(); 1653 1654 /* 1655 * Enable the idle clearing if SMT is active on CPUs which are 1656 * affected only by MSBDS and not any other MDS variant. 1657 * 1658 * The other variants cannot be mitigated when SMT is enabled, so 1659 * clearing the buffers on idle just to prevent the Store Buffer 1660 * repartitioning leak would be a window dressing exercise. 1661 */ 1662 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) 1663 return; 1664 1665 if (sched_smt_active()) { 1666 static_branch_enable(&mds_idle_clear); 1667 } else if (mmio_mitigation == MMIO_MITIGATION_OFF || 1668 (ia32_cap & ARCH_CAP_FBSDP_NO)) { 1669 static_branch_disable(&mds_idle_clear); 1670 } 1671 } 1672 1673 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" 1674 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" 1675 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. 
See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" 1676 1677 void cpu_bugs_smt_update(void) 1678 { 1679 mutex_lock(&spec_ctrl_mutex); 1680 1681 if (sched_smt_active() && unprivileged_ebpf_enabled() && 1682 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) 1683 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); 1684 1685 switch (spectre_v2_user_stibp) { 1686 case SPECTRE_V2_USER_NONE: 1687 break; 1688 case SPECTRE_V2_USER_STRICT: 1689 case SPECTRE_V2_USER_STRICT_PREFERRED: 1690 update_stibp_strict(); 1691 break; 1692 case SPECTRE_V2_USER_PRCTL: 1693 case SPECTRE_V2_USER_SECCOMP: 1694 update_indir_branch_cond(); 1695 break; 1696 } 1697 1698 switch (mds_mitigation) { 1699 case MDS_MITIGATION_FULL: 1700 case MDS_MITIGATION_VMWERV: 1701 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) 1702 pr_warn_once(MDS_MSG_SMT); 1703 update_mds_branch_idle(); 1704 break; 1705 case MDS_MITIGATION_OFF: 1706 break; 1707 } 1708 1709 switch (taa_mitigation) { 1710 case TAA_MITIGATION_VERW: 1711 case TAA_MITIGATION_UCODE_NEEDED: 1712 if (sched_smt_active()) 1713 pr_warn_once(TAA_MSG_SMT); 1714 break; 1715 case TAA_MITIGATION_TSX_DISABLED: 1716 case TAA_MITIGATION_OFF: 1717 break; 1718 } 1719 1720 switch (mmio_mitigation) { 1721 case MMIO_MITIGATION_VERW: 1722 case MMIO_MITIGATION_UCODE_NEEDED: 1723 if (sched_smt_active()) 1724 pr_warn_once(MMIO_MSG_SMT); 1725 break; 1726 case MMIO_MITIGATION_OFF: 1727 break; 1728 } 1729 1730 mutex_unlock(&spec_ctrl_mutex); 1731 } 1732 1733 #undef pr_fmt 1734 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt 1735 1736 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE; 1737 1738 /* The kernel command line selection */ 1739 enum ssb_mitigation_cmd { 1740 SPEC_STORE_BYPASS_CMD_NONE, 1741 SPEC_STORE_BYPASS_CMD_AUTO, 1742 SPEC_STORE_BYPASS_CMD_ON, 1743 SPEC_STORE_BYPASS_CMD_PRCTL, 1744 SPEC_STORE_BYPASS_CMD_SECCOMP, 1745 }; 1746 1747 static const char * const ssb_strings[] = { 1748 [SPEC_STORE_BYPASS_NONE] = "Vulnerable", 1749 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", 1750 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", 1751 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", 1752 }; 1753 1754 static const struct { 1755 const char *option; 1756 enum ssb_mitigation_cmd cmd; 1757 } ssb_mitigation_options[] __initconst = { 1758 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ 1759 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ 1760 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ 1761 { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */ 1762 { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */ 1763 }; 1764 1765 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) 1766 { 1767 enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO; 1768 char arg[20]; 1769 int ret, i; 1770 1771 if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") || 1772 cpu_mitigations_off()) { 1773 return SPEC_STORE_BYPASS_CMD_NONE; 1774 } else { 1775 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", 1776 arg, sizeof(arg)); 1777 if (ret < 0) 1778 return SPEC_STORE_BYPASS_CMD_AUTO; 1779 1780 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) { 1781 if 
(!match_option(arg, ret, ssb_mitigation_options[i].option)) 1782 continue; 1783 1784 cmd = ssb_mitigation_options[i].cmd; 1785 break; 1786 } 1787 1788 if (i >= ARRAY_SIZE(ssb_mitigation_options)) { 1789 pr_err("unknown option (%s). Switching to AUTO select\n", arg); 1790 return SPEC_STORE_BYPASS_CMD_AUTO; 1791 } 1792 } 1793 1794 return cmd; 1795 } 1796 1797 static enum ssb_mitigation __init __ssb_select_mitigation(void) 1798 { 1799 enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE; 1800 enum ssb_mitigation_cmd cmd; 1801 1802 if (!boot_cpu_has(X86_FEATURE_SSBD)) 1803 return mode; 1804 1805 cmd = ssb_parse_cmdline(); 1806 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && 1807 (cmd == SPEC_STORE_BYPASS_CMD_NONE || 1808 cmd == SPEC_STORE_BYPASS_CMD_AUTO)) 1809 return mode; 1810 1811 switch (cmd) { 1812 case SPEC_STORE_BYPASS_CMD_SECCOMP: 1813 /* 1814 * Choose prctl+seccomp as the default mode if seccomp is 1815 * enabled. 1816 */ 1817 if (IS_ENABLED(CONFIG_SECCOMP)) 1818 mode = SPEC_STORE_BYPASS_SECCOMP; 1819 else 1820 mode = SPEC_STORE_BYPASS_PRCTL; 1821 break; 1822 case SPEC_STORE_BYPASS_CMD_ON: 1823 mode = SPEC_STORE_BYPASS_DISABLE; 1824 break; 1825 case SPEC_STORE_BYPASS_CMD_AUTO: 1826 case SPEC_STORE_BYPASS_CMD_PRCTL: 1827 mode = SPEC_STORE_BYPASS_PRCTL; 1828 break; 1829 case SPEC_STORE_BYPASS_CMD_NONE: 1830 break; 1831 } 1832 1833 /* 1834 * We have three CPU feature flags that are in play here: 1835 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. 1836 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass 1837 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation 1838 */ 1839 if (mode == SPEC_STORE_BYPASS_DISABLE) { 1840 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); 1841 /* 1842 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may 1843 * use a completely different MSR and bit dependent on family. 1844 */ 1845 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && 1846 !static_cpu_has(X86_FEATURE_AMD_SSBD)) { 1847 x86_amd_ssb_disable(); 1848 } else { 1849 x86_spec_ctrl_base |= SPEC_CTRL_SSBD; 1850 update_spec_ctrl(x86_spec_ctrl_base); 1851 } 1852 } 1853 1854 return mode; 1855 } 1856 1857 static void ssb_select_mitigation(void) 1858 { 1859 ssb_mode = __ssb_select_mitigation(); 1860 1861 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 1862 pr_info("%s\n", ssb_strings[ssb_mode]); 1863 } 1864 1865 #undef pr_fmt 1866 #define pr_fmt(fmt) "Speculation prctl: " fmt 1867 1868 static void task_update_spec_tif(struct task_struct *tsk) 1869 { 1870 /* Force the update of the real TIF bits */ 1871 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); 1872 1873 /* 1874 * Immediately update the speculation control MSRs for the current 1875 * task, but for a non-current task delay setting the CPU 1876 * mitigation until it is scheduled next. 1877 * 1878 * This can only happen for SECCOMP mitigation. For PRCTL it's 1879 * always the current task. 
static void task_update_spec_tif(struct task_struct *tsk)
{
	/* Force the update of the real TIF bits */
	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

	/*
	 * Immediately update the speculation control MSRs for the current
	 * task, but for a non-current task delay setting the CPU
	 * mitigation until it is scheduled next.
	 *
	 * This can only happen for SECCOMP mitigation. For PRCTL it's
	 * always the current task.
	 */
	if (tsk == current)
		speculation_ctrl_update_current();
}

static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return -EPERM;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	case PR_SPEC_DISABLE:
		clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	default:
		return -ERANGE;
	}
}

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}
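/*
 * True if either the IBPB or STIBP user space mitigation is runtime
 * controlled via prctl() or seccomp().
 */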
static bool is_spec_ib_user_controlled(void)
{
	return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
	       spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
	       spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
	       spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
}

static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return 0;

		/*
		 * With strict mode for both IBPB and STIBP, the instruction
		 * code paths avoid checking this task flag and instead,
		 * unconditionally run the instruction. However, STIBP and IBPB
		 * are independent and either can be set to conditionally
		 * enabled regardless of the mode of the other.
		 *
		 * If either is set to conditional, allow the task flag to be
		 * updated, unless it was force-disabled by a previous prctl
		 * call. Currently, this is possible on an AMD CPU which has the
		 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
		 * kernel is booted with 'spectre_v2_user=seccomp', then
		 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
		 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
		 */
		if (!is_spec_ib_user_controlled() ||
		    task_spec_ib_force_disable(task))
			return -EPERM;

		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * Indirect branch speculation is always allowed when
		 * mitigation is force disabled.
		 */
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return -EPERM;

		if (!is_spec_ib_user_controlled())
			return 0;

		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		if (task == current)
			indirect_branch_prediction_barrier();
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_set(task, ctrl);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int l1d_flush_prctl_get(struct task_struct *task)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return PR_SPEC_FORCE_DISABLE;

	if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	else
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
}

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_noexec(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

static int ib_prctl_get(struct task_struct *task)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return PR_SPEC_NOT_AFFECTED;

	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
	    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
		return PR_SPEC_ENABLE;
	else if (is_spec_ib_user_controlled()) {
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
		return PR_SPEC_DISABLE;
	else
		return PR_SPEC_NOT_AFFECTED;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_get(task);
	default:
		return -ENODEV;
	}
}
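/*
 * Propagate the SPEC_CTRL base value and SSBD selection established at boot
 * to an application processor during bringup.
 */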
void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		update_spec_ctrl(x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

bool itlb_multihit_kvm_mitigation;
EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support 44bits physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the top most address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the top most
 * address bit due to memory holes, which has been observed on machines
 * which report 36bits physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44bit internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44bit if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL:
	case INTEL_FAM6_HASWELL_L:
	case INTEL_FAM6_HASWELL_G:
	case INTEL_FAM6_BROADWELL:
	case INTEL_FAM6_BROADWELL_G:
	case INTEL_FAM6_SKYLAKE_L:
	case INTEL_FAM6_SKYLAKE:
	case INTEL_FAM6_KABYLAKE_L:
	case INTEL_FAM6_KABYLAKE:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}
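/*
 * Select the L1TF mitigation mode, disabling SMT where requested, and warn
 * when PTE inversion cannot cover all of the installed physical memory.
 */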
static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	if (cpu_mitigations_off())
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (cpu_mitigations_auto_nosmt())
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
	    e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
			half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}

static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	fmt
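/*
 * Report the mitigation state via the sysfs files under
 * /sys/devices/system/cpu/vulnerabilities/.
 */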
"vulnerable" : 2302 sched_smt_active() ? "mitigated" : "disabled")); 2303 } 2304 2305 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], 2306 sched_smt_active() ? "vulnerable" : "disabled"); 2307 } 2308 2309 static ssize_t tsx_async_abort_show_state(char *buf) 2310 { 2311 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || 2312 (taa_mitigation == TAA_MITIGATION_OFF)) 2313 return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]); 2314 2315 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 2316 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 2317 taa_strings[taa_mitigation]); 2318 } 2319 2320 return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], 2321 sched_smt_active() ? "vulnerable" : "disabled"); 2322 } 2323 2324 static ssize_t mmio_stale_data_show_state(char *buf) 2325 { 2326 if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) 2327 return sysfs_emit(buf, "Unknown: No mitigations\n"); 2328 2329 if (mmio_mitigation == MMIO_MITIGATION_OFF) 2330 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); 2331 2332 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 2333 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 2334 mmio_strings[mmio_mitigation]); 2335 } 2336 2337 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], 2338 sched_smt_active() ? "vulnerable" : "disabled"); 2339 } 2340 2341 static char *stibp_state(void) 2342 { 2343 if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) 2344 return ""; 2345 2346 switch (spectre_v2_user_stibp) { 2347 case SPECTRE_V2_USER_NONE: 2348 return ", STIBP: disabled"; 2349 case SPECTRE_V2_USER_STRICT: 2350 return ", STIBP: forced"; 2351 case SPECTRE_V2_USER_STRICT_PREFERRED: 2352 return ", STIBP: always-on"; 2353 case SPECTRE_V2_USER_PRCTL: 2354 case SPECTRE_V2_USER_SECCOMP: 2355 if (static_key_enabled(&switch_to_cond_stibp)) 2356 return ", STIBP: conditional"; 2357 } 2358 return ""; 2359 } 2360 2361 static char *ibpb_state(void) 2362 { 2363 if (boot_cpu_has(X86_FEATURE_IBPB)) { 2364 if (static_key_enabled(&switch_mm_always_ibpb)) 2365 return ", IBPB: always-on"; 2366 if (static_key_enabled(&switch_mm_cond_ibpb)) 2367 return ", IBPB: conditional"; 2368 return ", IBPB: disabled"; 2369 } 2370 return ""; 2371 } 2372 2373 static char *pbrsb_eibrs_state(void) 2374 { 2375 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { 2376 if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || 2377 boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) 2378 return ", PBRSB-eIBRS: SW sequence"; 2379 else 2380 return ", PBRSB-eIBRS: Vulnerable"; 2381 } else { 2382 return ", PBRSB-eIBRS: Not affected"; 2383 } 2384 } 2385 2386 static ssize_t spectre_v2_show_state(char *buf) 2387 { 2388 if (spectre_v2_enabled == SPECTRE_V2_LFENCE) 2389 return sysfs_emit(buf, "Vulnerable: LFENCE\n"); 2390 2391 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) 2392 return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); 2393 2394 if (sched_smt_active() && unprivileged_ebpf_enabled() && 2395 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) 2396 return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); 2397 2398 return sysfs_emit(buf, "%s%s%s%s%s%s%s\n", 2399 spectre_v2_strings[spectre_v2_enabled], 2400 ibpb_state(), 2401 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", 2402 stibp_state(), 2403 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? 
", RSB filling" : "", 2404 pbrsb_eibrs_state(), 2405 spectre_v2_module_string()); 2406 } 2407 2408 static ssize_t srbds_show_state(char *buf) 2409 { 2410 return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]); 2411 } 2412 2413 static ssize_t retbleed_show_state(char *buf) 2414 { 2415 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || 2416 retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { 2417 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && 2418 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) 2419 return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); 2420 2421 return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation], 2422 !sched_smt_active() ? "disabled" : 2423 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || 2424 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? 2425 "enabled with STIBP protection" : "vulnerable"); 2426 } 2427 2428 return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]); 2429 } 2430 2431 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, 2432 char *buf, unsigned int bug) 2433 { 2434 if (!boot_cpu_has_bug(bug)) 2435 return sysfs_emit(buf, "Not affected\n"); 2436 2437 switch (bug) { 2438 case X86_BUG_CPU_MELTDOWN: 2439 if (boot_cpu_has(X86_FEATURE_PTI)) 2440 return sysfs_emit(buf, "Mitigation: PTI\n"); 2441 2442 if (hypervisor_is_type(X86_HYPER_XEN_PV)) 2443 return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); 2444 2445 break; 2446 2447 case X86_BUG_SPECTRE_V1: 2448 return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); 2449 2450 case X86_BUG_SPECTRE_V2: 2451 return spectre_v2_show_state(buf); 2452 2453 case X86_BUG_SPEC_STORE_BYPASS: 2454 return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]); 2455 2456 case X86_BUG_L1TF: 2457 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) 2458 return l1tf_show_state(buf); 2459 break; 2460 2461 case X86_BUG_MDS: 2462 return mds_show_state(buf); 2463 2464 case X86_BUG_TAA: 2465 return tsx_async_abort_show_state(buf); 2466 2467 case X86_BUG_ITLB_MULTIHIT: 2468 return itlb_multihit_show_state(buf); 2469 2470 case X86_BUG_SRBDS: 2471 return srbds_show_state(buf); 2472 2473 case X86_BUG_MMIO_STALE_DATA: 2474 case X86_BUG_MMIO_UNKNOWN: 2475 return mmio_stale_data_show_state(buf); 2476 2477 case X86_BUG_RETBLEED: 2478 return retbleed_show_state(buf); 2479 2480 default: 2481 break; 2482 } 2483 2484 return sysfs_emit(buf, "Vulnerable\n"); 2485 } 2486 2487 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) 2488 { 2489 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); 2490 } 2491 2492 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) 2493 { 2494 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); 2495 } 2496 2497 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) 2498 { 2499 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); 2500 } 2501 2502 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) 2503 { 2504 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); 2505 } 2506 2507 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) 2508 { 2509 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); 2510 } 2511 2512 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) 2513 { 2514 return cpu_show_common(dev, attr, buf, 
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sysfs_emit(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sysfs_emit(buf, "Mitigation: PTI\n");

		if (hypervisor_is_type(X86_HYPER_XEN_PV))
			return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

	case X86_BUG_SPECTRE_V2:
		return spectre_v2_show_state(buf);

	case X86_BUG_SPEC_STORE_BYPASS:
		return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	case X86_BUG_TAA:
		return tsx_async_abort_show_state(buf);

	case X86_BUG_ITLB_MULTIHIT:
		return itlb_multihit_show_state(buf);

	case X86_BUG_SRBDS:
		return srbds_show_state(buf);

	case X86_BUG_MMIO_STALE_DATA:
	case X86_BUG_MMIO_UNKNOWN:
		return mmio_stale_data_show_state(buf);

	case X86_BUG_RETBLEED:
		return retbleed_show_state(buf);

	default:
		break;
	}

	return sysfs_emit(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}

ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
}

ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}

ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
}

ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
	else
		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}

ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
}
#endif