1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 1994 Linus Torvalds 4 * 5 * Cyrix stuff, June 1998 by: 6 * - Rafael R. Reilova (moved everything from head.S), 7 * <rreilova@ececs.uc.edu> 8 * - Channing Corn (tests & fixes), 9 * - Andrew D. Balsa (code cleanup). 10 */ 11 #include <linux/init.h> 12 #include <linux/utsname.h> 13 #include <linux/cpu.h> 14 #include <linux/module.h> 15 #include <linux/nospec.h> 16 #include <linux/prctl.h> 17 #include <linux/sched/smt.h> 18 #include <linux/pgtable.h> 19 #include <linux/bpf.h> 20 21 #include <asm/spec-ctrl.h> 22 #include <asm/cmdline.h> 23 #include <asm/bugs.h> 24 #include <asm/processor.h> 25 #include <asm/processor-flags.h> 26 #include <asm/fpu/api.h> 27 #include <asm/msr.h> 28 #include <asm/vmx.h> 29 #include <asm/paravirt.h> 30 #include <asm/alternative.h> 31 #include <asm/set_memory.h> 32 #include <asm/intel-family.h> 33 #include <asm/e820/api.h> 34 #include <asm/hypervisor.h> 35 #include <asm/tlbflush.h> 36 37 #include "cpu.h" 38 39 static void __init spectre_v1_select_mitigation(void); 40 static void __init spectre_v2_select_mitigation(void); 41 static void __init retbleed_select_mitigation(void); 42 static void __init spectre_v2_user_select_mitigation(void); 43 static void __init ssb_select_mitigation(void); 44 static void __init l1tf_select_mitigation(void); 45 static void __init mds_select_mitigation(void); 46 static void __init md_clear_update_mitigation(void); 47 static void __init md_clear_select_mitigation(void); 48 static void __init taa_select_mitigation(void); 49 static void __init mmio_select_mitigation(void); 50 static void __init srbds_select_mitigation(void); 51 static void __init l1d_flush_select_mitigation(void); 52 53 /* The base value of the SPEC_CTRL MSR without task-specific bits set */ 54 u64 x86_spec_ctrl_base; 55 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); 56 57 /* The current value of the SPEC_CTRL MSR with task-specific bits set */ 58 DEFINE_PER_CPU(u64, x86_spec_ctrl_current); 59 EXPORT_SYMBOL_GPL(x86_spec_ctrl_current); 60 61 static DEFINE_MUTEX(spec_ctrl_mutex); 62 63 /* 64 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ 65 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update(). 66 */ 67 void write_spec_ctrl_current(u64 val, bool force) 68 { 69 if (this_cpu_read(x86_spec_ctrl_current) == val) 70 return; 71 72 this_cpu_write(x86_spec_ctrl_current, val); 73 74 /* 75 * When KERNEL_IBRS this MSR is written on return-to-user, unless 76 * forced the update can be delayed until that time. 77 */ 78 if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS)) 79 wrmsrl(MSR_IA32_SPEC_CTRL, val); 80 } 81 82 u64 spec_ctrl_current(void) 83 { 84 return this_cpu_read(x86_spec_ctrl_current); 85 } 86 EXPORT_SYMBOL_GPL(spec_ctrl_current); 87 88 /* 89 * AMD specific MSR info for Speculative Store Bypass control. 90 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu(). 91 */ 92 u64 __ro_after_init x86_amd_ls_cfg_base; 93 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; 94 95 /* Control conditional STIBP in switch_to() */ 96 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp); 97 /* Control conditional IBPB in switch_mm() */ 98 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); 99 /* Control unconditional IBPB in switch_mm() */ 100 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); 101 102 /* Control MDS CPU buffer clear before returning to user space */ 103 DEFINE_STATIC_KEY_FALSE(mds_user_clear); 104 EXPORT_SYMBOL_GPL(mds_user_clear); 105 /* Control MDS CPU buffer clear before idling (halt, mwait) */ 106 DEFINE_STATIC_KEY_FALSE(mds_idle_clear); 107 EXPORT_SYMBOL_GPL(mds_idle_clear); 108 109 /* 110 * Controls whether l1d flush based mitigations are enabled, 111 * based on hw features and admin setting via boot parameter 112 * defaults to false 113 */ 114 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); 115 116 /* Controls CPU Fill buffer clear before KVM guest MMIO accesses */ 117 DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear); 118 EXPORT_SYMBOL_GPL(mmio_stale_data_clear); 119 120 void __init check_bugs(void) 121 { 122 identify_boot_cpu(); 123 124 /* 125 * identify_boot_cpu() initialized SMT support information, let the 126 * core code know. 127 */ 128 cpu_smt_check_topology(); 129 130 if (!IS_ENABLED(CONFIG_SMP)) { 131 pr_info("CPU: "); 132 print_cpu_info(&boot_cpu_data); 133 } 134 135 /* 136 * Read the SPEC_CTRL MSR to account for reserved bits which may 137 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD 138 * init code as it is not enumerated and depends on the family. 139 */ 140 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) 141 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); 142 143 /* Select the proper CPU mitigations before patching alternatives: */ 144 spectre_v1_select_mitigation(); 145 spectre_v2_select_mitigation(); 146 /* 147 * retbleed_select_mitigation() relies on the state set by 148 * spectre_v2_select_mitigation(); specifically it wants to know about 149 * spectre_v2=ibrs. 150 */ 151 retbleed_select_mitigation(); 152 /* 153 * spectre_v2_user_select_mitigation() relies on the state set by 154 * retbleed_select_mitigation(); specifically the STIBP selection is 155 * forced for UNRET or IBPB. 156 */ 157 spectre_v2_user_select_mitigation(); 158 ssb_select_mitigation(); 159 l1tf_select_mitigation(); 160 md_clear_select_mitigation(); 161 srbds_select_mitigation(); 162 l1d_flush_select_mitigation(); 163 164 arch_smt_update(); 165 166 #ifdef CONFIG_X86_32 167 /* 168 * Check whether we are able to run this kernel safely on SMP. 169 * 170 * - i386 is no longer supported. 171 * - In order to run on anything without a TSC, we need to be 172 * compiled for a i486. 173 */ 174 if (boot_cpu_data.x86 < 4) 175 panic("Kernel requires i486+ for 'invlpg' and other features"); 176 177 init_utsname()->machine[1] = 178 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); 179 alternative_instructions(); 180 181 fpu__init_check_bugs(); 182 #else /* CONFIG_X86_64 */ 183 alternative_instructions(); 184 185 /* 186 * Make sure the first 2MB area is not mapped by huge pages 187 * There are typically fixed size MTRRs in there and overlapping 188 * MTRRs into large pages causes slow downs. 189 * 190 * Right now we don't do that with gbpages because there seems 191 * very little benefit for that case. 192 */ 193 if (!direct_gbpages) 194 set_memory_4k((unsigned long)__va(0), 1); 195 #endif 196 } 197 198 /* 199 * NOTE: This function is *only* called for SVM, since Intel uses 200 * MSR_IA32_SPEC_CTRL for SSBD. 201 */ 202 void 203 x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest) 204 { 205 u64 guestval, hostval; 206 struct thread_info *ti = current_thread_info(); 207 208 /* 209 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update 210 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported. 211 */ 212 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) && 213 !static_cpu_has(X86_FEATURE_VIRT_SSBD)) 214 return; 215 216 /* 217 * If the host has SSBD mitigation enabled, force it in the host's 218 * virtual MSR value. If its not permanently enabled, evaluate 219 * current's TIF_SSBD thread flag. 220 */ 221 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE)) 222 hostval = SPEC_CTRL_SSBD; 223 else 224 hostval = ssbd_tif_to_spec_ctrl(ti->flags); 225 226 /* Sanitize the guest value */ 227 guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD; 228 229 if (hostval != guestval) { 230 unsigned long tif; 231 232 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) : 233 ssbd_spec_ctrl_to_tif(hostval); 234 235 speculation_ctrl_update(tif); 236 } 237 } 238 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); 239 240 static void x86_amd_ssb_disable(void) 241 { 242 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask; 243 244 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) 245 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); 246 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) 247 wrmsrl(MSR_AMD64_LS_CFG, msrval); 248 } 249 250 #undef pr_fmt 251 #define pr_fmt(fmt) "MDS: " fmt 252 253 /* Default mitigation for MDS-affected CPUs */ 254 static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL; 255 static bool mds_nosmt __ro_after_init = false; 256 257 static const char * const mds_strings[] = { 258 [MDS_MITIGATION_OFF] = "Vulnerable", 259 [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", 260 [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode", 261 }; 262 263 static void __init mds_select_mitigation(void) 264 { 265 if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) { 266 mds_mitigation = MDS_MITIGATION_OFF; 267 return; 268 } 269 270 if (mds_mitigation == MDS_MITIGATION_FULL) { 271 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) 272 mds_mitigation = MDS_MITIGATION_VMWERV; 273 274 static_branch_enable(&mds_user_clear); 275 276 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) && 277 (mds_nosmt || cpu_mitigations_auto_nosmt())) 278 cpu_smt_disable(false); 279 } 280 } 281 282 static int __init mds_cmdline(char *str) 283 { 284 if (!boot_cpu_has_bug(X86_BUG_MDS)) 285 return 0; 286 287 if (!str) 288 return -EINVAL; 289 290 if (!strcmp(str, "off")) 291 mds_mitigation = MDS_MITIGATION_OFF; 292 else if (!strcmp(str, "full")) 293 mds_mitigation = MDS_MITIGATION_FULL; 294 else if (!strcmp(str, "full,nosmt")) { 295 mds_mitigation = MDS_MITIGATION_FULL; 296 mds_nosmt = true; 297 } 298 299 return 0; 300 } 301 early_param("mds", mds_cmdline); 302 303 #undef pr_fmt 304 #define pr_fmt(fmt) "TAA: " fmt 305 306 enum taa_mitigations { 307 TAA_MITIGATION_OFF, 308 TAA_MITIGATION_UCODE_NEEDED, 309 TAA_MITIGATION_VERW, 310 TAA_MITIGATION_TSX_DISABLED, 311 }; 312 313 /* Default mitigation for TAA-affected CPUs */ 314 static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW; 315 static bool taa_nosmt __ro_after_init; 316 317 static const char * const taa_strings[] = { 318 [TAA_MITIGATION_OFF] = "Vulnerable", 319 [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", 320 [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", 321 [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled", 322 }; 323 324 static void __init taa_select_mitigation(void) 325 { 326 u64 ia32_cap; 327 328 if (!boot_cpu_has_bug(X86_BUG_TAA)) { 329 taa_mitigation = TAA_MITIGATION_OFF; 330 return; 331 } 332 333 /* TSX previously disabled by tsx=off */ 334 if (!boot_cpu_has(X86_FEATURE_RTM)) { 335 taa_mitigation = TAA_MITIGATION_TSX_DISABLED; 336 return; 337 } 338 339 if (cpu_mitigations_off()) { 340 taa_mitigation = TAA_MITIGATION_OFF; 341 return; 342 } 343 344 /* 345 * TAA mitigation via VERW is turned off if both 346 * tsx_async_abort=off and mds=off are specified. 347 */ 348 if (taa_mitigation == TAA_MITIGATION_OFF && 349 mds_mitigation == MDS_MITIGATION_OFF) 350 return; 351 352 if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) 353 taa_mitigation = TAA_MITIGATION_VERW; 354 else 355 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; 356 357 /* 358 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1. 359 * A microcode update fixes this behavior to clear CPU buffers. It also 360 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the 361 * ARCH_CAP_TSX_CTRL_MSR bit. 362 * 363 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode 364 * update is required. 365 */ 366 ia32_cap = x86_read_arch_cap_msr(); 367 if ( (ia32_cap & ARCH_CAP_MDS_NO) && 368 !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR)) 369 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; 370 371 /* 372 * TSX is enabled, select alternate mitigation for TAA which is 373 * the same as MDS. Enable MDS static branch to clear CPU buffers. 374 * 375 * For guests that can't determine whether the correct microcode is 376 * present on host, enable the mitigation for UCODE_NEEDED as well. 377 */ 378 static_branch_enable(&mds_user_clear); 379 380 if (taa_nosmt || cpu_mitigations_auto_nosmt()) 381 cpu_smt_disable(false); 382 } 383 384 static int __init tsx_async_abort_parse_cmdline(char *str) 385 { 386 if (!boot_cpu_has_bug(X86_BUG_TAA)) 387 return 0; 388 389 if (!str) 390 return -EINVAL; 391 392 if (!strcmp(str, "off")) { 393 taa_mitigation = TAA_MITIGATION_OFF; 394 } else if (!strcmp(str, "full")) { 395 taa_mitigation = TAA_MITIGATION_VERW; 396 } else if (!strcmp(str, "full,nosmt")) { 397 taa_mitigation = TAA_MITIGATION_VERW; 398 taa_nosmt = true; 399 } 400 401 return 0; 402 } 403 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); 404 405 #undef pr_fmt 406 #define pr_fmt(fmt) "MMIO Stale Data: " fmt 407 408 enum mmio_mitigations { 409 MMIO_MITIGATION_OFF, 410 MMIO_MITIGATION_UCODE_NEEDED, 411 MMIO_MITIGATION_VERW, 412 }; 413 414 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */ 415 static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW; 416 static bool mmio_nosmt __ro_after_init = false; 417 418 static const char * const mmio_strings[] = { 419 [MMIO_MITIGATION_OFF] = "Vulnerable", 420 [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", 421 [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", 422 }; 423 424 static void __init mmio_select_mitigation(void) 425 { 426 u64 ia32_cap; 427 428 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || 429 boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) || 430 cpu_mitigations_off()) { 431 mmio_mitigation = MMIO_MITIGATION_OFF; 432 return; 433 } 434 435 if (mmio_mitigation == MMIO_MITIGATION_OFF) 436 return; 437 438 ia32_cap = x86_read_arch_cap_msr(); 439 440 /* 441 * Enable CPU buffer clear mitigation for host and VMM, if also affected 442 * by MDS or TAA. Otherwise, enable mitigation for VMM only. 443 */ 444 if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) && 445 boot_cpu_has(X86_FEATURE_RTM))) 446 static_branch_enable(&mds_user_clear); 447 else 448 static_branch_enable(&mmio_stale_data_clear); 449 450 /* 451 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can 452 * be propagated to uncore buffers, clearing the Fill buffers on idle 453 * is required irrespective of SMT state. 454 */ 455 if (!(ia32_cap & ARCH_CAP_FBSDP_NO)) 456 static_branch_enable(&mds_idle_clear); 457 458 /* 459 * Check if the system has the right microcode. 460 * 461 * CPU Fill buffer clear mitigation is enumerated by either an explicit 462 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS 463 * affected systems. 464 */ 465 if ((ia32_cap & ARCH_CAP_FB_CLEAR) || 466 (boot_cpu_has(X86_FEATURE_MD_CLEAR) && 467 boot_cpu_has(X86_FEATURE_FLUSH_L1D) && 468 !(ia32_cap & ARCH_CAP_MDS_NO))) 469 mmio_mitigation = MMIO_MITIGATION_VERW; 470 else 471 mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED; 472 473 if (mmio_nosmt || cpu_mitigations_auto_nosmt()) 474 cpu_smt_disable(false); 475 } 476 477 static int __init mmio_stale_data_parse_cmdline(char *str) 478 { 479 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) 480 return 0; 481 482 if (!str) 483 return -EINVAL; 484 485 if (!strcmp(str, "off")) { 486 mmio_mitigation = MMIO_MITIGATION_OFF; 487 } else if (!strcmp(str, "full")) { 488 mmio_mitigation = MMIO_MITIGATION_VERW; 489 } else if (!strcmp(str, "full,nosmt")) { 490 mmio_mitigation = MMIO_MITIGATION_VERW; 491 mmio_nosmt = true; 492 } 493 494 return 0; 495 } 496 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline); 497 498 #undef pr_fmt 499 #define pr_fmt(fmt) "" fmt 500 501 static void __init md_clear_update_mitigation(void) 502 { 503 if (cpu_mitigations_off()) 504 return; 505 506 if (!static_key_enabled(&mds_user_clear)) 507 goto out; 508 509 /* 510 * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data 511 * mitigation, if necessary. 512 */ 513 if (mds_mitigation == MDS_MITIGATION_OFF && 514 boot_cpu_has_bug(X86_BUG_MDS)) { 515 mds_mitigation = MDS_MITIGATION_FULL; 516 mds_select_mitigation(); 517 } 518 if (taa_mitigation == TAA_MITIGATION_OFF && 519 boot_cpu_has_bug(X86_BUG_TAA)) { 520 taa_mitigation = TAA_MITIGATION_VERW; 521 taa_select_mitigation(); 522 } 523 if (mmio_mitigation == MMIO_MITIGATION_OFF && 524 boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) { 525 mmio_mitigation = MMIO_MITIGATION_VERW; 526 mmio_select_mitigation(); 527 } 528 out: 529 if (boot_cpu_has_bug(X86_BUG_MDS)) 530 pr_info("MDS: %s\n", mds_strings[mds_mitigation]); 531 if (boot_cpu_has_bug(X86_BUG_TAA)) 532 pr_info("TAA: %s\n", taa_strings[taa_mitigation]); 533 if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) 534 pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]); 535 else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) 536 pr_info("MMIO Stale Data: Unknown: No mitigations\n"); 537 } 538 539 static void __init md_clear_select_mitigation(void) 540 { 541 mds_select_mitigation(); 542 taa_select_mitigation(); 543 mmio_select_mitigation(); 544 545 /* 546 * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update 547 * and print their mitigation after MDS, TAA and MMIO Stale Data 548 * mitigation selection is done. 549 */ 550 md_clear_update_mitigation(); 551 } 552 553 #undef pr_fmt 554 #define pr_fmt(fmt) "SRBDS: " fmt 555 556 enum srbds_mitigations { 557 SRBDS_MITIGATION_OFF, 558 SRBDS_MITIGATION_UCODE_NEEDED, 559 SRBDS_MITIGATION_FULL, 560 SRBDS_MITIGATION_TSX_OFF, 561 SRBDS_MITIGATION_HYPERVISOR, 562 }; 563 564 static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL; 565 566 static const char * const srbds_strings[] = { 567 [SRBDS_MITIGATION_OFF] = "Vulnerable", 568 [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", 569 [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode", 570 [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled", 571 [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", 572 }; 573 574 static bool srbds_off; 575 576 void update_srbds_msr(void) 577 { 578 u64 mcu_ctrl; 579 580 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) 581 return; 582 583 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 584 return; 585 586 if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED) 587 return; 588 589 /* 590 * A MDS_NO CPU for which SRBDS mitigation is not needed due to TSX 591 * being disabled and it hasn't received the SRBDS MSR microcode. 592 */ 593 if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) 594 return; 595 596 rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 597 598 switch (srbds_mitigation) { 599 case SRBDS_MITIGATION_OFF: 600 case SRBDS_MITIGATION_TSX_OFF: 601 mcu_ctrl |= RNGDS_MITG_DIS; 602 break; 603 case SRBDS_MITIGATION_FULL: 604 mcu_ctrl &= ~RNGDS_MITG_DIS; 605 break; 606 default: 607 break; 608 } 609 610 wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 611 } 612 613 static void __init srbds_select_mitigation(void) 614 { 615 u64 ia32_cap; 616 617 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) 618 return; 619 620 /* 621 * Check to see if this is one of the MDS_NO systems supporting TSX that 622 * are only exposed to SRBDS when TSX is enabled or when CPU is affected 623 * by Processor MMIO Stale Data vulnerability. 624 */ 625 ia32_cap = x86_read_arch_cap_msr(); 626 if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) && 627 !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) 628 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; 629 else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 630 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; 631 else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) 632 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED; 633 else if (cpu_mitigations_off() || srbds_off) 634 srbds_mitigation = SRBDS_MITIGATION_OFF; 635 636 update_srbds_msr(); 637 pr_info("%s\n", srbds_strings[srbds_mitigation]); 638 } 639 640 static int __init srbds_parse_cmdline(char *str) 641 { 642 if (!str) 643 return -EINVAL; 644 645 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) 646 return 0; 647 648 srbds_off = !strcmp(str, "off"); 649 return 0; 650 } 651 early_param("srbds", srbds_parse_cmdline); 652 653 #undef pr_fmt 654 #define pr_fmt(fmt) "L1D Flush : " fmt 655 656 enum l1d_flush_mitigations { 657 L1D_FLUSH_OFF = 0, 658 L1D_FLUSH_ON, 659 }; 660 661 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF; 662 663 static void __init l1d_flush_select_mitigation(void) 664 { 665 if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) 666 return; 667 668 static_branch_enable(&switch_mm_cond_l1d_flush); 669 pr_info("Conditional flush on switch_mm() enabled\n"); 670 } 671 672 static int __init l1d_flush_parse_cmdline(char *str) 673 { 674 if (!strcmp(str, "on")) 675 l1d_flush_mitigation = L1D_FLUSH_ON; 676 677 return 0; 678 } 679 early_param("l1d_flush", l1d_flush_parse_cmdline); 680 681 #undef pr_fmt 682 #define pr_fmt(fmt) "Spectre V1 : " fmt 683 684 enum spectre_v1_mitigation { 685 SPECTRE_V1_MITIGATION_NONE, 686 SPECTRE_V1_MITIGATION_AUTO, 687 }; 688 689 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init = 690 SPECTRE_V1_MITIGATION_AUTO; 691 692 static const char * const spectre_v1_strings[] = { 693 [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers", 694 [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization", 695 }; 696 697 /* 698 * Does SMAP provide full mitigation against speculative kernel access to 699 * userspace? 700 */ 701 static bool smap_works_speculatively(void) 702 { 703 if (!boot_cpu_has(X86_FEATURE_SMAP)) 704 return false; 705 706 /* 707 * On CPUs which are vulnerable to Meltdown, SMAP does not 708 * prevent speculative access to user data in the L1 cache. 709 * Consider SMAP to be non-functional as a mitigation on these 710 * CPUs. 711 */ 712 if (boot_cpu_has(X86_BUG_CPU_MELTDOWN)) 713 return false; 714 715 return true; 716 } 717 718 static void __init spectre_v1_select_mitigation(void) 719 { 720 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) { 721 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; 722 return; 723 } 724 725 if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) { 726 /* 727 * With Spectre v1, a user can speculatively control either 728 * path of a conditional swapgs with a user-controlled GS 729 * value. The mitigation is to add lfences to both code paths. 730 * 731 * If FSGSBASE is enabled, the user can put a kernel address in 732 * GS, in which case SMAP provides no protection. 733 * 734 * If FSGSBASE is disabled, the user can only put a user space 735 * address in GS. That makes an attack harder, but still 736 * possible if there's no SMAP protection. 737 */ 738 if (boot_cpu_has(X86_FEATURE_FSGSBASE) || 739 !smap_works_speculatively()) { 740 /* 741 * Mitigation can be provided from SWAPGS itself or 742 * PTI as the CR3 write in the Meltdown mitigation 743 * is serializing. 744 * 745 * If neither is there, mitigate with an LFENCE to 746 * stop speculation through swapgs. 747 */ 748 if (boot_cpu_has_bug(X86_BUG_SWAPGS) && 749 !boot_cpu_has(X86_FEATURE_PTI)) 750 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER); 751 752 /* 753 * Enable lfences in the kernel entry (non-swapgs) 754 * paths, to prevent user entry from speculatively 755 * skipping swapgs. 756 */ 757 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL); 758 } 759 } 760 761 pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]); 762 } 763 764 static int __init nospectre_v1_cmdline(char *str) 765 { 766 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; 767 return 0; 768 } 769 early_param("nospectre_v1", nospectre_v1_cmdline); 770 771 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = 772 SPECTRE_V2_NONE; 773 774 #undef pr_fmt 775 #define pr_fmt(fmt) "RETBleed: " fmt 776 777 enum retbleed_mitigation { 778 RETBLEED_MITIGATION_NONE, 779 RETBLEED_MITIGATION_UNRET, 780 RETBLEED_MITIGATION_IBPB, 781 RETBLEED_MITIGATION_IBRS, 782 RETBLEED_MITIGATION_EIBRS, 783 }; 784 785 enum retbleed_mitigation_cmd { 786 RETBLEED_CMD_OFF, 787 RETBLEED_CMD_AUTO, 788 RETBLEED_CMD_UNRET, 789 RETBLEED_CMD_IBPB, 790 }; 791 792 static const char * const retbleed_strings[] = { 793 [RETBLEED_MITIGATION_NONE] = "Vulnerable", 794 [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk", 795 [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB", 796 [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS", 797 [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS", 798 }; 799 800 static enum retbleed_mitigation retbleed_mitigation __ro_after_init = 801 RETBLEED_MITIGATION_NONE; 802 static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init = 803 RETBLEED_CMD_AUTO; 804 805 static int __ro_after_init retbleed_nosmt = false; 806 807 static int __init retbleed_parse_cmdline(char *str) 808 { 809 if (!str) 810 return -EINVAL; 811 812 while (str) { 813 char *next = strchr(str, ','); 814 if (next) { 815 *next = 0; 816 next++; 817 } 818 819 if (!strcmp(str, "off")) { 820 retbleed_cmd = RETBLEED_CMD_OFF; 821 } else if (!strcmp(str, "auto")) { 822 retbleed_cmd = RETBLEED_CMD_AUTO; 823 } else if (!strcmp(str, "unret")) { 824 retbleed_cmd = RETBLEED_CMD_UNRET; 825 } else if (!strcmp(str, "ibpb")) { 826 retbleed_cmd = RETBLEED_CMD_IBPB; 827 } else if (!strcmp(str, "nosmt")) { 828 retbleed_nosmt = true; 829 } else { 830 pr_err("Ignoring unknown retbleed option (%s).", str); 831 } 832 833 str = next; 834 } 835 836 return 0; 837 } 838 early_param("retbleed", retbleed_parse_cmdline); 839 840 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n" 841 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n" 842 843 static void __init retbleed_select_mitigation(void) 844 { 845 bool mitigate_smt = false; 846 847 if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off()) 848 return; 849 850 switch (retbleed_cmd) { 851 case RETBLEED_CMD_OFF: 852 return; 853 854 case RETBLEED_CMD_UNRET: 855 if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) { 856 retbleed_mitigation = RETBLEED_MITIGATION_UNRET; 857 } else { 858 pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n"); 859 goto do_cmd_auto; 860 } 861 break; 862 863 case RETBLEED_CMD_IBPB: 864 if (!boot_cpu_has(X86_FEATURE_IBPB)) { 865 pr_err("WARNING: CPU does not support IBPB.\n"); 866 goto do_cmd_auto; 867 } else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) { 868 retbleed_mitigation = RETBLEED_MITIGATION_IBPB; 869 } else { 870 pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n"); 871 goto do_cmd_auto; 872 } 873 break; 874 875 do_cmd_auto: 876 case RETBLEED_CMD_AUTO: 877 default: 878 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || 879 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { 880 if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) 881 retbleed_mitigation = RETBLEED_MITIGATION_UNRET; 882 else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB)) 883 retbleed_mitigation = RETBLEED_MITIGATION_IBPB; 884 } 885 886 /* 887 * The Intel mitigation (IBRS or eIBRS) was already selected in 888 * spectre_v2_select_mitigation(). 'retbleed_mitigation' will 889 * be set accordingly below. 890 */ 891 892 break; 893 } 894 895 switch (retbleed_mitigation) { 896 case RETBLEED_MITIGATION_UNRET: 897 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 898 setup_force_cpu_cap(X86_FEATURE_UNRET); 899 900 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && 901 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) 902 pr_err(RETBLEED_UNTRAIN_MSG); 903 904 mitigate_smt = true; 905 break; 906 907 case RETBLEED_MITIGATION_IBPB: 908 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); 909 mitigate_smt = true; 910 break; 911 912 default: 913 break; 914 } 915 916 if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) && 917 (retbleed_nosmt || cpu_mitigations_auto_nosmt())) 918 cpu_smt_disable(false); 919 920 /* 921 * Let IBRS trump all on Intel without affecting the effects of the 922 * retbleed= cmdline option. 923 */ 924 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 925 switch (spectre_v2_enabled) { 926 case SPECTRE_V2_IBRS: 927 retbleed_mitigation = RETBLEED_MITIGATION_IBRS; 928 break; 929 case SPECTRE_V2_EIBRS: 930 case SPECTRE_V2_EIBRS_RETPOLINE: 931 case SPECTRE_V2_EIBRS_LFENCE: 932 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; 933 break; 934 default: 935 pr_err(RETBLEED_INTEL_MSG); 936 } 937 } 938 939 pr_info("%s\n", retbleed_strings[retbleed_mitigation]); 940 } 941 942 #undef pr_fmt 943 #define pr_fmt(fmt) "Spectre V2 : " fmt 944 945 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = 946 SPECTRE_V2_USER_NONE; 947 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = 948 SPECTRE_V2_USER_NONE; 949 950 #ifdef CONFIG_RETPOLINE 951 static bool spectre_v2_bad_module; 952 953 bool retpoline_module_ok(bool has_retpoline) 954 { 955 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline) 956 return true; 957 958 pr_err("System may be vulnerable to spectre v2\n"); 959 spectre_v2_bad_module = true; 960 return false; 961 } 962 963 static inline const char *spectre_v2_module_string(void) 964 { 965 return spectre_v2_bad_module ? " - vulnerable module loaded" : ""; 966 } 967 #else 968 static inline const char *spectre_v2_module_string(void) { return ""; } 969 #endif 970 971 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n" 972 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" 973 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n" 974 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n" 975 976 #ifdef CONFIG_BPF_SYSCALL 977 void unpriv_ebpf_notify(int new_state) 978 { 979 if (new_state) 980 return; 981 982 /* Unprivileged eBPF is enabled */ 983 984 switch (spectre_v2_enabled) { 985 case SPECTRE_V2_EIBRS: 986 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); 987 break; 988 case SPECTRE_V2_EIBRS_LFENCE: 989 if (sched_smt_active()) 990 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); 991 break; 992 default: 993 break; 994 } 995 } 996 #endif 997 998 static inline bool match_option(const char *arg, int arglen, const char *opt) 999 { 1000 int len = strlen(opt); 1001 1002 return len == arglen && !strncmp(arg, opt, len); 1003 } 1004 1005 /* The kernel command line selection for spectre v2 */ 1006 enum spectre_v2_mitigation_cmd { 1007 SPECTRE_V2_CMD_NONE, 1008 SPECTRE_V2_CMD_AUTO, 1009 SPECTRE_V2_CMD_FORCE, 1010 SPECTRE_V2_CMD_RETPOLINE, 1011 SPECTRE_V2_CMD_RETPOLINE_GENERIC, 1012 SPECTRE_V2_CMD_RETPOLINE_LFENCE, 1013 SPECTRE_V2_CMD_EIBRS, 1014 SPECTRE_V2_CMD_EIBRS_RETPOLINE, 1015 SPECTRE_V2_CMD_EIBRS_LFENCE, 1016 SPECTRE_V2_CMD_IBRS, 1017 }; 1018 1019 enum spectre_v2_user_cmd { 1020 SPECTRE_V2_USER_CMD_NONE, 1021 SPECTRE_V2_USER_CMD_AUTO, 1022 SPECTRE_V2_USER_CMD_FORCE, 1023 SPECTRE_V2_USER_CMD_PRCTL, 1024 SPECTRE_V2_USER_CMD_PRCTL_IBPB, 1025 SPECTRE_V2_USER_CMD_SECCOMP, 1026 SPECTRE_V2_USER_CMD_SECCOMP_IBPB, 1027 }; 1028 1029 static const char * const spectre_v2_user_strings[] = { 1030 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", 1031 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", 1032 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", 1033 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", 1034 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", 1035 }; 1036 1037 static const struct { 1038 const char *option; 1039 enum spectre_v2_user_cmd cmd; 1040 bool secure; 1041 } v2_user_options[] __initconst = { 1042 { "auto", SPECTRE_V2_USER_CMD_AUTO, false }, 1043 { "off", SPECTRE_V2_USER_CMD_NONE, false }, 1044 { "on", SPECTRE_V2_USER_CMD_FORCE, true }, 1045 { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false }, 1046 { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false }, 1047 { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false }, 1048 { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false }, 1049 }; 1050 1051 static void __init spec_v2_user_print_cond(const char *reason, bool secure) 1052 { 1053 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) 1054 pr_info("spectre_v2_user=%s forced on command line.\n", reason); 1055 } 1056 1057 static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd; 1058 1059 static enum spectre_v2_user_cmd __init 1060 spectre_v2_parse_user_cmdline(void) 1061 { 1062 char arg[20]; 1063 int ret, i; 1064 1065 switch (spectre_v2_cmd) { 1066 case SPECTRE_V2_CMD_NONE: 1067 return SPECTRE_V2_USER_CMD_NONE; 1068 case SPECTRE_V2_CMD_FORCE: 1069 return SPECTRE_V2_USER_CMD_FORCE; 1070 default: 1071 break; 1072 } 1073 1074 ret = cmdline_find_option(boot_command_line, "spectre_v2_user", 1075 arg, sizeof(arg)); 1076 if (ret < 0) 1077 return SPECTRE_V2_USER_CMD_AUTO; 1078 1079 for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { 1080 if (match_option(arg, ret, v2_user_options[i].option)) { 1081 spec_v2_user_print_cond(v2_user_options[i].option, 1082 v2_user_options[i].secure); 1083 return v2_user_options[i].cmd; 1084 } 1085 } 1086 1087 pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg); 1088 return SPECTRE_V2_USER_CMD_AUTO; 1089 } 1090 1091 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) 1092 { 1093 return mode == SPECTRE_V2_IBRS || 1094 mode == SPECTRE_V2_EIBRS || 1095 mode == SPECTRE_V2_EIBRS_RETPOLINE || 1096 mode == SPECTRE_V2_EIBRS_LFENCE; 1097 } 1098 1099 static void __init 1100 spectre_v2_user_select_mitigation(void) 1101 { 1102 enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE; 1103 bool smt_possible = IS_ENABLED(CONFIG_SMP); 1104 enum spectre_v2_user_cmd cmd; 1105 1106 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) 1107 return; 1108 1109 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED || 1110 cpu_smt_control == CPU_SMT_NOT_SUPPORTED) 1111 smt_possible = false; 1112 1113 cmd = spectre_v2_parse_user_cmdline(); 1114 switch (cmd) { 1115 case SPECTRE_V2_USER_CMD_NONE: 1116 goto set_mode; 1117 case SPECTRE_V2_USER_CMD_FORCE: 1118 mode = SPECTRE_V2_USER_STRICT; 1119 break; 1120 case SPECTRE_V2_USER_CMD_AUTO: 1121 case SPECTRE_V2_USER_CMD_PRCTL: 1122 case SPECTRE_V2_USER_CMD_PRCTL_IBPB: 1123 mode = SPECTRE_V2_USER_PRCTL; 1124 break; 1125 case SPECTRE_V2_USER_CMD_SECCOMP: 1126 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: 1127 if (IS_ENABLED(CONFIG_SECCOMP)) 1128 mode = SPECTRE_V2_USER_SECCOMP; 1129 else 1130 mode = SPECTRE_V2_USER_PRCTL; 1131 break; 1132 } 1133 1134 /* Initialize Indirect Branch Prediction Barrier */ 1135 if (boot_cpu_has(X86_FEATURE_IBPB)) { 1136 setup_force_cpu_cap(X86_FEATURE_USE_IBPB); 1137 1138 spectre_v2_user_ibpb = mode; 1139 switch (cmd) { 1140 case SPECTRE_V2_USER_CMD_FORCE: 1141 case SPECTRE_V2_USER_CMD_PRCTL_IBPB: 1142 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: 1143 static_branch_enable(&switch_mm_always_ibpb); 1144 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; 1145 break; 1146 case SPECTRE_V2_USER_CMD_PRCTL: 1147 case SPECTRE_V2_USER_CMD_AUTO: 1148 case SPECTRE_V2_USER_CMD_SECCOMP: 1149 static_branch_enable(&switch_mm_cond_ibpb); 1150 break; 1151 default: 1152 break; 1153 } 1154 1155 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", 1156 static_key_enabled(&switch_mm_always_ibpb) ? 1157 "always-on" : "conditional"); 1158 } 1159 1160 /* 1161 * If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible, 1162 * STIBP is not required. 1163 */ 1164 if (!boot_cpu_has(X86_FEATURE_STIBP) || 1165 !smt_possible || 1166 spectre_v2_in_ibrs_mode(spectre_v2_enabled)) 1167 return; 1168 1169 /* 1170 * At this point, an STIBP mode other than "off" has been set. 1171 * If STIBP support is not being forced, check if STIBP always-on 1172 * is preferred. 1173 */ 1174 if (mode != SPECTRE_V2_USER_STRICT && 1175 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) 1176 mode = SPECTRE_V2_USER_STRICT_PREFERRED; 1177 1178 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || 1179 retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { 1180 if (mode != SPECTRE_V2_USER_STRICT && 1181 mode != SPECTRE_V2_USER_STRICT_PREFERRED) 1182 pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n"); 1183 mode = SPECTRE_V2_USER_STRICT_PREFERRED; 1184 } 1185 1186 spectre_v2_user_stibp = mode; 1187 1188 set_mode: 1189 pr_info("%s\n", spectre_v2_user_strings[mode]); 1190 } 1191 1192 static const char * const spectre_v2_strings[] = { 1193 [SPECTRE_V2_NONE] = "Vulnerable", 1194 [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines", 1195 [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE", 1196 [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS", 1197 [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE", 1198 [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines", 1199 [SPECTRE_V2_IBRS] = "Mitigation: IBRS", 1200 }; 1201 1202 static const struct { 1203 const char *option; 1204 enum spectre_v2_mitigation_cmd cmd; 1205 bool secure; 1206 } mitigation_options[] __initconst = { 1207 { "off", SPECTRE_V2_CMD_NONE, false }, 1208 { "on", SPECTRE_V2_CMD_FORCE, true }, 1209 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, 1210 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, 1211 { "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, 1212 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, 1213 { "eibrs", SPECTRE_V2_CMD_EIBRS, false }, 1214 { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false }, 1215 { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false }, 1216 { "auto", SPECTRE_V2_CMD_AUTO, false }, 1217 { "ibrs", SPECTRE_V2_CMD_IBRS, false }, 1218 }; 1219 1220 static void __init spec_v2_print_cond(const char *reason, bool secure) 1221 { 1222 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) 1223 pr_info("%s selected on command line.\n", reason); 1224 } 1225 1226 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) 1227 { 1228 enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; 1229 char arg[20]; 1230 int ret, i; 1231 1232 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") || 1233 cpu_mitigations_off()) 1234 return SPECTRE_V2_CMD_NONE; 1235 1236 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); 1237 if (ret < 0) 1238 return SPECTRE_V2_CMD_AUTO; 1239 1240 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { 1241 if (!match_option(arg, ret, mitigation_options[i].option)) 1242 continue; 1243 cmd = mitigation_options[i].cmd; 1244 break; 1245 } 1246 1247 if (i >= ARRAY_SIZE(mitigation_options)) { 1248 pr_err("unknown option (%s). Switching to AUTO select\n", arg); 1249 return SPECTRE_V2_CMD_AUTO; 1250 } 1251 1252 if ((cmd == SPECTRE_V2_CMD_RETPOLINE || 1253 cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || 1254 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC || 1255 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || 1256 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && 1257 !IS_ENABLED(CONFIG_RETPOLINE)) { 1258 pr_err("%s selected but not compiled in. Switching to AUTO select\n", 1259 mitigation_options[i].option); 1260 return SPECTRE_V2_CMD_AUTO; 1261 } 1262 1263 if ((cmd == SPECTRE_V2_CMD_EIBRS || 1264 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || 1265 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && 1266 !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { 1267 pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n", 1268 mitigation_options[i].option); 1269 return SPECTRE_V2_CMD_AUTO; 1270 } 1271 1272 if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || 1273 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) && 1274 !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { 1275 pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n", 1276 mitigation_options[i].option); 1277 return SPECTRE_V2_CMD_AUTO; 1278 } 1279 1280 if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) { 1281 pr_err("%s selected but not compiled in. Switching to AUTO select\n", 1282 mitigation_options[i].option); 1283 return SPECTRE_V2_CMD_AUTO; 1284 } 1285 1286 if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { 1287 pr_err("%s selected but not Intel CPU. Switching to AUTO select\n", 1288 mitigation_options[i].option); 1289 return SPECTRE_V2_CMD_AUTO; 1290 } 1291 1292 if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) { 1293 pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n", 1294 mitigation_options[i].option); 1295 return SPECTRE_V2_CMD_AUTO; 1296 } 1297 1298 if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) { 1299 pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n", 1300 mitigation_options[i].option); 1301 return SPECTRE_V2_CMD_AUTO; 1302 } 1303 1304 spec_v2_print_cond(mitigation_options[i].option, 1305 mitigation_options[i].secure); 1306 return cmd; 1307 } 1308 1309 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void) 1310 { 1311 if (!IS_ENABLED(CONFIG_RETPOLINE)) { 1312 pr_err("Kernel not compiled with retpoline; no mitigation available!"); 1313 return SPECTRE_V2_NONE; 1314 } 1315 1316 return SPECTRE_V2_RETPOLINE; 1317 } 1318 1319 /* Disable in-kernel use of non-RSB RET predictors */ 1320 static void __init spec_ctrl_disable_kernel_rrsba(void) 1321 { 1322 u64 ia32_cap; 1323 1324 if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL)) 1325 return; 1326 1327 ia32_cap = x86_read_arch_cap_msr(); 1328 1329 if (ia32_cap & ARCH_CAP_RRSBA) { 1330 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S; 1331 write_spec_ctrl_current(x86_spec_ctrl_base, true); 1332 } 1333 } 1334 1335 static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode) 1336 { 1337 /* 1338 * Similar to context switches, there are two types of RSB attacks 1339 * after VM exit: 1340 * 1341 * 1) RSB underflow 1342 * 1343 * 2) Poisoned RSB entry 1344 * 1345 * When retpoline is enabled, both are mitigated by filling/clearing 1346 * the RSB. 1347 * 1348 * When IBRS is enabled, while #1 would be mitigated by the IBRS branch 1349 * prediction isolation protections, RSB still needs to be cleared 1350 * because of #2. Note that SMEP provides no protection here, unlike 1351 * user-space-poisoned RSB entries. 1352 * 1353 * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB 1354 * bug is present then a LITE version of RSB protection is required, 1355 * just a single call needs to retire before a RET is executed. 1356 */ 1357 switch (mode) { 1358 case SPECTRE_V2_NONE: 1359 return; 1360 1361 case SPECTRE_V2_EIBRS_LFENCE: 1362 case SPECTRE_V2_EIBRS: 1363 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { 1364 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); 1365 pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n"); 1366 } 1367 return; 1368 1369 case SPECTRE_V2_EIBRS_RETPOLINE: 1370 case SPECTRE_V2_RETPOLINE: 1371 case SPECTRE_V2_LFENCE: 1372 case SPECTRE_V2_IBRS: 1373 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT); 1374 pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n"); 1375 return; 1376 } 1377 1378 pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit"); 1379 dump_stack(); 1380 } 1381 1382 static void __init spectre_v2_select_mitigation(void) 1383 { 1384 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); 1385 enum spectre_v2_mitigation mode = SPECTRE_V2_NONE; 1386 1387 /* 1388 * If the CPU is not affected and the command line mode is NONE or AUTO 1389 * then nothing to do. 1390 */ 1391 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && 1392 (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO)) 1393 return; 1394 1395 switch (cmd) { 1396 case SPECTRE_V2_CMD_NONE: 1397 return; 1398 1399 case SPECTRE_V2_CMD_FORCE: 1400 case SPECTRE_V2_CMD_AUTO: 1401 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { 1402 mode = SPECTRE_V2_EIBRS; 1403 break; 1404 } 1405 1406 if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) && 1407 boot_cpu_has_bug(X86_BUG_RETBLEED) && 1408 retbleed_cmd != RETBLEED_CMD_OFF && 1409 boot_cpu_has(X86_FEATURE_IBRS) && 1410 boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 1411 mode = SPECTRE_V2_IBRS; 1412 break; 1413 } 1414 1415 mode = spectre_v2_select_retpoline(); 1416 break; 1417 1418 case SPECTRE_V2_CMD_RETPOLINE_LFENCE: 1419 pr_err(SPECTRE_V2_LFENCE_MSG); 1420 mode = SPECTRE_V2_LFENCE; 1421 break; 1422 1423 case SPECTRE_V2_CMD_RETPOLINE_GENERIC: 1424 mode = SPECTRE_V2_RETPOLINE; 1425 break; 1426 1427 case SPECTRE_V2_CMD_RETPOLINE: 1428 mode = spectre_v2_select_retpoline(); 1429 break; 1430 1431 case SPECTRE_V2_CMD_IBRS: 1432 mode = SPECTRE_V2_IBRS; 1433 break; 1434 1435 case SPECTRE_V2_CMD_EIBRS: 1436 mode = SPECTRE_V2_EIBRS; 1437 break; 1438 1439 case SPECTRE_V2_CMD_EIBRS_LFENCE: 1440 mode = SPECTRE_V2_EIBRS_LFENCE; 1441 break; 1442 1443 case SPECTRE_V2_CMD_EIBRS_RETPOLINE: 1444 mode = SPECTRE_V2_EIBRS_RETPOLINE; 1445 break; 1446 } 1447 1448 if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) 1449 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); 1450 1451 if (spectre_v2_in_ibrs_mode(mode)) { 1452 x86_spec_ctrl_base |= SPEC_CTRL_IBRS; 1453 write_spec_ctrl_current(x86_spec_ctrl_base, true); 1454 } 1455 1456 switch (mode) { 1457 case SPECTRE_V2_NONE: 1458 case SPECTRE_V2_EIBRS: 1459 break; 1460 1461 case SPECTRE_V2_IBRS: 1462 setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS); 1463 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) 1464 pr_warn(SPECTRE_V2_IBRS_PERF_MSG); 1465 break; 1466 1467 case SPECTRE_V2_LFENCE: 1468 case SPECTRE_V2_EIBRS_LFENCE: 1469 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE); 1470 fallthrough; 1471 1472 case SPECTRE_V2_RETPOLINE: 1473 case SPECTRE_V2_EIBRS_RETPOLINE: 1474 setup_force_cpu_cap(X86_FEATURE_RETPOLINE); 1475 break; 1476 } 1477 1478 /* 1479 * Disable alternate RSB predictions in kernel when indirect CALLs and 1480 * JMPs gets protection against BHI and Intramode-BTI, but RET 1481 * prediction from a non-RSB predictor is still a risk. 1482 */ 1483 if (mode == SPECTRE_V2_EIBRS_LFENCE || 1484 mode == SPECTRE_V2_EIBRS_RETPOLINE || 1485 mode == SPECTRE_V2_RETPOLINE) 1486 spec_ctrl_disable_kernel_rrsba(); 1487 1488 spectre_v2_enabled = mode; 1489 pr_info("%s\n", spectre_v2_strings[mode]); 1490 1491 /* 1492 * If Spectre v2 protection has been enabled, fill the RSB during a 1493 * context switch. In general there are two types of RSB attacks 1494 * across context switches, for which the CALLs/RETs may be unbalanced. 1495 * 1496 * 1) RSB underflow 1497 * 1498 * Some Intel parts have "bottomless RSB". When the RSB is empty, 1499 * speculated return targets may come from the branch predictor, 1500 * which could have a user-poisoned BTB or BHB entry. 1501 * 1502 * AMD has it even worse: *all* returns are speculated from the BTB, 1503 * regardless of the state of the RSB. 1504 * 1505 * When IBRS or eIBRS is enabled, the "user -> kernel" attack 1506 * scenario is mitigated by the IBRS branch prediction isolation 1507 * properties, so the RSB buffer filling wouldn't be necessary to 1508 * protect against this type of attack. 1509 * 1510 * The "user -> user" attack scenario is mitigated by RSB filling. 1511 * 1512 * 2) Poisoned RSB entry 1513 * 1514 * If the 'next' in-kernel return stack is shorter than 'prev', 1515 * 'next' could be tricked into speculating with a user-poisoned RSB 1516 * entry. 1517 * 1518 * The "user -> kernel" attack scenario is mitigated by SMEP and 1519 * eIBRS. 1520 * 1521 * The "user -> user" scenario, also known as SpectreBHB, requires 1522 * RSB clearing. 1523 * 1524 * So to mitigate all cases, unconditionally fill RSB on context 1525 * switches. 1526 * 1527 * FIXME: Is this pointless for retbleed-affected AMD? 1528 */ 1529 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); 1530 pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); 1531 1532 spectre_v2_determine_rsb_fill_type_at_vmexit(mode); 1533 1534 /* 1535 * Retpoline protects the kernel, but doesn't protect firmware. IBRS 1536 * and Enhanced IBRS protect firmware too, so enable IBRS around 1537 * firmware calls only when IBRS / Enhanced IBRS aren't otherwise 1538 * enabled. 1539 * 1540 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because 1541 * the user might select retpoline on the kernel command line and if 1542 * the CPU supports Enhanced IBRS, kernel might un-intentionally not 1543 * enable IBRS around firmware calls. 1544 */ 1545 if (boot_cpu_has_bug(X86_BUG_RETBLEED) && 1546 boot_cpu_has(X86_FEATURE_IBPB) && 1547 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || 1548 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) { 1549 1550 if (retbleed_cmd != RETBLEED_CMD_IBPB) { 1551 setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW); 1552 pr_info("Enabling Speculation Barrier for firmware calls\n"); 1553 } 1554 1555 } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) { 1556 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); 1557 pr_info("Enabling Restricted Speculation for firmware calls\n"); 1558 } 1559 1560 /* Set up IBPB and STIBP depending on the general spectre V2 command */ 1561 spectre_v2_cmd = cmd; 1562 } 1563 1564 static void update_stibp_msr(void * __unused) 1565 { 1566 u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP); 1567 write_spec_ctrl_current(val, true); 1568 } 1569 1570 /* Update x86_spec_ctrl_base in case SMT state changed. */ 1571 static void update_stibp_strict(void) 1572 { 1573 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP; 1574 1575 if (sched_smt_active()) 1576 mask |= SPEC_CTRL_STIBP; 1577 1578 if (mask == x86_spec_ctrl_base) 1579 return; 1580 1581 pr_info("Update user space SMT mitigation: STIBP %s\n", 1582 mask & SPEC_CTRL_STIBP ? "always-on" : "off"); 1583 x86_spec_ctrl_base = mask; 1584 on_each_cpu(update_stibp_msr, NULL, 1); 1585 } 1586 1587 /* Update the static key controlling the evaluation of TIF_SPEC_IB */ 1588 static void update_indir_branch_cond(void) 1589 { 1590 if (sched_smt_active()) 1591 static_branch_enable(&switch_to_cond_stibp); 1592 else 1593 static_branch_disable(&switch_to_cond_stibp); 1594 } 1595 1596 #undef pr_fmt 1597 #define pr_fmt(fmt) fmt 1598 1599 /* Update the static key controlling the MDS CPU buffer clear in idle */ 1600 static void update_mds_branch_idle(void) 1601 { 1602 u64 ia32_cap = x86_read_arch_cap_msr(); 1603 1604 /* 1605 * Enable the idle clearing if SMT is active on CPUs which are 1606 * affected only by MSBDS and not any other MDS variant. 1607 * 1608 * The other variants cannot be mitigated when SMT is enabled, so 1609 * clearing the buffers on idle just to prevent the Store Buffer 1610 * repartitioning leak would be a window dressing exercise. 1611 */ 1612 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) 1613 return; 1614 1615 if (sched_smt_active()) { 1616 static_branch_enable(&mds_idle_clear); 1617 } else if (mmio_mitigation == MMIO_MITIGATION_OFF || 1618 (ia32_cap & ARCH_CAP_FBSDP_NO)) { 1619 static_branch_disable(&mds_idle_clear); 1620 } 1621 } 1622 1623 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" 1624 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" 1625 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" 1626 1627 void cpu_bugs_smt_update(void) 1628 { 1629 mutex_lock(&spec_ctrl_mutex); 1630 1631 if (sched_smt_active() && unprivileged_ebpf_enabled() && 1632 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) 1633 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); 1634 1635 switch (spectre_v2_user_stibp) { 1636 case SPECTRE_V2_USER_NONE: 1637 break; 1638 case SPECTRE_V2_USER_STRICT: 1639 case SPECTRE_V2_USER_STRICT_PREFERRED: 1640 update_stibp_strict(); 1641 break; 1642 case SPECTRE_V2_USER_PRCTL: 1643 case SPECTRE_V2_USER_SECCOMP: 1644 update_indir_branch_cond(); 1645 break; 1646 } 1647 1648 switch (mds_mitigation) { 1649 case MDS_MITIGATION_FULL: 1650 case MDS_MITIGATION_VMWERV: 1651 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) 1652 pr_warn_once(MDS_MSG_SMT); 1653 update_mds_branch_idle(); 1654 break; 1655 case MDS_MITIGATION_OFF: 1656 break; 1657 } 1658 1659 switch (taa_mitigation) { 1660 case TAA_MITIGATION_VERW: 1661 case TAA_MITIGATION_UCODE_NEEDED: 1662 if (sched_smt_active()) 1663 pr_warn_once(TAA_MSG_SMT); 1664 break; 1665 case TAA_MITIGATION_TSX_DISABLED: 1666 case TAA_MITIGATION_OFF: 1667 break; 1668 } 1669 1670 switch (mmio_mitigation) { 1671 case MMIO_MITIGATION_VERW: 1672 case MMIO_MITIGATION_UCODE_NEEDED: 1673 if (sched_smt_active()) 1674 pr_warn_once(MMIO_MSG_SMT); 1675 break; 1676 case MMIO_MITIGATION_OFF: 1677 break; 1678 } 1679 1680 mutex_unlock(&spec_ctrl_mutex); 1681 } 1682 1683 #undef pr_fmt 1684 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt 1685 1686 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE; 1687 1688 /* The kernel command line selection */ 1689 enum ssb_mitigation_cmd { 1690 SPEC_STORE_BYPASS_CMD_NONE, 1691 SPEC_STORE_BYPASS_CMD_AUTO, 1692 SPEC_STORE_BYPASS_CMD_ON, 1693 SPEC_STORE_BYPASS_CMD_PRCTL, 1694 SPEC_STORE_BYPASS_CMD_SECCOMP, 1695 }; 1696 1697 static const char * const ssb_strings[] = { 1698 [SPEC_STORE_BYPASS_NONE] = "Vulnerable", 1699 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", 1700 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", 1701 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", 1702 }; 1703 1704 static const struct { 1705 const char *option; 1706 enum ssb_mitigation_cmd cmd; 1707 } ssb_mitigation_options[] __initconst = { 1708 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ 1709 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ 1710 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ 1711 { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */ 1712 { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */ 1713 }; 1714 1715 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) 1716 { 1717 enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO; 1718 char arg[20]; 1719 int ret, i; 1720 1721 if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") || 1722 cpu_mitigations_off()) { 1723 return SPEC_STORE_BYPASS_CMD_NONE; 1724 } else { 1725 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", 1726 arg, sizeof(arg)); 1727 if (ret < 0) 1728 return SPEC_STORE_BYPASS_CMD_AUTO; 1729 1730 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) { 1731 if (!match_option(arg, ret, ssb_mitigation_options[i].option)) 1732 continue; 1733 1734 cmd = ssb_mitigation_options[i].cmd; 1735 break; 1736 } 1737 1738 if (i >= ARRAY_SIZE(ssb_mitigation_options)) { 1739 pr_err("unknown option (%s). Switching to AUTO select\n", arg); 1740 return SPEC_STORE_BYPASS_CMD_AUTO; 1741 } 1742 } 1743 1744 return cmd; 1745 } 1746 1747 static enum ssb_mitigation __init __ssb_select_mitigation(void) 1748 { 1749 enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE; 1750 enum ssb_mitigation_cmd cmd; 1751 1752 if (!boot_cpu_has(X86_FEATURE_SSBD)) 1753 return mode; 1754 1755 cmd = ssb_parse_cmdline(); 1756 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && 1757 (cmd == SPEC_STORE_BYPASS_CMD_NONE || 1758 cmd == SPEC_STORE_BYPASS_CMD_AUTO)) 1759 return mode; 1760 1761 switch (cmd) { 1762 case SPEC_STORE_BYPASS_CMD_SECCOMP: 1763 /* 1764 * Choose prctl+seccomp as the default mode if seccomp is 1765 * enabled. 1766 */ 1767 if (IS_ENABLED(CONFIG_SECCOMP)) 1768 mode = SPEC_STORE_BYPASS_SECCOMP; 1769 else 1770 mode = SPEC_STORE_BYPASS_PRCTL; 1771 break; 1772 case SPEC_STORE_BYPASS_CMD_ON: 1773 mode = SPEC_STORE_BYPASS_DISABLE; 1774 break; 1775 case SPEC_STORE_BYPASS_CMD_AUTO: 1776 case SPEC_STORE_BYPASS_CMD_PRCTL: 1777 mode = SPEC_STORE_BYPASS_PRCTL; 1778 break; 1779 case SPEC_STORE_BYPASS_CMD_NONE: 1780 break; 1781 } 1782 1783 /* 1784 * We have three CPU feature flags that are in play here: 1785 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. 1786 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass 1787 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation 1788 */ 1789 if (mode == SPEC_STORE_BYPASS_DISABLE) { 1790 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); 1791 /* 1792 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may 1793 * use a completely different MSR and bit dependent on family. 1794 */ 1795 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && 1796 !static_cpu_has(X86_FEATURE_AMD_SSBD)) { 1797 x86_amd_ssb_disable(); 1798 } else { 1799 x86_spec_ctrl_base |= SPEC_CTRL_SSBD; 1800 write_spec_ctrl_current(x86_spec_ctrl_base, true); 1801 } 1802 } 1803 1804 return mode; 1805 } 1806 1807 static void ssb_select_mitigation(void) 1808 { 1809 ssb_mode = __ssb_select_mitigation(); 1810 1811 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 1812 pr_info("%s\n", ssb_strings[ssb_mode]); 1813 } 1814 1815 #undef pr_fmt 1816 #define pr_fmt(fmt) "Speculation prctl: " fmt 1817 1818 static void task_update_spec_tif(struct task_struct *tsk) 1819 { 1820 /* Force the update of the real TIF bits */ 1821 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); 1822 1823 /* 1824 * Immediately update the speculation control MSRs for the current 1825 * task, but for a non-current task delay setting the CPU 1826 * mitigation until it is scheduled next. 1827 * 1828 * This can only happen for SECCOMP mitigation. For PRCTL it's 1829 * always the current task. 1830 */ 1831 if (tsk == current) 1832 speculation_ctrl_update_current(); 1833 } 1834 1835 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl) 1836 { 1837 1838 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) 1839 return -EPERM; 1840 1841 switch (ctrl) { 1842 case PR_SPEC_ENABLE: 1843 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); 1844 return 0; 1845 case PR_SPEC_DISABLE: 1846 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); 1847 return 0; 1848 default: 1849 return -ERANGE; 1850 } 1851 } 1852 1853 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) 1854 { 1855 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && 1856 ssb_mode != SPEC_STORE_BYPASS_SECCOMP) 1857 return -ENXIO; 1858 1859 switch (ctrl) { 1860 case PR_SPEC_ENABLE: 1861 /* If speculation is force disabled, enable is not allowed */ 1862 if (task_spec_ssb_force_disable(task)) 1863 return -EPERM; 1864 task_clear_spec_ssb_disable(task); 1865 task_clear_spec_ssb_noexec(task); 1866 task_update_spec_tif(task); 1867 break; 1868 case PR_SPEC_DISABLE: 1869 task_set_spec_ssb_disable(task); 1870 task_clear_spec_ssb_noexec(task); 1871 task_update_spec_tif(task); 1872 break; 1873 case PR_SPEC_FORCE_DISABLE: 1874 task_set_spec_ssb_disable(task); 1875 task_set_spec_ssb_force_disable(task); 1876 task_clear_spec_ssb_noexec(task); 1877 task_update_spec_tif(task); 1878 break; 1879 case PR_SPEC_DISABLE_NOEXEC: 1880 if (task_spec_ssb_force_disable(task)) 1881 return -EPERM; 1882 task_set_spec_ssb_disable(task); 1883 task_set_spec_ssb_noexec(task); 1884 task_update_spec_tif(task); 1885 break; 1886 default: 1887 return -ERANGE; 1888 } 1889 return 0; 1890 } 1891 1892 static bool is_spec_ib_user_controlled(void) 1893 { 1894 return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || 1895 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || 1896 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || 1897 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP; 1898 } 1899 1900 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) 1901 { 1902 switch (ctrl) { 1903 case PR_SPEC_ENABLE: 1904 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 1905 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 1906 return 0; 1907 1908 /* 1909 * With strict mode for both IBPB and STIBP, the instruction 1910 * code paths avoid checking this task flag and instead, 1911 * unconditionally run the instruction. However, STIBP and IBPB 1912 * are independent and either can be set to conditionally 1913 * enabled regardless of the mode of the other. 1914 * 1915 * If either is set to conditional, allow the task flag to be 1916 * updated, unless it was force-disabled by a previous prctl 1917 * call. Currently, this is possible on an AMD CPU which has the 1918 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the 1919 * kernel is booted with 'spectre_v2_user=seccomp', then 1920 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and 1921 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED. 1922 */ 1923 if (!is_spec_ib_user_controlled() || 1924 task_spec_ib_force_disable(task)) 1925 return -EPERM; 1926 1927 task_clear_spec_ib_disable(task); 1928 task_update_spec_tif(task); 1929 break; 1930 case PR_SPEC_DISABLE: 1931 case PR_SPEC_FORCE_DISABLE: 1932 /* 1933 * Indirect branch speculation is always allowed when 1934 * mitigation is force disabled. 1935 */ 1936 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 1937 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 1938 return -EPERM; 1939 1940 if (!is_spec_ib_user_controlled()) 1941 return 0; 1942 1943 task_set_spec_ib_disable(task); 1944 if (ctrl == PR_SPEC_FORCE_DISABLE) 1945 task_set_spec_ib_force_disable(task); 1946 task_update_spec_tif(task); 1947 break; 1948 default: 1949 return -ERANGE; 1950 } 1951 return 0; 1952 } 1953 1954 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, 1955 unsigned long ctrl) 1956 { 1957 switch (which) { 1958 case PR_SPEC_STORE_BYPASS: 1959 return ssb_prctl_set(task, ctrl); 1960 case PR_SPEC_INDIRECT_BRANCH: 1961 return ib_prctl_set(task, ctrl); 1962 case PR_SPEC_L1D_FLUSH: 1963 return l1d_flush_prctl_set(task, ctrl); 1964 default: 1965 return -ENODEV; 1966 } 1967 } 1968 1969 #ifdef CONFIG_SECCOMP 1970 void arch_seccomp_spec_mitigate(struct task_struct *task) 1971 { 1972 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) 1973 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); 1974 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || 1975 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) 1976 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); 1977 } 1978 #endif 1979 1980 static int l1d_flush_prctl_get(struct task_struct *task) 1981 { 1982 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) 1983 return PR_SPEC_FORCE_DISABLE; 1984 1985 if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH)) 1986 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 1987 else 1988 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 1989 } 1990 1991 static int ssb_prctl_get(struct task_struct *task) 1992 { 1993 switch (ssb_mode) { 1994 case SPEC_STORE_BYPASS_DISABLE: 1995 return PR_SPEC_DISABLE; 1996 case SPEC_STORE_BYPASS_SECCOMP: 1997 case SPEC_STORE_BYPASS_PRCTL: 1998 if (task_spec_ssb_force_disable(task)) 1999 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; 2000 if (task_spec_ssb_noexec(task)) 2001 return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC; 2002 if (task_spec_ssb_disable(task)) 2003 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 2004 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 2005 default: 2006 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 2007 return PR_SPEC_ENABLE; 2008 return PR_SPEC_NOT_AFFECTED; 2009 } 2010 } 2011 2012 static int ib_prctl_get(struct task_struct *task) 2013 { 2014 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 2015 return PR_SPEC_NOT_AFFECTED; 2016 2017 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 2018 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 2019 return PR_SPEC_ENABLE; 2020 else if (is_spec_ib_user_controlled()) { 2021 if (task_spec_ib_force_disable(task)) 2022 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; 2023 if (task_spec_ib_disable(task)) 2024 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 2025 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 2026 } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || 2027 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || 2028 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) 2029 return PR_SPEC_DISABLE; 2030 else 2031 return PR_SPEC_NOT_AFFECTED; 2032 } 2033 2034 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) 2035 { 2036 switch (which) { 2037 case PR_SPEC_STORE_BYPASS: 2038 return ssb_prctl_get(task); 2039 case PR_SPEC_INDIRECT_BRANCH: 2040 return ib_prctl_get(task); 2041 case PR_SPEC_L1D_FLUSH: 2042 return l1d_flush_prctl_get(task); 2043 default: 2044 return -ENODEV; 2045 } 2046 } 2047 2048 void x86_spec_ctrl_setup_ap(void) 2049 { 2050 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) 2051 write_spec_ctrl_current(x86_spec_ctrl_base, true); 2052 2053 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) 2054 x86_amd_ssb_disable(); 2055 } 2056 2057 bool itlb_multihit_kvm_mitigation; 2058 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation); 2059 2060 #undef pr_fmt 2061 #define pr_fmt(fmt) "L1TF: " fmt 2062 2063 /* Default mitigation for L1TF-affected CPUs */ 2064 enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH; 2065 #if IS_ENABLED(CONFIG_KVM_INTEL) 2066 EXPORT_SYMBOL_GPL(l1tf_mitigation); 2067 #endif 2068 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; 2069 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); 2070 2071 /* 2072 * These CPUs all support 44bits physical address space internally in the 2073 * cache but CPUID can report a smaller number of physical address bits. 2074 * 2075 * The L1TF mitigation uses the top most address bit for the inversion of 2076 * non present PTEs. When the installed memory reaches into the top most 2077 * address bit due to memory holes, which has been observed on machines 2078 * which report 36bits physical address bits and have 32G RAM installed, 2079 * then the mitigation range check in l1tf_select_mitigation() triggers. 2080 * This is a false positive because the mitigation is still possible due to 2081 * the fact that the cache uses 44bit internally. Use the cache bits 2082 * instead of the reported physical bits and adjust them on the affected 2083 * machines to 44bit if the reported bits are less than 44. 2084 */ 2085 static void override_cache_bits(struct cpuinfo_x86 *c) 2086 { 2087 if (c->x86 != 6) 2088 return; 2089 2090 switch (c->x86_model) { 2091 case INTEL_FAM6_NEHALEM: 2092 case INTEL_FAM6_WESTMERE: 2093 case INTEL_FAM6_SANDYBRIDGE: 2094 case INTEL_FAM6_IVYBRIDGE: 2095 case INTEL_FAM6_HASWELL: 2096 case INTEL_FAM6_HASWELL_L: 2097 case INTEL_FAM6_HASWELL_G: 2098 case INTEL_FAM6_BROADWELL: 2099 case INTEL_FAM6_BROADWELL_G: 2100 case INTEL_FAM6_SKYLAKE_L: 2101 case INTEL_FAM6_SKYLAKE: 2102 case INTEL_FAM6_KABYLAKE_L: 2103 case INTEL_FAM6_KABYLAKE: 2104 if (c->x86_cache_bits < 44) 2105 c->x86_cache_bits = 44; 2106 break; 2107 } 2108 } 2109 2110 static void __init l1tf_select_mitigation(void) 2111 { 2112 u64 half_pa; 2113 2114 if (!boot_cpu_has_bug(X86_BUG_L1TF)) 2115 return; 2116 2117 if (cpu_mitigations_off()) 2118 l1tf_mitigation = L1TF_MITIGATION_OFF; 2119 else if (cpu_mitigations_auto_nosmt()) 2120 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; 2121 2122 override_cache_bits(&boot_cpu_data); 2123 2124 switch (l1tf_mitigation) { 2125 case L1TF_MITIGATION_OFF: 2126 case L1TF_MITIGATION_FLUSH_NOWARN: 2127 case L1TF_MITIGATION_FLUSH: 2128 break; 2129 case L1TF_MITIGATION_FLUSH_NOSMT: 2130 case L1TF_MITIGATION_FULL: 2131 cpu_smt_disable(false); 2132 break; 2133 case L1TF_MITIGATION_FULL_FORCE: 2134 cpu_smt_disable(true); 2135 break; 2136 } 2137 2138 #if CONFIG_PGTABLE_LEVELS == 2 2139 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n"); 2140 return; 2141 #endif 2142 2143 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; 2144 if (l1tf_mitigation != L1TF_MITIGATION_OFF && 2145 e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { 2146 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); 2147 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", 2148 half_pa); 2149 pr_info("However, doing so will make a part of your RAM unusable.\n"); 2150 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n"); 2151 return; 2152 } 2153 2154 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV); 2155 } 2156 2157 static int __init l1tf_cmdline(char *str) 2158 { 2159 if (!boot_cpu_has_bug(X86_BUG_L1TF)) 2160 return 0; 2161 2162 if (!str) 2163 return -EINVAL; 2164 2165 if (!strcmp(str, "off")) 2166 l1tf_mitigation = L1TF_MITIGATION_OFF; 2167 else if (!strcmp(str, "flush,nowarn")) 2168 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN; 2169 else if (!strcmp(str, "flush")) 2170 l1tf_mitigation = L1TF_MITIGATION_FLUSH; 2171 else if (!strcmp(str, "flush,nosmt")) 2172 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; 2173 else if (!strcmp(str, "full")) 2174 l1tf_mitigation = L1TF_MITIGATION_FULL; 2175 else if (!strcmp(str, "full,force")) 2176 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE; 2177 2178 return 0; 2179 } 2180 early_param("l1tf", l1tf_cmdline); 2181 2182 #undef pr_fmt 2183 #define pr_fmt(fmt) fmt 2184 2185 #ifdef CONFIG_SYSFS 2186 2187 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" 2188 2189 #if IS_ENABLED(CONFIG_KVM_INTEL) 2190 static const char * const l1tf_vmx_states[] = { 2191 [VMENTER_L1D_FLUSH_AUTO] = "auto", 2192 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", 2193 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", 2194 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes", 2195 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled", 2196 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary" 2197 }; 2198 2199 static ssize_t l1tf_show_state(char *buf) 2200 { 2201 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) 2202 return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); 2203 2204 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || 2205 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && 2206 sched_smt_active())) { 2207 return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, 2208 l1tf_vmx_states[l1tf_vmx_mitigation]); 2209 } 2210 2211 return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, 2212 l1tf_vmx_states[l1tf_vmx_mitigation], 2213 sched_smt_active() ? "vulnerable" : "disabled"); 2214 } 2215 2216 static ssize_t itlb_multihit_show_state(char *buf) 2217 { 2218 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || 2219 !boot_cpu_has(X86_FEATURE_VMX)) 2220 return sprintf(buf, "KVM: Mitigation: VMX unsupported\n"); 2221 else if (!(cr4_read_shadow() & X86_CR4_VMXE)) 2222 return sprintf(buf, "KVM: Mitigation: VMX disabled\n"); 2223 else if (itlb_multihit_kvm_mitigation) 2224 return sprintf(buf, "KVM: Mitigation: Split huge pages\n"); 2225 else 2226 return sprintf(buf, "KVM: Vulnerable\n"); 2227 } 2228 #else 2229 static ssize_t l1tf_show_state(char *buf) 2230 { 2231 return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); 2232 } 2233 2234 static ssize_t itlb_multihit_show_state(char *buf) 2235 { 2236 return sprintf(buf, "Processor vulnerable\n"); 2237 } 2238 #endif 2239 2240 static ssize_t mds_show_state(char *buf) 2241 { 2242 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 2243 return sprintf(buf, "%s; SMT Host state unknown\n", 2244 mds_strings[mds_mitigation]); 2245 } 2246 2247 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { 2248 return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], 2249 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : 2250 sched_smt_active() ? "mitigated" : "disabled")); 2251 } 2252 2253 return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], 2254 sched_smt_active() ? "vulnerable" : "disabled"); 2255 } 2256 2257 static ssize_t tsx_async_abort_show_state(char *buf) 2258 { 2259 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || 2260 (taa_mitigation == TAA_MITIGATION_OFF)) 2261 return sprintf(buf, "%s\n", taa_strings[taa_mitigation]); 2262 2263 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 2264 return sprintf(buf, "%s; SMT Host state unknown\n", 2265 taa_strings[taa_mitigation]); 2266 } 2267 2268 return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], 2269 sched_smt_active() ? "vulnerable" : "disabled"); 2270 } 2271 2272 static ssize_t mmio_stale_data_show_state(char *buf) 2273 { 2274 if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) 2275 return sysfs_emit(buf, "Unknown: No mitigations\n"); 2276 2277 if (mmio_mitigation == MMIO_MITIGATION_OFF) 2278 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); 2279 2280 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 2281 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 2282 mmio_strings[mmio_mitigation]); 2283 } 2284 2285 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], 2286 sched_smt_active() ? "vulnerable" : "disabled"); 2287 } 2288 2289 static char *stibp_state(void) 2290 { 2291 if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) 2292 return ""; 2293 2294 switch (spectre_v2_user_stibp) { 2295 case SPECTRE_V2_USER_NONE: 2296 return ", STIBP: disabled"; 2297 case SPECTRE_V2_USER_STRICT: 2298 return ", STIBP: forced"; 2299 case SPECTRE_V2_USER_STRICT_PREFERRED: 2300 return ", STIBP: always-on"; 2301 case SPECTRE_V2_USER_PRCTL: 2302 case SPECTRE_V2_USER_SECCOMP: 2303 if (static_key_enabled(&switch_to_cond_stibp)) 2304 return ", STIBP: conditional"; 2305 } 2306 return ""; 2307 } 2308 2309 static char *ibpb_state(void) 2310 { 2311 if (boot_cpu_has(X86_FEATURE_IBPB)) { 2312 if (static_key_enabled(&switch_mm_always_ibpb)) 2313 return ", IBPB: always-on"; 2314 if (static_key_enabled(&switch_mm_cond_ibpb)) 2315 return ", IBPB: conditional"; 2316 return ", IBPB: disabled"; 2317 } 2318 return ""; 2319 } 2320 2321 static char *pbrsb_eibrs_state(void) 2322 { 2323 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { 2324 if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || 2325 boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) 2326 return ", PBRSB-eIBRS: SW sequence"; 2327 else 2328 return ", PBRSB-eIBRS: Vulnerable"; 2329 } else { 2330 return ", PBRSB-eIBRS: Not affected"; 2331 } 2332 } 2333 2334 static ssize_t spectre_v2_show_state(char *buf) 2335 { 2336 if (spectre_v2_enabled == SPECTRE_V2_LFENCE) 2337 return sprintf(buf, "Vulnerable: LFENCE\n"); 2338 2339 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) 2340 return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); 2341 2342 if (sched_smt_active() && unprivileged_ebpf_enabled() && 2343 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) 2344 return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); 2345 2346 return sprintf(buf, "%s%s%s%s%s%s%s\n", 2347 spectre_v2_strings[spectre_v2_enabled], 2348 ibpb_state(), 2349 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", 2350 stibp_state(), 2351 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", 2352 pbrsb_eibrs_state(), 2353 spectre_v2_module_string()); 2354 } 2355 2356 static ssize_t srbds_show_state(char *buf) 2357 { 2358 return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]); 2359 } 2360 2361 static ssize_t retbleed_show_state(char *buf) 2362 { 2363 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || 2364 retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { 2365 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && 2366 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) 2367 return sprintf(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); 2368 2369 return sprintf(buf, "%s; SMT %s\n", 2370 retbleed_strings[retbleed_mitigation], 2371 !sched_smt_active() ? "disabled" : 2372 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || 2373 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? 2374 "enabled with STIBP protection" : "vulnerable"); 2375 } 2376 2377 return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]); 2378 } 2379 2380 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, 2381 char *buf, unsigned int bug) 2382 { 2383 if (!boot_cpu_has_bug(bug)) 2384 return sprintf(buf, "Not affected\n"); 2385 2386 switch (bug) { 2387 case X86_BUG_CPU_MELTDOWN: 2388 if (boot_cpu_has(X86_FEATURE_PTI)) 2389 return sprintf(buf, "Mitigation: PTI\n"); 2390 2391 if (hypervisor_is_type(X86_HYPER_XEN_PV)) 2392 return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); 2393 2394 break; 2395 2396 case X86_BUG_SPECTRE_V1: 2397 return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); 2398 2399 case X86_BUG_SPECTRE_V2: 2400 return spectre_v2_show_state(buf); 2401 2402 case X86_BUG_SPEC_STORE_BYPASS: 2403 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); 2404 2405 case X86_BUG_L1TF: 2406 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) 2407 return l1tf_show_state(buf); 2408 break; 2409 2410 case X86_BUG_MDS: 2411 return mds_show_state(buf); 2412 2413 case X86_BUG_TAA: 2414 return tsx_async_abort_show_state(buf); 2415 2416 case X86_BUG_ITLB_MULTIHIT: 2417 return itlb_multihit_show_state(buf); 2418 2419 case X86_BUG_SRBDS: 2420 return srbds_show_state(buf); 2421 2422 case X86_BUG_MMIO_STALE_DATA: 2423 case X86_BUG_MMIO_UNKNOWN: 2424 return mmio_stale_data_show_state(buf); 2425 2426 case X86_BUG_RETBLEED: 2427 return retbleed_show_state(buf); 2428 2429 default: 2430 break; 2431 } 2432 2433 return sprintf(buf, "Vulnerable\n"); 2434 } 2435 2436 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) 2437 { 2438 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); 2439 } 2440 2441 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) 2442 { 2443 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); 2444 } 2445 2446 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) 2447 { 2448 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); 2449 } 2450 2451 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) 2452 { 2453 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); 2454 } 2455 2456 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) 2457 { 2458 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); 2459 } 2460 2461 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) 2462 { 2463 return cpu_show_common(dev, attr, buf, X86_BUG_MDS); 2464 } 2465 2466 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf) 2467 { 2468 return cpu_show_common(dev, attr, buf, X86_BUG_TAA); 2469 } 2470 2471 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf) 2472 { 2473 return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); 2474 } 2475 2476 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) 2477 { 2478 return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); 2479 } 2480 2481 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) 2482 { 2483 if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) 2484 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN); 2485 else 2486 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); 2487 } 2488 2489 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) 2490 { 2491 return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); 2492 } 2493 #endif 2494