1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 1994 Linus Torvalds 4 * 5 * Cyrix stuff, June 1998 by: 6 * - Rafael R. Reilova (moved everything from head.S), 7 * <rreilova@ececs.uc.edu> 8 * - Channing Corn (tests & fixes), 9 * - Andrew D. Balsa (code cleanup). 10 */ 11 #include <linux/init.h> 12 #include <linux/cpu.h> 13 #include <linux/module.h> 14 #include <linux/nospec.h> 15 #include <linux/prctl.h> 16 #include <linux/sched/smt.h> 17 #include <linux/pgtable.h> 18 #include <linux/bpf.h> 19 20 #include <asm/spec-ctrl.h> 21 #include <asm/cmdline.h> 22 #include <asm/bugs.h> 23 #include <asm/processor.h> 24 #include <asm/processor-flags.h> 25 #include <asm/fpu/api.h> 26 #include <asm/msr.h> 27 #include <asm/vmx.h> 28 #include <asm/paravirt.h> 29 #include <asm/intel-family.h> 30 #include <asm/e820/api.h> 31 #include <asm/hypervisor.h> 32 #include <asm/tlbflush.h> 33 #include <asm/cpu.h> 34 35 #include "cpu.h" 36 37 static void __init spectre_v1_select_mitigation(void); 38 static void __init spectre_v2_select_mitigation(void); 39 static void __init retbleed_select_mitigation(void); 40 static void __init spectre_v2_user_select_mitigation(void); 41 static void __init ssb_select_mitigation(void); 42 static void __init l1tf_select_mitigation(void); 43 static void __init mds_select_mitigation(void); 44 static void __init md_clear_update_mitigation(void); 45 static void __init md_clear_select_mitigation(void); 46 static void __init taa_select_mitigation(void); 47 static void __init mmio_select_mitigation(void); 48 static void __init srbds_select_mitigation(void); 49 static void __init l1d_flush_select_mitigation(void); 50 51 /* The base value of the SPEC_CTRL MSR without task-specific bits set */ 52 u64 x86_spec_ctrl_base; 53 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); 54 55 /* The current value of the SPEC_CTRL MSR with task-specific bits set */ 56 DEFINE_PER_CPU(u64, x86_spec_ctrl_current); 57 EXPORT_SYMBOL_GPL(x86_spec_ctrl_current); 58 59 static DEFINE_MUTEX(spec_ctrl_mutex); 60 61 /* Update SPEC_CTRL MSR and its cached copy unconditionally */ 62 static void update_spec_ctrl(u64 val) 63 { 64 this_cpu_write(x86_spec_ctrl_current, val); 65 wrmsrl(MSR_IA32_SPEC_CTRL, val); 66 } 67 68 /* 69 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ 70 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update(). 71 */ 72 void update_spec_ctrl_cond(u64 val) 73 { 74 if (this_cpu_read(x86_spec_ctrl_current) == val) 75 return; 76 77 this_cpu_write(x86_spec_ctrl_current, val); 78 79 /* 80 * When KERNEL_IBRS this MSR is written on return-to-user, unless 81 * forced the update can be delayed until that time. 82 */ 83 if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS)) 84 wrmsrl(MSR_IA32_SPEC_CTRL, val); 85 } 86 87 noinstr u64 spec_ctrl_current(void) 88 { 89 return this_cpu_read(x86_spec_ctrl_current); 90 } 91 EXPORT_SYMBOL_GPL(spec_ctrl_current); 92 93 /* 94 * AMD specific MSR info for Speculative Store Bypass control. 95 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu(). 96 */ 97 u64 __ro_after_init x86_amd_ls_cfg_base; 98 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; 99 100 /* Control conditional STIBP in switch_to() */ 101 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp); 102 /* Control conditional IBPB in switch_mm() */ 103 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); 104 /* Control unconditional IBPB in switch_mm() */ 105 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); 106 107 /* Control MDS CPU buffer clear before returning to user space */ 108 DEFINE_STATIC_KEY_FALSE(mds_user_clear); 109 EXPORT_SYMBOL_GPL(mds_user_clear); 110 /* Control MDS CPU buffer clear before idling (halt, mwait) */ 111 DEFINE_STATIC_KEY_FALSE(mds_idle_clear); 112 EXPORT_SYMBOL_GPL(mds_idle_clear); 113 114 /* 115 * Controls whether l1d flush based mitigations are enabled, 116 * based on hw features and admin setting via boot parameter 117 * defaults to false 118 */ 119 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); 120 121 /* Controls CPU Fill buffer clear before KVM guest MMIO accesses */ 122 DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear); 123 EXPORT_SYMBOL_GPL(mmio_stale_data_clear); 124 125 void __init cpu_select_mitigations(void) 126 { 127 /* 128 * Read the SPEC_CTRL MSR to account for reserved bits which may 129 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD 130 * init code as it is not enumerated and depends on the family. 131 */ 132 if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) { 133 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); 134 135 /* 136 * Previously running kernel (kexec), may have some controls 137 * turned ON. Clear them and let the mitigations setup below 138 * rediscover them based on configuration. 139 */ 140 x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK; 141 } 142 143 /* Select the proper CPU mitigations before patching alternatives: */ 144 spectre_v1_select_mitigation(); 145 spectre_v2_select_mitigation(); 146 /* 147 * retbleed_select_mitigation() relies on the state set by 148 * spectre_v2_select_mitigation(); specifically it wants to know about 149 * spectre_v2=ibrs. 150 */ 151 retbleed_select_mitigation(); 152 /* 153 * spectre_v2_user_select_mitigation() relies on the state set by 154 * retbleed_select_mitigation(); specifically the STIBP selection is 155 * forced for UNRET or IBPB. 156 */ 157 spectre_v2_user_select_mitigation(); 158 ssb_select_mitigation(); 159 l1tf_select_mitigation(); 160 md_clear_select_mitigation(); 161 srbds_select_mitigation(); 162 l1d_flush_select_mitigation(); 163 } 164 165 /* 166 * NOTE: This function is *only* called for SVM, since Intel uses 167 * MSR_IA32_SPEC_CTRL for SSBD. 168 */ 169 void 170 x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest) 171 { 172 u64 guestval, hostval; 173 struct thread_info *ti = current_thread_info(); 174 175 /* 176 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update 177 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported. 178 */ 179 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) && 180 !static_cpu_has(X86_FEATURE_VIRT_SSBD)) 181 return; 182 183 /* 184 * If the host has SSBD mitigation enabled, force it in the host's 185 * virtual MSR value. If its not permanently enabled, evaluate 186 * current's TIF_SSBD thread flag. 187 */ 188 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE)) 189 hostval = SPEC_CTRL_SSBD; 190 else 191 hostval = ssbd_tif_to_spec_ctrl(ti->flags); 192 193 /* Sanitize the guest value */ 194 guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD; 195 196 if (hostval != guestval) { 197 unsigned long tif; 198 199 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) : 200 ssbd_spec_ctrl_to_tif(hostval); 201 202 speculation_ctrl_update(tif); 203 } 204 } 205 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); 206 207 static void x86_amd_ssb_disable(void) 208 { 209 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask; 210 211 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) 212 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); 213 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) 214 wrmsrl(MSR_AMD64_LS_CFG, msrval); 215 } 216 217 #undef pr_fmt 218 #define pr_fmt(fmt) "MDS: " fmt 219 220 /* Default mitigation for MDS-affected CPUs */ 221 static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL; 222 static bool mds_nosmt __ro_after_init = false; 223 224 static const char * const mds_strings[] = { 225 [MDS_MITIGATION_OFF] = "Vulnerable", 226 [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", 227 [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode", 228 }; 229 230 static void __init mds_select_mitigation(void) 231 { 232 if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) { 233 mds_mitigation = MDS_MITIGATION_OFF; 234 return; 235 } 236 237 if (mds_mitigation == MDS_MITIGATION_FULL) { 238 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) 239 mds_mitigation = MDS_MITIGATION_VMWERV; 240 241 static_branch_enable(&mds_user_clear); 242 243 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) && 244 (mds_nosmt || cpu_mitigations_auto_nosmt())) 245 cpu_smt_disable(false); 246 } 247 } 248 249 static int __init mds_cmdline(char *str) 250 { 251 if (!boot_cpu_has_bug(X86_BUG_MDS)) 252 return 0; 253 254 if (!str) 255 return -EINVAL; 256 257 if (!strcmp(str, "off")) 258 mds_mitigation = MDS_MITIGATION_OFF; 259 else if (!strcmp(str, "full")) 260 mds_mitigation = MDS_MITIGATION_FULL; 261 else if (!strcmp(str, "full,nosmt")) { 262 mds_mitigation = MDS_MITIGATION_FULL; 263 mds_nosmt = true; 264 } 265 266 return 0; 267 } 268 early_param("mds", mds_cmdline); 269 270 #undef pr_fmt 271 #define pr_fmt(fmt) "TAA: " fmt 272 273 enum taa_mitigations { 274 TAA_MITIGATION_OFF, 275 TAA_MITIGATION_UCODE_NEEDED, 276 TAA_MITIGATION_VERW, 277 TAA_MITIGATION_TSX_DISABLED, 278 }; 279 280 /* Default mitigation for TAA-affected CPUs */ 281 static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW; 282 static bool taa_nosmt __ro_after_init; 283 284 static const char * const taa_strings[] = { 285 [TAA_MITIGATION_OFF] = "Vulnerable", 286 [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", 287 [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", 288 [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled", 289 }; 290 291 static void __init taa_select_mitigation(void) 292 { 293 u64 ia32_cap; 294 295 if (!boot_cpu_has_bug(X86_BUG_TAA)) { 296 taa_mitigation = TAA_MITIGATION_OFF; 297 return; 298 } 299 300 /* TSX previously disabled by tsx=off */ 301 if (!boot_cpu_has(X86_FEATURE_RTM)) { 302 taa_mitigation = TAA_MITIGATION_TSX_DISABLED; 303 return; 304 } 305 306 if (cpu_mitigations_off()) { 307 taa_mitigation = TAA_MITIGATION_OFF; 308 return; 309 } 310 311 /* 312 * TAA mitigation via VERW is turned off if both 313 * tsx_async_abort=off and mds=off are specified. 314 */ 315 if (taa_mitigation == TAA_MITIGATION_OFF && 316 mds_mitigation == MDS_MITIGATION_OFF) 317 return; 318 319 if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) 320 taa_mitigation = TAA_MITIGATION_VERW; 321 else 322 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; 323 324 /* 325 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1. 326 * A microcode update fixes this behavior to clear CPU buffers. It also 327 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the 328 * ARCH_CAP_TSX_CTRL_MSR bit. 329 * 330 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode 331 * update is required. 332 */ 333 ia32_cap = x86_read_arch_cap_msr(); 334 if ( (ia32_cap & ARCH_CAP_MDS_NO) && 335 !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR)) 336 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; 337 338 /* 339 * TSX is enabled, select alternate mitigation for TAA which is 340 * the same as MDS. Enable MDS static branch to clear CPU buffers. 341 * 342 * For guests that can't determine whether the correct microcode is 343 * present on host, enable the mitigation for UCODE_NEEDED as well. 344 */ 345 static_branch_enable(&mds_user_clear); 346 347 if (taa_nosmt || cpu_mitigations_auto_nosmt()) 348 cpu_smt_disable(false); 349 } 350 351 static int __init tsx_async_abort_parse_cmdline(char *str) 352 { 353 if (!boot_cpu_has_bug(X86_BUG_TAA)) 354 return 0; 355 356 if (!str) 357 return -EINVAL; 358 359 if (!strcmp(str, "off")) { 360 taa_mitigation = TAA_MITIGATION_OFF; 361 } else if (!strcmp(str, "full")) { 362 taa_mitigation = TAA_MITIGATION_VERW; 363 } else if (!strcmp(str, "full,nosmt")) { 364 taa_mitigation = TAA_MITIGATION_VERW; 365 taa_nosmt = true; 366 } 367 368 return 0; 369 } 370 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); 371 372 #undef pr_fmt 373 #define pr_fmt(fmt) "MMIO Stale Data: " fmt 374 375 enum mmio_mitigations { 376 MMIO_MITIGATION_OFF, 377 MMIO_MITIGATION_UCODE_NEEDED, 378 MMIO_MITIGATION_VERW, 379 }; 380 381 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */ 382 static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW; 383 static bool mmio_nosmt __ro_after_init = false; 384 385 static const char * const mmio_strings[] = { 386 [MMIO_MITIGATION_OFF] = "Vulnerable", 387 [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", 388 [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", 389 }; 390 391 static void __init mmio_select_mitigation(void) 392 { 393 u64 ia32_cap; 394 395 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || 396 boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) || 397 cpu_mitigations_off()) { 398 mmio_mitigation = MMIO_MITIGATION_OFF; 399 return; 400 } 401 402 if (mmio_mitigation == MMIO_MITIGATION_OFF) 403 return; 404 405 ia32_cap = x86_read_arch_cap_msr(); 406 407 /* 408 * Enable CPU buffer clear mitigation for host and VMM, if also affected 409 * by MDS or TAA. Otherwise, enable mitigation for VMM only. 410 */ 411 if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) && 412 boot_cpu_has(X86_FEATURE_RTM))) 413 static_branch_enable(&mds_user_clear); 414 else 415 static_branch_enable(&mmio_stale_data_clear); 416 417 /* 418 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can 419 * be propagated to uncore buffers, clearing the Fill buffers on idle 420 * is required irrespective of SMT state. 421 */ 422 if (!(ia32_cap & ARCH_CAP_FBSDP_NO)) 423 static_branch_enable(&mds_idle_clear); 424 425 /* 426 * Check if the system has the right microcode. 427 * 428 * CPU Fill buffer clear mitigation is enumerated by either an explicit 429 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS 430 * affected systems. 431 */ 432 if ((ia32_cap & ARCH_CAP_FB_CLEAR) || 433 (boot_cpu_has(X86_FEATURE_MD_CLEAR) && 434 boot_cpu_has(X86_FEATURE_FLUSH_L1D) && 435 !(ia32_cap & ARCH_CAP_MDS_NO))) 436 mmio_mitigation = MMIO_MITIGATION_VERW; 437 else 438 mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED; 439 440 if (mmio_nosmt || cpu_mitigations_auto_nosmt()) 441 cpu_smt_disable(false); 442 } 443 444 static int __init mmio_stale_data_parse_cmdline(char *str) 445 { 446 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) 447 return 0; 448 449 if (!str) 450 return -EINVAL; 451 452 if (!strcmp(str, "off")) { 453 mmio_mitigation = MMIO_MITIGATION_OFF; 454 } else if (!strcmp(str, "full")) { 455 mmio_mitigation = MMIO_MITIGATION_VERW; 456 } else if (!strcmp(str, "full,nosmt")) { 457 mmio_mitigation = MMIO_MITIGATION_VERW; 458 mmio_nosmt = true; 459 } 460 461 return 0; 462 } 463 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline); 464 465 #undef pr_fmt 466 #define pr_fmt(fmt) "" fmt 467 468 static void __init md_clear_update_mitigation(void) 469 { 470 if (cpu_mitigations_off()) 471 return; 472 473 if (!static_key_enabled(&mds_user_clear)) 474 goto out; 475 476 /* 477 * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data 478 * mitigation, if necessary. 479 */ 480 if (mds_mitigation == MDS_MITIGATION_OFF && 481 boot_cpu_has_bug(X86_BUG_MDS)) { 482 mds_mitigation = MDS_MITIGATION_FULL; 483 mds_select_mitigation(); 484 } 485 if (taa_mitigation == TAA_MITIGATION_OFF && 486 boot_cpu_has_bug(X86_BUG_TAA)) { 487 taa_mitigation = TAA_MITIGATION_VERW; 488 taa_select_mitigation(); 489 } 490 if (mmio_mitigation == MMIO_MITIGATION_OFF && 491 boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) { 492 mmio_mitigation = MMIO_MITIGATION_VERW; 493 mmio_select_mitigation(); 494 } 495 out: 496 if (boot_cpu_has_bug(X86_BUG_MDS)) 497 pr_info("MDS: %s\n", mds_strings[mds_mitigation]); 498 if (boot_cpu_has_bug(X86_BUG_TAA)) 499 pr_info("TAA: %s\n", taa_strings[taa_mitigation]); 500 if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) 501 pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]); 502 else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) 503 pr_info("MMIO Stale Data: Unknown: No mitigations\n"); 504 } 505 506 static void __init md_clear_select_mitigation(void) 507 { 508 mds_select_mitigation(); 509 taa_select_mitigation(); 510 mmio_select_mitigation(); 511 512 /* 513 * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update 514 * and print their mitigation after MDS, TAA and MMIO Stale Data 515 * mitigation selection is done. 516 */ 517 md_clear_update_mitigation(); 518 } 519 520 #undef pr_fmt 521 #define pr_fmt(fmt) "SRBDS: " fmt 522 523 enum srbds_mitigations { 524 SRBDS_MITIGATION_OFF, 525 SRBDS_MITIGATION_UCODE_NEEDED, 526 SRBDS_MITIGATION_FULL, 527 SRBDS_MITIGATION_TSX_OFF, 528 SRBDS_MITIGATION_HYPERVISOR, 529 }; 530 531 static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL; 532 533 static const char * const srbds_strings[] = { 534 [SRBDS_MITIGATION_OFF] = "Vulnerable", 535 [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", 536 [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode", 537 [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled", 538 [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", 539 }; 540 541 static bool srbds_off; 542 543 void update_srbds_msr(void) 544 { 545 u64 mcu_ctrl; 546 547 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) 548 return; 549 550 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 551 return; 552 553 if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED) 554 return; 555 556 /* 557 * A MDS_NO CPU for which SRBDS mitigation is not needed due to TSX 558 * being disabled and it hasn't received the SRBDS MSR microcode. 559 */ 560 if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) 561 return; 562 563 rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 564 565 switch (srbds_mitigation) { 566 case SRBDS_MITIGATION_OFF: 567 case SRBDS_MITIGATION_TSX_OFF: 568 mcu_ctrl |= RNGDS_MITG_DIS; 569 break; 570 case SRBDS_MITIGATION_FULL: 571 mcu_ctrl &= ~RNGDS_MITG_DIS; 572 break; 573 default: 574 break; 575 } 576 577 wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 578 } 579 580 static void __init srbds_select_mitigation(void) 581 { 582 u64 ia32_cap; 583 584 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) 585 return; 586 587 /* 588 * Check to see if this is one of the MDS_NO systems supporting TSX that 589 * are only exposed to SRBDS when TSX is enabled or when CPU is affected 590 * by Processor MMIO Stale Data vulnerability. 591 */ 592 ia32_cap = x86_read_arch_cap_msr(); 593 if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) && 594 !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) 595 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; 596 else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 597 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; 598 else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) 599 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED; 600 else if (cpu_mitigations_off() || srbds_off) 601 srbds_mitigation = SRBDS_MITIGATION_OFF; 602 603 update_srbds_msr(); 604 pr_info("%s\n", srbds_strings[srbds_mitigation]); 605 } 606 607 static int __init srbds_parse_cmdline(char *str) 608 { 609 if (!str) 610 return -EINVAL; 611 612 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) 613 return 0; 614 615 srbds_off = !strcmp(str, "off"); 616 return 0; 617 } 618 early_param("srbds", srbds_parse_cmdline); 619 620 #undef pr_fmt 621 #define pr_fmt(fmt) "L1D Flush : " fmt 622 623 enum l1d_flush_mitigations { 624 L1D_FLUSH_OFF = 0, 625 L1D_FLUSH_ON, 626 }; 627 628 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF; 629 630 static void __init l1d_flush_select_mitigation(void) 631 { 632 if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) 633 return; 634 635 static_branch_enable(&switch_mm_cond_l1d_flush); 636 pr_info("Conditional flush on switch_mm() enabled\n"); 637 } 638 639 static int __init l1d_flush_parse_cmdline(char *str) 640 { 641 if (!strcmp(str, "on")) 642 l1d_flush_mitigation = L1D_FLUSH_ON; 643 644 return 0; 645 } 646 early_param("l1d_flush", l1d_flush_parse_cmdline); 647 648 #undef pr_fmt 649 #define pr_fmt(fmt) "Spectre V1 : " fmt 650 651 enum spectre_v1_mitigation { 652 SPECTRE_V1_MITIGATION_NONE, 653 SPECTRE_V1_MITIGATION_AUTO, 654 }; 655 656 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init = 657 SPECTRE_V1_MITIGATION_AUTO; 658 659 static const char * const spectre_v1_strings[] = { 660 [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers", 661 [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization", 662 }; 663 664 /* 665 * Does SMAP provide full mitigation against speculative kernel access to 666 * userspace? 667 */ 668 static bool smap_works_speculatively(void) 669 { 670 if (!boot_cpu_has(X86_FEATURE_SMAP)) 671 return false; 672 673 /* 674 * On CPUs which are vulnerable to Meltdown, SMAP does not 675 * prevent speculative access to user data in the L1 cache. 676 * Consider SMAP to be non-functional as a mitigation on these 677 * CPUs. 678 */ 679 if (boot_cpu_has(X86_BUG_CPU_MELTDOWN)) 680 return false; 681 682 return true; 683 } 684 685 static void __init spectre_v1_select_mitigation(void) 686 { 687 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) { 688 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; 689 return; 690 } 691 692 if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) { 693 /* 694 * With Spectre v1, a user can speculatively control either 695 * path of a conditional swapgs with a user-controlled GS 696 * value. The mitigation is to add lfences to both code paths. 697 * 698 * If FSGSBASE is enabled, the user can put a kernel address in 699 * GS, in which case SMAP provides no protection. 700 * 701 * If FSGSBASE is disabled, the user can only put a user space 702 * address in GS. That makes an attack harder, but still 703 * possible if there's no SMAP protection. 704 */ 705 if (boot_cpu_has(X86_FEATURE_FSGSBASE) || 706 !smap_works_speculatively()) { 707 /* 708 * Mitigation can be provided from SWAPGS itself or 709 * PTI as the CR3 write in the Meltdown mitigation 710 * is serializing. 711 * 712 * If neither is there, mitigate with an LFENCE to 713 * stop speculation through swapgs. 714 */ 715 if (boot_cpu_has_bug(X86_BUG_SWAPGS) && 716 !boot_cpu_has(X86_FEATURE_PTI)) 717 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER); 718 719 /* 720 * Enable lfences in the kernel entry (non-swapgs) 721 * paths, to prevent user entry from speculatively 722 * skipping swapgs. 723 */ 724 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL); 725 } 726 } 727 728 pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]); 729 } 730 731 static int __init nospectre_v1_cmdline(char *str) 732 { 733 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; 734 return 0; 735 } 736 early_param("nospectre_v1", nospectre_v1_cmdline); 737 738 enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE; 739 740 #undef pr_fmt 741 #define pr_fmt(fmt) "RETBleed: " fmt 742 743 enum retbleed_mitigation { 744 RETBLEED_MITIGATION_NONE, 745 RETBLEED_MITIGATION_UNRET, 746 RETBLEED_MITIGATION_IBPB, 747 RETBLEED_MITIGATION_IBRS, 748 RETBLEED_MITIGATION_EIBRS, 749 RETBLEED_MITIGATION_STUFF, 750 }; 751 752 enum retbleed_mitigation_cmd { 753 RETBLEED_CMD_OFF, 754 RETBLEED_CMD_AUTO, 755 RETBLEED_CMD_UNRET, 756 RETBLEED_CMD_IBPB, 757 RETBLEED_CMD_STUFF, 758 }; 759 760 static const char * const retbleed_strings[] = { 761 [RETBLEED_MITIGATION_NONE] = "Vulnerable", 762 [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk", 763 [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB", 764 [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS", 765 [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS", 766 [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing", 767 }; 768 769 static enum retbleed_mitigation retbleed_mitigation __ro_after_init = 770 RETBLEED_MITIGATION_NONE; 771 static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init = 772 RETBLEED_CMD_AUTO; 773 774 static int __ro_after_init retbleed_nosmt = false; 775 776 static int __init retbleed_parse_cmdline(char *str) 777 { 778 if (!str) 779 return -EINVAL; 780 781 while (str) { 782 char *next = strchr(str, ','); 783 if (next) { 784 *next = 0; 785 next++; 786 } 787 788 if (!strcmp(str, "off")) { 789 retbleed_cmd = RETBLEED_CMD_OFF; 790 } else if (!strcmp(str, "auto")) { 791 retbleed_cmd = RETBLEED_CMD_AUTO; 792 } else if (!strcmp(str, "unret")) { 793 retbleed_cmd = RETBLEED_CMD_UNRET; 794 } else if (!strcmp(str, "ibpb")) { 795 retbleed_cmd = RETBLEED_CMD_IBPB; 796 } else if (!strcmp(str, "stuff")) { 797 retbleed_cmd = RETBLEED_CMD_STUFF; 798 } else if (!strcmp(str, "nosmt")) { 799 retbleed_nosmt = true; 800 } else if (!strcmp(str, "force")) { 801 setup_force_cpu_bug(X86_BUG_RETBLEED); 802 } else { 803 pr_err("Ignoring unknown retbleed option (%s).", str); 804 } 805 806 str = next; 807 } 808 809 return 0; 810 } 811 early_param("retbleed", retbleed_parse_cmdline); 812 813 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n" 814 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n" 815 816 static void __init retbleed_select_mitigation(void) 817 { 818 bool mitigate_smt = false; 819 820 if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off()) 821 return; 822 823 switch (retbleed_cmd) { 824 case RETBLEED_CMD_OFF: 825 return; 826 827 case RETBLEED_CMD_UNRET: 828 if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) { 829 retbleed_mitigation = RETBLEED_MITIGATION_UNRET; 830 } else { 831 pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n"); 832 goto do_cmd_auto; 833 } 834 break; 835 836 case RETBLEED_CMD_IBPB: 837 if (!boot_cpu_has(X86_FEATURE_IBPB)) { 838 pr_err("WARNING: CPU does not support IBPB.\n"); 839 goto do_cmd_auto; 840 } else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) { 841 retbleed_mitigation = RETBLEED_MITIGATION_IBPB; 842 } else { 843 pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n"); 844 goto do_cmd_auto; 845 } 846 break; 847 848 case RETBLEED_CMD_STUFF: 849 if (IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING) && 850 spectre_v2_enabled == SPECTRE_V2_RETPOLINE) { 851 retbleed_mitigation = RETBLEED_MITIGATION_STUFF; 852 853 } else { 854 if (IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING)) 855 pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n"); 856 else 857 pr_err("WARNING: kernel not compiled with CALL_DEPTH_TRACKING.\n"); 858 859 goto do_cmd_auto; 860 } 861 break; 862 863 do_cmd_auto: 864 case RETBLEED_CMD_AUTO: 865 default: 866 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || 867 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { 868 if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) 869 retbleed_mitigation = RETBLEED_MITIGATION_UNRET; 870 else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB)) 871 retbleed_mitigation = RETBLEED_MITIGATION_IBPB; 872 } 873 874 /* 875 * The Intel mitigation (IBRS or eIBRS) was already selected in 876 * spectre_v2_select_mitigation(). 'retbleed_mitigation' will 877 * be set accordingly below. 878 */ 879 880 break; 881 } 882 883 switch (retbleed_mitigation) { 884 case RETBLEED_MITIGATION_UNRET: 885 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 886 setup_force_cpu_cap(X86_FEATURE_UNRET); 887 888 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && 889 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) 890 pr_err(RETBLEED_UNTRAIN_MSG); 891 892 mitigate_smt = true; 893 break; 894 895 case RETBLEED_MITIGATION_IBPB: 896 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); 897 mitigate_smt = true; 898 break; 899 900 case RETBLEED_MITIGATION_STUFF: 901 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 902 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH); 903 x86_set_skl_return_thunk(); 904 break; 905 906 default: 907 break; 908 } 909 910 if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) && 911 (retbleed_nosmt || cpu_mitigations_auto_nosmt())) 912 cpu_smt_disable(false); 913 914 /* 915 * Let IBRS trump all on Intel without affecting the effects of the 916 * retbleed= cmdline option except for call depth based stuffing 917 */ 918 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 919 switch (spectre_v2_enabled) { 920 case SPECTRE_V2_IBRS: 921 retbleed_mitigation = RETBLEED_MITIGATION_IBRS; 922 break; 923 case SPECTRE_V2_EIBRS: 924 case SPECTRE_V2_EIBRS_RETPOLINE: 925 case SPECTRE_V2_EIBRS_LFENCE: 926 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; 927 break; 928 default: 929 if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) 930 pr_err(RETBLEED_INTEL_MSG); 931 } 932 } 933 934 pr_info("%s\n", retbleed_strings[retbleed_mitigation]); 935 } 936 937 #undef pr_fmt 938 #define pr_fmt(fmt) "Spectre V2 : " fmt 939 940 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = 941 SPECTRE_V2_USER_NONE; 942 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = 943 SPECTRE_V2_USER_NONE; 944 945 #ifdef CONFIG_RETPOLINE 946 static bool spectre_v2_bad_module; 947 948 bool retpoline_module_ok(bool has_retpoline) 949 { 950 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline) 951 return true; 952 953 pr_err("System may be vulnerable to spectre v2\n"); 954 spectre_v2_bad_module = true; 955 return false; 956 } 957 958 static inline const char *spectre_v2_module_string(void) 959 { 960 return spectre_v2_bad_module ? " - vulnerable module loaded" : ""; 961 } 962 #else 963 static inline const char *spectre_v2_module_string(void) { return ""; } 964 #endif 965 966 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n" 967 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" 968 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n" 969 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n" 970 971 #ifdef CONFIG_BPF_SYSCALL 972 void unpriv_ebpf_notify(int new_state) 973 { 974 if (new_state) 975 return; 976 977 /* Unprivileged eBPF is enabled */ 978 979 switch (spectre_v2_enabled) { 980 case SPECTRE_V2_EIBRS: 981 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); 982 break; 983 case SPECTRE_V2_EIBRS_LFENCE: 984 if (sched_smt_active()) 985 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); 986 break; 987 default: 988 break; 989 } 990 } 991 #endif 992 993 static inline bool match_option(const char *arg, int arglen, const char *opt) 994 { 995 int len = strlen(opt); 996 997 return len == arglen && !strncmp(arg, opt, len); 998 } 999 1000 /* The kernel command line selection for spectre v2 */ 1001 enum spectre_v2_mitigation_cmd { 1002 SPECTRE_V2_CMD_NONE, 1003 SPECTRE_V2_CMD_AUTO, 1004 SPECTRE_V2_CMD_FORCE, 1005 SPECTRE_V2_CMD_RETPOLINE, 1006 SPECTRE_V2_CMD_RETPOLINE_GENERIC, 1007 SPECTRE_V2_CMD_RETPOLINE_LFENCE, 1008 SPECTRE_V2_CMD_EIBRS, 1009 SPECTRE_V2_CMD_EIBRS_RETPOLINE, 1010 SPECTRE_V2_CMD_EIBRS_LFENCE, 1011 SPECTRE_V2_CMD_IBRS, 1012 }; 1013 1014 enum spectre_v2_user_cmd { 1015 SPECTRE_V2_USER_CMD_NONE, 1016 SPECTRE_V2_USER_CMD_AUTO, 1017 SPECTRE_V2_USER_CMD_FORCE, 1018 SPECTRE_V2_USER_CMD_PRCTL, 1019 SPECTRE_V2_USER_CMD_PRCTL_IBPB, 1020 SPECTRE_V2_USER_CMD_SECCOMP, 1021 SPECTRE_V2_USER_CMD_SECCOMP_IBPB, 1022 }; 1023 1024 static const char * const spectre_v2_user_strings[] = { 1025 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", 1026 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", 1027 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", 1028 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", 1029 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", 1030 }; 1031 1032 static const struct { 1033 const char *option; 1034 enum spectre_v2_user_cmd cmd; 1035 bool secure; 1036 } v2_user_options[] __initconst = { 1037 { "auto", SPECTRE_V2_USER_CMD_AUTO, false }, 1038 { "off", SPECTRE_V2_USER_CMD_NONE, false }, 1039 { "on", SPECTRE_V2_USER_CMD_FORCE, true }, 1040 { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false }, 1041 { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false }, 1042 { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false }, 1043 { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false }, 1044 }; 1045 1046 static void __init spec_v2_user_print_cond(const char *reason, bool secure) 1047 { 1048 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) 1049 pr_info("spectre_v2_user=%s forced on command line.\n", reason); 1050 } 1051 1052 static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd; 1053 1054 static enum spectre_v2_user_cmd __init 1055 spectre_v2_parse_user_cmdline(void) 1056 { 1057 char arg[20]; 1058 int ret, i; 1059 1060 switch (spectre_v2_cmd) { 1061 case SPECTRE_V2_CMD_NONE: 1062 return SPECTRE_V2_USER_CMD_NONE; 1063 case SPECTRE_V2_CMD_FORCE: 1064 return SPECTRE_V2_USER_CMD_FORCE; 1065 default: 1066 break; 1067 } 1068 1069 ret = cmdline_find_option(boot_command_line, "spectre_v2_user", 1070 arg, sizeof(arg)); 1071 if (ret < 0) 1072 return SPECTRE_V2_USER_CMD_AUTO; 1073 1074 for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { 1075 if (match_option(arg, ret, v2_user_options[i].option)) { 1076 spec_v2_user_print_cond(v2_user_options[i].option, 1077 v2_user_options[i].secure); 1078 return v2_user_options[i].cmd; 1079 } 1080 } 1081 1082 pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg); 1083 return SPECTRE_V2_USER_CMD_AUTO; 1084 } 1085 1086 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) 1087 { 1088 return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS; 1089 } 1090 1091 static void __init 1092 spectre_v2_user_select_mitigation(void) 1093 { 1094 enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE; 1095 bool smt_possible = IS_ENABLED(CONFIG_SMP); 1096 enum spectre_v2_user_cmd cmd; 1097 1098 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) 1099 return; 1100 1101 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED || 1102 cpu_smt_control == CPU_SMT_NOT_SUPPORTED) 1103 smt_possible = false; 1104 1105 cmd = spectre_v2_parse_user_cmdline(); 1106 switch (cmd) { 1107 case SPECTRE_V2_USER_CMD_NONE: 1108 goto set_mode; 1109 case SPECTRE_V2_USER_CMD_FORCE: 1110 mode = SPECTRE_V2_USER_STRICT; 1111 break; 1112 case SPECTRE_V2_USER_CMD_AUTO: 1113 case SPECTRE_V2_USER_CMD_PRCTL: 1114 case SPECTRE_V2_USER_CMD_PRCTL_IBPB: 1115 mode = SPECTRE_V2_USER_PRCTL; 1116 break; 1117 case SPECTRE_V2_USER_CMD_SECCOMP: 1118 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: 1119 if (IS_ENABLED(CONFIG_SECCOMP)) 1120 mode = SPECTRE_V2_USER_SECCOMP; 1121 else 1122 mode = SPECTRE_V2_USER_PRCTL; 1123 break; 1124 } 1125 1126 /* Initialize Indirect Branch Prediction Barrier */ 1127 if (boot_cpu_has(X86_FEATURE_IBPB)) { 1128 setup_force_cpu_cap(X86_FEATURE_USE_IBPB); 1129 1130 spectre_v2_user_ibpb = mode; 1131 switch (cmd) { 1132 case SPECTRE_V2_USER_CMD_FORCE: 1133 case SPECTRE_V2_USER_CMD_PRCTL_IBPB: 1134 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: 1135 static_branch_enable(&switch_mm_always_ibpb); 1136 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; 1137 break; 1138 case SPECTRE_V2_USER_CMD_PRCTL: 1139 case SPECTRE_V2_USER_CMD_AUTO: 1140 case SPECTRE_V2_USER_CMD_SECCOMP: 1141 static_branch_enable(&switch_mm_cond_ibpb); 1142 break; 1143 default: 1144 break; 1145 } 1146 1147 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", 1148 static_key_enabled(&switch_mm_always_ibpb) ? 1149 "always-on" : "conditional"); 1150 } 1151 1152 /* 1153 * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP 1154 * is not required. 1155 * 1156 * Intel's Enhanced IBRS also protects against cross-thread branch target 1157 * injection in user-mode as the IBRS bit remains always set which 1158 * implicitly enables cross-thread protections. However, in legacy IBRS 1159 * mode, the IBRS bit is set only on kernel entry and cleared on return 1160 * to userspace. AMD Automatic IBRS also does not protect userspace. 1161 * These modes therefore disable the implicit cross-thread protection, 1162 * so allow for STIBP to be selected in those cases. 1163 */ 1164 if (!boot_cpu_has(X86_FEATURE_STIBP) || 1165 !smt_possible || 1166 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && 1167 !boot_cpu_has(X86_FEATURE_AUTOIBRS))) 1168 return; 1169 1170 /* 1171 * At this point, an STIBP mode other than "off" has been set. 1172 * If STIBP support is not being forced, check if STIBP always-on 1173 * is preferred. 1174 */ 1175 if (mode != SPECTRE_V2_USER_STRICT && 1176 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) 1177 mode = SPECTRE_V2_USER_STRICT_PREFERRED; 1178 1179 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || 1180 retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { 1181 if (mode != SPECTRE_V2_USER_STRICT && 1182 mode != SPECTRE_V2_USER_STRICT_PREFERRED) 1183 pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n"); 1184 mode = SPECTRE_V2_USER_STRICT_PREFERRED; 1185 } 1186 1187 spectre_v2_user_stibp = mode; 1188 1189 set_mode: 1190 pr_info("%s\n", spectre_v2_user_strings[mode]); 1191 } 1192 1193 static const char * const spectre_v2_strings[] = { 1194 [SPECTRE_V2_NONE] = "Vulnerable", 1195 [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines", 1196 [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE", 1197 [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS", 1198 [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE", 1199 [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines", 1200 [SPECTRE_V2_IBRS] = "Mitigation: IBRS", 1201 }; 1202 1203 static const struct { 1204 const char *option; 1205 enum spectre_v2_mitigation_cmd cmd; 1206 bool secure; 1207 } mitigation_options[] __initconst = { 1208 { "off", SPECTRE_V2_CMD_NONE, false }, 1209 { "on", SPECTRE_V2_CMD_FORCE, true }, 1210 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, 1211 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, 1212 { "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, 1213 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, 1214 { "eibrs", SPECTRE_V2_CMD_EIBRS, false }, 1215 { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false }, 1216 { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false }, 1217 { "auto", SPECTRE_V2_CMD_AUTO, false }, 1218 { "ibrs", SPECTRE_V2_CMD_IBRS, false }, 1219 }; 1220 1221 static void __init spec_v2_print_cond(const char *reason, bool secure) 1222 { 1223 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) 1224 pr_info("%s selected on command line.\n", reason); 1225 } 1226 1227 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) 1228 { 1229 enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; 1230 char arg[20]; 1231 int ret, i; 1232 1233 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") || 1234 cpu_mitigations_off()) 1235 return SPECTRE_V2_CMD_NONE; 1236 1237 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); 1238 if (ret < 0) 1239 return SPECTRE_V2_CMD_AUTO; 1240 1241 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { 1242 if (!match_option(arg, ret, mitigation_options[i].option)) 1243 continue; 1244 cmd = mitigation_options[i].cmd; 1245 break; 1246 } 1247 1248 if (i >= ARRAY_SIZE(mitigation_options)) { 1249 pr_err("unknown option (%s). Switching to AUTO select\n", arg); 1250 return SPECTRE_V2_CMD_AUTO; 1251 } 1252 1253 if ((cmd == SPECTRE_V2_CMD_RETPOLINE || 1254 cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || 1255 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC || 1256 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || 1257 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && 1258 !IS_ENABLED(CONFIG_RETPOLINE)) { 1259 pr_err("%s selected but not compiled in. Switching to AUTO select\n", 1260 mitigation_options[i].option); 1261 return SPECTRE_V2_CMD_AUTO; 1262 } 1263 1264 if ((cmd == SPECTRE_V2_CMD_EIBRS || 1265 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || 1266 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && 1267 !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { 1268 pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n", 1269 mitigation_options[i].option); 1270 return SPECTRE_V2_CMD_AUTO; 1271 } 1272 1273 if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || 1274 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) && 1275 !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { 1276 pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n", 1277 mitigation_options[i].option); 1278 return SPECTRE_V2_CMD_AUTO; 1279 } 1280 1281 if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) { 1282 pr_err("%s selected but not compiled in. Switching to AUTO select\n", 1283 mitigation_options[i].option); 1284 return SPECTRE_V2_CMD_AUTO; 1285 } 1286 1287 if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { 1288 pr_err("%s selected but not Intel CPU. Switching to AUTO select\n", 1289 mitigation_options[i].option); 1290 return SPECTRE_V2_CMD_AUTO; 1291 } 1292 1293 if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) { 1294 pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n", 1295 mitigation_options[i].option); 1296 return SPECTRE_V2_CMD_AUTO; 1297 } 1298 1299 if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) { 1300 pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n", 1301 mitigation_options[i].option); 1302 return SPECTRE_V2_CMD_AUTO; 1303 } 1304 1305 spec_v2_print_cond(mitigation_options[i].option, 1306 mitigation_options[i].secure); 1307 return cmd; 1308 } 1309 1310 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void) 1311 { 1312 if (!IS_ENABLED(CONFIG_RETPOLINE)) { 1313 pr_err("Kernel not compiled with retpoline; no mitigation available!"); 1314 return SPECTRE_V2_NONE; 1315 } 1316 1317 return SPECTRE_V2_RETPOLINE; 1318 } 1319 1320 /* Disable in-kernel use of non-RSB RET predictors */ 1321 static void __init spec_ctrl_disable_kernel_rrsba(void) 1322 { 1323 u64 ia32_cap; 1324 1325 if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL)) 1326 return; 1327 1328 ia32_cap = x86_read_arch_cap_msr(); 1329 1330 if (ia32_cap & ARCH_CAP_RRSBA) { 1331 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S; 1332 update_spec_ctrl(x86_spec_ctrl_base); 1333 } 1334 } 1335 1336 static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode) 1337 { 1338 /* 1339 * Similar to context switches, there are two types of RSB attacks 1340 * after VM exit: 1341 * 1342 * 1) RSB underflow 1343 * 1344 * 2) Poisoned RSB entry 1345 * 1346 * When retpoline is enabled, both are mitigated by filling/clearing 1347 * the RSB. 1348 * 1349 * When IBRS is enabled, while #1 would be mitigated by the IBRS branch 1350 * prediction isolation protections, RSB still needs to be cleared 1351 * because of #2. Note that SMEP provides no protection here, unlike 1352 * user-space-poisoned RSB entries. 1353 * 1354 * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB 1355 * bug is present then a LITE version of RSB protection is required, 1356 * just a single call needs to retire before a RET is executed. 1357 */ 1358 switch (mode) { 1359 case SPECTRE_V2_NONE: 1360 return; 1361 1362 case SPECTRE_V2_EIBRS_LFENCE: 1363 case SPECTRE_V2_EIBRS: 1364 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { 1365 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); 1366 pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n"); 1367 } 1368 return; 1369 1370 case SPECTRE_V2_EIBRS_RETPOLINE: 1371 case SPECTRE_V2_RETPOLINE: 1372 case SPECTRE_V2_LFENCE: 1373 case SPECTRE_V2_IBRS: 1374 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT); 1375 pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n"); 1376 return; 1377 } 1378 1379 pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit"); 1380 dump_stack(); 1381 } 1382 1383 static void __init spectre_v2_select_mitigation(void) 1384 { 1385 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); 1386 enum spectre_v2_mitigation mode = SPECTRE_V2_NONE; 1387 1388 /* 1389 * If the CPU is not affected and the command line mode is NONE or AUTO 1390 * then nothing to do. 1391 */ 1392 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && 1393 (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO)) 1394 return; 1395 1396 switch (cmd) { 1397 case SPECTRE_V2_CMD_NONE: 1398 return; 1399 1400 case SPECTRE_V2_CMD_FORCE: 1401 case SPECTRE_V2_CMD_AUTO: 1402 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { 1403 mode = SPECTRE_V2_EIBRS; 1404 break; 1405 } 1406 1407 if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) && 1408 boot_cpu_has_bug(X86_BUG_RETBLEED) && 1409 retbleed_cmd != RETBLEED_CMD_OFF && 1410 retbleed_cmd != RETBLEED_CMD_STUFF && 1411 boot_cpu_has(X86_FEATURE_IBRS) && 1412 boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 1413 mode = SPECTRE_V2_IBRS; 1414 break; 1415 } 1416 1417 mode = spectre_v2_select_retpoline(); 1418 break; 1419 1420 case SPECTRE_V2_CMD_RETPOLINE_LFENCE: 1421 pr_err(SPECTRE_V2_LFENCE_MSG); 1422 mode = SPECTRE_V2_LFENCE; 1423 break; 1424 1425 case SPECTRE_V2_CMD_RETPOLINE_GENERIC: 1426 mode = SPECTRE_V2_RETPOLINE; 1427 break; 1428 1429 case SPECTRE_V2_CMD_RETPOLINE: 1430 mode = spectre_v2_select_retpoline(); 1431 break; 1432 1433 case SPECTRE_V2_CMD_IBRS: 1434 mode = SPECTRE_V2_IBRS; 1435 break; 1436 1437 case SPECTRE_V2_CMD_EIBRS: 1438 mode = SPECTRE_V2_EIBRS; 1439 break; 1440 1441 case SPECTRE_V2_CMD_EIBRS_LFENCE: 1442 mode = SPECTRE_V2_EIBRS_LFENCE; 1443 break; 1444 1445 case SPECTRE_V2_CMD_EIBRS_RETPOLINE: 1446 mode = SPECTRE_V2_EIBRS_RETPOLINE; 1447 break; 1448 } 1449 1450 if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) 1451 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); 1452 1453 if (spectre_v2_in_ibrs_mode(mode)) { 1454 if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) { 1455 msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); 1456 } else { 1457 x86_spec_ctrl_base |= SPEC_CTRL_IBRS; 1458 update_spec_ctrl(x86_spec_ctrl_base); 1459 } 1460 } 1461 1462 switch (mode) { 1463 case SPECTRE_V2_NONE: 1464 case SPECTRE_V2_EIBRS: 1465 break; 1466 1467 case SPECTRE_V2_IBRS: 1468 setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS); 1469 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) 1470 pr_warn(SPECTRE_V2_IBRS_PERF_MSG); 1471 break; 1472 1473 case SPECTRE_V2_LFENCE: 1474 case SPECTRE_V2_EIBRS_LFENCE: 1475 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE); 1476 fallthrough; 1477 1478 case SPECTRE_V2_RETPOLINE: 1479 case SPECTRE_V2_EIBRS_RETPOLINE: 1480 setup_force_cpu_cap(X86_FEATURE_RETPOLINE); 1481 break; 1482 } 1483 1484 /* 1485 * Disable alternate RSB predictions in kernel when indirect CALLs and 1486 * JMPs gets protection against BHI and Intramode-BTI, but RET 1487 * prediction from a non-RSB predictor is still a risk. 1488 */ 1489 if (mode == SPECTRE_V2_EIBRS_LFENCE || 1490 mode == SPECTRE_V2_EIBRS_RETPOLINE || 1491 mode == SPECTRE_V2_RETPOLINE) 1492 spec_ctrl_disable_kernel_rrsba(); 1493 1494 spectre_v2_enabled = mode; 1495 pr_info("%s\n", spectre_v2_strings[mode]); 1496 1497 /* 1498 * If Spectre v2 protection has been enabled, fill the RSB during a 1499 * context switch. In general there are two types of RSB attacks 1500 * across context switches, for which the CALLs/RETs may be unbalanced. 1501 * 1502 * 1) RSB underflow 1503 * 1504 * Some Intel parts have "bottomless RSB". When the RSB is empty, 1505 * speculated return targets may come from the branch predictor, 1506 * which could have a user-poisoned BTB or BHB entry. 1507 * 1508 * AMD has it even worse: *all* returns are speculated from the BTB, 1509 * regardless of the state of the RSB. 1510 * 1511 * When IBRS or eIBRS is enabled, the "user -> kernel" attack 1512 * scenario is mitigated by the IBRS branch prediction isolation 1513 * properties, so the RSB buffer filling wouldn't be necessary to 1514 * protect against this type of attack. 1515 * 1516 * The "user -> user" attack scenario is mitigated by RSB filling. 1517 * 1518 * 2) Poisoned RSB entry 1519 * 1520 * If the 'next' in-kernel return stack is shorter than 'prev', 1521 * 'next' could be tricked into speculating with a user-poisoned RSB 1522 * entry. 1523 * 1524 * The "user -> kernel" attack scenario is mitigated by SMEP and 1525 * eIBRS. 1526 * 1527 * The "user -> user" scenario, also known as SpectreBHB, requires 1528 * RSB clearing. 1529 * 1530 * So to mitigate all cases, unconditionally fill RSB on context 1531 * switches. 1532 * 1533 * FIXME: Is this pointless for retbleed-affected AMD? 1534 */ 1535 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); 1536 pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); 1537 1538 spectre_v2_determine_rsb_fill_type_at_vmexit(mode); 1539 1540 /* 1541 * Retpoline protects the kernel, but doesn't protect firmware. IBRS 1542 * and Enhanced IBRS protect firmware too, so enable IBRS around 1543 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't 1544 * otherwise enabled. 1545 * 1546 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because 1547 * the user might select retpoline on the kernel command line and if 1548 * the CPU supports Enhanced IBRS, kernel might un-intentionally not 1549 * enable IBRS around firmware calls. 1550 */ 1551 if (boot_cpu_has_bug(X86_BUG_RETBLEED) && 1552 boot_cpu_has(X86_FEATURE_IBPB) && 1553 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || 1554 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) { 1555 1556 if (retbleed_cmd != RETBLEED_CMD_IBPB) { 1557 setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW); 1558 pr_info("Enabling Speculation Barrier for firmware calls\n"); 1559 } 1560 1561 } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) { 1562 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); 1563 pr_info("Enabling Restricted Speculation for firmware calls\n"); 1564 } 1565 1566 /* Set up IBPB and STIBP depending on the general spectre V2 command */ 1567 spectre_v2_cmd = cmd; 1568 } 1569 1570 static void update_stibp_msr(void * __unused) 1571 { 1572 u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP); 1573 update_spec_ctrl(val); 1574 } 1575 1576 /* Update x86_spec_ctrl_base in case SMT state changed. */ 1577 static void update_stibp_strict(void) 1578 { 1579 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP; 1580 1581 if (sched_smt_active()) 1582 mask |= SPEC_CTRL_STIBP; 1583 1584 if (mask == x86_spec_ctrl_base) 1585 return; 1586 1587 pr_info("Update user space SMT mitigation: STIBP %s\n", 1588 mask & SPEC_CTRL_STIBP ? "always-on" : "off"); 1589 x86_spec_ctrl_base = mask; 1590 on_each_cpu(update_stibp_msr, NULL, 1); 1591 } 1592 1593 /* Update the static key controlling the evaluation of TIF_SPEC_IB */ 1594 static void update_indir_branch_cond(void) 1595 { 1596 if (sched_smt_active()) 1597 static_branch_enable(&switch_to_cond_stibp); 1598 else 1599 static_branch_disable(&switch_to_cond_stibp); 1600 } 1601 1602 #undef pr_fmt 1603 #define pr_fmt(fmt) fmt 1604 1605 /* Update the static key controlling the MDS CPU buffer clear in idle */ 1606 static void update_mds_branch_idle(void) 1607 { 1608 u64 ia32_cap = x86_read_arch_cap_msr(); 1609 1610 /* 1611 * Enable the idle clearing if SMT is active on CPUs which are 1612 * affected only by MSBDS and not any other MDS variant. 1613 * 1614 * The other variants cannot be mitigated when SMT is enabled, so 1615 * clearing the buffers on idle just to prevent the Store Buffer 1616 * repartitioning leak would be a window dressing exercise. 1617 */ 1618 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) 1619 return; 1620 1621 if (sched_smt_active()) { 1622 static_branch_enable(&mds_idle_clear); 1623 } else if (mmio_mitigation == MMIO_MITIGATION_OFF || 1624 (ia32_cap & ARCH_CAP_FBSDP_NO)) { 1625 static_branch_disable(&mds_idle_clear); 1626 } 1627 } 1628 1629 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" 1630 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" 1631 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" 1632 1633 void cpu_bugs_smt_update(void) 1634 { 1635 mutex_lock(&spec_ctrl_mutex); 1636 1637 if (sched_smt_active() && unprivileged_ebpf_enabled() && 1638 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) 1639 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); 1640 1641 switch (spectre_v2_user_stibp) { 1642 case SPECTRE_V2_USER_NONE: 1643 break; 1644 case SPECTRE_V2_USER_STRICT: 1645 case SPECTRE_V2_USER_STRICT_PREFERRED: 1646 update_stibp_strict(); 1647 break; 1648 case SPECTRE_V2_USER_PRCTL: 1649 case SPECTRE_V2_USER_SECCOMP: 1650 update_indir_branch_cond(); 1651 break; 1652 } 1653 1654 switch (mds_mitigation) { 1655 case MDS_MITIGATION_FULL: 1656 case MDS_MITIGATION_VMWERV: 1657 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) 1658 pr_warn_once(MDS_MSG_SMT); 1659 update_mds_branch_idle(); 1660 break; 1661 case MDS_MITIGATION_OFF: 1662 break; 1663 } 1664 1665 switch (taa_mitigation) { 1666 case TAA_MITIGATION_VERW: 1667 case TAA_MITIGATION_UCODE_NEEDED: 1668 if (sched_smt_active()) 1669 pr_warn_once(TAA_MSG_SMT); 1670 break; 1671 case TAA_MITIGATION_TSX_DISABLED: 1672 case TAA_MITIGATION_OFF: 1673 break; 1674 } 1675 1676 switch (mmio_mitigation) { 1677 case MMIO_MITIGATION_VERW: 1678 case MMIO_MITIGATION_UCODE_NEEDED: 1679 if (sched_smt_active()) 1680 pr_warn_once(MMIO_MSG_SMT); 1681 break; 1682 case MMIO_MITIGATION_OFF: 1683 break; 1684 } 1685 1686 mutex_unlock(&spec_ctrl_mutex); 1687 } 1688 1689 #undef pr_fmt 1690 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt 1691 1692 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE; 1693 1694 /* The kernel command line selection */ 1695 enum ssb_mitigation_cmd { 1696 SPEC_STORE_BYPASS_CMD_NONE, 1697 SPEC_STORE_BYPASS_CMD_AUTO, 1698 SPEC_STORE_BYPASS_CMD_ON, 1699 SPEC_STORE_BYPASS_CMD_PRCTL, 1700 SPEC_STORE_BYPASS_CMD_SECCOMP, 1701 }; 1702 1703 static const char * const ssb_strings[] = { 1704 [SPEC_STORE_BYPASS_NONE] = "Vulnerable", 1705 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", 1706 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", 1707 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", 1708 }; 1709 1710 static const struct { 1711 const char *option; 1712 enum ssb_mitigation_cmd cmd; 1713 } ssb_mitigation_options[] __initconst = { 1714 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ 1715 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ 1716 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ 1717 { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */ 1718 { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */ 1719 }; 1720 1721 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) 1722 { 1723 enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO; 1724 char arg[20]; 1725 int ret, i; 1726 1727 if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") || 1728 cpu_mitigations_off()) { 1729 return SPEC_STORE_BYPASS_CMD_NONE; 1730 } else { 1731 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", 1732 arg, sizeof(arg)); 1733 if (ret < 0) 1734 return SPEC_STORE_BYPASS_CMD_AUTO; 1735 1736 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) { 1737 if (!match_option(arg, ret, ssb_mitigation_options[i].option)) 1738 continue; 1739 1740 cmd = ssb_mitigation_options[i].cmd; 1741 break; 1742 } 1743 1744 if (i >= ARRAY_SIZE(ssb_mitigation_options)) { 1745 pr_err("unknown option (%s). Switching to AUTO select\n", arg); 1746 return SPEC_STORE_BYPASS_CMD_AUTO; 1747 } 1748 } 1749 1750 return cmd; 1751 } 1752 1753 static enum ssb_mitigation __init __ssb_select_mitigation(void) 1754 { 1755 enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE; 1756 enum ssb_mitigation_cmd cmd; 1757 1758 if (!boot_cpu_has(X86_FEATURE_SSBD)) 1759 return mode; 1760 1761 cmd = ssb_parse_cmdline(); 1762 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && 1763 (cmd == SPEC_STORE_BYPASS_CMD_NONE || 1764 cmd == SPEC_STORE_BYPASS_CMD_AUTO)) 1765 return mode; 1766 1767 switch (cmd) { 1768 case SPEC_STORE_BYPASS_CMD_SECCOMP: 1769 /* 1770 * Choose prctl+seccomp as the default mode if seccomp is 1771 * enabled. 1772 */ 1773 if (IS_ENABLED(CONFIG_SECCOMP)) 1774 mode = SPEC_STORE_BYPASS_SECCOMP; 1775 else 1776 mode = SPEC_STORE_BYPASS_PRCTL; 1777 break; 1778 case SPEC_STORE_BYPASS_CMD_ON: 1779 mode = SPEC_STORE_BYPASS_DISABLE; 1780 break; 1781 case SPEC_STORE_BYPASS_CMD_AUTO: 1782 case SPEC_STORE_BYPASS_CMD_PRCTL: 1783 mode = SPEC_STORE_BYPASS_PRCTL; 1784 break; 1785 case SPEC_STORE_BYPASS_CMD_NONE: 1786 break; 1787 } 1788 1789 /* 1790 * We have three CPU feature flags that are in play here: 1791 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. 1792 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass 1793 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation 1794 */ 1795 if (mode == SPEC_STORE_BYPASS_DISABLE) { 1796 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); 1797 /* 1798 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may 1799 * use a completely different MSR and bit dependent on family. 1800 */ 1801 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && 1802 !static_cpu_has(X86_FEATURE_AMD_SSBD)) { 1803 x86_amd_ssb_disable(); 1804 } else { 1805 x86_spec_ctrl_base |= SPEC_CTRL_SSBD; 1806 update_spec_ctrl(x86_spec_ctrl_base); 1807 } 1808 } 1809 1810 return mode; 1811 } 1812 1813 static void ssb_select_mitigation(void) 1814 { 1815 ssb_mode = __ssb_select_mitigation(); 1816 1817 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 1818 pr_info("%s\n", ssb_strings[ssb_mode]); 1819 } 1820 1821 #undef pr_fmt 1822 #define pr_fmt(fmt) "Speculation prctl: " fmt 1823 1824 static void task_update_spec_tif(struct task_struct *tsk) 1825 { 1826 /* Force the update of the real TIF bits */ 1827 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); 1828 1829 /* 1830 * Immediately update the speculation control MSRs for the current 1831 * task, but for a non-current task delay setting the CPU 1832 * mitigation until it is scheduled next. 1833 * 1834 * This can only happen for SECCOMP mitigation. For PRCTL it's 1835 * always the current task. 1836 */ 1837 if (tsk == current) 1838 speculation_ctrl_update_current(); 1839 } 1840 1841 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl) 1842 { 1843 1844 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) 1845 return -EPERM; 1846 1847 switch (ctrl) { 1848 case PR_SPEC_ENABLE: 1849 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); 1850 return 0; 1851 case PR_SPEC_DISABLE: 1852 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); 1853 return 0; 1854 default: 1855 return -ERANGE; 1856 } 1857 } 1858 1859 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) 1860 { 1861 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && 1862 ssb_mode != SPEC_STORE_BYPASS_SECCOMP) 1863 return -ENXIO; 1864 1865 switch (ctrl) { 1866 case PR_SPEC_ENABLE: 1867 /* If speculation is force disabled, enable is not allowed */ 1868 if (task_spec_ssb_force_disable(task)) 1869 return -EPERM; 1870 task_clear_spec_ssb_disable(task); 1871 task_clear_spec_ssb_noexec(task); 1872 task_update_spec_tif(task); 1873 break; 1874 case PR_SPEC_DISABLE: 1875 task_set_spec_ssb_disable(task); 1876 task_clear_spec_ssb_noexec(task); 1877 task_update_spec_tif(task); 1878 break; 1879 case PR_SPEC_FORCE_DISABLE: 1880 task_set_spec_ssb_disable(task); 1881 task_set_spec_ssb_force_disable(task); 1882 task_clear_spec_ssb_noexec(task); 1883 task_update_spec_tif(task); 1884 break; 1885 case PR_SPEC_DISABLE_NOEXEC: 1886 if (task_spec_ssb_force_disable(task)) 1887 return -EPERM; 1888 task_set_spec_ssb_disable(task); 1889 task_set_spec_ssb_noexec(task); 1890 task_update_spec_tif(task); 1891 break; 1892 default: 1893 return -ERANGE; 1894 } 1895 return 0; 1896 } 1897 1898 static bool is_spec_ib_user_controlled(void) 1899 { 1900 return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || 1901 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || 1902 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || 1903 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP; 1904 } 1905 1906 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) 1907 { 1908 switch (ctrl) { 1909 case PR_SPEC_ENABLE: 1910 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 1911 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 1912 return 0; 1913 1914 /* 1915 * With strict mode for both IBPB and STIBP, the instruction 1916 * code paths avoid checking this task flag and instead, 1917 * unconditionally run the instruction. However, STIBP and IBPB 1918 * are independent and either can be set to conditionally 1919 * enabled regardless of the mode of the other. 1920 * 1921 * If either is set to conditional, allow the task flag to be 1922 * updated, unless it was force-disabled by a previous prctl 1923 * call. Currently, this is possible on an AMD CPU which has the 1924 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the 1925 * kernel is booted with 'spectre_v2_user=seccomp', then 1926 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and 1927 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED. 1928 */ 1929 if (!is_spec_ib_user_controlled() || 1930 task_spec_ib_force_disable(task)) 1931 return -EPERM; 1932 1933 task_clear_spec_ib_disable(task); 1934 task_update_spec_tif(task); 1935 break; 1936 case PR_SPEC_DISABLE: 1937 case PR_SPEC_FORCE_DISABLE: 1938 /* 1939 * Indirect branch speculation is always allowed when 1940 * mitigation is force disabled. 1941 */ 1942 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 1943 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 1944 return -EPERM; 1945 1946 if (!is_spec_ib_user_controlled()) 1947 return 0; 1948 1949 task_set_spec_ib_disable(task); 1950 if (ctrl == PR_SPEC_FORCE_DISABLE) 1951 task_set_spec_ib_force_disable(task); 1952 task_update_spec_tif(task); 1953 if (task == current) 1954 indirect_branch_prediction_barrier(); 1955 break; 1956 default: 1957 return -ERANGE; 1958 } 1959 return 0; 1960 } 1961 1962 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, 1963 unsigned long ctrl) 1964 { 1965 switch (which) { 1966 case PR_SPEC_STORE_BYPASS: 1967 return ssb_prctl_set(task, ctrl); 1968 case PR_SPEC_INDIRECT_BRANCH: 1969 return ib_prctl_set(task, ctrl); 1970 case PR_SPEC_L1D_FLUSH: 1971 return l1d_flush_prctl_set(task, ctrl); 1972 default: 1973 return -ENODEV; 1974 } 1975 } 1976 1977 #ifdef CONFIG_SECCOMP 1978 void arch_seccomp_spec_mitigate(struct task_struct *task) 1979 { 1980 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) 1981 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); 1982 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || 1983 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) 1984 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); 1985 } 1986 #endif 1987 1988 static int l1d_flush_prctl_get(struct task_struct *task) 1989 { 1990 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) 1991 return PR_SPEC_FORCE_DISABLE; 1992 1993 if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH)) 1994 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 1995 else 1996 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 1997 } 1998 1999 static int ssb_prctl_get(struct task_struct *task) 2000 { 2001 switch (ssb_mode) { 2002 case SPEC_STORE_BYPASS_DISABLE: 2003 return PR_SPEC_DISABLE; 2004 case SPEC_STORE_BYPASS_SECCOMP: 2005 case SPEC_STORE_BYPASS_PRCTL: 2006 if (task_spec_ssb_force_disable(task)) 2007 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; 2008 if (task_spec_ssb_noexec(task)) 2009 return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC; 2010 if (task_spec_ssb_disable(task)) 2011 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 2012 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 2013 default: 2014 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 2015 return PR_SPEC_ENABLE; 2016 return PR_SPEC_NOT_AFFECTED; 2017 } 2018 } 2019 2020 static int ib_prctl_get(struct task_struct *task) 2021 { 2022 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 2023 return PR_SPEC_NOT_AFFECTED; 2024 2025 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 2026 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 2027 return PR_SPEC_ENABLE; 2028 else if (is_spec_ib_user_controlled()) { 2029 if (task_spec_ib_force_disable(task)) 2030 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; 2031 if (task_spec_ib_disable(task)) 2032 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 2033 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 2034 } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || 2035 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || 2036 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) 2037 return PR_SPEC_DISABLE; 2038 else 2039 return PR_SPEC_NOT_AFFECTED; 2040 } 2041 2042 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) 2043 { 2044 switch (which) { 2045 case PR_SPEC_STORE_BYPASS: 2046 return ssb_prctl_get(task); 2047 case PR_SPEC_INDIRECT_BRANCH: 2048 return ib_prctl_get(task); 2049 case PR_SPEC_L1D_FLUSH: 2050 return l1d_flush_prctl_get(task); 2051 default: 2052 return -ENODEV; 2053 } 2054 } 2055 2056 void x86_spec_ctrl_setup_ap(void) 2057 { 2058 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) 2059 update_spec_ctrl(x86_spec_ctrl_base); 2060 2061 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) 2062 x86_amd_ssb_disable(); 2063 } 2064 2065 bool itlb_multihit_kvm_mitigation; 2066 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation); 2067 2068 #undef pr_fmt 2069 #define pr_fmt(fmt) "L1TF: " fmt 2070 2071 /* Default mitigation for L1TF-affected CPUs */ 2072 enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH; 2073 #if IS_ENABLED(CONFIG_KVM_INTEL) 2074 EXPORT_SYMBOL_GPL(l1tf_mitigation); 2075 #endif 2076 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; 2077 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); 2078 2079 /* 2080 * These CPUs all support 44bits physical address space internally in the 2081 * cache but CPUID can report a smaller number of physical address bits. 2082 * 2083 * The L1TF mitigation uses the top most address bit for the inversion of 2084 * non present PTEs. When the installed memory reaches into the top most 2085 * address bit due to memory holes, which has been observed on machines 2086 * which report 36bits physical address bits and have 32G RAM installed, 2087 * then the mitigation range check in l1tf_select_mitigation() triggers. 2088 * This is a false positive because the mitigation is still possible due to 2089 * the fact that the cache uses 44bit internally. Use the cache bits 2090 * instead of the reported physical bits and adjust them on the affected 2091 * machines to 44bit if the reported bits are less than 44. 2092 */ 2093 static void override_cache_bits(struct cpuinfo_x86 *c) 2094 { 2095 if (c->x86 != 6) 2096 return; 2097 2098 switch (c->x86_model) { 2099 case INTEL_FAM6_NEHALEM: 2100 case INTEL_FAM6_WESTMERE: 2101 case INTEL_FAM6_SANDYBRIDGE: 2102 case INTEL_FAM6_IVYBRIDGE: 2103 case INTEL_FAM6_HASWELL: 2104 case INTEL_FAM6_HASWELL_L: 2105 case INTEL_FAM6_HASWELL_G: 2106 case INTEL_FAM6_BROADWELL: 2107 case INTEL_FAM6_BROADWELL_G: 2108 case INTEL_FAM6_SKYLAKE_L: 2109 case INTEL_FAM6_SKYLAKE: 2110 case INTEL_FAM6_KABYLAKE_L: 2111 case INTEL_FAM6_KABYLAKE: 2112 if (c->x86_cache_bits < 44) 2113 c->x86_cache_bits = 44; 2114 break; 2115 } 2116 } 2117 2118 static void __init l1tf_select_mitigation(void) 2119 { 2120 u64 half_pa; 2121 2122 if (!boot_cpu_has_bug(X86_BUG_L1TF)) 2123 return; 2124 2125 if (cpu_mitigations_off()) 2126 l1tf_mitigation = L1TF_MITIGATION_OFF; 2127 else if (cpu_mitigations_auto_nosmt()) 2128 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; 2129 2130 override_cache_bits(&boot_cpu_data); 2131 2132 switch (l1tf_mitigation) { 2133 case L1TF_MITIGATION_OFF: 2134 case L1TF_MITIGATION_FLUSH_NOWARN: 2135 case L1TF_MITIGATION_FLUSH: 2136 break; 2137 case L1TF_MITIGATION_FLUSH_NOSMT: 2138 case L1TF_MITIGATION_FULL: 2139 cpu_smt_disable(false); 2140 break; 2141 case L1TF_MITIGATION_FULL_FORCE: 2142 cpu_smt_disable(true); 2143 break; 2144 } 2145 2146 #if CONFIG_PGTABLE_LEVELS == 2 2147 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n"); 2148 return; 2149 #endif 2150 2151 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; 2152 if (l1tf_mitigation != L1TF_MITIGATION_OFF && 2153 e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { 2154 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); 2155 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", 2156 half_pa); 2157 pr_info("However, doing so will make a part of your RAM unusable.\n"); 2158 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n"); 2159 return; 2160 } 2161 2162 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV); 2163 } 2164 2165 static int __init l1tf_cmdline(char *str) 2166 { 2167 if (!boot_cpu_has_bug(X86_BUG_L1TF)) 2168 return 0; 2169 2170 if (!str) 2171 return -EINVAL; 2172 2173 if (!strcmp(str, "off")) 2174 l1tf_mitigation = L1TF_MITIGATION_OFF; 2175 else if (!strcmp(str, "flush,nowarn")) 2176 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN; 2177 else if (!strcmp(str, "flush")) 2178 l1tf_mitigation = L1TF_MITIGATION_FLUSH; 2179 else if (!strcmp(str, "flush,nosmt")) 2180 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; 2181 else if (!strcmp(str, "full")) 2182 l1tf_mitigation = L1TF_MITIGATION_FULL; 2183 else if (!strcmp(str, "full,force")) 2184 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE; 2185 2186 return 0; 2187 } 2188 early_param("l1tf", l1tf_cmdline); 2189 2190 #undef pr_fmt 2191 #define pr_fmt(fmt) fmt 2192 2193 #ifdef CONFIG_SYSFS 2194 2195 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" 2196 2197 #if IS_ENABLED(CONFIG_KVM_INTEL) 2198 static const char * const l1tf_vmx_states[] = { 2199 [VMENTER_L1D_FLUSH_AUTO] = "auto", 2200 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", 2201 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", 2202 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes", 2203 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled", 2204 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary" 2205 }; 2206 2207 static ssize_t l1tf_show_state(char *buf) 2208 { 2209 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) 2210 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); 2211 2212 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || 2213 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && 2214 sched_smt_active())) { 2215 return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, 2216 l1tf_vmx_states[l1tf_vmx_mitigation]); 2217 } 2218 2219 return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, 2220 l1tf_vmx_states[l1tf_vmx_mitigation], 2221 sched_smt_active() ? "vulnerable" : "disabled"); 2222 } 2223 2224 static ssize_t itlb_multihit_show_state(char *buf) 2225 { 2226 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || 2227 !boot_cpu_has(X86_FEATURE_VMX)) 2228 return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n"); 2229 else if (!(cr4_read_shadow() & X86_CR4_VMXE)) 2230 return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n"); 2231 else if (itlb_multihit_kvm_mitigation) 2232 return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n"); 2233 else 2234 return sysfs_emit(buf, "KVM: Vulnerable\n"); 2235 } 2236 #else 2237 static ssize_t l1tf_show_state(char *buf) 2238 { 2239 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); 2240 } 2241 2242 static ssize_t itlb_multihit_show_state(char *buf) 2243 { 2244 return sysfs_emit(buf, "Processor vulnerable\n"); 2245 } 2246 #endif 2247 2248 static ssize_t mds_show_state(char *buf) 2249 { 2250 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 2251 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 2252 mds_strings[mds_mitigation]); 2253 } 2254 2255 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { 2256 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], 2257 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : 2258 sched_smt_active() ? "mitigated" : "disabled")); 2259 } 2260 2261 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], 2262 sched_smt_active() ? "vulnerable" : "disabled"); 2263 } 2264 2265 static ssize_t tsx_async_abort_show_state(char *buf) 2266 { 2267 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || 2268 (taa_mitigation == TAA_MITIGATION_OFF)) 2269 return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]); 2270 2271 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 2272 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 2273 taa_strings[taa_mitigation]); 2274 } 2275 2276 return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], 2277 sched_smt_active() ? "vulnerable" : "disabled"); 2278 } 2279 2280 static ssize_t mmio_stale_data_show_state(char *buf) 2281 { 2282 if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) 2283 return sysfs_emit(buf, "Unknown: No mitigations\n"); 2284 2285 if (mmio_mitigation == MMIO_MITIGATION_OFF) 2286 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); 2287 2288 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 2289 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 2290 mmio_strings[mmio_mitigation]); 2291 } 2292 2293 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], 2294 sched_smt_active() ? "vulnerable" : "disabled"); 2295 } 2296 2297 static char *stibp_state(void) 2298 { 2299 if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && 2300 !boot_cpu_has(X86_FEATURE_AUTOIBRS)) 2301 return ""; 2302 2303 switch (spectre_v2_user_stibp) { 2304 case SPECTRE_V2_USER_NONE: 2305 return ", STIBP: disabled"; 2306 case SPECTRE_V2_USER_STRICT: 2307 return ", STIBP: forced"; 2308 case SPECTRE_V2_USER_STRICT_PREFERRED: 2309 return ", STIBP: always-on"; 2310 case SPECTRE_V2_USER_PRCTL: 2311 case SPECTRE_V2_USER_SECCOMP: 2312 if (static_key_enabled(&switch_to_cond_stibp)) 2313 return ", STIBP: conditional"; 2314 } 2315 return ""; 2316 } 2317 2318 static char *ibpb_state(void) 2319 { 2320 if (boot_cpu_has(X86_FEATURE_IBPB)) { 2321 if (static_key_enabled(&switch_mm_always_ibpb)) 2322 return ", IBPB: always-on"; 2323 if (static_key_enabled(&switch_mm_cond_ibpb)) 2324 return ", IBPB: conditional"; 2325 return ", IBPB: disabled"; 2326 } 2327 return ""; 2328 } 2329 2330 static char *pbrsb_eibrs_state(void) 2331 { 2332 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { 2333 if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || 2334 boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) 2335 return ", PBRSB-eIBRS: SW sequence"; 2336 else 2337 return ", PBRSB-eIBRS: Vulnerable"; 2338 } else { 2339 return ", PBRSB-eIBRS: Not affected"; 2340 } 2341 } 2342 2343 static ssize_t spectre_v2_show_state(char *buf) 2344 { 2345 if (spectre_v2_enabled == SPECTRE_V2_LFENCE) 2346 return sysfs_emit(buf, "Vulnerable: LFENCE\n"); 2347 2348 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) 2349 return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); 2350 2351 if (sched_smt_active() && unprivileged_ebpf_enabled() && 2352 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) 2353 return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); 2354 2355 return sysfs_emit(buf, "%s%s%s%s%s%s%s\n", 2356 spectre_v2_strings[spectre_v2_enabled], 2357 ibpb_state(), 2358 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", 2359 stibp_state(), 2360 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", 2361 pbrsb_eibrs_state(), 2362 spectre_v2_module_string()); 2363 } 2364 2365 static ssize_t srbds_show_state(char *buf) 2366 { 2367 return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]); 2368 } 2369 2370 static ssize_t retbleed_show_state(char *buf) 2371 { 2372 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || 2373 retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { 2374 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && 2375 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) 2376 return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); 2377 2378 return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation], 2379 !sched_smt_active() ? "disabled" : 2380 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || 2381 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? 2382 "enabled with STIBP protection" : "vulnerable"); 2383 } 2384 2385 return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]); 2386 } 2387 2388 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, 2389 char *buf, unsigned int bug) 2390 { 2391 if (!boot_cpu_has_bug(bug)) 2392 return sysfs_emit(buf, "Not affected\n"); 2393 2394 switch (bug) { 2395 case X86_BUG_CPU_MELTDOWN: 2396 if (boot_cpu_has(X86_FEATURE_PTI)) 2397 return sysfs_emit(buf, "Mitigation: PTI\n"); 2398 2399 if (hypervisor_is_type(X86_HYPER_XEN_PV)) 2400 return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); 2401 2402 break; 2403 2404 case X86_BUG_SPECTRE_V1: 2405 return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); 2406 2407 case X86_BUG_SPECTRE_V2: 2408 return spectre_v2_show_state(buf); 2409 2410 case X86_BUG_SPEC_STORE_BYPASS: 2411 return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]); 2412 2413 case X86_BUG_L1TF: 2414 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) 2415 return l1tf_show_state(buf); 2416 break; 2417 2418 case X86_BUG_MDS: 2419 return mds_show_state(buf); 2420 2421 case X86_BUG_TAA: 2422 return tsx_async_abort_show_state(buf); 2423 2424 case X86_BUG_ITLB_MULTIHIT: 2425 return itlb_multihit_show_state(buf); 2426 2427 case X86_BUG_SRBDS: 2428 return srbds_show_state(buf); 2429 2430 case X86_BUG_MMIO_STALE_DATA: 2431 case X86_BUG_MMIO_UNKNOWN: 2432 return mmio_stale_data_show_state(buf); 2433 2434 case X86_BUG_RETBLEED: 2435 return retbleed_show_state(buf); 2436 2437 default: 2438 break; 2439 } 2440 2441 return sysfs_emit(buf, "Vulnerable\n"); 2442 } 2443 2444 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) 2445 { 2446 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); 2447 } 2448 2449 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) 2450 { 2451 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); 2452 } 2453 2454 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) 2455 { 2456 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); 2457 } 2458 2459 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) 2460 { 2461 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); 2462 } 2463 2464 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) 2465 { 2466 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); 2467 } 2468 2469 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) 2470 { 2471 return cpu_show_common(dev, attr, buf, X86_BUG_MDS); 2472 } 2473 2474 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf) 2475 { 2476 return cpu_show_common(dev, attr, buf, X86_BUG_TAA); 2477 } 2478 2479 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf) 2480 { 2481 return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); 2482 } 2483 2484 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) 2485 { 2486 return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); 2487 } 2488 2489 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) 2490 { 2491 if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) 2492 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN); 2493 else 2494 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); 2495 } 2496 2497 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) 2498 { 2499 return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); 2500 } 2501 #endif 2502