// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 * - Rafael R. Reilova (moved everything from head.S),
 *   <rreilova@ececs.uc.edu>
 * - Channing Corn (tests & fixes),
 * - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/api.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/cpu.h>

#include "cpu.h"

static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init retbleed_select_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init md_clear_update_mitigation(void);
static void __init md_clear_select_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
static void __init gds_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);

u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
EXPORT_SYMBOL_GPL(x86_pred_cmd);

static DEFINE_MUTEX(spec_ctrl_mutex);

void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;

/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
{
	this_cpu_write(x86_spec_ctrl_current, val);
	wrmsrl(MSR_IA32_SPEC_CTRL, val);
}

/*
 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
 */
void update_spec_ctrl_cond(u64 val)
{
	if (this_cpu_read(x86_spec_ctrl_current) == val)
		return;

	this_cpu_write(x86_spec_ctrl_current, val);

	/*
	 * When KERNEL_IBRS this MSR is written on return-to-user, unless
	 * forced the update can be delayed until that time.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
		wrmsrl(MSR_IA32_SPEC_CTRL, val);
}

noinstr u64 spec_ctrl_current(void)
{
	return this_cpu_read(x86_spec_ctrl_current);
}
EXPORT_SYMBOL_GPL(spec_ctrl_current);

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

/*
 * Controls whether l1d flush based mitigations are enabled,
 * based on hw features and admin setting via boot parameter;
 * defaults to false.
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
EXPORT_SYMBOL_GPL(mmio_stale_data_clear);

void __init cpu_select_mitigations(void)
{
	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

		/*
		 * A previously running kernel (kexec) may have some controls
		 * turned ON. Clear them and let the mitigations setup below
		 * rediscover them based on configuration.
		 */
		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
	}

	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();
	/*
	 * retbleed_select_mitigation() relies on the state set by
	 * spectre_v2_select_mitigation(); specifically it wants to know about
	 * spectre_v2=ibrs.
	 */
	retbleed_select_mitigation();
	/*
	 * spectre_v2_user_select_mitigation() relies on the state set by
	 * retbleed_select_mitigation(); specifically the STIBP selection is
	 * forced for UNRET or IBPB.
	 */
	spectre_v2_user_select_mitigation();
	ssb_select_mitigation();
	l1tf_select_mitigation();
	md_clear_select_mitigation();
	srbds_select_mitigation();
	l1d_flush_select_mitigation();

	/*
	 * srso_select_mitigation() depends on and must run after
	 * retbleed_select_mitigation().
	 */
	srso_select_mitigation();
	gds_select_mitigation();
}

/*
 * NOTE: This function is *only* called for SVM, since Intel uses
 * MSR_IA32_SPEC_CTRL for SSBD.
 */
void
x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 guestval, hostval;
	struct thread_info *ti = current_thread_info();

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};

static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;

		static_branch_enable(&mds_user_clear);

		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);
	}
}

static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);
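/*
 * Example (illustrative): booting with "mds=full,nosmt" keeps the VERW
 * based CPU buffer clearing enabled and also requests SMT to be disabled,
 * which mds_select_mitigation() honours unless the CPU is only affected
 * by MSBDS.
 */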

#undef pr_fmt
#define pr_fmt(fmt)	"TAA: " fmt

enum taa_mitigations {
	TAA_MITIGATION_OFF,
	TAA_MITIGATION_UCODE_NEEDED,
	TAA_MITIGATION_VERW,
	TAA_MITIGATION_TSX_DISABLED,
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] = {
	[TAA_MITIGATION_OFF]		= "Vulnerable",
	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
};

static void __init taa_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/* TSX previously disabled by tsx=off */
	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
		return;
	}

	if (cpu_mitigations_off()) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/*
	 * TAA mitigation via VERW is turned off if both
	 * tsx_async_abort=off and mds=off are specified.
	 */
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    mds_mitigation == MDS_MITIGATION_OFF)
		return;

	if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
		taa_mitigation = TAA_MITIGATION_VERW;
	else
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
	 * A microcode update fixes this behavior to clear CPU buffers. It also
	 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
	 * ARCH_CAP_TSX_CTRL_MSR bit.
	 *
	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
	 * update is required.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * TSX is enabled, select alternate mitigation for TAA which is
	 * the same as MDS. Enable MDS static branch to clear CPU buffers.
	 *
	 * For guests that can't determine whether the correct microcode is
	 * present on host, enable the mitigation for UCODE_NEEDED as well.
	 */
	static_branch_enable(&mds_user_clear);

	if (taa_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}

static int __init tsx_async_abort_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		taa_mitigation = TAA_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		taa_mitigation = TAA_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_nosmt = true;
	}

	return 0;
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"MMIO Stale Data: " fmt

enum mmio_mitigations {
	MMIO_MITIGATION_OFF,
	MMIO_MITIGATION_UCODE_NEEDED,
	MMIO_MITIGATION_VERW,
};

/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
static bool mmio_nosmt __ro_after_init = false;

static const char * const mmio_strings[] = {
	[MMIO_MITIGATION_OFF]		= "Vulnerable",
	[MMIO_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[MMIO_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
};

static void __init mmio_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
	     boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
	     cpu_mitigations_off()) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
		return;
	}

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	ia32_cap = x86_read_arch_cap_msr();

	/*
	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
	 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
	 */
	if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
					      boot_cpu_has(X86_FEATURE_RTM)))
		static_branch_enable(&mds_user_clear);
	else
		static_branch_enable(&mmio_stale_data_clear);

	/*
	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
	 * be propagated to uncore buffers, clearing the Fill buffers on idle
	 * is required irrespective of SMT state.
	 */
	if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
		static_branch_enable(&mds_idle_clear);

	/*
	 * Check if the system has the right microcode.
	 *
	 * CPU Fill buffer clear mitigation is enumerated by either an explicit
	 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
	 * affected systems.
	 */
	if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
	    (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
	     boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
	     !(ia32_cap & ARCH_CAP_MDS_NO)))
		mmio_mitigation = MMIO_MITIGATION_VERW;
	else
		mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;

	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}

static int __init mmio_stale_data_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_nosmt = true;
	}

	return 0;
}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"" fmt

static void __init md_clear_update_mitigation(void)
{
	if (cpu_mitigations_off())
		return;

	if (!static_key_enabled(&mds_user_clear))
		goto out;

	/*
	 * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
	 * mitigation, if necessary.
	 */
	if (mds_mitigation == MDS_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MDS)) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_select_mitigation();
	}
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_select_mitigation();
	}
	if (mmio_mitigation == MMIO_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_select_mitigation();
	}
out:
	if (boot_cpu_has_bug(X86_BUG_MDS))
		pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_TAA))
		pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
	else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
		pr_info("MMIO Stale Data: Unknown: No mitigations\n");
}

static void __init md_clear_select_mitigation(void)
{
	mds_select_mitigation();
	taa_select_mitigation();
	mmio_select_mitigation();

	/*
	 * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
	 * and print their mitigation after MDS, TAA and MMIO Stale Data
	 * mitigation selection is done.
	 */
	md_clear_update_mitigation();
}

#undef pr_fmt
#define pr_fmt(fmt)	"SRBDS: " fmt

enum srbds_mitigations {
	SRBDS_MITIGATION_OFF,
	SRBDS_MITIGATION_UCODE_NEEDED,
	SRBDS_MITIGATION_FULL,
	SRBDS_MITIGATION_TSX_OFF,
	SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;

static const char * const srbds_strings[] = {
	[SRBDS_MITIGATION_OFF]		= "Vulnerable",
	[SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
	[SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

static bool srbds_off;
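
/*
 * Program the SRBDS control bit (RNGDS_MITG_DIS in MSR_IA32_MCU_OPT_CTRL)
 * on this CPU: set it when the mitigation is off or not needed because TSX
 * is disabled, clear it for the full microcode-based mitigation. Bails out
 * early when the CPU is unaffected, runs under a hypervisor, lacks
 * SRBDS_CTRL, or still needs a microcode update.
 */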
void update_srbds_msr(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
		return;

	/*
	 * A MDS_NO CPU for which SRBDS mitigation is not needed due to TSX
	 * being disabled and it hasn't received the SRBDS MSR microcode.
	 */
	if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		return;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	switch (srbds_mitigation) {
	case SRBDS_MITIGATION_OFF:
	case SRBDS_MITIGATION_TSX_OFF:
		mcu_ctrl |= RNGDS_MITG_DIS;
		break;
	case SRBDS_MITIGATION_FULL:
		mcu_ctrl &= ~RNGDS_MITG_DIS;
		break;
	default:
		break;
	}

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}

static void __init srbds_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	/*
	 * Check to see if this is one of the MDS_NO systems supporting TSX that
	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
	 * by Processor MMIO Stale Data vulnerability.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
	else if (cpu_mitigations_off() || srbds_off)
		srbds_mitigation = SRBDS_MITIGATION_OFF;

	update_srbds_msr();
	pr_info("%s\n", srbds_strings[srbds_mitigation]);
}

static int __init srbds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return 0;

	srbds_off = !strcmp(str, "off");
	return 0;
}
early_param("srbds", srbds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"L1D Flush : " fmt

enum l1d_flush_mitigations {
	L1D_FLUSH_OFF = 0,
	L1D_FLUSH_ON,
};

static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;

static void __init l1d_flush_select_mitigation(void)
{
	if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
		return;

	static_branch_enable(&switch_mm_cond_l1d_flush);
	pr_info("Conditional flush on switch_mm() enabled\n");
}

static int __init l1d_flush_parse_cmdline(char *str)
{
	if (!strcmp(str, "on"))
		l1d_flush_mitigation = L1D_FLUSH_ON;

	return 0;
}
early_param("l1d_flush", l1d_flush_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"GDS: " fmt

enum gds_mitigations {
	GDS_MITIGATION_OFF,
	GDS_MITIGATION_UCODE_NEEDED,
	GDS_MITIGATION_FORCE,
	GDS_MITIGATION_FULL,
	GDS_MITIGATION_FULL_LOCKED,
	GDS_MITIGATION_HYPERVISOR,
};

#if IS_ENABLED(CONFIG_GDS_FORCE_MITIGATION)
static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE;
#else
static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;
#endif

static const char * const gds_strings[] = {
	[GDS_MITIGATION_OFF]		= "Vulnerable",
	[GDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[GDS_MITIGATION_FORCE]		= "Mitigation: AVX disabled, no microcode",
	[GDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[GDS_MITIGATION_FULL_LOCKED]	= "Mitigation: Microcode (locked)",
	[GDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

bool gds_ucode_mitigated(void)
{
	return (gds_mitigation == GDS_MITIGATION_FULL ||
		gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
}
EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
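
/*
 * Apply the selected GDS mitigation to this CPU by programming GDS_MITG_DIS
 * in MSR_IA32_MCU_OPT_CTRL, then read the MSR back: the write is silently
 * ignored when the mitigation is locked on this CPU but was not on the boot
 * CPU, which the WARN_ON_ONCE() below catches.
 */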
void update_gds_msr(void)
{
	u64 mcu_ctrl_after;
	u64 mcu_ctrl;

	switch (gds_mitigation) {
	case GDS_MITIGATION_OFF:
		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl |= GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FULL_LOCKED:
		/*
		 * The LOCKED state comes from the boot CPU. APs might not have
		 * the same state. Make sure the mitigation is enabled on all
		 * CPUs.
		 */
	case GDS_MITIGATION_FULL:
		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl &= ~GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FORCE:
	case GDS_MITIGATION_UCODE_NEEDED:
	case GDS_MITIGATION_HYPERVISOR:
		return;
	}

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	/*
	 * Check to make sure that the WRMSR value was not ignored. Writes to
	 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
	 * processor was not.
	 */
	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
	WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
}

static void __init gds_select_mitigation(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		gds_mitigation = GDS_MITIGATION_HYPERVISOR;
		goto out;
	}

	if (cpu_mitigations_off())
		gds_mitigation = GDS_MITIGATION_OFF;
	/* Will verify below that mitigation _can_ be disabled */

	/* No microcode */
	if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
		if (gds_mitigation == GDS_MITIGATION_FORCE) {
			/*
			 * This only needs to be done on the boot CPU so do it
			 * here rather than in update_gds_msr()
			 */
			setup_clear_cpu_cap(X86_FEATURE_AVX);
			pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
		} else {
			gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
		}
		goto out;
	}

	/* Microcode has mitigation, use it */
	if (gds_mitigation == GDS_MITIGATION_FORCE)
		gds_mitigation = GDS_MITIGATION_FULL;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
	if (mcu_ctrl & GDS_MITG_LOCKED) {
		if (gds_mitigation == GDS_MITIGATION_OFF)
			pr_warn("Mitigation locked. Disable failed.\n");

		/*
		 * The mitigation is selected from the boot CPU. All other CPUs
		 * _should_ have the same state. If the boot CPU isn't locked
		 * but others are then update_gds_msr() will WARN() of the state
		 * mismatch. If the boot CPU is locked update_gds_msr() will
		 * ensure the other CPUs have the mitigation enabled.
		 */
		gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
	}

	update_gds_msr();
out:
	pr_info("%s\n", gds_strings[gds_mitigation]);
}

static int __init gds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return 0;

	if (!strcmp(str, "off"))
		gds_mitigation = GDS_MITIGATION_OFF;
	else if (!strcmp(str, "force"))
		gds_mitigation = GDS_MITIGATION_FORCE;

	return 0;
}
early_param("gather_data_sampling", gds_parse_cmdline);
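/*
 * Example (illustrative): booting with "gather_data_sampling=force" keeps a
 * mitigation in place even without the GDS_CTRL microcode by clearing
 * X86_FEATURE_AVX on the boot CPU, trading AVX support for protection.
 */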

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V1 : " fmt

enum spectre_v1_mitigation {
	SPECTRE_V1_MITIGATION_NONE,
	SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
	SPECTRE_V1_MITIGATION_AUTO;

static const char * const spectre_v1_strings[] = {
	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};

/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return false;

	/*
	 * On CPUs which are vulnerable to Meltdown, SMAP does not
	 * prevent speculative access to user data in the L1 cache.
	 * Consider SMAP to be non-functional as a mitigation on these
	 * CPUs.
	 */
	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
		return false;

	return true;
}

static void __init spectre_v1_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
		return;
	}

	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
		/*
		 * With Spectre v1, a user can speculatively control either
		 * path of a conditional swapgs with a user-controlled GS
		 * value. The mitigation is to add lfences to both code paths.
		 *
		 * If FSGSBASE is enabled, the user can put a kernel address in
		 * GS, in which case SMAP provides no protection.
		 *
		 * If FSGSBASE is disabled, the user can only put a user space
		 * address in GS. That makes an attack harder, but still
		 * possible if there's no SMAP protection.
		 */
		if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		    !smap_works_speculatively()) {
			/*
			 * Mitigation can be provided from SWAPGS itself or
			 * PTI as the CR3 write in the Meltdown mitigation
			 * is serializing.
			 *
			 * If neither is there, mitigate with an LFENCE to
			 * stop speculation through swapgs.
			 */
			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
			    !boot_cpu_has(X86_FEATURE_PTI))
				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

			/*
			 * Enable lfences in the kernel entry (non-swapgs)
			 * paths, to prevent user entry from speculatively
			 * skipping swapgs.
			 */
			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
		}
	}

	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}

static int __init nospectre_v1_cmdline(char *str)
{
	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
	return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);

enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;

#undef pr_fmt
#define pr_fmt(fmt)	"RETBleed: " fmt

enum retbleed_mitigation {
	RETBLEED_MITIGATION_NONE,
	RETBLEED_MITIGATION_UNRET,
	RETBLEED_MITIGATION_IBPB,
	RETBLEED_MITIGATION_IBRS,
	RETBLEED_MITIGATION_EIBRS,
	RETBLEED_MITIGATION_STUFF,
};

enum retbleed_mitigation_cmd {
	RETBLEED_CMD_OFF,
	RETBLEED_CMD_AUTO,
	RETBLEED_CMD_UNRET,
	RETBLEED_CMD_IBPB,
	RETBLEED_CMD_STUFF,
};

static const char * const retbleed_strings[] = {
	[RETBLEED_MITIGATION_NONE]	= "Vulnerable",
	[RETBLEED_MITIGATION_UNRET]	= "Mitigation: untrained return thunk",
	[RETBLEED_MITIGATION_IBPB]	= "Mitigation: IBPB",
	[RETBLEED_MITIGATION_IBRS]	= "Mitigation: IBRS",
	[RETBLEED_MITIGATION_EIBRS]	= "Mitigation: Enhanced IBRS",
	[RETBLEED_MITIGATION_STUFF]	= "Mitigation: Stuffing",
};

static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
	RETBLEED_MITIGATION_NONE;
static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
	RETBLEED_CMD_AUTO;

static int __ro_after_init retbleed_nosmt = false;

static int __init retbleed_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	while (str) {
		char *next = strchr(str, ',');
		if (next) {
			*next = 0;
			next++;
		}

		if (!strcmp(str, "off")) {
			retbleed_cmd = RETBLEED_CMD_OFF;
		} else if (!strcmp(str, "auto")) {
			retbleed_cmd = RETBLEED_CMD_AUTO;
		} else if (!strcmp(str, "unret")) {
			retbleed_cmd = RETBLEED_CMD_UNRET;
		} else if (!strcmp(str, "ibpb")) {
			retbleed_cmd = RETBLEED_CMD_IBPB;
		} else if (!strcmp(str, "stuff")) {
			retbleed_cmd = RETBLEED_CMD_STUFF;
		} else if (!strcmp(str, "nosmt")) {
			retbleed_nosmt = true;
		} else if (!strcmp(str, "force")) {
			setup_force_cpu_bug(X86_BUG_RETBLEED);
		} else {
			pr_err("Ignoring unknown retbleed option (%s).", str);
		}

		str = next;
	}

	return 0;
}
early_param("retbleed", retbleed_parse_cmdline);
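/*
 * Example (illustrative): "retbleed=ibpb,nosmt" selects the IBPB-based
 * mitigation and requests SMT to be disabled; the comma-separated options
 * are parsed above and the nosmt request takes effect in
 * retbleed_select_mitigation() below when STIBP is not available.
 */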

#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"

static void __init retbleed_select_mitigation(void)
{
	bool mitigate_smt = false;

	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
		return;

	switch (retbleed_cmd) {
	case RETBLEED_CMD_OFF:
		return;

	case RETBLEED_CMD_UNRET:
		if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
		} else {
			pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
			goto do_cmd_auto;
		}
		break;

	case RETBLEED_CMD_IBPB:
		if (!boot_cpu_has(X86_FEATURE_IBPB)) {
			pr_err("WARNING: CPU does not support IBPB.\n");
			goto do_cmd_auto;
		} else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		} else {
			pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
			goto do_cmd_auto;
		}
		break;

	case RETBLEED_CMD_STUFF:
		if (IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING) &&
		    spectre_v2_enabled == SPECTRE_V2_RETPOLINE) {
			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;

		} else {
			if (IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING))
				pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
			else
				pr_err("WARNING: kernel not compiled with CALL_DEPTH_TRACKING.\n");

			goto do_cmd_auto;
		}
		break;

do_cmd_auto:
	case RETBLEED_CMD_AUTO:
	default:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
			if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
				retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
			else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB))
				retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		}

		/*
		 * The Intel mitigation (IBRS or eIBRS) was already selected in
		 * spectre_v2_select_mitigation(). 'retbleed_mitigation' will
		 * be set accordingly below.
		 */

		break;
	}

	switch (retbleed_mitigation) {
	case RETBLEED_MITIGATION_UNRET:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_UNRET);

		if (IS_ENABLED(CONFIG_RETHUNK))
			x86_return_thunk = retbleed_return_thunk;

		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			pr_err(RETBLEED_UNTRAIN_MSG);

		mitigate_smt = true;
		break;

	case RETBLEED_MITIGATION_IBPB:
		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
		mitigate_smt = true;
		break;

	case RETBLEED_MITIGATION_STUFF:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
		x86_set_skl_return_thunk();
		break;

	default:
		break;
	}

	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
	    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
		cpu_smt_disable(false);

	/*
	 * Let IBRS trump all on Intel without affecting the effects of the
	 * retbleed= cmdline option except for call depth based stuffing
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
		switch (spectre_v2_enabled) {
		case SPECTRE_V2_IBRS:
			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
			break;
		case SPECTRE_V2_EIBRS:
		case SPECTRE_V2_EIBRS_RETPOLINE:
		case SPECTRE_V2_EIBRS_LFENCE:
			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
			break;
		default:
			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
				pr_err(RETBLEED_INTEL_MSG);
		}
	}

	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
	SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
" - vulnerable module loaded" : ""; 1122 } 1123 #else 1124 static inline const char *spectre_v2_module_string(void) { return ""; } 1125 #endif 1126 1127 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n" 1128 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" 1129 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n" 1130 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n" 1131 1132 #ifdef CONFIG_BPF_SYSCALL 1133 void unpriv_ebpf_notify(int new_state) 1134 { 1135 if (new_state) 1136 return; 1137 1138 /* Unprivileged eBPF is enabled */ 1139 1140 switch (spectre_v2_enabled) { 1141 case SPECTRE_V2_EIBRS: 1142 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); 1143 break; 1144 case SPECTRE_V2_EIBRS_LFENCE: 1145 if (sched_smt_active()) 1146 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); 1147 break; 1148 default: 1149 break; 1150 } 1151 } 1152 #endif 1153 1154 static inline bool match_option(const char *arg, int arglen, const char *opt) 1155 { 1156 int len = strlen(opt); 1157 1158 return len == arglen && !strncmp(arg, opt, len); 1159 } 1160 1161 /* The kernel command line selection for spectre v2 */ 1162 enum spectre_v2_mitigation_cmd { 1163 SPECTRE_V2_CMD_NONE, 1164 SPECTRE_V2_CMD_AUTO, 1165 SPECTRE_V2_CMD_FORCE, 1166 SPECTRE_V2_CMD_RETPOLINE, 1167 SPECTRE_V2_CMD_RETPOLINE_GENERIC, 1168 SPECTRE_V2_CMD_RETPOLINE_LFENCE, 1169 SPECTRE_V2_CMD_EIBRS, 1170 SPECTRE_V2_CMD_EIBRS_RETPOLINE, 1171 SPECTRE_V2_CMD_EIBRS_LFENCE, 1172 SPECTRE_V2_CMD_IBRS, 1173 }; 1174 1175 enum spectre_v2_user_cmd { 1176 SPECTRE_V2_USER_CMD_NONE, 1177 SPECTRE_V2_USER_CMD_AUTO, 1178 SPECTRE_V2_USER_CMD_FORCE, 1179 SPECTRE_V2_USER_CMD_PRCTL, 1180 SPECTRE_V2_USER_CMD_PRCTL_IBPB, 1181 SPECTRE_V2_USER_CMD_SECCOMP, 1182 SPECTRE_V2_USER_CMD_SECCOMP_IBPB, 1183 }; 1184 1185 static const char * const spectre_v2_user_strings[] = { 1186 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", 1187 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", 1188 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", 1189 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", 1190 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", 1191 }; 1192 1193 static const struct { 1194 const char *option; 1195 enum spectre_v2_user_cmd cmd; 1196 bool secure; 1197 } v2_user_options[] __initconst = { 1198 { "auto", SPECTRE_V2_USER_CMD_AUTO, false }, 1199 { "off", SPECTRE_V2_USER_CMD_NONE, false }, 1200 { "on", SPECTRE_V2_USER_CMD_FORCE, true }, 1201 { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false }, 1202 { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false }, 1203 { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false }, 1204 { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false }, 1205 }; 1206 1207 static void __init spec_v2_user_print_cond(const char *reason, bool secure) 1208 { 1209 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) 1210 pr_info("spectre_v2_user=%s forced on command line.\n", reason); 1211 } 1212 1213 static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd; 1214 1215 static enum spectre_v2_user_cmd __init 1216 spectre_v2_parse_user_cmdline(void) 1217 { 1218 char arg[20]; 1219 int ret, i; 1220 1221 switch 

static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
{
	return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
}

static void __init
spectre_v2_user_select_mitigation(void)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline();
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		spectre_v2_user_ibpb = mode;
		switch (cmd) {
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		default:
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/*
	 * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP
	 * is not required.
	 *
	 * Intel's Enhanced IBRS also protects against cross-thread branch target
	 * injection in user-mode as the IBRS bit remains always set which
	 * implicitly enables cross-thread protections. However, in legacy IBRS
	 * mode, the IBRS bit is set only on kernel entry and cleared on return
	 * to userspace. AMD Automatic IBRS also does not protect userspace.
	 * These modes therefore disable the implicit cross-thread protection,
	 * so allow for STIBP to be selected in those cases.
	 */
	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
	    !smt_possible ||
	    (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	     !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
		return;

	/*
	 * At this point, an STIBP mode other than "off" has been set.
	 * If STIBP support is not being forced, check if STIBP always-on
	 * is preferred.
	 */
	if (mode != SPECTRE_V2_USER_STRICT &&
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;

	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
		if (mode != SPECTRE_V2_USER_STRICT &&
		    mode != SPECTRE_V2_USER_STRICT_PREFERRED)
			pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
	}

	spectre_v2_user_stibp = mode;

set_mode:
	pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced / Automatic IBRS",
	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced / Automatic IBRS + LFENCE",
	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced / Automatic IBRS + Retpolines",
	[SPECTRE_V2_IBRS]			= "Mitigation: IBRS",
};

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] __initconst = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,lfence",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "eibrs",		SPECTRE_V2_CMD_EIBRS,		  false },
	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
	{ "ibrs",		SPECTRE_V2_CMD_IBRS,		  false },
};

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
	    cpu_mitigations_off())
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_EIBRS ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
		pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
	    !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
		pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
		pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) {
		pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
}

static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
{
	if (!IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("Kernel not compiled with retpoline; no mitigation available!");
		return SPECTRE_V2_NONE;
	}

	return SPECTRE_V2_RETPOLINE;
}

/* Disable in-kernel use of non-RSB RET predictors */
static void __init spec_ctrl_disable_kernel_rrsba(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
		return;

	ia32_cap = x86_read_arch_cap_msr();

	if (ia32_cap & ARCH_CAP_RRSBA) {
		x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
		update_spec_ctrl(x86_spec_ctrl_base);
	}
}

static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
{
	/*
	 * Similar to context switches, there are two types of RSB attacks
	 * after VM exit:
	 *
	 * 1) RSB underflow
	 *
	 * 2) Poisoned RSB entry
	 *
	 * When retpoline is enabled, both are mitigated by filling/clearing
	 * the RSB.
	 *
	 * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
	 * prediction isolation protections, RSB still needs to be cleared
	 * because of #2. Note that SMEP provides no protection here, unlike
	 * user-space-poisoned RSB entries.
	 *
	 * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
	 * bug is present then a LITE version of RSB protection is required,
	 * just a single call needs to retire before a RET is executed.
	 */
	switch (mode) {
	case SPECTRE_V2_NONE:
		return;

	case SPECTRE_V2_EIBRS_LFENCE:
	case SPECTRE_V2_EIBRS:
		if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
			setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
			pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
		}
		return;

	case SPECTRE_V2_EIBRS_RETPOLINE:
	case SPECTRE_V2_RETPOLINE:
	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_IBRS:
		setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
		pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
		return;
	}

	pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
	dump_stack();
}

static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_EIBRS;
			break;
		}

		if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) &&
		    boot_cpu_has_bug(X86_BUG_RETBLEED) &&
		    retbleed_cmd != RETBLEED_CMD_OFF &&
		    retbleed_cmd != RETBLEED_CMD_STUFF &&
		    boot_cpu_has(X86_FEATURE_IBRS) &&
		    boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
			mode = SPECTRE_V2_IBRS;
			break;
		}

		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
		pr_err(SPECTRE_V2_LFENCE_MSG);
		mode = SPECTRE_V2_LFENCE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		mode = SPECTRE_V2_RETPOLINE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE:
		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_IBRS:
		mode = SPECTRE_V2_IBRS;
		break;

	case SPECTRE_V2_CMD_EIBRS:
		mode = SPECTRE_V2_EIBRS;
		break;

	case SPECTRE_V2_CMD_EIBRS_LFENCE:
		mode = SPECTRE_V2_EIBRS_LFENCE;
		break;

	case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
		mode = SPECTRE_V2_EIBRS_RETPOLINE;
		break;
	}

	if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);

	if (spectre_v2_in_ibrs_mode(mode)) {
		if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
			msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
			update_spec_ctrl(x86_spec_ctrl_base);
		}
	}

	switch (mode) {
	case SPECTRE_V2_NONE:
	case SPECTRE_V2_EIBRS:
		break;

	case SPECTRE_V2_IBRS:
		setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
			pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
		break;

	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_EIBRS_LFENCE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
		fallthrough;

	case SPECTRE_V2_RETPOLINE:
	case SPECTRE_V2_EIBRS_RETPOLINE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
		break;
	}

	/*
	 * Disable alternate RSB predictions in kernel when indirect CALLs and
	 * JMPs get protection against BHI and Intramode-BTI, but RET
	 * prediction from a non-RSB predictor is still a risk.
	 */
	if (mode == SPECTRE_V2_EIBRS_LFENCE ||
	    mode == SPECTRE_V2_EIBRS_RETPOLINE ||
	    mode == SPECTRE_V2_RETPOLINE)
		spec_ctrl_disable_kernel_rrsba();

	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If Spectre v2 protection has been enabled, fill the RSB during a
	 * context switch. In general there are two types of RSB attacks
	 * across context switches, for which the CALLs/RETs may be unbalanced.
	 *
	 * 1) RSB underflow
	 *
	 *    Some Intel parts have "bottomless RSB". When the RSB is empty,
	 *    speculated return targets may come from the branch predictor,
	 *    which could have a user-poisoned BTB or BHB entry.
	 *
	 *    AMD has it even worse: *all* returns are speculated from the BTB,
	 *    regardless of the state of the RSB.
	 *
	 *    When IBRS or eIBRS is enabled, the "user -> kernel" attack
	 *    scenario is mitigated by the IBRS branch prediction isolation
	 *    properties, so the RSB buffer filling wouldn't be necessary to
	 *    protect against this type of attack.
	 *
	 *    The "user -> user" attack scenario is mitigated by RSB filling.
	 *
	 * 2) Poisoned RSB entry
	 *
	 *    If the 'next' in-kernel return stack is shorter than 'prev',
	 *    'next' could be tricked into speculating with a user-poisoned RSB
	 *    entry.
	 *
	 *    The "user -> kernel" attack scenario is mitigated by SMEP and
	 *    eIBRS.
	 *
	 *    The "user -> user" scenario, also known as SpectreBHB, requires
	 *    RSB clearing.
	 *
	 * So to mitigate all cases, unconditionally fill RSB on context
	 * switches.
	 *
	 * FIXME: Is this pointless for retbleed-affected AMD?
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	spectre_v2_determine_rsb_fill_type_at_vmexit(mode);

	/*
	 * Retpoline protects the kernel, but doesn't protect firmware. IBRS
	 * and Enhanced IBRS protect firmware too, so enable IBRS around
	 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
	 * otherwise enabled.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, kernel might unintentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
	    boot_cpu_has(X86_FEATURE_IBPB) &&
	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {

		if (retbleed_cmd != RETBLEED_CMD_IBPB) {
			setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
			pr_info("Enabling Speculation Barrier for firmware calls\n");
		}

	} else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_cmd = cmd;
}

static void update_stibp_msr(void * __unused)
{
	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
	update_spec_ctrl(val);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask == x86_spec_ctrl_base)
		return;

	pr_info("Update user space SMT mitigation: STIBP %s\n",
		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
	x86_spec_ctrl_base = mask;
	on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
	if (sched_smt_active())
		static_branch_enable(&switch_to_cond_stibp);
	else
		static_branch_disable(&switch_to_cond_stibp);
}

#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	u64 ia32_cap = x86_read_arch_cap_msr();

	/*
	 * Enable the idle clearing if SMT is active on CPUs which are
	 * affected only by MSBDS and not any other MDS variant.
	 *
	 * The other variants cannot be mitigated when SMT is enabled, so
	 * clearing the buffers on idle just to prevent the Store Buffer
	 * repartitioning leak would be a window dressing exercise.
	 */
	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
		return;

	if (sched_smt_active()) {
		static_branch_enable(&mds_idle_clear);
	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
		   (ia32_cap & ARCH_CAP_FBSDP_NO)) {
		static_branch_disable(&mds_idle_clear);
	}
}

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
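
/*
 * Re-evaluate the SMT-dependent parts of the mitigations below: switch
 * STIBP between strict and conditional modes and warn once about MDS, TAA
 * and MMIO Stale Data when SMT is active while a VERW-based mitigation is
 * in use.
 */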
#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"

void cpu_bugs_smt_update(void)
{
	mutex_lock(&spec_ctrl_mutex);

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	switch (taa_mitigation) {
	case TAA_MITIGATION_VERW:
	case TAA_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(TAA_MSG_SMT);
		break;
	case TAA_MITIGATION_TSX_DISABLED:
	case TAA_MITIGATION_OFF:
		break;
	}

	switch (mmio_mitigation) {
	case MMIO_MITIGATION_VERW:
	case MMIO_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(MMIO_MSG_SMT);
		break;
	case MMIO_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}
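/*
 * cpu_bugs_smt_update() runs whenever the scheduler's SMT state changes,
 * for instance when an administrator toggles SMT at runtime.  A minimal
 * sketch of how that toggle is usually driven from user space, assuming
 * the standard sysfs location /sys/devices/system/cpu/smt/control (error
 * handling trimmed):
 *
 *	#include <stdio.h>
 *
 *	int smt_off(void)
 *	{
 *		FILE *f = fopen("/sys/devices/system/cpu/smt/control", "w");
 *
 *		if (!f)
 *			return -1;
 *		fputs("off", f);	// accepted values include "on", "off", "forceoff"
 *		return fclose(f);
 *	}
 *
 * Writing "off" offlines the sibling threads, which in turn triggers the
 * STIBP/MDS/TAA/MMIO re-evaluation above.
 */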
#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[]  __initconst = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off()) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			update_spec_ctrl(x86_spec_ctrl_base);
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}
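/*
 * For reference, the selection above is driven from the kernel command
 * line; representative invocations (values taken from
 * ssb_mitigation_options[] above) are:
 *
 *	spec_store_bypass_disable=seccomp
 *	spec_store_bypass_disable=prctl
 *	spec_store_bypass_disable=off
 *
 * or "nospec_store_bypass_disable" to leave Speculative Store Bypass
 * untouched altogether.
 */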
#undef pr_fmt
#define pr_fmt(fmt) "Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
	/* Force the update of the real TIF bits */
	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

	/*
	 * Immediately update the speculation control MSRs for the current
	 * task, but for a non-current task delay setting the CPU
	 * mitigation until it is scheduled next.
	 *
	 * This can only happen for SECCOMP mitigation. For PRCTL it's
	 * always the current task.
	 */
	if (tsk == current)
		speculation_ctrl_update_current();
}

static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return -EPERM;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	case PR_SPEC_DISABLE:
		clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	default:
		return -ERANGE;
	}
}

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

static bool is_spec_ib_user_controlled(void)
{
	return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
	       spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
	       spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
	       spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
}

static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return 0;

		/*
		 * With strict mode for both IBPB and STIBP, the instruction
		 * code paths avoid checking this task flag and instead,
		 * unconditionally run the instruction. However, STIBP and IBPB
		 * are independent and either can be set to conditionally
		 * enabled regardless of the mode of the other.
		 *
		 * If either is set to conditional, allow the task flag to be
		 * updated, unless it was force-disabled by a previous prctl
		 * call. Currently, this is possible on an AMD CPU which has the
		 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
		 * kernel is booted with 'spectre_v2_user=seccomp', then
		 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
		 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
		 */
		if (!is_spec_ib_user_controlled() ||
		    task_spec_ib_force_disable(task))
			return -EPERM;

		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * Indirect branch speculation is always allowed when
		 * mitigation is force disabled.
		 */
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return -EPERM;

		if (!is_spec_ib_user_controlled())
			return 0;

		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		if (task == current)
			indirect_branch_prediction_barrier();
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_set(task, ctrl);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int l1d_flush_prctl_get(struct task_struct *task)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return PR_SPEC_FORCE_DISABLE;

	if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	else
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
}

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_noexec(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

static int ib_prctl_get(struct task_struct *task)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return PR_SPEC_NOT_AFFECTED;

	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
	    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
		return PR_SPEC_ENABLE;
	else if (is_spec_ib_user_controlled()) {
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
		return PR_SPEC_DISABLE;
	else
		return PR_SPEC_NOT_AFFECTED;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_get(task);
	default:
		return -ENODEV;
	}
}
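/*
 * These hooks back the PR_{GET,SET}_SPECULATION_CTRL prctl interface.  A
 * minimal user space sketch, assuming the constants from <linux/prctl.h>
 * (error handling trimmed; not a complete policy example):
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		// Opt this task out of Speculative Store Bypass.
 *		prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *		      PR_SPEC_DISABLE, 0, 0);
 *
 *		// Query the resulting state; the return value is a bitmask
 *		// such as PR_SPEC_PRCTL | PR_SPEC_DISABLE.
 *		printf("ssb: %d, ib: %d\n",
 *		       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0),
 *		       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0));
 *		return 0;
 *	}
 *
 * The set path is only meaningful when the corresponding mitigation was
 * selected in prctl or seccomp mode; see the error returns in
 * ssb_prctl_set() and ib_prctl_set() above.
 */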
void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		update_spec_ctrl(x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

bool itlb_multihit_kvm_mitigation;
EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support a 44-bit physical address space internally in the
 * cache, but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the topmost address bit for the inversion of
 * non-present PTEs. When the installed memory reaches into the topmost
 * address bit due to memory holes, which has been observed on machines
 * reporting 36 physical address bits with 32G of RAM installed, the
 * mitigation range check in l1tf_select_mitigation() triggers. This is a
 * false positive because the mitigation is still possible, since the cache
 * uses 44 bits internally. Use the cache bits instead of the reported
 * physical bits and adjust them on the affected machines to 44 bits if the
 * reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL:
	case INTEL_FAM6_HASWELL_L:
	case INTEL_FAM6_HASWELL_G:
	case INTEL_FAM6_BROADWELL:
	case INTEL_FAM6_BROADWELL_G:
	case INTEL_FAM6_SKYLAKE_L:
	case INTEL_FAM6_SKYLAKE:
	case INTEL_FAM6_KABYLAKE_L:
	case INTEL_FAM6_KABYLAKE:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}

static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	if (cpu_mitigations_off())
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (cpu_mitigations_auto_nosmt())
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
	    e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
				half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}
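/*
 * Worked example for the range check above (illustrative numbers only):
 * half_pa is l1tf_pfn_limit() shifted back into bytes, i.e. half of the
 * physical address space implied by x86_cache_bits.  With the 44-bit
 * override from override_cache_bits() that is 2^43 bytes = 8 TiB, so the
 * e820 check only fires on truly huge machines.  Without the override, a
 * CPU reporting 36 physical address bits would give 2^35 bytes = 32 GiB,
 * which is exactly why boxes with 32G of RAM and memory holes used to hit
 * the false positive described above.
 */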
static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Return Stack Overflow: " fmt

enum srso_mitigation {
	SRSO_MITIGATION_NONE,
	SRSO_MITIGATION_UCODE_NEEDED,
	SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
	SRSO_MITIGATION_MICROCODE,
	SRSO_MITIGATION_SAFE_RET,
	SRSO_MITIGATION_IBPB,
	SRSO_MITIGATION_IBPB_ON_VMEXIT,
};

enum srso_mitigation_cmd {
	SRSO_CMD_OFF,
	SRSO_CMD_MICROCODE,
	SRSO_CMD_SAFE_RET,
	SRSO_CMD_IBPB,
	SRSO_CMD_IBPB_ON_VMEXIT,
};

static const char * const srso_strings[] = {
	[SRSO_MITIGATION_NONE]			= "Vulnerable",
	[SRSO_MITIGATION_UCODE_NEEDED]		= "Vulnerable: No microcode",
	[SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED]	= "Vulnerable: Safe RET, no microcode",
	[SRSO_MITIGATION_MICROCODE]		= "Vulnerable: Microcode, no safe RET",
	[SRSO_MITIGATION_SAFE_RET]		= "Mitigation: Safe RET",
	[SRSO_MITIGATION_IBPB]			= "Mitigation: IBPB",
	[SRSO_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT only"
};

static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET;

static int __init srso_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		srso_cmd = SRSO_CMD_OFF;
	else if (!strcmp(str, "microcode"))
		srso_cmd = SRSO_CMD_MICROCODE;
	else if (!strcmp(str, "safe-ret"))
		srso_cmd = SRSO_CMD_SAFE_RET;
	else if (!strcmp(str, "ibpb"))
		srso_cmd = SRSO_CMD_IBPB;
	else if (!strcmp(str, "ibpb-vmexit"))
		srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT;
	else
		pr_err("Ignoring unknown SRSO option (%s).", str);

	return 0;
}
early_param("spec_rstack_overflow", srso_parse_cmdline);

#define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."

static void __init srso_select_mitigation(void)
{
	bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);

	if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
		goto pred_cmd;

	if (has_microcode) {
		/*
		 * Zen1/2 with SMT off aren't vulnerable after the right
		 * IBPB microcode has been applied.
		 */
		if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
			return;
		}

		if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
			srso_mitigation = SRSO_MITIGATION_IBPB;
			goto out;
		}
	} else {
		pr_warn("IBPB-extending microcode not applied!\n");
		pr_warn(SRSO_NOTICE);

		/* may be overwritten by SRSO_CMD_SAFE_RET below */
		srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
	}

	switch (srso_cmd) {
	case SRSO_CMD_OFF:
		goto pred_cmd;

	case SRSO_CMD_MICROCODE:
		if (has_microcode) {
			srso_mitigation = SRSO_MITIGATION_MICROCODE;
			pr_warn(SRSO_NOTICE);
		}
		break;

	case SRSO_CMD_SAFE_RET:
		if (IS_ENABLED(CONFIG_CPU_SRSO)) {
			/*
			 * Enable the return thunk for generated code
			 * like ftrace, static_call, etc.
			 */
			setup_force_cpu_cap(X86_FEATURE_RETHUNK);
			setup_force_cpu_cap(X86_FEATURE_UNRET);

			if (boot_cpu_data.x86 == 0x19) {
				setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
				x86_return_thunk = srso_alias_return_thunk;
			} else {
				setup_force_cpu_cap(X86_FEATURE_SRSO);
				x86_return_thunk = srso_return_thunk;
			}
			if (has_microcode)
				srso_mitigation = SRSO_MITIGATION_SAFE_RET;
			else
				srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
		} else {
			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
			goto pred_cmd;
		}
		break;

	case SRSO_CMD_IBPB:
		if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
			if (has_microcode) {
				setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
				srso_mitigation = SRSO_MITIGATION_IBPB;
			}
		} else {
			pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
			goto pred_cmd;
		}
		break;

	case SRSO_CMD_IBPB_ON_VMEXIT:
		if (IS_ENABLED(CONFIG_CPU_SRSO)) {
			if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
				setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
				srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
			}
		} else {
			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
			goto pred_cmd;
		}
		break;

	default:
		break;
	}

out:
	pr_info("%s\n", srso_strings[srso_mitigation]);

pred_cmd:
	if ((!boot_cpu_has_bug(X86_BUG_SRSO) || srso_cmd == SRSO_CMD_OFF) &&
	     boot_cpu_has(X86_FEATURE_SBPB))
		x86_pred_cmd = PRED_CMD_SBPB;
}

#undef pr_fmt
#define pr_fmt(fmt) fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
	     sched_smt_active())) {
		return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
				  l1tf_vmx_states[l1tf_vmx_mitigation]);
	}

"%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, 2544 l1tf_vmx_states[l1tf_vmx_mitigation], 2545 sched_smt_active() ? "vulnerable" : "disabled"); 2546 } 2547 2548 static ssize_t itlb_multihit_show_state(char *buf) 2549 { 2550 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || 2551 !boot_cpu_has(X86_FEATURE_VMX)) 2552 return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n"); 2553 else if (!(cr4_read_shadow() & X86_CR4_VMXE)) 2554 return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n"); 2555 else if (itlb_multihit_kvm_mitigation) 2556 return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n"); 2557 else 2558 return sysfs_emit(buf, "KVM: Vulnerable\n"); 2559 } 2560 #else 2561 static ssize_t l1tf_show_state(char *buf) 2562 { 2563 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); 2564 } 2565 2566 static ssize_t itlb_multihit_show_state(char *buf) 2567 { 2568 return sysfs_emit(buf, "Processor vulnerable\n"); 2569 } 2570 #endif 2571 2572 static ssize_t mds_show_state(char *buf) 2573 { 2574 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 2575 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 2576 mds_strings[mds_mitigation]); 2577 } 2578 2579 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { 2580 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], 2581 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : 2582 sched_smt_active() ? "mitigated" : "disabled")); 2583 } 2584 2585 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], 2586 sched_smt_active() ? "vulnerable" : "disabled"); 2587 } 2588 2589 static ssize_t tsx_async_abort_show_state(char *buf) 2590 { 2591 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || 2592 (taa_mitigation == TAA_MITIGATION_OFF)) 2593 return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]); 2594 2595 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 2596 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 2597 taa_strings[taa_mitigation]); 2598 } 2599 2600 return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], 2601 sched_smt_active() ? "vulnerable" : "disabled"); 2602 } 2603 2604 static ssize_t mmio_stale_data_show_state(char *buf) 2605 { 2606 if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) 2607 return sysfs_emit(buf, "Unknown: No mitigations\n"); 2608 2609 if (mmio_mitigation == MMIO_MITIGATION_OFF) 2610 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); 2611 2612 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 2613 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 2614 mmio_strings[mmio_mitigation]); 2615 } 2616 2617 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], 2618 sched_smt_active() ? 
"vulnerable" : "disabled"); 2619 } 2620 2621 static char *stibp_state(void) 2622 { 2623 if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && 2624 !boot_cpu_has(X86_FEATURE_AUTOIBRS)) 2625 return ""; 2626 2627 switch (spectre_v2_user_stibp) { 2628 case SPECTRE_V2_USER_NONE: 2629 return ", STIBP: disabled"; 2630 case SPECTRE_V2_USER_STRICT: 2631 return ", STIBP: forced"; 2632 case SPECTRE_V2_USER_STRICT_PREFERRED: 2633 return ", STIBP: always-on"; 2634 case SPECTRE_V2_USER_PRCTL: 2635 case SPECTRE_V2_USER_SECCOMP: 2636 if (static_key_enabled(&switch_to_cond_stibp)) 2637 return ", STIBP: conditional"; 2638 } 2639 return ""; 2640 } 2641 2642 static char *ibpb_state(void) 2643 { 2644 if (boot_cpu_has(X86_FEATURE_IBPB)) { 2645 if (static_key_enabled(&switch_mm_always_ibpb)) 2646 return ", IBPB: always-on"; 2647 if (static_key_enabled(&switch_mm_cond_ibpb)) 2648 return ", IBPB: conditional"; 2649 return ", IBPB: disabled"; 2650 } 2651 return ""; 2652 } 2653 2654 static char *pbrsb_eibrs_state(void) 2655 { 2656 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { 2657 if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || 2658 boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) 2659 return ", PBRSB-eIBRS: SW sequence"; 2660 else 2661 return ", PBRSB-eIBRS: Vulnerable"; 2662 } else { 2663 return ", PBRSB-eIBRS: Not affected"; 2664 } 2665 } 2666 2667 static ssize_t spectre_v2_show_state(char *buf) 2668 { 2669 if (spectre_v2_enabled == SPECTRE_V2_LFENCE) 2670 return sysfs_emit(buf, "Vulnerable: LFENCE\n"); 2671 2672 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) 2673 return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); 2674 2675 if (sched_smt_active() && unprivileged_ebpf_enabled() && 2676 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) 2677 return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); 2678 2679 return sysfs_emit(buf, "%s%s%s%s%s%s%s\n", 2680 spectre_v2_strings[spectre_v2_enabled], 2681 ibpb_state(), 2682 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", 2683 stibp_state(), 2684 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", 2685 pbrsb_eibrs_state(), 2686 spectre_v2_module_string()); 2687 } 2688 2689 static ssize_t srbds_show_state(char *buf) 2690 { 2691 return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]); 2692 } 2693 2694 static ssize_t retbleed_show_state(char *buf) 2695 { 2696 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || 2697 retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { 2698 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && 2699 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) 2700 return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); 2701 2702 return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation], 2703 !sched_smt_active() ? "disabled" : 2704 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || 2705 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? 
2706 "enabled with STIBP protection" : "vulnerable"); 2707 } 2708 2709 return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]); 2710 } 2711 2712 static ssize_t srso_show_state(char *buf) 2713 { 2714 if (boot_cpu_has(X86_FEATURE_SRSO_NO)) 2715 return sysfs_emit(buf, "Mitigation: SMT disabled\n"); 2716 2717 return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]); 2718 } 2719 2720 static ssize_t gds_show_state(char *buf) 2721 { 2722 return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); 2723 } 2724 2725 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, 2726 char *buf, unsigned int bug) 2727 { 2728 if (!boot_cpu_has_bug(bug)) 2729 return sysfs_emit(buf, "Not affected\n"); 2730 2731 switch (bug) { 2732 case X86_BUG_CPU_MELTDOWN: 2733 if (boot_cpu_has(X86_FEATURE_PTI)) 2734 return sysfs_emit(buf, "Mitigation: PTI\n"); 2735 2736 if (hypervisor_is_type(X86_HYPER_XEN_PV)) 2737 return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); 2738 2739 break; 2740 2741 case X86_BUG_SPECTRE_V1: 2742 return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); 2743 2744 case X86_BUG_SPECTRE_V2: 2745 return spectre_v2_show_state(buf); 2746 2747 case X86_BUG_SPEC_STORE_BYPASS: 2748 return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]); 2749 2750 case X86_BUG_L1TF: 2751 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) 2752 return l1tf_show_state(buf); 2753 break; 2754 2755 case X86_BUG_MDS: 2756 return mds_show_state(buf); 2757 2758 case X86_BUG_TAA: 2759 return tsx_async_abort_show_state(buf); 2760 2761 case X86_BUG_ITLB_MULTIHIT: 2762 return itlb_multihit_show_state(buf); 2763 2764 case X86_BUG_SRBDS: 2765 return srbds_show_state(buf); 2766 2767 case X86_BUG_MMIO_STALE_DATA: 2768 case X86_BUG_MMIO_UNKNOWN: 2769 return mmio_stale_data_show_state(buf); 2770 2771 case X86_BUG_RETBLEED: 2772 return retbleed_show_state(buf); 2773 2774 case X86_BUG_SRSO: 2775 return srso_show_state(buf); 2776 2777 case X86_BUG_GDS: 2778 return gds_show_state(buf); 2779 2780 default: 2781 break; 2782 } 2783 2784 return sysfs_emit(buf, "Vulnerable\n"); 2785 } 2786 2787 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) 2788 { 2789 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); 2790 } 2791 2792 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) 2793 { 2794 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); 2795 } 2796 2797 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) 2798 { 2799 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); 2800 } 2801 2802 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) 2803 { 2804 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); 2805 } 2806 2807 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) 2808 { 2809 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); 2810 } 2811 2812 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) 2813 { 2814 return cpu_show_common(dev, attr, buf, X86_BUG_MDS); 2815 } 2816 2817 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf) 2818 { 2819 return cpu_show_common(dev, attr, buf, X86_BUG_TAA); 2820 } 2821 2822 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf) 2823 { 2824 return cpu_show_common(dev, attr, buf, 
ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}

ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
}

ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
	else
		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}

ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
}

ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
}

ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
}
#endif
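/*
 * The cpu_show_*() handlers above back the files in
 * /sys/devices/system/cpu/vulnerabilities/.  A minimal user space sketch
 * that dumps them (path assumed from the standard sysfs layout; error
 * handling trimmed):
 *
 *	#include <dirent.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		const char *dir = "/sys/devices/system/cpu/vulnerabilities";
 *		char path[256], line[256];
 *		struct dirent *de;
 *		DIR *d = opendir(dir);
 *
 *		if (!d)
 *			return 1;
 *		while ((de = readdir(d))) {
 *			if (de->d_name[0] == '.')
 *				continue;
 *			snprintf(path, sizeof(path), "%s/%s", dir, de->d_name);
 *			FILE *f = fopen(path, "r");
 *			if (f && fgets(line, sizeof(line), f))
 *				printf("%s: %s", de->d_name, line);
 *			if (f)
 *				fclose(f);
 *		}
 *		closedir(d);
 *		return 0;
 *	}
 */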