1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 1994 Linus Torvalds 4 * 5 * Cyrix stuff, June 1998 by: 6 * - Rafael R. Reilova (moved everything from head.S), 7 * <rreilova@ececs.uc.edu> 8 * - Channing Corn (tests & fixes), 9 * - Andrew D. Balsa (code cleanup). 10 */ 11 #include <linux/init.h> 12 #include <linux/utsname.h> 13 #include <linux/cpu.h> 14 #include <linux/module.h> 15 #include <linux/nospec.h> 16 #include <linux/prctl.h> 17 #include <linux/sched/smt.h> 18 19 #include <asm/spec-ctrl.h> 20 #include <asm/cmdline.h> 21 #include <asm/bugs.h> 22 #include <asm/processor.h> 23 #include <asm/processor-flags.h> 24 #include <asm/fpu/internal.h> 25 #include <asm/msr.h> 26 #include <asm/vmx.h> 27 #include <asm/paravirt.h> 28 #include <asm/alternative.h> 29 #include <asm/pgtable.h> 30 #include <asm/set_memory.h> 31 #include <asm/intel-family.h> 32 #include <asm/e820/api.h> 33 #include <asm/hypervisor.h> 34 35 #include "cpu.h" 36 37 static void __init spectre_v2_select_mitigation(void); 38 static void __init ssb_select_mitigation(void); 39 static void __init l1tf_select_mitigation(void); 40 41 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */ 42 u64 x86_spec_ctrl_base; 43 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); 44 static DEFINE_MUTEX(spec_ctrl_mutex); 45 46 /* 47 * The vendor and possibly platform specific bits which can be modified in 48 * x86_spec_ctrl_base. 49 */ 50 static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS; 51 52 /* 53 * AMD specific MSR info for Speculative Store Bypass control. 54 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu(). 55 */ 56 u64 __ro_after_init x86_amd_ls_cfg_base; 57 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; 58 59 /* Control conditional STIBP in switch_to() */ 60 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp); 61 /* Control conditional IBPB in switch_mm() */ 62 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); 63 /* Control unconditional IBPB in switch_mm() */ 64 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); 65 66 void __init check_bugs(void) 67 { 68 identify_boot_cpu(); 69 70 /* 71 * identify_boot_cpu() initialized SMT support information, let the 72 * core code know. 73 */ 74 cpu_smt_check_topology_early(); 75 76 if (!IS_ENABLED(CONFIG_SMP)) { 77 pr_info("CPU: "); 78 print_cpu_info(&boot_cpu_data); 79 } 80 81 /* 82 * Read the SPEC_CTRL MSR to account for reserved bits which may 83 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD 84 * init code as it is not enumerated and depends on the family. 85 */ 86 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) 87 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); 88 89 /* Allow STIBP in MSR_SPEC_CTRL if supported */ 90 if (boot_cpu_has(X86_FEATURE_STIBP)) 91 x86_spec_ctrl_mask |= SPEC_CTRL_STIBP; 92 93 /* Select the proper spectre mitigation before patching alternatives */ 94 spectre_v2_select_mitigation(); 95 96 /* 97 * Select proper mitigation for any exposure to the Speculative Store 98 * Bypass vulnerability. 99 */ 100 ssb_select_mitigation(); 101 102 l1tf_select_mitigation(); 103 104 #ifdef CONFIG_X86_32 105 /* 106 * Check whether we are able to run this kernel safely on SMP. 107 * 108 * - i386 is no longer supported. 109 * - In order to run on anything without a TSC, we need to be 110 * compiled for a i486. 111 */ 112 if (boot_cpu_data.x86 < 4) 113 panic("Kernel requires i486+ for 'invlpg' and other features"); 114 115 init_utsname()->machine[1] = 116 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); 117 alternative_instructions(); 118 119 fpu__init_check_bugs(); 120 #else /* CONFIG_X86_64 */ 121 alternative_instructions(); 122 123 /* 124 * Make sure the first 2MB area is not mapped by huge pages 125 * There are typically fixed size MTRRs in there and overlapping 126 * MTRRs into large pages causes slow downs. 127 * 128 * Right now we don't do that with gbpages because there seems 129 * very little benefit for that case. 130 */ 131 if (!direct_gbpages) 132 set_memory_4k((unsigned long)__va(0), 1); 133 #endif 134 } 135 136 void 137 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) 138 { 139 u64 msrval, guestval, hostval = x86_spec_ctrl_base; 140 struct thread_info *ti = current_thread_info(); 141 142 /* Is MSR_SPEC_CTRL implemented ? */ 143 if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) { 144 /* 145 * Restrict guest_spec_ctrl to supported values. Clear the 146 * modifiable bits in the host base value and or the 147 * modifiable bits from the guest value. 148 */ 149 guestval = hostval & ~x86_spec_ctrl_mask; 150 guestval |= guest_spec_ctrl & x86_spec_ctrl_mask; 151 152 /* SSBD controlled in MSR_SPEC_CTRL */ 153 if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || 154 static_cpu_has(X86_FEATURE_AMD_SSBD)) 155 hostval |= ssbd_tif_to_spec_ctrl(ti->flags); 156 157 /* Conditional STIBP enabled? */ 158 if (static_branch_unlikely(&switch_to_cond_stibp)) 159 hostval |= stibp_tif_to_spec_ctrl(ti->flags); 160 161 if (hostval != guestval) { 162 msrval = setguest ? guestval : hostval; 163 wrmsrl(MSR_IA32_SPEC_CTRL, msrval); 164 } 165 } 166 167 /* 168 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update 169 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported. 170 */ 171 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) && 172 !static_cpu_has(X86_FEATURE_VIRT_SSBD)) 173 return; 174 175 /* 176 * If the host has SSBD mitigation enabled, force it in the host's 177 * virtual MSR value. If its not permanently enabled, evaluate 178 * current's TIF_SSBD thread flag. 179 */ 180 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE)) 181 hostval = SPEC_CTRL_SSBD; 182 else 183 hostval = ssbd_tif_to_spec_ctrl(ti->flags); 184 185 /* Sanitize the guest value */ 186 guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD; 187 188 if (hostval != guestval) { 189 unsigned long tif; 190 191 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) : 192 ssbd_spec_ctrl_to_tif(hostval); 193 194 speculation_ctrl_update(tif); 195 } 196 } 197 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); 198 199 static void x86_amd_ssb_disable(void) 200 { 201 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask; 202 203 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) 204 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); 205 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) 206 wrmsrl(MSR_AMD64_LS_CFG, msrval); 207 } 208 209 #undef pr_fmt 210 #define pr_fmt(fmt) "Spectre V2 : " fmt 211 212 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = 213 SPECTRE_V2_NONE; 214 215 static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init = 216 SPECTRE_V2_USER_NONE; 217 218 #ifdef RETPOLINE 219 static bool spectre_v2_bad_module; 220 221 bool retpoline_module_ok(bool has_retpoline) 222 { 223 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline) 224 return true; 225 226 pr_err("System may be vulnerable to spectre v2\n"); 227 spectre_v2_bad_module = true; 228 return false; 229 } 230 231 static inline const char *spectre_v2_module_string(void) 232 { 233 return spectre_v2_bad_module ? " - vulnerable module loaded" : ""; 234 } 235 #else 236 static inline const char *spectre_v2_module_string(void) { return ""; } 237 #endif 238 239 static inline bool match_option(const char *arg, int arglen, const char *opt) 240 { 241 int len = strlen(opt); 242 243 return len == arglen && !strncmp(arg, opt, len); 244 } 245 246 /* The kernel command line selection for spectre v2 */ 247 enum spectre_v2_mitigation_cmd { 248 SPECTRE_V2_CMD_NONE, 249 SPECTRE_V2_CMD_AUTO, 250 SPECTRE_V2_CMD_FORCE, 251 SPECTRE_V2_CMD_RETPOLINE, 252 SPECTRE_V2_CMD_RETPOLINE_GENERIC, 253 SPECTRE_V2_CMD_RETPOLINE_AMD, 254 }; 255 256 enum spectre_v2_user_cmd { 257 SPECTRE_V2_USER_CMD_NONE, 258 SPECTRE_V2_USER_CMD_AUTO, 259 SPECTRE_V2_USER_CMD_FORCE, 260 SPECTRE_V2_USER_CMD_PRCTL, 261 SPECTRE_V2_USER_CMD_PRCTL_IBPB, 262 SPECTRE_V2_USER_CMD_SECCOMP, 263 SPECTRE_V2_USER_CMD_SECCOMP_IBPB, 264 }; 265 266 static const char * const spectre_v2_user_strings[] = { 267 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", 268 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", 269 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", 270 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", 271 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", 272 }; 273 274 static const struct { 275 const char *option; 276 enum spectre_v2_user_cmd cmd; 277 bool secure; 278 } v2_user_options[] __initdata = { 279 { "auto", SPECTRE_V2_USER_CMD_AUTO, false }, 280 { "off", SPECTRE_V2_USER_CMD_NONE, false }, 281 { "on", SPECTRE_V2_USER_CMD_FORCE, true }, 282 { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false }, 283 { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false }, 284 { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false }, 285 { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false }, 286 }; 287 288 static void __init spec_v2_user_print_cond(const char *reason, bool secure) 289 { 290 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) 291 pr_info("spectre_v2_user=%s forced on command line.\n", reason); 292 } 293 294 static enum spectre_v2_user_cmd __init 295 spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd) 296 { 297 char arg[20]; 298 int ret, i; 299 300 switch (v2_cmd) { 301 case SPECTRE_V2_CMD_NONE: 302 return SPECTRE_V2_USER_CMD_NONE; 303 case SPECTRE_V2_CMD_FORCE: 304 return SPECTRE_V2_USER_CMD_FORCE; 305 default: 306 break; 307 } 308 309 ret = cmdline_find_option(boot_command_line, "spectre_v2_user", 310 arg, sizeof(arg)); 311 if (ret < 0) 312 return SPECTRE_V2_USER_CMD_AUTO; 313 314 for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { 315 if (match_option(arg, ret, v2_user_options[i].option)) { 316 spec_v2_user_print_cond(v2_user_options[i].option, 317 v2_user_options[i].secure); 318 return v2_user_options[i].cmd; 319 } 320 } 321 322 pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg); 323 return SPECTRE_V2_USER_CMD_AUTO; 324 } 325 326 static void __init 327 spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) 328 { 329 enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE; 330 bool smt_possible = IS_ENABLED(CONFIG_SMP); 331 enum spectre_v2_user_cmd cmd; 332 333 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) 334 return; 335 336 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED || 337 cpu_smt_control == CPU_SMT_NOT_SUPPORTED) 338 smt_possible = false; 339 340 cmd = spectre_v2_parse_user_cmdline(v2_cmd); 341 switch (cmd) { 342 case SPECTRE_V2_USER_CMD_NONE: 343 goto set_mode; 344 case SPECTRE_V2_USER_CMD_FORCE: 345 mode = SPECTRE_V2_USER_STRICT; 346 break; 347 case SPECTRE_V2_USER_CMD_PRCTL: 348 case SPECTRE_V2_USER_CMD_PRCTL_IBPB: 349 mode = SPECTRE_V2_USER_PRCTL; 350 break; 351 case SPECTRE_V2_USER_CMD_AUTO: 352 case SPECTRE_V2_USER_CMD_SECCOMP: 353 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: 354 if (IS_ENABLED(CONFIG_SECCOMP)) 355 mode = SPECTRE_V2_USER_SECCOMP; 356 else 357 mode = SPECTRE_V2_USER_PRCTL; 358 break; 359 } 360 361 /* 362 * At this point, an STIBP mode other than "off" has been set. 363 * If STIBP support is not being forced, check if STIBP always-on 364 * is preferred. 365 */ 366 if (mode != SPECTRE_V2_USER_STRICT && 367 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) 368 mode = SPECTRE_V2_USER_STRICT_PREFERRED; 369 370 /* Initialize Indirect Branch Prediction Barrier */ 371 if (boot_cpu_has(X86_FEATURE_IBPB)) { 372 setup_force_cpu_cap(X86_FEATURE_USE_IBPB); 373 374 switch (cmd) { 375 case SPECTRE_V2_USER_CMD_FORCE: 376 case SPECTRE_V2_USER_CMD_PRCTL_IBPB: 377 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: 378 static_branch_enable(&switch_mm_always_ibpb); 379 break; 380 case SPECTRE_V2_USER_CMD_PRCTL: 381 case SPECTRE_V2_USER_CMD_AUTO: 382 case SPECTRE_V2_USER_CMD_SECCOMP: 383 static_branch_enable(&switch_mm_cond_ibpb); 384 break; 385 default: 386 break; 387 } 388 389 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", 390 static_key_enabled(&switch_mm_always_ibpb) ? 391 "always-on" : "conditional"); 392 } 393 394 /* If enhanced IBRS is enabled no STIBP required */ 395 if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) 396 return; 397 398 /* 399 * If SMT is not possible or STIBP is not available clear the STIBP 400 * mode. 401 */ 402 if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP)) 403 mode = SPECTRE_V2_USER_NONE; 404 set_mode: 405 spectre_v2_user = mode; 406 /* Only print the STIBP mode when SMT possible */ 407 if (smt_possible) 408 pr_info("%s\n", spectre_v2_user_strings[mode]); 409 } 410 411 static const char * const spectre_v2_strings[] = { 412 [SPECTRE_V2_NONE] = "Vulnerable", 413 [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", 414 [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", 415 [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS", 416 }; 417 418 static const struct { 419 const char *option; 420 enum spectre_v2_mitigation_cmd cmd; 421 bool secure; 422 } mitigation_options[] __initdata = { 423 { "off", SPECTRE_V2_CMD_NONE, false }, 424 { "on", SPECTRE_V2_CMD_FORCE, true }, 425 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, 426 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false }, 427 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, 428 { "auto", SPECTRE_V2_CMD_AUTO, false }, 429 }; 430 431 static void __init spec_v2_print_cond(const char *reason, bool secure) 432 { 433 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) 434 pr_info("%s selected on command line.\n", reason); 435 } 436 437 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) 438 { 439 enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; 440 char arg[20]; 441 int ret, i; 442 443 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) 444 return SPECTRE_V2_CMD_NONE; 445 446 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); 447 if (ret < 0) 448 return SPECTRE_V2_CMD_AUTO; 449 450 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { 451 if (!match_option(arg, ret, mitigation_options[i].option)) 452 continue; 453 cmd = mitigation_options[i].cmd; 454 break; 455 } 456 457 if (i >= ARRAY_SIZE(mitigation_options)) { 458 pr_err("unknown option (%s). Switching to AUTO select\n", arg); 459 return SPECTRE_V2_CMD_AUTO; 460 } 461 462 if ((cmd == SPECTRE_V2_CMD_RETPOLINE || 463 cmd == SPECTRE_V2_CMD_RETPOLINE_AMD || 464 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) && 465 !IS_ENABLED(CONFIG_RETPOLINE)) { 466 pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option); 467 return SPECTRE_V2_CMD_AUTO; 468 } 469 470 if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD && 471 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON && 472 boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { 473 pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n"); 474 return SPECTRE_V2_CMD_AUTO; 475 } 476 477 spec_v2_print_cond(mitigation_options[i].option, 478 mitigation_options[i].secure); 479 return cmd; 480 } 481 482 static void __init spectre_v2_select_mitigation(void) 483 { 484 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); 485 enum spectre_v2_mitigation mode = SPECTRE_V2_NONE; 486 487 /* 488 * If the CPU is not affected and the command line mode is NONE or AUTO 489 * then nothing to do. 490 */ 491 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && 492 (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO)) 493 return; 494 495 switch (cmd) { 496 case SPECTRE_V2_CMD_NONE: 497 return; 498 499 case SPECTRE_V2_CMD_FORCE: 500 case SPECTRE_V2_CMD_AUTO: 501 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { 502 mode = SPECTRE_V2_IBRS_ENHANCED; 503 /* Force it so VMEXIT will restore correctly */ 504 x86_spec_ctrl_base |= SPEC_CTRL_IBRS; 505 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); 506 goto specv2_set_mode; 507 } 508 if (IS_ENABLED(CONFIG_RETPOLINE)) 509 goto retpoline_auto; 510 break; 511 case SPECTRE_V2_CMD_RETPOLINE_AMD: 512 if (IS_ENABLED(CONFIG_RETPOLINE)) 513 goto retpoline_amd; 514 break; 515 case SPECTRE_V2_CMD_RETPOLINE_GENERIC: 516 if (IS_ENABLED(CONFIG_RETPOLINE)) 517 goto retpoline_generic; 518 break; 519 case SPECTRE_V2_CMD_RETPOLINE: 520 if (IS_ENABLED(CONFIG_RETPOLINE)) 521 goto retpoline_auto; 522 break; 523 } 524 pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!"); 525 return; 526 527 retpoline_auto: 528 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || 529 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { 530 retpoline_amd: 531 if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { 532 pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n"); 533 goto retpoline_generic; 534 } 535 mode = SPECTRE_V2_RETPOLINE_AMD; 536 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD); 537 setup_force_cpu_cap(X86_FEATURE_RETPOLINE); 538 } else { 539 retpoline_generic: 540 mode = SPECTRE_V2_RETPOLINE_GENERIC; 541 setup_force_cpu_cap(X86_FEATURE_RETPOLINE); 542 } 543 544 specv2_set_mode: 545 spectre_v2_enabled = mode; 546 pr_info("%s\n", spectre_v2_strings[mode]); 547 548 /* 549 * If spectre v2 protection has been enabled, unconditionally fill 550 * RSB during a context switch; this protects against two independent 551 * issues: 552 * 553 * - RSB underflow (and switch to BTB) on Skylake+ 554 * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs 555 */ 556 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); 557 pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); 558 559 /* 560 * Retpoline means the kernel is safe because it has no indirect 561 * branches. Enhanced IBRS protects firmware too, so, enable restricted 562 * speculation around firmware calls only when Enhanced IBRS isn't 563 * supported. 564 * 565 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because 566 * the user might select retpoline on the kernel command line and if 567 * the CPU supports Enhanced IBRS, kernel might un-intentionally not 568 * enable IBRS around firmware calls. 569 */ 570 if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) { 571 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); 572 pr_info("Enabling Restricted Speculation for firmware calls\n"); 573 } 574 575 /* Set up IBPB and STIBP depending on the general spectre V2 command */ 576 spectre_v2_user_select_mitigation(cmd); 577 578 /* Enable STIBP if appropriate */ 579 arch_smt_update(); 580 } 581 582 static void update_stibp_msr(void * __unused) 583 { 584 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); 585 } 586 587 /* Update x86_spec_ctrl_base in case SMT state changed. */ 588 static void update_stibp_strict(void) 589 { 590 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP; 591 592 if (sched_smt_active()) 593 mask |= SPEC_CTRL_STIBP; 594 595 if (mask == x86_spec_ctrl_base) 596 return; 597 598 pr_info("Update user space SMT mitigation: STIBP %s\n", 599 mask & SPEC_CTRL_STIBP ? "always-on" : "off"); 600 x86_spec_ctrl_base = mask; 601 on_each_cpu(update_stibp_msr, NULL, 1); 602 } 603 604 /* Update the static key controlling the evaluation of TIF_SPEC_IB */ 605 static void update_indir_branch_cond(void) 606 { 607 if (sched_smt_active()) 608 static_branch_enable(&switch_to_cond_stibp); 609 else 610 static_branch_disable(&switch_to_cond_stibp); 611 } 612 613 void arch_smt_update(void) 614 { 615 /* Enhanced IBRS implies STIBP. No update required. */ 616 if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) 617 return; 618 619 mutex_lock(&spec_ctrl_mutex); 620 621 switch (spectre_v2_user) { 622 case SPECTRE_V2_USER_NONE: 623 break; 624 case SPECTRE_V2_USER_STRICT: 625 case SPECTRE_V2_USER_STRICT_PREFERRED: 626 update_stibp_strict(); 627 break; 628 case SPECTRE_V2_USER_PRCTL: 629 case SPECTRE_V2_USER_SECCOMP: 630 update_indir_branch_cond(); 631 break; 632 } 633 634 mutex_unlock(&spec_ctrl_mutex); 635 } 636 637 #undef pr_fmt 638 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt 639 640 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE; 641 642 /* The kernel command line selection */ 643 enum ssb_mitigation_cmd { 644 SPEC_STORE_BYPASS_CMD_NONE, 645 SPEC_STORE_BYPASS_CMD_AUTO, 646 SPEC_STORE_BYPASS_CMD_ON, 647 SPEC_STORE_BYPASS_CMD_PRCTL, 648 SPEC_STORE_BYPASS_CMD_SECCOMP, 649 }; 650 651 static const char * const ssb_strings[] = { 652 [SPEC_STORE_BYPASS_NONE] = "Vulnerable", 653 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", 654 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", 655 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", 656 }; 657 658 static const struct { 659 const char *option; 660 enum ssb_mitigation_cmd cmd; 661 } ssb_mitigation_options[] __initdata = { 662 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ 663 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ 664 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ 665 { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */ 666 { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */ 667 }; 668 669 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) 670 { 671 enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO; 672 char arg[20]; 673 int ret, i; 674 675 if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) { 676 return SPEC_STORE_BYPASS_CMD_NONE; 677 } else { 678 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", 679 arg, sizeof(arg)); 680 if (ret < 0) 681 return SPEC_STORE_BYPASS_CMD_AUTO; 682 683 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) { 684 if (!match_option(arg, ret, ssb_mitigation_options[i].option)) 685 continue; 686 687 cmd = ssb_mitigation_options[i].cmd; 688 break; 689 } 690 691 if (i >= ARRAY_SIZE(ssb_mitigation_options)) { 692 pr_err("unknown option (%s). Switching to AUTO select\n", arg); 693 return SPEC_STORE_BYPASS_CMD_AUTO; 694 } 695 } 696 697 return cmd; 698 } 699 700 static enum ssb_mitigation __init __ssb_select_mitigation(void) 701 { 702 enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE; 703 enum ssb_mitigation_cmd cmd; 704 705 if (!boot_cpu_has(X86_FEATURE_SSBD)) 706 return mode; 707 708 cmd = ssb_parse_cmdline(); 709 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && 710 (cmd == SPEC_STORE_BYPASS_CMD_NONE || 711 cmd == SPEC_STORE_BYPASS_CMD_AUTO)) 712 return mode; 713 714 switch (cmd) { 715 case SPEC_STORE_BYPASS_CMD_AUTO: 716 case SPEC_STORE_BYPASS_CMD_SECCOMP: 717 /* 718 * Choose prctl+seccomp as the default mode if seccomp is 719 * enabled. 720 */ 721 if (IS_ENABLED(CONFIG_SECCOMP)) 722 mode = SPEC_STORE_BYPASS_SECCOMP; 723 else 724 mode = SPEC_STORE_BYPASS_PRCTL; 725 break; 726 case SPEC_STORE_BYPASS_CMD_ON: 727 mode = SPEC_STORE_BYPASS_DISABLE; 728 break; 729 case SPEC_STORE_BYPASS_CMD_PRCTL: 730 mode = SPEC_STORE_BYPASS_PRCTL; 731 break; 732 case SPEC_STORE_BYPASS_CMD_NONE: 733 break; 734 } 735 736 /* 737 * We have three CPU feature flags that are in play here: 738 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. 739 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass 740 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation 741 */ 742 if (mode == SPEC_STORE_BYPASS_DISABLE) { 743 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); 744 /* 745 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may 746 * use a completely different MSR and bit dependent on family. 747 */ 748 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && 749 !static_cpu_has(X86_FEATURE_AMD_SSBD)) { 750 x86_amd_ssb_disable(); 751 } else { 752 x86_spec_ctrl_base |= SPEC_CTRL_SSBD; 753 x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; 754 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); 755 } 756 } 757 758 return mode; 759 } 760 761 static void ssb_select_mitigation(void) 762 { 763 ssb_mode = __ssb_select_mitigation(); 764 765 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 766 pr_info("%s\n", ssb_strings[ssb_mode]); 767 } 768 769 #undef pr_fmt 770 #define pr_fmt(fmt) "Speculation prctl: " fmt 771 772 static void task_update_spec_tif(struct task_struct *tsk) 773 { 774 /* Force the update of the real TIF bits */ 775 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); 776 777 /* 778 * Immediately update the speculation control MSRs for the current 779 * task, but for a non-current task delay setting the CPU 780 * mitigation until it is scheduled next. 781 * 782 * This can only happen for SECCOMP mitigation. For PRCTL it's 783 * always the current task. 784 */ 785 if (tsk == current) 786 speculation_ctrl_update_current(); 787 } 788 789 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) 790 { 791 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && 792 ssb_mode != SPEC_STORE_BYPASS_SECCOMP) 793 return -ENXIO; 794 795 switch (ctrl) { 796 case PR_SPEC_ENABLE: 797 /* If speculation is force disabled, enable is not allowed */ 798 if (task_spec_ssb_force_disable(task)) 799 return -EPERM; 800 task_clear_spec_ssb_disable(task); 801 task_update_spec_tif(task); 802 break; 803 case PR_SPEC_DISABLE: 804 task_set_spec_ssb_disable(task); 805 task_update_spec_tif(task); 806 break; 807 case PR_SPEC_FORCE_DISABLE: 808 task_set_spec_ssb_disable(task); 809 task_set_spec_ssb_force_disable(task); 810 task_update_spec_tif(task); 811 break; 812 default: 813 return -ERANGE; 814 } 815 return 0; 816 } 817 818 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) 819 { 820 switch (ctrl) { 821 case PR_SPEC_ENABLE: 822 if (spectre_v2_user == SPECTRE_V2_USER_NONE) 823 return 0; 824 /* 825 * Indirect branch speculation is always disabled in strict 826 * mode. 827 */ 828 if (spectre_v2_user == SPECTRE_V2_USER_STRICT || 829 spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED) 830 return -EPERM; 831 task_clear_spec_ib_disable(task); 832 task_update_spec_tif(task); 833 break; 834 case PR_SPEC_DISABLE: 835 case PR_SPEC_FORCE_DISABLE: 836 /* 837 * Indirect branch speculation is always allowed when 838 * mitigation is force disabled. 839 */ 840 if (spectre_v2_user == SPECTRE_V2_USER_NONE) 841 return -EPERM; 842 if (spectre_v2_user == SPECTRE_V2_USER_STRICT || 843 spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED) 844 return 0; 845 task_set_spec_ib_disable(task); 846 if (ctrl == PR_SPEC_FORCE_DISABLE) 847 task_set_spec_ib_force_disable(task); 848 task_update_spec_tif(task); 849 break; 850 default: 851 return -ERANGE; 852 } 853 return 0; 854 } 855 856 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, 857 unsigned long ctrl) 858 { 859 switch (which) { 860 case PR_SPEC_STORE_BYPASS: 861 return ssb_prctl_set(task, ctrl); 862 case PR_SPEC_INDIRECT_BRANCH: 863 return ib_prctl_set(task, ctrl); 864 default: 865 return -ENODEV; 866 } 867 } 868 869 #ifdef CONFIG_SECCOMP 870 void arch_seccomp_spec_mitigate(struct task_struct *task) 871 { 872 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) 873 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); 874 if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP) 875 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); 876 } 877 #endif 878 879 static int ssb_prctl_get(struct task_struct *task) 880 { 881 switch (ssb_mode) { 882 case SPEC_STORE_BYPASS_DISABLE: 883 return PR_SPEC_DISABLE; 884 case SPEC_STORE_BYPASS_SECCOMP: 885 case SPEC_STORE_BYPASS_PRCTL: 886 if (task_spec_ssb_force_disable(task)) 887 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; 888 if (task_spec_ssb_disable(task)) 889 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 890 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 891 default: 892 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 893 return PR_SPEC_ENABLE; 894 return PR_SPEC_NOT_AFFECTED; 895 } 896 } 897 898 static int ib_prctl_get(struct task_struct *task) 899 { 900 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 901 return PR_SPEC_NOT_AFFECTED; 902 903 switch (spectre_v2_user) { 904 case SPECTRE_V2_USER_NONE: 905 return PR_SPEC_ENABLE; 906 case SPECTRE_V2_USER_PRCTL: 907 case SPECTRE_V2_USER_SECCOMP: 908 if (task_spec_ib_force_disable(task)) 909 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; 910 if (task_spec_ib_disable(task)) 911 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 912 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 913 case SPECTRE_V2_USER_STRICT: 914 case SPECTRE_V2_USER_STRICT_PREFERRED: 915 return PR_SPEC_DISABLE; 916 default: 917 return PR_SPEC_NOT_AFFECTED; 918 } 919 } 920 921 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) 922 { 923 switch (which) { 924 case PR_SPEC_STORE_BYPASS: 925 return ssb_prctl_get(task); 926 case PR_SPEC_INDIRECT_BRANCH: 927 return ib_prctl_get(task); 928 default: 929 return -ENODEV; 930 } 931 } 932 933 void x86_spec_ctrl_setup_ap(void) 934 { 935 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) 936 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); 937 938 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) 939 x86_amd_ssb_disable(); 940 } 941 942 #undef pr_fmt 943 #define pr_fmt(fmt) "L1TF: " fmt 944 945 /* Default mitigation for L1TF-affected CPUs */ 946 enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH; 947 #if IS_ENABLED(CONFIG_KVM_INTEL) 948 EXPORT_SYMBOL_GPL(l1tf_mitigation); 949 #endif 950 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; 951 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); 952 953 /* 954 * These CPUs all support 44bits physical address space internally in the 955 * cache but CPUID can report a smaller number of physical address bits. 956 * 957 * The L1TF mitigation uses the top most address bit for the inversion of 958 * non present PTEs. When the installed memory reaches into the top most 959 * address bit due to memory holes, which has been observed on machines 960 * which report 36bits physical address bits and have 32G RAM installed, 961 * then the mitigation range check in l1tf_select_mitigation() triggers. 962 * This is a false positive because the mitigation is still possible due to 963 * the fact that the cache uses 44bit internally. Use the cache bits 964 * instead of the reported physical bits and adjust them on the affected 965 * machines to 44bit if the reported bits are less than 44. 966 */ 967 static void override_cache_bits(struct cpuinfo_x86 *c) 968 { 969 if (c->x86 != 6) 970 return; 971 972 switch (c->x86_model) { 973 case INTEL_FAM6_NEHALEM: 974 case INTEL_FAM6_WESTMERE: 975 case INTEL_FAM6_SANDYBRIDGE: 976 case INTEL_FAM6_IVYBRIDGE: 977 case INTEL_FAM6_HASWELL_CORE: 978 case INTEL_FAM6_HASWELL_ULT: 979 case INTEL_FAM6_HASWELL_GT3E: 980 case INTEL_FAM6_BROADWELL_CORE: 981 case INTEL_FAM6_BROADWELL_GT3E: 982 case INTEL_FAM6_SKYLAKE_MOBILE: 983 case INTEL_FAM6_SKYLAKE_DESKTOP: 984 case INTEL_FAM6_KABYLAKE_MOBILE: 985 case INTEL_FAM6_KABYLAKE_DESKTOP: 986 if (c->x86_cache_bits < 44) 987 c->x86_cache_bits = 44; 988 break; 989 } 990 } 991 992 static void __init l1tf_select_mitigation(void) 993 { 994 u64 half_pa; 995 996 if (!boot_cpu_has_bug(X86_BUG_L1TF)) 997 return; 998 999 override_cache_bits(&boot_cpu_data); 1000 1001 switch (l1tf_mitigation) { 1002 case L1TF_MITIGATION_OFF: 1003 case L1TF_MITIGATION_FLUSH_NOWARN: 1004 case L1TF_MITIGATION_FLUSH: 1005 break; 1006 case L1TF_MITIGATION_FLUSH_NOSMT: 1007 case L1TF_MITIGATION_FULL: 1008 cpu_smt_disable(false); 1009 break; 1010 case L1TF_MITIGATION_FULL_FORCE: 1011 cpu_smt_disable(true); 1012 break; 1013 } 1014 1015 #if CONFIG_PGTABLE_LEVELS == 2 1016 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n"); 1017 return; 1018 #endif 1019 1020 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; 1021 if (l1tf_mitigation != L1TF_MITIGATION_OFF && 1022 e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { 1023 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); 1024 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", 1025 half_pa); 1026 pr_info("However, doing so will make a part of your RAM unusable.\n"); 1027 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n"); 1028 return; 1029 } 1030 1031 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV); 1032 } 1033 1034 static int __init l1tf_cmdline(char *str) 1035 { 1036 if (!boot_cpu_has_bug(X86_BUG_L1TF)) 1037 return 0; 1038 1039 if (!str) 1040 return -EINVAL; 1041 1042 if (!strcmp(str, "off")) 1043 l1tf_mitigation = L1TF_MITIGATION_OFF; 1044 else if (!strcmp(str, "flush,nowarn")) 1045 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN; 1046 else if (!strcmp(str, "flush")) 1047 l1tf_mitigation = L1TF_MITIGATION_FLUSH; 1048 else if (!strcmp(str, "flush,nosmt")) 1049 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; 1050 else if (!strcmp(str, "full")) 1051 l1tf_mitigation = L1TF_MITIGATION_FULL; 1052 else if (!strcmp(str, "full,force")) 1053 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE; 1054 1055 return 0; 1056 } 1057 early_param("l1tf", l1tf_cmdline); 1058 1059 #undef pr_fmt 1060 1061 #ifdef CONFIG_SYSFS 1062 1063 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" 1064 1065 #if IS_ENABLED(CONFIG_KVM_INTEL) 1066 static const char * const l1tf_vmx_states[] = { 1067 [VMENTER_L1D_FLUSH_AUTO] = "auto", 1068 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", 1069 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", 1070 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes", 1071 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled", 1072 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary" 1073 }; 1074 1075 static ssize_t l1tf_show_state(char *buf) 1076 { 1077 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) 1078 return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); 1079 1080 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || 1081 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && 1082 sched_smt_active())) { 1083 return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, 1084 l1tf_vmx_states[l1tf_vmx_mitigation]); 1085 } 1086 1087 return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, 1088 l1tf_vmx_states[l1tf_vmx_mitigation], 1089 sched_smt_active() ? "vulnerable" : "disabled"); 1090 } 1091 #else 1092 static ssize_t l1tf_show_state(char *buf) 1093 { 1094 return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); 1095 } 1096 #endif 1097 1098 static char *stibp_state(void) 1099 { 1100 if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) 1101 return ""; 1102 1103 switch (spectre_v2_user) { 1104 case SPECTRE_V2_USER_NONE: 1105 return ", STIBP: disabled"; 1106 case SPECTRE_V2_USER_STRICT: 1107 return ", STIBP: forced"; 1108 case SPECTRE_V2_USER_STRICT_PREFERRED: 1109 return ", STIBP: always-on"; 1110 case SPECTRE_V2_USER_PRCTL: 1111 case SPECTRE_V2_USER_SECCOMP: 1112 if (static_key_enabled(&switch_to_cond_stibp)) 1113 return ", STIBP: conditional"; 1114 } 1115 return ""; 1116 } 1117 1118 static char *ibpb_state(void) 1119 { 1120 if (boot_cpu_has(X86_FEATURE_IBPB)) { 1121 if (static_key_enabled(&switch_mm_always_ibpb)) 1122 return ", IBPB: always-on"; 1123 if (static_key_enabled(&switch_mm_cond_ibpb)) 1124 return ", IBPB: conditional"; 1125 return ", IBPB: disabled"; 1126 } 1127 return ""; 1128 } 1129 1130 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, 1131 char *buf, unsigned int bug) 1132 { 1133 if (!boot_cpu_has_bug(bug)) 1134 return sprintf(buf, "Not affected\n"); 1135 1136 switch (bug) { 1137 case X86_BUG_CPU_MELTDOWN: 1138 if (boot_cpu_has(X86_FEATURE_PTI)) 1139 return sprintf(buf, "Mitigation: PTI\n"); 1140 1141 if (hypervisor_is_type(X86_HYPER_XEN_PV)) 1142 return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); 1143 1144 break; 1145 1146 case X86_BUG_SPECTRE_V1: 1147 return sprintf(buf, "Mitigation: __user pointer sanitization\n"); 1148 1149 case X86_BUG_SPECTRE_V2: 1150 return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], 1151 ibpb_state(), 1152 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", 1153 stibp_state(), 1154 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", 1155 spectre_v2_module_string()); 1156 1157 case X86_BUG_SPEC_STORE_BYPASS: 1158 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); 1159 1160 case X86_BUG_L1TF: 1161 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) 1162 return l1tf_show_state(buf); 1163 break; 1164 default: 1165 break; 1166 } 1167 1168 return sprintf(buf, "Vulnerable\n"); 1169 } 1170 1171 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) 1172 { 1173 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); 1174 } 1175 1176 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) 1177 { 1178 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); 1179 } 1180 1181 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) 1182 { 1183 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); 1184 } 1185 1186 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) 1187 { 1188 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); 1189 } 1190 1191 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) 1192 { 1193 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); 1194 } 1195 #endif 1196