// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
 * detailed at:
 *
 *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
 *
 * This code was originally written hastily under an awful lot of stress and so
 * aspects of it are somewhat hacky. Unfortunately, changing anything in here
 * instantly makes me feel ill. Thanks, Jann. Thann.
 *
 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
 * Copyright (C) 2020 Google LLC
 *
 * "If there's something strange in your neighbourhood, who you gonna call?"
 *
 * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
 */

#include <linux/arm-smccc.h>
#include <linux/bpf.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>

/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;

		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}

/*
 * Spectre v1.
 *
 * The kernel can't protect userspace for this one: it's each person for
 * themselves. Advertise what we're doing and be done with it.
 */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

/*
 * Spectre v2.
 *
 * This one sucks. A CPU is either:
 *
 * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in software by firmware.
 * - Mitigated in software by a CPU-specific dance in the kernel and a
 *   firmware call at EL2.
 * - Vulnerable.
 *
 * It's not unlikely for different CPUs in a big.LITTLE system to fall into
 * different camps.
 */
static enum mitigation_state spectre_v2_state;

static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);

static bool spectre_v2_mitigations_off(void)
{
	bool ret = __nospectre_v2 || cpu_mitigations_off();

	if (ret)
		pr_info_once("spectre-v2 mitigation disabled by command line option\n");

	return ret;
}

static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
{
	switch (bhb_state) {
	case SPECTRE_UNAFFECTED:
		return "";
	default:
	case SPECTRE_VULNERABLE:
		return ", but not BHB";
	case SPECTRE_MITIGATED:
		return ", BHB";
	}
}

static bool _unprivileged_ebpf_enabled(void)
{
#ifdef CONFIG_BPF_SYSCALL
	return !sysctl_unprivileged_bpf_disabled;
#else
	return false;
#endif
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
	const char *bhb_str = get_bhb_affected_string(bhb_state);
	const char *v2_str = "Branch predictor hardening";

	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
		if (bhb_state == SPECTRE_UNAFFECTED)
			return sprintf(buf, "Not affected\n");

		/*
		 * Platforms affected by Spectre-BHB can't report
		 * "Not affected" for Spectre-v2.
		 */
		v2_str = "CSV2";
		fallthrough;
	case SPECTRE_MITIGATED:
		if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
			return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");

		return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	return true;
}

enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available())
		return;

	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
}

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
{
	u32 midr = read_cpuid_id();
	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
		return NULL;

	return qcom_link_stack_sanitisation;
}

static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}

void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}

/*
 * Spectre-v3a.
 *
 * Phew, there's not an awful lot to do here! We just instruct EL2 to use
 * an indirect trampoline for the hyp vectors so that guests can't read
 * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
 */
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
	static const struct midr_range spectre_v3a_unsafe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		{},
	};

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}

void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
		data->slot += HYP_VECTOR_INDIRECT;
}

/*
 * Spectre v4.
 *
 * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
 * either:
 *
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in hardware via PSTATE.SSBS.
 * - Mitigated in software by firmware (sometimes referred to as SSBD).
 *
 * Wait, that doesn't sound so bad, does it? Keep reading...
 *
 * A major source of headaches is that the software mitigation is enabled both
 * on a per-task basis, but can also be forced on for the kernel, necessitating
 * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
 * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
 * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
 * so you can have systems that have both firmware and SSBS mitigations. This
 * means we actually have to reject late onlining of CPUs with mitigations if
 * all of the currently onlined CPUs are safelisted, as the mitigation tends to
 * be opt-in for userspace. Yes, really, the cure is worse than the disease.
 *
 * The only good part is that if the firmware mitigation is present, then it is
 * present for all CPUs, meaning we don't have to worry about late onlining of a
 * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
 *
 * Give me a VAX-11/780 any day of the week...
 */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
static int __init parse_spectre_v4_param(char *str)
{
	int i;

	if (!str || !str[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
		const struct spectre_v4_param *param = &spectre_v4_params[i];

		if (strncmp(str, param->str, strlen(param->str)))
			continue;

		__spectre_v4_policy = param->policy;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);

/*
 * Because this was all written in a rush by people working in different silos,
 * we've ended up with multiple command line options to control the same thing.
 * Wrap these up in some helpers, which prefer disabling the mitigation if faced
 * with contradictory parameters. The mitigation is always either "off",
 * "dynamic" or "on".
 */
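/*
 * For example (derived from the spectre_v4_params table above): booting with
 * "ssbd=force-off", or the generic "mitigations=off", makes
 * spectre_v4_mitigations_off() return true; "ssbd=kernel" selects the dynamic,
 * per-task behaviour; "ssbd=force-on" keeps the mitigation enabled
 * unconditionally.
 */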
static bool spectre_v4_mitigations_off(void)
{
	bool ret = cpu_mitigations_off() ||
		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;

	if (ret)
		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");

	return ret;
}

/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
		__spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

static bool spectre_v4_mitigations_on(void)
{
	return !spectre_v4_mitigations_off() &&
		__spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}

static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		fallthrough;
	case SMCCC_RET_NOT_REQUIRED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
	enum mitigation_state state;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_get_cpu_fw_mitigation_state();

	return state != SPECTRE_UNAFFECTED;
}

static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	if (user_mode(regs))
		return 1;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}

static struct undef_hook ssbs_emulation_hook = {
	.instr_mask	= ~(1U << PSTATE_Imm_shift),
	.instr_val	= 0xd500401f | PSTATE_SSBS,
	.fn		= ssbs_emulation_handler,
};

static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	if (spectre_v4_mitigations_off()) {
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		set_pstate_ssbs(1);
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	set_pstate_ssbs(0);
	return SPECTRE_MITIGATED;
}

/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fallthrough and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	if (spectre_v4_mitigations_off())
		return;

	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
					      __le32 *origptr,
					      __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}

void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}

static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	if (state)
		regs->pstate |= bit;
	else
		regs->pstate &= ~bit;
}

void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}

/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * from userspace.
 */
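/*
 * For example, a task would typically opt in to the mitigation from userspace
 * with something like (illustrative only, error handling omitted):
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 * which reaches ssbd_prctl_set() below with ctrl == PR_SPEC_DISABLE, and can
 * query the current state with PR_GET_SPECULATION_CTRL, which is handled by
 * ssbd_prctl_get().
 */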
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}

static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}

static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	spectre_v4_enable_task_mitigation(task);
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_get(task);
	default:
		return -ENODEV;
	}
}

/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by a branchy loop a CPU specific number of times, and listed
 *   in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Has the ClearBHB instruction to perform the mitigation.
 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
 *   software mitigation in the vectors is needed.
 * - Has CSV2.3, so is unaffected.
 */
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}

enum bhb_mitigation_bits {
	BHB_LOOP,
	BHB_FW,
	BHB_HW,
	BHB_INSN,
};
static unsigned long system_bhb_mitigations;

/*
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
u8 spectre_bhb_loop_affected(int scope)
{
	u8 k = 0;
	static u8 max_bhb_k;

	if (scope == SCOPE_LOCAL_CPU) {
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k = 8;

		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}

	return k;
}

static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

static bool is_spectre_bhb_fw_affected(int scope)
{
	static bool system_affected;
	enum mitigation_state fw_state;
	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					 spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
}

static bool supports_ecbhb(int scope)
{
	u64 mmfr1;

	if (scope == SCOPE_LOCAL_CPU)
		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
	else
		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_ECBHB_SHIFT);
}

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (supports_csv2p3(scope))
		return false;

	if (supports_clearbhb(scope))
		return true;

	if (spectre_bhb_loop_affected(scope))
		return true;

	if (is_spectre_bhb_fw_affected(scope))
		return true;

	return false;
}

static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
	const char *v = arm64_get_bp_hardening_vector(slot);

	if (slot < 0)
		return;

	__this_cpu_write(this_cpu_vector, v);

	/*
	 * When KPTI is in use, the vectors are switched when exiting to
	 * user-space.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return;

	write_sysreg(v, vbar_el1);
	isb();
}

void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cpu_cb;
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
		return;

	if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
		/* No point mitigating Spectre-BHB alone. */
	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
	} else if (cpu_mitigations_off()) {
		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
		state = SPECTRE_MITIGATED;
		set_bit(BHB_HW, &system_bhb_mitigations);
	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have ClearBHB
		 * added.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_INSN, &system_bhb_mitigations);
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have the
		 * branchy-loop added. A57/A72-r0 will already have selected
		 * the spectre-indirect vector, which is sufficient for BHB
		 * too.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_LOOP, &system_bhb_mitigations);
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			/*
			 * Ensure KVM uses one of the spectre bp_hardening
			 * vectors. The indirect vector doesn't include the EL3
			 * call, so needs upgrading to
			 * HYP_VECTOR_SPECTRE_INDIRECT.
			 */
			if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
				data->slot += 1;

			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			/*
			 * The WA3 call in the vectors supersedes the WA1 call
			 * made during context-switch. Uninstall any firmware
			 * bp_hardening callback.
			 */
			cpu_cb = spectre_v2_get_sw_mitigation_cb();
			if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
				__this_cpu_write(bp_hardening_data.fn, NULL);

			state = SPECTRE_MITIGATED;
			set_bit(BHB_FW, &system_bhb_mitigations);
		}
	}

	update_mitigation_state(&spectre_bhb_state, state);
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
						      __le32 *origptr,
						      __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_LOOP, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
						     __le32 *origptr,
						     __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_FW, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to correct the immediate */
void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
					 __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;
	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);
}

/* Patched to mov WA3 when supported */
void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
				   __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
	    !test_bit(BHB_FW, &system_bhb_mitigations))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);

	insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
						  AARCH64_INSN_VARIANT_32BIT,
						  AARCH64_INSN_REG_ZR, rd,
						  ARM_SMCCC_ARCH_WORKAROUND_3);
	if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
		return;

	*updptr++ = cpu_to_le32(insn);
}

/* Patched to NOP when not supported */
void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 2);

	if (test_bit(BHB_INSN, &system_bhb_mitigations))
		return;

	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

#ifdef CONFIG_BPF_SYSCALL
#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
void unpriv_ebpf_notify(int new_state)
{
	if (spectre_v2_state == SPECTRE_VULNERABLE ||
	    spectre_bhb_state != SPECTRE_MITIGATED)
		return;

	if (!new_state)
		pr_err("WARNING: %s", EBPF_WARN);
}
#endif