// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
 * detailed at:
 *
 *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
 *
 * This code was originally written hastily under an awful lot of stress and so
 * aspects of it are somewhat hacky. Unfortunately, changing anything in here
 * instantly makes me feel ill. Thanks, Jann. Thann.
 *
 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
 * Copyright (C) 2020 Google LLC
 *
 * "If there's something strange in your neighbourhood, who you gonna call?"
 *
 * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
 */

#include <linux/arm-smccc.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/virt.h>

/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 *
 * The recorded state is only ever moved to a numerically greater value (see
 * the "new <= state" check below), and any such change is refused with a WARN
 * once the system-wide capabilities have been finalized.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		/* Never "downgrade" the recorded state. */
		if (new <= state)
			break;

		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}

/*
 * Spectre v1.
 *
 * The kernel can't protect userspace for this one: it's each person for
 * themselves. Advertise what we're doing and be done with it.
 */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

/*
 * Spectre v2.
 *
 * This one sucks. A CPU is either:
 *
 * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in software by firmware.
 * - Mitigated in software by a CPU-specific dance in the kernel and a
 *   firmware call at EL2.
 * - Vulnerable.
 *
 * It's not unlikely for different CPUs in a big.LITTLE system to fall into
 * different camps.
 */
static enum mitigation_state spectre_v2_state;

/* Set by the "nospectre_v2" early param; read in spectre_v2_mitigations_off(). */
static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);

static bool spectre_v2_mitigations_off(void)
{
	bool ret = __nospectre_v2 || cpu_mitigations_off();

	if (ret)
		pr_info_once("spectre-v2 mitigation disabled by command line option\n");

	return ret;
}

/* sysfs hook: report the system-wide Spectre-v2 state to userspace. */
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

/*
 * Hardware view of this CPU: unaffected if ID_AA64PFR0_EL1.CSV2 is set or if
 * the MIDR matches a known-safe implementation; vulnerable otherwise.
 */
static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}

/*
 * Ask firmware (SMCCC ARCH_FEATURES query) whether ARCH_WORKAROUND_1 is
 * implemented for the calling CPU.
 */
static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

/*
 * Local-CPU capability check: the CPU is considered affected unless either
 * the hardware probe or the firmware probe reports it unaffected.
 */
bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	return true;
}

enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

/*
 * Record the branch-predictor hardening callback for this CPU and, when EL2
 * is available, select the direct Spectre hyp-vector slot.
 */
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available())
		return;

	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
}

/* Invoke firmware's ARCH_WORKAROUND_1 via the SMC conduit. */
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/* As above, but via the HVC conduit. */
static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/*
 * Execute 16 branch-and-link instructions to the following instruction,
 * preserving the real link register in a temporary. This overwrites the
 * CPU's return-stack state with benign entries.
 */
static void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

/*
 * Return the CPU-specific software mitigation callback, or NULL when this
 * CPU needs none. Only QCOM Falkor (v1 and later) parts match today.
 */
static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
{
	u32 midr = read_cpuid_id();
	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
		return NULL;

	return qcom_link_stack_sanitisation;
}

/*
 * Enable the firmware-assisted mitigation on this CPU: pick the conduit
 * (HVC/SMC) callback, possibly overridden by a CPU-specific one, and install
 * it as the per-CPU branch-predictor hardening hook.
 */
static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}

void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	/* The hardware state (CSV2/safelist) takes precedence over firmware. */
	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}

/*
 * Spectre-v3a.
 *
 * Phew, there's not an awful lot to do here! We just instruct EL2 to use
 * an indirect trampoline for the hyp vectors so that guests can't read
 * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
 */
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
	static const struct midr_range spectre_v3a_unsafe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		{},
	};

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}

void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	/* Bump the slot into the indirect-trampoline range of hyp vectors. */
	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
		data->slot += HYP_VECTOR_INDIRECT;
}

/*
 * Spectre v4.
 *
 * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
 * either:
 *
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in hardware via PSTATE.SSBS.
 * - Mitigated in software by firmware (sometimes referred to as SSBD).
 *
 * Wait, that doesn't sound so bad, does it? Keep reading...
 *
 * A major source of headaches is that the software mitigation is enabled both
 * on a per-task basis, but can also be forced on for the kernel, necessitating
 * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
 * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
 * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
 * so you can have systems that have both firmware and SSBS mitigations. This
 * means we actually have to reject late onlining of CPUs with mitigations if
 * all of the currently onlined CPUs are safelisted, as the mitigation tends to
 * be opt-in for userspace. Yes, really, the cure is worse than the disease.
 *
 * The only good part is that if the firmware mitigation is present, then it is
 * present for all CPUs, meaning we don't have to worry about late onlining of a
 * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
 *
 * Give me a VAX-11/780 any day of the week...
 */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

/* Mapping from "ssbd=" command-line values to mitigation policies. */
static const struct spectre_v4_param {
	const char *str;
	enum spectre_v4_policy policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
static int __init parse_spectre_v4_param(char *str)
{
	int i;

	if (!str || !str[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
		const struct spectre_v4_param *param = &spectre_v4_params[i];

		/* Note: prefix match against the table entry. */
		if (strncmp(str, param->str, strlen(param->str)))
			continue;

		__spectre_v4_policy = param->policy;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);

/*
 * Because this was all written in a rush by people working in different silos,
 * we've ended up with multiple command line options to control the same thing.
 * Wrap these up in some helpers, which prefer disabling the mitigation if faced
 * with contradictory parameters. The mitigation is always either "off",
 * "dynamic" or "on".
 */
static bool spectre_v4_mitigations_off(void)
{
	bool ret = cpu_mitigations_off() ||
		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;

	if (ret)
		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");

	return ret;
}

/* Do we need to toggle the mitigation state on entry to/exit from the kernel?
 */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

static bool spectre_v4_mitigations_on(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}

/* sysfs hook: report the system-wide Spectre-v4 (SSB) state to userspace. */
ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}

/*
 * Hardware view of this CPU: safelisted MIDR => unaffected; SSBS capability
 * present => mitigated; vulnerable otherwise.
 */
static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}

/*
 * Ask firmware (SMCCC ARCH_FEATURES query) whether ARCH_WORKAROUND_2 is
 * implemented for the calling CPU.
 */
static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		fallthrough;
	case SMCCC_RET_NOT_REQUIRED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

/* Local-CPU capability check: affected unless HW or FW reports unaffected. */
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
	enum mitigation_state state;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_get_cpu_fw_mitigation_state();

	return state != SPECTRE_UNAFFECTED;
}

/*
 * Emulate an "MSR SSBS, #imm": set or clear PSR_SSBS_BIT in the saved pstate
 * according to the immediate bit, then skip the instruction. Traps taken from
 * user mode are left unhandled (return 1).
 */
static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	if (user_mode(regs))
		return 1;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	/* Step over the emulated 4-byte instruction. */
	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}

/* Match the MSR SSBS-immediate encoding, ignoring the immediate bit itself. */
static struct undef_hook ssbs_emulation_hook = {
	.instr_mask	= ~(1U << PSTATE_Imm_shift),
	.instr_val	= 0xd500401f | PSTATE_SSBS,
	.fn		= ssbs_emulation_handler,
};

static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	/* Register the SSBS emulation hook exactly once, system-wide. */
	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	if (spectre_v4_mitigations_off()) {
		/* Disable the mitigation: set DSSBS and PSTATE.SSBS. */
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		set_pstate_ssbs(1);
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	set_pstate_ssbs(0);
	return SPECTRE_MITIGATED;
}

/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fallthrough and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	if (spectre_v4_mitigations_off())
		return;

	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	/* Only the "dynamic" policy needs the firmware path at entry/exit. */
	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
					      __le32 *origptr,
					      __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		/* No usable conduit: leave the NOP in place. */
		return;
	}

	*updptr = cpu_to_le32(insn);
}

static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		/* Explicitly ask firmware to turn the workaround off. */
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	/* Dynamic policy: flag that entry/exit code must call firmware. */
	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}

void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	/* Prefer the SSBS-based hardware mitigation; fall back to firmware. */
	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}

/* Set or clear the (native or compat) SSBS bit in the saved pstate. */
static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	if (state)
		regs->pstate |= bit;
	else
		regs->pstate &= ~bit;
}

/*
 * Recompute the saved SSBS bit for @tsk from the global policy and the task's
 * TIF_SSBD flag. Kernel threads are excluded from the dynamic per-task path.
 */
void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}

/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * from userspace.
 */
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}

static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}

/* Handle PR_SPEC_STORE_BYPASS requests from prctl(PR_SET_SPECULATION_CTRL). */
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	/* Propagate the new policy into the task's saved SSBS bit. */
	spectre_v4_enable_task_mitigation(task);
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable.
*/ 744c2876207SWill Deacon fallthrough; 745c2876207SWill Deacon case SPECTRE_VULNERABLE: 746c2876207SWill Deacon fallthrough; 747c2876207SWill Deacon default: 748c2876207SWill Deacon return PR_SPEC_ENABLE; 749c2876207SWill Deacon } 750c2876207SWill Deacon 751c2876207SWill Deacon /* Check the mitigation state for this task */ 7529e78b659SWill Deacon if (task_spec_ssb_force_disable(task)) 7539e78b659SWill Deacon return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; 754c2876207SWill Deacon 755780c083aSWill Deacon if (task_spec_ssb_noexec(task)) 756780c083aSWill Deacon return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC; 757780c083aSWill Deacon 7589e78b659SWill Deacon if (task_spec_ssb_disable(task)) 7599e78b659SWill Deacon return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 760c2876207SWill Deacon 7619e78b659SWill Deacon return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 7629e78b659SWill Deacon } 7639e78b659SWill Deacon 7649e78b659SWill Deacon int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) 7659e78b659SWill Deacon { 7669e78b659SWill Deacon switch (which) { 7679e78b659SWill Deacon case PR_SPEC_STORE_BYPASS: 7689e78b659SWill Deacon return ssbd_prctl_get(task); 7699e78b659SWill Deacon default: 7709e78b659SWill Deacon return -ENODEV; 7719e78b659SWill Deacon } 7729e78b659SWill Deacon } 773