1455697adSWill Deacon // SPDX-License-Identifier: GPL-2.0-only 2455697adSWill Deacon /* 3455697adSWill Deacon * Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as 4455697adSWill Deacon * detailed at: 5455697adSWill Deacon * 6455697adSWill Deacon * https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability 7455697adSWill Deacon * 8455697adSWill Deacon * This code was originally written hastily under an awful lot of stress and so 9455697adSWill Deacon * aspects of it are somewhat hacky. Unfortunately, changing anything in here 10455697adSWill Deacon * instantly makes me feel ill. Thanks, Jann. Thann. 11455697adSWill Deacon * 12455697adSWill Deacon * Copyright (C) 2018 ARM Ltd, All Rights Reserved. 13455697adSWill Deacon * Copyright (C) 2020 Google LLC 14455697adSWill Deacon * 15455697adSWill Deacon * "If there's something strange in your neighbourhood, who you gonna call?" 16455697adSWill Deacon * 17455697adSWill Deacon * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org> 18455697adSWill Deacon */ 19455697adSWill Deacon 20d4647f0aSWill Deacon #include <linux/arm-smccc.h> 21d4647f0aSWill Deacon #include <linux/cpu.h> 22455697adSWill Deacon #include <linux/device.h> 239e78b659SWill Deacon #include <linux/nospec.h> 24d4647f0aSWill Deacon #include <linux/prctl.h> 255c8b0cbdSWill Deacon #include <linux/sched/task_stack.h> 26d4647f0aSWill Deacon 27d4647f0aSWill Deacon #include <asm/spectre.h> 28d4647f0aSWill Deacon #include <asm/traps.h> 29d4647f0aSWill Deacon 30d4647f0aSWill Deacon /* 31d4647f0aSWill Deacon * We try to ensure that the mitigation state can never change as the result of 32d4647f0aSWill Deacon * onlining a late CPU. 
33d4647f0aSWill Deacon */ 34d4647f0aSWill Deacon static void update_mitigation_state(enum mitigation_state *oldp, 35d4647f0aSWill Deacon enum mitigation_state new) 36d4647f0aSWill Deacon { 37d4647f0aSWill Deacon enum mitigation_state state; 38d4647f0aSWill Deacon 39d4647f0aSWill Deacon do { 40d4647f0aSWill Deacon state = READ_ONCE(*oldp); 41d4647f0aSWill Deacon if (new <= state) 42d4647f0aSWill Deacon break; 43d4647f0aSWill Deacon 44d4647f0aSWill Deacon /* Userspace almost certainly can't deal with this. */ 45d4647f0aSWill Deacon if (WARN_ON(system_capabilities_finalized())) 46d4647f0aSWill Deacon break; 47d4647f0aSWill Deacon } while (cmpxchg_relaxed(oldp, state, new) != state); 48d4647f0aSWill Deacon } 49455697adSWill Deacon 50455697adSWill Deacon /* 51455697adSWill Deacon * Spectre v1. 52455697adSWill Deacon * 53455697adSWill Deacon * The kernel can't protect userspace for this one: it's each person for 54455697adSWill Deacon * themselves. Advertise what we're doing and be done with it. 55455697adSWill Deacon */ 56455697adSWill Deacon ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, 57455697adSWill Deacon char *buf) 58455697adSWill Deacon { 59455697adSWill Deacon return sprintf(buf, "Mitigation: __user pointer sanitization\n"); 60455697adSWill Deacon } 61d4647f0aSWill Deacon 62d4647f0aSWill Deacon /* 63d4647f0aSWill Deacon * Spectre v2. 64d4647f0aSWill Deacon * 65d4647f0aSWill Deacon * This one sucks. A CPU is either: 66d4647f0aSWill Deacon * 67d4647f0aSWill Deacon * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2. 68d4647f0aSWill Deacon * - Mitigated in hardware and listed in our "safe list". 69d4647f0aSWill Deacon * - Mitigated in software by firmware. 70ea8f8c99SWill Deacon * - Mitigated in software by a CPU-specific dance in the kernel and a 71ea8f8c99SWill Deacon * firmware call at EL2. 72d4647f0aSWill Deacon * - Vulnerable. 
 *
 * It's not unlikely for different CPUs in a big.LITTLE system to fall into
 * different camps.
 */
/* System-wide Spectre-v2 state, reported via sysfs and arm64_get_spectre_v2_state(). */
static enum mitigation_state spectre_v2_state;

/* Set by "nospectre_v2" on the command line; the option takes no argument. */
static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);

/* True if the user disabled the mitigation ("nospectre_v2" or cpu_mitigations_off()). */
static bool spectre_v2_mitigations_off(void)
{
	bool ret = __nospectre_v2 || cpu_mitigations_off();

	if (ret)
		pr_info_once("spectre-v2 mitigation disabled by command line option\n");

	return ret;
}

/* sysfs show function: report the system-wide Spectre-v2 status. */
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

/*
 * Is this CPU immune in hardware? Either ID_AA64PFR0_EL1.CSV2 is set, or the
 * MIDR matches a known-safe part. Runs on the local CPU (read_cpuid()).
 */
static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}

/* SMCCC_ARCH_FEATURES return value meaning "works, but this CPU doesn't need it". */
#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED (1)

/*
 * Ask firmware (via SMCCC_ARCH_FEATURES) whether it implements
 * ARM_SMCCC_ARCH_WORKAROUND_1 for this CPU.
 */
static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

/*
 * cpucaps matcher: true if the local CPU needs a Spectre-v2 workaround.
 * Must be called per-CPU with preemption disabled (enforced by the WARN_ON).
 */
bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	return true;
}

/* Per-cpu branch-predictor hardening callback and EL2 vector slot. */
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}

#ifdef CONFIG_KVM
#include <asm/cacheflush.h>
#include <asm/kvm_asm.h>

/* Highest EL2 vector slot handed out so far; -1 means none allocated yet. */
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

/*
 * Copy the hardening sequence into each 0x80-byte vector entry of the 2K
 * hyp-vector slot, then make the new instructions visible to the I-side.
 */
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

/*
 * Install @fn as this CPU's hardening callback and, when running at EL2,
 * point it at a hyp vector slot containing the SMC sequence. Slots are
 * shared between CPUs using the same callback, under bp_lock.
 */
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;
	const char *hyp_vecs_start = __smccc_workaround_1_smc;
	const char *hyp_vecs_end = __smccc_workaround_1_smc +
				   __SMCCC_WORKAROUND_1_SMC_SZ;

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available()) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	/* Reuse an existing slot if another CPU already installed this callback. */
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
#else
/* !CONFIG_KVM: no hyp vectors to manage, just record the callback. */
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM */

/* Invalidate the branch predictor via the firmware SMC call. */
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/* As above, but using the HVC conduit when we're a guest. */
static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/*
 * Stuff a benign branch into each of the 16 link-stack entries (bl to the
 * next instruction), preserving the real link register in a temporary.
 */
static void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov %0, x30 \n"
		     ".rept 16 \n"
		     "bl . + 4 \n"
		     ".endr \n"
		     "mov x30, %0 \n"
		     : "=&r" (tmp));
}

/* Falkor parts need the link-stack dance on top of the firmware call. */
static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
{
	u32 midr = read_cpuid_id();
	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
		return NULL;

	return qcom_link_stack_sanitisation;
}

/*
 * Enable the firmware-based mitigation on this CPU: pick the SMCCC conduit,
 * prefer a CPU-specific software callback if one exists, and install it.
 */
static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}

/*
 * cpucaps enable hook: work out this CPU's state and fold it into the
 * system-wide state. Runs with preemption disabled.
 */
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}

/*
 * Spectre v4.
 *
 * If you thought Spectre v2 was nasty, wait until you see this mess.
A CPU is 324c2876207SWill Deacon * either: 325c2876207SWill Deacon * 326c2876207SWill Deacon * - Mitigated in hardware and listed in our "safe list". 327c2876207SWill Deacon * - Mitigated in hardware via PSTATE.SSBS. 328c2876207SWill Deacon * - Mitigated in software by firmware (sometimes referred to as SSBD). 329c2876207SWill Deacon * 330c2876207SWill Deacon * Wait, that doesn't sound so bad, does it? Keep reading... 331c2876207SWill Deacon * 332c2876207SWill Deacon * A major source of headaches is that the software mitigation is enabled both 333c2876207SWill Deacon * on a per-task basis, but can also be forced on for the kernel, necessitating 334c2876207SWill Deacon * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs 335c2876207SWill Deacon * allow EL0 to toggle SSBS directly, which can end up with the prctl() state 336c2876207SWill Deacon * being stale when re-entering the kernel. The usual big.LITTLE caveats apply, 337c2876207SWill Deacon * so you can have systems that have both firmware and SSBS mitigations. This 338c2876207SWill Deacon * means we actually have to reject late onlining of CPUs with mitigations if 339c2876207SWill Deacon * all of the currently onlined CPUs are safelisted, as the mitigation tends to 340c2876207SWill Deacon * be opt-in for userspace. Yes, really, the cure is worse than the disease. 341c2876207SWill Deacon * 342c2876207SWill Deacon * The only good part is that if the firmware mitigation is present, then it is 343c2876207SWill Deacon * present for all CPUs, meaning we don't have to worry about late onlining of a 344c2876207SWill Deacon * vulnerable CPU if one of the boot CPUs is using the firmware mitigation. 345c2876207SWill Deacon * 346c2876207SWill Deacon * Give me a VAX-11/780 any day of the week... 
347c2876207SWill Deacon */ 348c2876207SWill Deacon static enum mitigation_state spectre_v4_state; 3499e78b659SWill Deacon 350c2876207SWill Deacon /* This is the per-cpu state tracking whether we need to talk to firmware */ 351c2876207SWill Deacon DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); 352c2876207SWill Deacon 353c2876207SWill Deacon enum spectre_v4_policy { 354c2876207SWill Deacon SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, 355c2876207SWill Deacon SPECTRE_V4_POLICY_MITIGATION_ENABLED, 356c2876207SWill Deacon SPECTRE_V4_POLICY_MITIGATION_DISABLED, 357c2876207SWill Deacon }; 358c2876207SWill Deacon 359c2876207SWill Deacon static enum spectre_v4_policy __read_mostly __spectre_v4_policy; 360c2876207SWill Deacon 361c2876207SWill Deacon static const struct spectre_v4_param { 362c2876207SWill Deacon const char *str; 363c2876207SWill Deacon enum spectre_v4_policy policy; 364c2876207SWill Deacon } spectre_v4_params[] = { 365c2876207SWill Deacon { "force-on", SPECTRE_V4_POLICY_MITIGATION_ENABLED, }, 366c2876207SWill Deacon { "force-off", SPECTRE_V4_POLICY_MITIGATION_DISABLED, }, 367c2876207SWill Deacon { "kernel", SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, }, 368c2876207SWill Deacon }; 369c2876207SWill Deacon static int __init parse_spectre_v4_param(char *str) 370c2876207SWill Deacon { 371c2876207SWill Deacon int i; 372c2876207SWill Deacon 373c2876207SWill Deacon if (!str || !str[0]) 374c2876207SWill Deacon return -EINVAL; 375c2876207SWill Deacon 376c2876207SWill Deacon for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) { 377c2876207SWill Deacon const struct spectre_v4_param *param = &spectre_v4_params[i]; 378c2876207SWill Deacon 379c2876207SWill Deacon if (strncmp(str, param->str, strlen(param->str))) 380c2876207SWill Deacon continue; 381c2876207SWill Deacon 382c2876207SWill Deacon __spectre_v4_policy = param->policy; 383c2876207SWill Deacon return 0; 3849e78b659SWill Deacon } 3859e78b659SWill Deacon 386c2876207SWill Deacon return -EINVAL; 387c2876207SWill 
Deacon } 388c2876207SWill Deacon early_param("ssbd", parse_spectre_v4_param); 3899e78b659SWill Deacon 390c2876207SWill Deacon /* 391c2876207SWill Deacon * Because this was all written in a rush by people working in different silos, 392c2876207SWill Deacon * we've ended up with multiple command line options to control the same thing. 393c2876207SWill Deacon * Wrap these up in some helpers, which prefer disabling the mitigation if faced 394c2876207SWill Deacon * with contradictory parameters. The mitigation is always either "off", 395c2876207SWill Deacon * "dynamic" or "on". 396c2876207SWill Deacon */ 397c2876207SWill Deacon static bool spectre_v4_mitigations_off(void) 398c2876207SWill Deacon { 399c2876207SWill Deacon bool ret = cpu_mitigations_off() || 400c2876207SWill Deacon __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED; 401c2876207SWill Deacon 402c2876207SWill Deacon if (ret) 403c2876207SWill Deacon pr_info_once("spectre-v4 mitigation disabled by command-line option\n"); 404c2876207SWill Deacon 405c2876207SWill Deacon return ret; 406c2876207SWill Deacon } 407c2876207SWill Deacon 408c2876207SWill Deacon /* Do we need to toggle the mitigation state on entry to/exit from the kernel? 
 */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

/* Is the mitigation forced on for everything, including the kernel? */
static bool spectre_v4_mitigations_on(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}

/* sysfs show function: report the system-wide Spectre-v4 (SSB) status. */
ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}

/*
 * Local CPU's hardware state: unaffected if on the MIDR safe list,
 * mitigated if it implements SSBS, otherwise vulnerable.
 */
static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}

/*
 * Ask firmware (via SMCCC_ARCH_FEATURES) whether it implements
 * ARM_SMCCC_ARCH_WORKAROUND_2 (SSBD) for this CPU.
 */
static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		fallthrough;
	case SMCCC_RET_NOT_REQUIRED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

/*
 * cpucaps matcher: true unless the local CPU is known-unaffected (in either
 * hardware or firmware terms). Runs per-CPU with preemption disabled.
 */
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
	enum mitigation_state state;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_get_cpu_fw_mitigation_state();

	return state != SPECTRE_UNAFFECTED;
}

/*
 * Emulate a kernel-mode "MSR SSBS, #imm" that trapped as UNDEF: transcribe
 * the immediate into the saved PSTATE.SSBS bit and skip the instruction.
 * Returns 1 to reject EL0 traps (not our job to emulate those here).
 */
static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	if (user_mode(regs))
		return 1;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}

/* Match "MSR SSBS, #imm" with any immediate (the Imm field is masked out). */
static struct undef_hook ssbs_emulation_hook = {
	.instr_mask = ~(1U << PSTATE_Imm_shift),
	.instr_val = 0xd500401f | PSTATE_SSBS,
	.fn = ssbs_emulation_handler,
};

/*
 * Configure the local CPU's PSTATE.SSBS-based mitigation and register the
 * UNDEF hook (once, under hook_lock) for CPUs that trap the MSR form.
 */
static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	if (spectre_v4_mitigations_off()) {
		/* Mitigation disabled: leave speculation enabled everywhere. */
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		asm volatile(SET_PSTATE_SSBS(1));
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	asm volatile(SET_PSTATE_SSBS(0));
	return SPECTRE_MITIGATED;
}

/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fallthrough and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	/* Mitigation off: keep the branch, skipping the firmware call. */
	if (spectre_v4_mitigations_off())
		return;

	/* All CPUs have SSBS: no firmware call needed on entry/exit. */
	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
						   __le32 *origptr,
						   __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		/* No conduit: leave the NOP in place. */
		return;
	}

	*updptr = cpu_to_le32(insn);
}

/*
 * Enable (or explicitly disable) the firmware SSBD mitigation on this CPU
 * and, for the dynamic policy, flag that entry/exit must call firmware.
 */
static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}

/*
 * cpucaps enable hook: prefer the SSBS-based hardware mitigation, fall back
 * to firmware, then fold the result into the system-wide state.
 */
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}

/* Set or clear the saved SSBS bit, using the AArch32 bit for compat tasks. */
static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	if (state)
		regs->pstate |= bit;
	else
		regs->pstate &= ~bit;
}

/*
 * Recompute @tsk's PSTATE.SSBS from the global policy and its TIF_SSBD flag.
 * Kthreads under the dynamic policy keep SSBS clear (speculation disabled).
 */
void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}

/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * from userspace.
6609e78b659SWill Deacon */ 661780c083aSWill Deacon static void ssbd_prctl_enable_mitigation(struct task_struct *task) 662780c083aSWill Deacon { 663780c083aSWill Deacon task_clear_spec_ssb_noexec(task); 664780c083aSWill Deacon task_set_spec_ssb_disable(task); 665780c083aSWill Deacon set_tsk_thread_flag(task, TIF_SSBD); 666780c083aSWill Deacon } 667780c083aSWill Deacon 668780c083aSWill Deacon static void ssbd_prctl_disable_mitigation(struct task_struct *task) 669780c083aSWill Deacon { 670780c083aSWill Deacon task_clear_spec_ssb_noexec(task); 671780c083aSWill Deacon task_clear_spec_ssb_disable(task); 672780c083aSWill Deacon clear_tsk_thread_flag(task, TIF_SSBD); 673780c083aSWill Deacon } 674780c083aSWill Deacon 6759e78b659SWill Deacon static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl) 6769e78b659SWill Deacon { 6779e78b659SWill Deacon switch (ctrl) { 6789e78b659SWill Deacon case PR_SPEC_ENABLE: 679c2876207SWill Deacon /* Enable speculation: disable mitigation */ 680c2876207SWill Deacon /* 681c2876207SWill Deacon * Force disabled speculation prevents it from being 682c2876207SWill Deacon * re-enabled. 683c2876207SWill Deacon */ 684c2876207SWill Deacon if (task_spec_ssb_force_disable(task)) 6859e78b659SWill Deacon return -EPERM; 6869e78b659SWill Deacon 6879e78b659SWill Deacon /* 688c2876207SWill Deacon * If the mitigation is forced on, then speculation is forced 689c2876207SWill Deacon * off and we again prevent it from being re-enabled. 
6909e78b659SWill Deacon */ 691c2876207SWill Deacon if (spectre_v4_mitigations_on()) 6929e78b659SWill Deacon return -EPERM; 693c2876207SWill Deacon 694780c083aSWill Deacon ssbd_prctl_disable_mitigation(task); 6959e78b659SWill Deacon break; 6969e78b659SWill Deacon case PR_SPEC_FORCE_DISABLE: 697c2876207SWill Deacon /* Force disable speculation: force enable mitigation */ 698c2876207SWill Deacon /* 699c2876207SWill Deacon * If the mitigation is forced off, then speculation is forced 700c2876207SWill Deacon * on and we prevent it from being disabled. 701c2876207SWill Deacon */ 702c2876207SWill Deacon if (spectre_v4_mitigations_off()) 7039e78b659SWill Deacon return -EPERM; 704c2876207SWill Deacon 7059e78b659SWill Deacon task_set_spec_ssb_force_disable(task); 706c2876207SWill Deacon fallthrough; 707c2876207SWill Deacon case PR_SPEC_DISABLE: 708c2876207SWill Deacon /* Disable speculation: enable mitigation */ 709c2876207SWill Deacon /* Same as PR_SPEC_FORCE_DISABLE */ 710c2876207SWill Deacon if (spectre_v4_mitigations_off()) 711c2876207SWill Deacon return -EPERM; 712c2876207SWill Deacon 713780c083aSWill Deacon ssbd_prctl_enable_mitigation(task); 714780c083aSWill Deacon break; 715780c083aSWill Deacon case PR_SPEC_DISABLE_NOEXEC: 716780c083aSWill Deacon /* Disable speculation until execve(): enable mitigation */ 717780c083aSWill Deacon /* 718780c083aSWill Deacon * If the mitigation state is forced one way or the other, then 719780c083aSWill Deacon * we must fail now before we try to toggle it on execve(). 
720780c083aSWill Deacon */ 721780c083aSWill Deacon if (task_spec_ssb_force_disable(task) || 722780c083aSWill Deacon spectre_v4_mitigations_off() || 723780c083aSWill Deacon spectre_v4_mitigations_on()) { 724780c083aSWill Deacon return -EPERM; 725780c083aSWill Deacon } 726780c083aSWill Deacon 727780c083aSWill Deacon ssbd_prctl_enable_mitigation(task); 728780c083aSWill Deacon task_set_spec_ssb_noexec(task); 7299e78b659SWill Deacon break; 7309e78b659SWill Deacon default: 7319e78b659SWill Deacon return -ERANGE; 7329e78b659SWill Deacon } 7339e78b659SWill Deacon 734c2876207SWill Deacon spectre_v4_enable_task_mitigation(task); 7359e78b659SWill Deacon return 0; 7369e78b659SWill Deacon } 7379e78b659SWill Deacon 7389e78b659SWill Deacon int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, 7399e78b659SWill Deacon unsigned long ctrl) 7409e78b659SWill Deacon { 7419e78b659SWill Deacon switch (which) { 7429e78b659SWill Deacon case PR_SPEC_STORE_BYPASS: 7439e78b659SWill Deacon return ssbd_prctl_set(task, ctrl); 7449e78b659SWill Deacon default: 7459e78b659SWill Deacon return -ENODEV; 7469e78b659SWill Deacon } 7479e78b659SWill Deacon } 7489e78b659SWill Deacon 7499e78b659SWill Deacon static int ssbd_prctl_get(struct task_struct *task) 7509e78b659SWill Deacon { 751c2876207SWill Deacon switch (spectre_v4_state) { 752c2876207SWill Deacon case SPECTRE_UNAFFECTED: 753c2876207SWill Deacon return PR_SPEC_NOT_AFFECTED; 754c2876207SWill Deacon case SPECTRE_MITIGATED: 755c2876207SWill Deacon if (spectre_v4_mitigations_on()) 756c2876207SWill Deacon return PR_SPEC_NOT_AFFECTED; 757c2876207SWill Deacon 758c2876207SWill Deacon if (spectre_v4_mitigations_dynamic()) 759c2876207SWill Deacon break; 760c2876207SWill Deacon 761c2876207SWill Deacon /* Mitigations are disabled, so we're vulnerable. 
*/ 762c2876207SWill Deacon fallthrough; 763c2876207SWill Deacon case SPECTRE_VULNERABLE: 764c2876207SWill Deacon fallthrough; 765c2876207SWill Deacon default: 766c2876207SWill Deacon return PR_SPEC_ENABLE; 767c2876207SWill Deacon } 768c2876207SWill Deacon 769c2876207SWill Deacon /* Check the mitigation state for this task */ 7709e78b659SWill Deacon if (task_spec_ssb_force_disable(task)) 7719e78b659SWill Deacon return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; 772c2876207SWill Deacon 773780c083aSWill Deacon if (task_spec_ssb_noexec(task)) 774780c083aSWill Deacon return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC; 775780c083aSWill Deacon 7769e78b659SWill Deacon if (task_spec_ssb_disable(task)) 7779e78b659SWill Deacon return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 778c2876207SWill Deacon 7799e78b659SWill Deacon return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 7809e78b659SWill Deacon } 7819e78b659SWill Deacon 7829e78b659SWill Deacon int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) 7839e78b659SWill Deacon { 7849e78b659SWill Deacon switch (which) { 7859e78b659SWill Deacon case PR_SPEC_STORE_BYPASS: 7869e78b659SWill Deacon return ssbd_prctl_get(task); 7879e78b659SWill Deacon default: 7889e78b659SWill Deacon return -ENODEV; 7899e78b659SWill Deacon } 7909e78b659SWill Deacon } 791